Dataset schema (one row per source file; ranges are the observed minimum and maximum values, nullable columns are marked):

| Column | Type | Range |
|---|---|---|
| hexsha | stringlengths | 40 to 40 |
| size | int64 | 4 to 996k |
| ext | stringclasses | 8 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 4 to 245 |
| max_stars_repo_name | stringlengths | 6 to 130 |
| max_stars_repo_head_hexsha | stringlengths | 40 to 40 |
| max_stars_repo_licenses | listlengths | 1 to 10 |
| max_stars_count | int64 (nullable) | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | stringlengths (nullable) | 24 to 24 |
| max_stars_repo_stars_event_max_datetime | stringlengths (nullable) | 24 to 24 |
| max_issues_repo_path | stringlengths | 4 to 245 |
| max_issues_repo_name | stringlengths | 6 to 130 |
| max_issues_repo_head_hexsha | stringlengths | 40 to 40 |
| max_issues_repo_licenses | listlengths | 1 to 10 |
| max_issues_count | int64 (nullable) | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | stringlengths (nullable) | 24 to 24 |
| max_issues_repo_issues_event_max_datetime | stringlengths (nullable) | 24 to 24 |
| max_forks_repo_path | stringlengths | 4 to 245 |
| max_forks_repo_name | stringlengths | 6 to 130 |
| max_forks_repo_head_hexsha | stringlengths | 40 to 40 |
| max_forks_repo_licenses | listlengths | 1 to 10 |
| max_forks_count | int64 (nullable) | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | stringlengths (nullable) | 24 to 24 |
| max_forks_repo_forks_event_max_datetime | stringlengths (nullable) | 24 to 24 |
| content | stringlengths | 4 to 996k |
| avg_line_length | float64 | 1.33 to 58.2k |
| max_line_length | int64 | 2 to 323k |
| alphanum_fraction | float64 | 0 to 0.97 |
| content_no_comment | stringlengths | 0 to 946k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |
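As a quick orientation, the sketch below shows one way rows following this schema could be loaded and filtered with the Hugging Face `datasets` library. It is a minimal sketch and not part of the dataset itself: the file name `data.jsonl` is a hypothetical stand-in for wherever the rows are stored.

```python
# Minimal sketch: read rows matching the schema above from a local JSON Lines dump.
# "data.jsonl" is a hypothetical path; any source with these columns works the same way.
from datasets import load_dataset

ds = load_dataset("json", data_files="data.jsonl", split="train")

# Keep Python files that have at least one recorded star event (max_stars_count is nullable).
starred = ds.filter(
    lambda row: row["lang"] == "Python" and row["max_stars_count"] is not None
)

for row in starred.select(range(min(3, len(starred)))):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["max_stars_count"])
```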
hexsha: 790733768ca253dfe6f8a855aeaf2d0d95c3593b | size: 4,057 | ext: py | lang: Python
max_stars_repo_path: tests/test_bibtexparser.py | max_stars_repo_name: goerz/bibdeskparser | max_stars_repo_head_hexsha: 4f60f9960f6f0156c2f3c89033065c4e121800ab | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: tests/test_bibtexparser.py | max_issues_repo_name: goerz/bibdeskparser | max_issues_repo_head_hexsha: 4f60f9960f6f0156c2f3c89033065c4e121800ab | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tests/test_bibtexparser.py | max_forks_repo_name: goerz/bibdeskparser | max_forks_repo_head_hexsha: 4f60f9960f6f0156c2f3c89033065c4e121800ab | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
import unittest
import bibdeskparser
from bibdeskparser.bparser import BibTexParser
from tempfile import TemporaryFile
class TestbibdeskparserParserMethods(unittest.TestCase):
input_file_path = 'tests/data/book.bib'
input_bom_file_path = 'tests/data/book_bom.bib'
entries_expected = [
{
'ENTRYTYPE': 'book',
'year': '1987',
'edition': '2',
'publisher': 'Wiley Edition',
'ID': 'Bird1987',
'volume': '1',
'title': 'Dynamics of Polymeric Liquid',
'author': 'Bird, R.B. and Armstrong, R.C. and Hassager, O.',
}
]
def test_parse_immediately(self):
with open(self.input_file_path) as bibtex_file:
bibtex_str = bibtex_file.read()
bibtex_database = BibTexParser(bibtex_str)
self.assertEqual(bibtex_database.entries, self.entries_expected)
def test_parse_str(self):
parser = BibTexParser()
with open(self.input_file_path) as bibtex_file:
bibtex_str = bibtex_file.read()
bibtex_database = parser.parse(bibtex_str)
self.assertEqual(bibtex_database.entries, self.entries_expected)
def test_parse_bom_str(self):
parser = BibTexParser()
with open(self.input_bom_file_path) as bibtex_file:
bibtex_str = bibtex_file.read()
bibtex_database = parser.parse(bibtex_str)
self.assertEqual(bibtex_database.entries, self.entries_expected)
def test_parse_bom_bytes(self):
parser = BibTexParser()
with open(self.input_bom_file_path, 'rb') as bibtex_file:
bibtex_str = bibtex_file.read()
bibtex_database = parser.parse(bibtex_str)
self.assertEqual(bibtex_database.entries, self.entries_expected)
def test_parse_file(self):
parser = BibTexParser()
with open(self.input_file_path) as bibtex_file:
bibtex_database = parser.parse_file(bibtex_file)
self.assertEqual(bibtex_database.entries, self.entries_expected)
def test_parse_str_module(self):
with open(self.input_file_path) as bibtex_file:
bibtex_str = bibtex_file.read()
bibtex_database = bibdeskparser.loads(bibtex_str)
self.assertEqual(bibtex_database.entries, self.entries_expected)
def test_parse_file_module(self):
with open(self.input_file_path) as bibtex_file:
bibtex_database = bibdeskparser.load(bibtex_file)
self.assertEqual(bibtex_database.entries, self.entries_expected)
class TestBibtexpardserWriteMethods(unittest.TestCase):
input_file_path = 'tests/data/book.bib'
expected = """@book{Bird1987,
author = {Bird, R.B. and Armstrong, R.C. and Hassager, O.},
edition = {2},
publisher = {Wiley Edition},
title = {Dynamics of Polymeric Liquid},
volume = {1},
year = {1987}
}
"""
def test_write_str(self):
with open(self.input_file_path) as bibtex_file:
bibtex_database = bibdeskparser.load(bibtex_file)
result = bibdeskparser.dumps(bibtex_database)
self.assertEqual(result, self.expected)
def test_write_file(self):
with open(self.input_file_path) as bibtex_file:
bibtex_database = bibdeskparser.load(bibtex_file)
with TemporaryFile(mode='w+') as bibtex_out_file:
bibdeskparser.dump(bibtex_database, bibtex_out_file)
bibtex_out_file.seek(0)
bibtex_out_str = bibtex_out_file.read()
self.assertEqual(bibtex_out_str, self.expected)
class TestbibdeskparserFieldNames(unittest.TestCase):
input_file_path = 'tests/data/fieldname.bib'
entries_expected = [
{'ENTRYTYPE': 'book', 'ID': 'Bird1987', 'dc.date': '2004-01'}
]
def test_parse_immediately(self):
with open(self.input_file_path) as bibtex_file:
bibtex_str = bibtex_file.read()
bibtex_database = BibTexParser(bibtex_str)
self.assertEqual(bibtex_database.entries, self.entries_expected)
if __name__ == '__main__':
unittest.main()
avg_line_length: 35.587719 | max_line_length: 72 | alphanum_fraction: 0.676608
content_no_comment:
import unittest
import bibdeskparser
from bibdeskparser.bparser import BibTexParser
from tempfile import TemporaryFile
class TestbibdeskparserParserMethods(unittest.TestCase):
input_file_path = 'tests/data/book.bib'
input_bom_file_path = 'tests/data/book_bom.bib'
entries_expected = [
{
'ENTRYTYPE': 'book',
'year': '1987',
'edition': '2',
'publisher': 'Wiley Edition',
'ID': 'Bird1987',
'volume': '1',
'title': 'Dynamics of Polymeric Liquid',
'author': 'Bird, R.B. and Armstrong, R.C. and Hassager, O.',
}
]
def test_parse_immediately(self):
with open(self.input_file_path) as bibtex_file:
bibtex_str = bibtex_file.read()
bibtex_database = BibTexParser(bibtex_str)
self.assertEqual(bibtex_database.entries, self.entries_expected)
def test_parse_str(self):
parser = BibTexParser()
with open(self.input_file_path) as bibtex_file:
bibtex_str = bibtex_file.read()
bibtex_database = parser.parse(bibtex_str)
self.assertEqual(bibtex_database.entries, self.entries_expected)
def test_parse_bom_str(self):
parser = BibTexParser()
with open(self.input_bom_file_path) as bibtex_file:
bibtex_str = bibtex_file.read()
bibtex_database = parser.parse(bibtex_str)
self.assertEqual(bibtex_database.entries, self.entries_expected)
def test_parse_bom_bytes(self):
parser = BibTexParser()
with open(self.input_bom_file_path, 'rb') as bibtex_file:
bibtex_str = bibtex_file.read()
bibtex_database = parser.parse(bibtex_str)
self.assertEqual(bibtex_database.entries, self.entries_expected)
def test_parse_file(self):
parser = BibTexParser()
with open(self.input_file_path) as bibtex_file:
bibtex_database = parser.parse_file(bibtex_file)
self.assertEqual(bibtex_database.entries, self.entries_expected)
def test_parse_str_module(self):
with open(self.input_file_path) as bibtex_file:
bibtex_str = bibtex_file.read()
bibtex_database = bibdeskparser.loads(bibtex_str)
self.assertEqual(bibtex_database.entries, self.entries_expected)
def test_parse_file_module(self):
with open(self.input_file_path) as bibtex_file:
bibtex_database = bibdeskparser.load(bibtex_file)
self.assertEqual(bibtex_database.entries, self.entries_expected)
class TestBibtexpardserWriteMethods(unittest.TestCase):
input_file_path = 'tests/data/book.bib'
expected = """@book{Bird1987,
author = {Bird, R.B. and Armstrong, R.C. and Hassager, O.},
edition = {2},
publisher = {Wiley Edition},
title = {Dynamics of Polymeric Liquid},
volume = {1},
year = {1987}
}
"""
def test_write_str(self):
with open(self.input_file_path) as bibtex_file:
bibtex_database = bibdeskparser.load(bibtex_file)
result = bibdeskparser.dumps(bibtex_database)
self.assertEqual(result, self.expected)
def test_write_file(self):
with open(self.input_file_path) as bibtex_file:
bibtex_database = bibdeskparser.load(bibtex_file)
with TemporaryFile(mode='w+') as bibtex_out_file:
bibdeskparser.dump(bibtex_database, bibtex_out_file)
bibtex_out_file.seek(0)
bibtex_out_str = bibtex_out_file.read()
self.assertEqual(bibtex_out_str, self.expected)
class TestbibdeskparserFieldNames(unittest.TestCase):
input_file_path = 'tests/data/fieldname.bib'
entries_expected = [
{'ENTRYTYPE': 'book', 'ID': 'Bird1987', 'dc.date': '2004-01'}
]
def test_parse_immediately(self):
with open(self.input_file_path) as bibtex_file:
bibtex_str = bibtex_file.read()
bibtex_database = BibTexParser(bibtex_str)
self.assertEqual(bibtex_database.entries, self.entries_expected)
if __name__ == '__main__':
unittest.main()
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790733cbfdab68b44e4cda0ec3e6b492d9cf4166 | size: 6,208 | ext: py | lang: Python
max_stars_repo_path: virtual/lib/python3.8/site-packages/sqlalchemy/dialects/mysql/dml.py | max_stars_repo_name: Lenus254/personal_blog | max_stars_repo_head_hexsha: aac38e4b5372c86efa8e24db2e051fef8e5feef8 | max_stars_repo_licenses: ["Unlicense"] | max_stars_count: 5 | max_stars_repo_stars_event_min_datetime: 2022-01-05T00:41:46.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-21T07:22:58.000Z
max_issues_repo_path: virtual/lib/python3.8/site-packages/sqlalchemy/dialects/mysql/dml.py | max_issues_repo_name: Lenus254/personal_blog | max_issues_repo_head_hexsha: aac38e4b5372c86efa8e24db2e051fef8e5feef8 | max_issues_repo_licenses: ["Unlicense"] | max_issues_count: 4 | max_issues_repo_issues_event_min_datetime: 2020-04-23T19:00:28.000Z | max_issues_repo_issues_event_max_datetime: 2021-09-28T18:14:58.000Z
max_forks_repo_path: virtual/lib/python3.8/site-packages/sqlalchemy/dialects/mysql/dml.py | max_forks_repo_name: Lenus254/personal_blog | max_forks_repo_head_hexsha: aac38e4b5372c86efa8e24db2e051fef8e5feef8 | max_forks_repo_licenses: ["Unlicense"] | max_forks_count: 2 | max_forks_repo_forks_event_min_datetime: 2022-03-20T17:35:44.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-21T18:30:31.000Z
content:
from ... import exc
from ... import util
from ...sql.base import _exclusive_against
from ...sql.base import _generative
from ...sql.base import ColumnCollection
from ...sql.dml import Insert as StandardInsert
from ...sql.elements import ClauseElement
from ...sql.expression import alias
from ...util.langhelpers import public_factory
__all__ = ("Insert", "insert")
class Insert(StandardInsert):
"""MySQL-specific implementation of INSERT.
Adds methods for MySQL-specific syntaxes such as ON DUPLICATE KEY UPDATE.
The :class:`~.mysql.Insert` object is created using the
:func:`sqlalchemy.dialects.mysql.insert` function.
.. versionadded:: 1.2
"""
stringify_dialect = "mysql"
inherit_cache = False
@property
def inserted(self):
"""Provide the "inserted" namespace for an ON DUPLICATE KEY UPDATE statement
MySQL's ON DUPLICATE KEY UPDATE clause allows reference to the row
that would be inserted, via a special function called ``VALUES()``.
This attribute provides all columns in this row to be referenceable
such that they will render within a ``VALUES()`` function inside the
ON DUPLICATE KEY UPDATE clause. The attribute is named ``.inserted``
so as not to conflict with the existing
:meth:`_expression.Insert.values` method.
.. tip:: The :attr:`_mysql.Insert.inserted` attribute is an instance
of :class:`_expression.ColumnCollection`, which provides an
interface the same as that of the :attr:`_schema.Table.c`
collection described at :ref:`metadata_tables_and_columns`.
With this collection, ordinary names are accessible like attributes
(e.g. ``stmt.inserted.some_column``), but special names and
dictionary method names should be accessed using indexed access,
such as ``stmt.inserted["column name"]`` or
``stmt.inserted["values"]``. See the docstring for
:class:`_expression.ColumnCollection` for further examples.
.. seealso::
:ref:`mysql_insert_on_duplicate_key_update` - example of how
to use :attr:`_expression.Insert.inserted`
"""
return self.inserted_alias.columns
@util.memoized_property
def inserted_alias(self):
return alias(self.table, name="inserted")
@_generative
@_exclusive_against(
"_post_values_clause",
msgs={
"_post_values_clause": "This Insert construct already "
"has an ON DUPLICATE KEY clause present"
},
)
def on_duplicate_key_update(self, *args, **kw):
r"""
Specifies the ON DUPLICATE KEY UPDATE clause.
:param \**kw: Column keys linked to UPDATE values. The
values may be any SQL expression or supported literal Python
values.
.. warning:: This dictionary does **not** take into account
Python-specified default UPDATE values or generation functions,
e.g. those specified using :paramref:`_schema.Column.onupdate`.
These values will not be exercised for an ON DUPLICATE KEY UPDATE
style of UPDATE, unless values are manually specified here.
:param \*args: As an alternative to passing key/value parameters,
a dictionary or list of 2-tuples can be passed as a single positional
argument.
Passing a single dictionary is equivalent to the keyword argument
form::
insert().on_duplicate_key_update({"name": "some name"})
Passing a list of 2-tuples indicates that the parameter assignments
in the UPDATE clause should be ordered as sent, in a manner similar
to that described for the :class:`_expression.Update`
construct overall
in :ref:`updates_order_parameters`::
insert().on_duplicate_key_update(
[("name", "some name"), ("value", "some value")])
.. versionchanged:: 1.3 parameters can be specified as a dictionary
or list of 2-tuples; the latter form provides for parameter
ordering.
.. versionadded:: 1.2
.. seealso::
:ref:`mysql_insert_on_duplicate_key_update`
"""
if args and kw:
raise exc.ArgumentError(
"Can't pass kwargs and positional arguments simultaneously"
)
if args:
if len(args) > 1:
raise exc.ArgumentError(
"Only a single dictionary or list of tuples "
"is accepted positionally."
)
values = args[0]
else:
values = kw
inserted_alias = getattr(self, "inserted_alias", None)
self._post_values_clause = OnDuplicateClause(inserted_alias, values)
insert = public_factory(
Insert, ".dialects.mysql.insert", ".dialects.mysql.Insert"
)
class OnDuplicateClause(ClauseElement):
__visit_name__ = "on_duplicate_key_update"
_parameter_ordering = None
stringify_dialect = "mysql"
def __init__(self, inserted_alias, update):
self.inserted_alias = inserted_alias
# auto-detect that parameters should be ordered. This is copied from
        # Update._process_colparams(), however we don't look for a special flag
# in this case since we are not disambiguating from other use cases as
# we are in Update.values().
if isinstance(update, list) and (
update and isinstance(update[0], tuple)
):
self._parameter_ordering = [key for key, value in update]
update = dict(update)
if isinstance(update, dict):
if not update:
raise ValueError(
"update parameter dictionary must not be empty"
)
elif isinstance(update, ColumnCollection):
update = dict(update)
else:
raise ValueError(
"update parameter must be a non-empty dictionary "
"or a ColumnCollection such as the `.c.` collection "
"of a Table object"
)
self.update = update
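# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original dml.py): a minimal,
# self-contained example of the ON DUPLICATE KEY UPDATE pattern described in
# the docstrings above. The "users" table and its columns are hypothetical.
# ---------------------------------------------------------------------------
def _example_on_duplicate_key_update():
    from sqlalchemy import Column, Integer, MetaData, String, Table
    from sqlalchemy.dialects.mysql import insert as mysql_insert

    users = Table(
        "users", MetaData(),
        Column("id", Integer, primary_key=True),
        Column("name", String(50)),
    )
    stmt = mysql_insert(users).values(id=1, name="some name")
    # stmt.inserted refers to the row that would have been inserted and renders
    # as VALUES(...) inside the ON DUPLICATE KEY UPDATE clause.
    stmt = stmt.on_duplicate_key_update(name=stmt.inserted.name)
    return str(stmt)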
avg_line_length: 35.474286 | max_line_length: 84 | alphanum_fraction: 0.632893
content_no_comment:
from ... import exc
from ... import util
from ...sql.base import _exclusive_against
from ...sql.base import _generative
from ...sql.base import ColumnCollection
from ...sql.dml import Insert as StandardInsert
from ...sql.elements import ClauseElement
from ...sql.expression import alias
from ...util.langhelpers import public_factory
__all__ = ("Insert", "insert")
class Insert(StandardInsert):
stringify_dialect = "mysql"
inherit_cache = False
@property
def inserted(self):
return self.inserted_alias.columns
@util.memoized_property
def inserted_alias(self):
return alias(self.table, name="inserted")
@_generative
@_exclusive_against(
"_post_values_clause",
msgs={
"_post_values_clause": "This Insert construct already "
"has an ON DUPLICATE KEY clause present"
},
)
def on_duplicate_key_update(self, *args, **kw):
if args and kw:
raise exc.ArgumentError(
"Can't pass kwargs and positional arguments simultaneously"
)
if args:
if len(args) > 1:
raise exc.ArgumentError(
"Only a single dictionary or list of tuples "
"is accepted positionally."
)
values = args[0]
else:
values = kw
inserted_alias = getattr(self, "inserted_alias", None)
self._post_values_clause = OnDuplicateClause(inserted_alias, values)
insert = public_factory(
Insert, ".dialects.mysql.insert", ".dialects.mysql.Insert"
)
class OnDuplicateClause(ClauseElement):
__visit_name__ = "on_duplicate_key_update"
_parameter_ordering = None
stringify_dialect = "mysql"
def __init__(self, inserted_alias, update):
self.inserted_alias = inserted_alias
# auto-detect that parameters should be ordered. This is copied from
        # Update._process_colparams(), however we don't look for a special flag
if isinstance(update, list) and (
update and isinstance(update[0], tuple)
):
self._parameter_ordering = [key for key, value in update]
update = dict(update)
if isinstance(update, dict):
if not update:
raise ValueError(
"update parameter dictionary must not be empty"
)
elif isinstance(update, ColumnCollection):
update = dict(update)
else:
raise ValueError(
"update parameter must be a non-empty dictionary "
"or a ColumnCollection such as the `.c.` collection "
"of a Table object"
)
self.update = update
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790735b5a74b245acba8722d896c0d07a2e04020 | size: 1,543 | ext: py | lang: Python
max_stars_repo_path: deepcell/applications/multiplex_segmentation.py | max_stars_repo_name: jizhouh/deepcell-tf | max_stars_repo_head_hexsha: 491ece59f5024d73429477ebdcb437a6e67d766b | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 250 | max_stars_repo_stars_event_min_datetime: 2018-09-19T23:55:06.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-30T02:20:52.000Z
max_issues_repo_path: deepcell/applications/multiplex_segmentation.py | max_issues_repo_name: jizhouh/deepcell-tf | max_issues_repo_head_hexsha: 491ece59f5024d73429477ebdcb437a6e67d766b | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 251 | max_issues_repo_issues_event_min_datetime: 2018-09-21T17:09:43.000Z | max_issues_repo_issues_event_max_datetime: 2022-02-28T19:04:50.000Z
max_forks_repo_path: deepcell/applications/multiplex_segmentation.py | max_forks_repo_name: jizhouh/deepcell-tf | max_forks_repo_head_hexsha: 491ece59f5024d73429477ebdcb437a6e67d766b | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 64 | max_forks_repo_forks_event_min_datetime: 2018-11-29T15:22:15.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-21T03:37:43.000Z
content:
# Copyright 2016-2021 The Van Valen Lab at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/deepcell-tf/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# vanvalenlab@gmail.com
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multiplex segmentation application.
Deprecated in favor of ``deepcell.applications.Mesmer`` instead.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from deepcell.applications.mesmer import Mesmer as MultiplexSegmentation
avg_line_length: 42.861111 | max_line_length: 80 | alphanum_fraction: 0.747894
content_no_comment:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from deepcell.applications.mesmer import Mesmer as MultiplexSegmentation
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 7907365d1d7dfdab7e99a00ac86d16b236ea1b0a | size: 16,564 | ext: py | lang: Python
max_stars_repo_path: tracpro/profiles/tests/test_views.py | max_stars_repo_name: rapidpro/tracpro | max_stars_repo_head_hexsha: a68a782a7ff9bb0ccee85368132d8847c280fea3 | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: 5 | max_stars_repo_stars_event_min_datetime: 2015-07-21T15:58:31.000Z | max_stars_repo_stars_event_max_datetime: 2019-09-14T22:34:00.000Z
max_issues_repo_path: tracpro/profiles/tests/test_views.py | max_issues_repo_name: rapidpro/tracpro | max_issues_repo_head_hexsha: a68a782a7ff9bb0ccee85368132d8847c280fea3 | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: 197 | max_issues_repo_issues_event_min_datetime: 2015-03-24T15:26:04.000Z | max_issues_repo_issues_event_max_datetime: 2017-11-28T19:24:37.000Z
max_forks_repo_path: tracpro/profiles/tests/test_views.py | max_forks_repo_name: rapidpro/tracpro | max_forks_repo_head_hexsha: a68a782a7ff9bb0ccee85368132d8847c280fea3 | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: 10 | max_forks_repo_forks_event_min_datetime: 2015-03-24T12:26:36.000Z | max_forks_repo_forks_event_max_datetime: 2017-02-21T13:08:57.000Z
content:
from __future__ import absolute_import, unicode_literals
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from tracpro.test.cases import TracProDataTest
class ManageUserCreateTest(TracProDataTest):
def test_create_as_non_superuser(self):
# Non-superuser cannot use this view
url = reverse('profiles.admin_create')
self.login(self.admin) # Not a superuser
# Post something that would be an error (empty form) and would be a 200
# status if we had access.
response = self.url_post('unicef', url, dict())
# We get redirected to login
self.assertEqual(response.status_code, 302, response)
self.assertIn('login', response['Location'])
def test_create_with_fields_missing(self):
# An error case
url = reverse('profiles.admin_create')
self.login(self.superuser)
# submit with no fields entered
response = self.url_post('unicef', url, dict())
self.assertEqual(response.status_code, 200, response)
error_dict = response.context['form'].errors
self.assertEqual(4, len(error_dict), repr(error_dict))
self.assertFormError(
response, 'form', 'full_name',
'This field is required.')
self.assertFormError(
response, 'form', 'email',
'This field is required.')
self.assertFormError(
response, 'form', 'password',
'This field is required.')
self.assertFormError(
response, 'form', '__all__',
'Email address already taken.' # FIXME: this error makes no sense in this context
)
def test_create_successfully(self):
# create non-superuser
url = reverse('profiles.admin_create')
self.login(self.superuser)
data = {
'full_name': "Mo Polls",
'email': "mo@trac.com",
'password': "abc123xy",
'confirm_password': "abc123xy",
'is_active': True,
'is_superuser': False,
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302, response)
user = User.objects.get(email='mo@trac.com')
self.assertEqual(user.profile.full_name, 'Mo Polls')
self.assertTrue(user.is_active)
self.assertFalse(user.is_superuser)
self.assertEqual(user, authenticate(username=user.username, password="abc123xy"))
def test_create_superuser(self):
# create superuser
url = reverse('profiles.admin_create')
self.login(self.superuser)
data = {
'full_name': "Mo Polls",
'email': "mo@trac.com",
'password': "abc123xy",
'confirm_password': "abc123xy",
'is_active': True,
'is_superuser': True,
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302, response)
user = User.objects.get(email='mo@trac.com')
self.assertEqual(user.profile.full_name, 'Mo Polls')
self.assertTrue(user.is_active)
self.assertTrue(user.is_superuser)
class ManageUserUpdateTest(TracProDataTest):
def test_update_as_non_superuser(self):
# Non-superuser cannot use this view
self.login(self.admin)
url = reverse('profiles.admin_update', args=[self.user1.pk])
response = self.url_post('unicef', url, dict())
self.assertEqual(response.status_code, 302)
self.assertIn('login', response['Location'])
def test_update(self):
# Change non-superuser to superuser, change their password, etc etc.
self.login(self.superuser)
url = reverse('profiles.admin_update', args=[self.user1.pk])
data = {
'full_name': "Mo Polls",
'email': "mo@trac.com",
'new_password': "abc123xy",
'confirm_password': "abc123xy",
'is_active': False,
'is_superuser': True,
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302)
user = User.objects.get(email='mo@trac.com')
self.assertEqual(user.profile.full_name, "Mo Polls")
self.assertFalse(user.is_active)
self.assertTrue(user.is_superuser)
self.assertEqual(user, authenticate(username=user.username, password="abc123xy"))
# and back. changing password optional.
data = {
'full_name': "Mo Polls",
'email': "mo@trac.com",
# 'password': "abc123xy",
# 'confirm_password': "abc123xy",
'is_active': True,
'is_superuser': False,
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302)
user = User.objects.get(email='mo@trac.com')
self.assertEqual(user.profile.full_name, "Mo Polls")
self.assertTrue(user.is_active)
self.assertFalse(user.is_superuser)
self.assertEqual(user, authenticate(username=user.username, password="abc123xy"))
class UserCRUDLTest(TracProDataTest):
def test_create(self):
url = reverse('profiles.user_create')
# log in as an org administrator
self.login(self.admin)
# submit with no fields entered
response = self.url_post('unicef', url, dict())
self.assertEqual(response.status_code, 200)
self.assertFormError(
response, 'form', 'full_name',
'This field is required.')
self.assertFormError(
response, 'form', 'email',
'This field is required.')
self.assertFormError(
response, 'form', 'password',
'This field is required.')
# submit again with all required fields but invalid password
data = {
'full_name': "Mo Polls",
'email': "mo@trac.com",
'password': "123",
'confirm_password': "123",
}
response = self.url_post('unicef', url, data)
self.assertFormError(
response, 'form', 'password',
"Ensure this value has at least 8 characters (it has 3).")
# submit again with valid password but mismatched confirmation
data = {
'full_name': "Mo Polls",
'email': "mo@trac.com",
'password': "Qwerty123",
'confirm_password': "123",
}
response = self.url_post('unicef', url, data)
self.assertFormError(
response, 'form', 'confirm_password',
"Passwords don't match.")
# submit again with valid password and confirmation
data = {
'full_name': "Mo Polls",
'email': "mo@trac.com",
'password': "Qwerty123",
'confirm_password': "Qwerty123",
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302)
# check new user and profile
user = User.objects.get(email="mo@trac.com")
self.assertEqual(user.profile.full_name, "Mo Polls")
self.assertEqual(user.email, "mo@trac.com")
self.assertEqual(user.username, "mo@trac.com")
# try again with same email address
data = {
'full_name': "Mo Polls II",
'email': "mo@trac.com",
'password': "Qwerty123",
'confirm_password': "Qwerty123",
}
response = self.url_post('unicef', url, data)
self.assertFormError(
response, 'form', None,
"Email address already taken.")
def test_update(self):
url = reverse('profiles.user_update', args=[self.user1.pk])
# log in as an org administrator
self.login(self.admin)
response = self.url_get('unicef', url)
self.assertEqual(response.status_code, 200)
# can assign to any org region
self.assertEqual(len(response.context['form'].fields['regions'].choices), 3)
# submit with no fields entered
response = self.url_post('unicef', url, dict())
self.assertEqual(response.status_code, 200)
self.assertFormError(
response, 'form', 'full_name',
'This field is required.')
self.assertFormError(
response, 'form', 'email',
'This field is required.')
# submit with all fields entered
data = {
'full_name': "Morris",
'email': "mo2@chat.com",
'regions': [self.region3.pk],
'is_active': True,
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302)
# check updated user and profile
user = User.objects.get(pk=self.user1.pk)
self.assertEqual(user.profile.full_name, "Morris")
self.assertEqual(user.email, "mo2@chat.com")
self.assertEqual(user.username, "mo2@chat.com")
self.assertEqual(list(user.regions.all()), [self.region3])
# submit again for good measure
data = {
'full_name': "Morris",
'email': "mo2@chat.com",
'regions': [self.region3.pk],
'is_active': True,
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302)
# try giving user someone else's email address
data = {
'full_name': "Morris",
'email': "eric@nyaruka.com",
'password': "Qwerty123",
'confirm_password': "Qwerty123",
}
response = self.url_post('unicef', url, data)
self.assertFormError(
response, 'form', None,
"Email address already taken.")
# check de-activating user
data = {
'full_name': "Morris",
'email': "mo2@chat.com",
'regions': [],
'is_active': False,
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302)
# check user object is inactive
user = User.objects.get(pk=self.user1.pk)
self.assertFalse(user.is_active)
def test_read(self):
# log in as an org administrator
self.login(self.admin)
# view our own profile
response = self.url_get(
'unicef', reverse('profiles.user_read', args=[self.admin.pk]))
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.context['edit_button_url'],
reverse('profiles.user_self'))
# view other user's profile
response = self.url_get(
'unicef', reverse('profiles.user_read', args=[self.user1.pk]))
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.context['edit_button_url'],
reverse('profiles.user_update', args=[self.user1.pk]))
# try to view user from other org
response = self.url_get(
'unicef', reverse('profiles.user_read', args=[self.user3.pk]))
self.assertEqual(response.status_code, 404)
# log in as a user
self.login(self.user1)
# view other user's profile
response = self.url_get(
'unicef', reverse('profiles.user_read', args=[self.admin.pk]))
self.assertEqual(response.status_code, 200)
self.assertIsNone(response.context['edit_button_url'])
def test_list(self):
url = reverse('profiles.user_list')
response = self.url_get('unicef', url)
self.assertLoginRedirect(response, 'unicef', url)
# log in as a non-administrator
self.login(self.user1)
response = self.url_get('unicef', url)
self.assertLoginRedirect(response, 'unicef', url)
# log in as an org administrator
self.login(self.admin)
response = self.url_get('unicef', url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['object_list']), 2)
def test_self(self):
url = reverse('profiles.user_self')
# try as unauthenticated
response = self.url_get('unicef', url)
self.assertLoginRedirect(response, 'unicef', url)
# try as superuser (doesn't have a chat profile)
self.login(self.superuser)
response = self.url_get('unicef', url)
self.assertEqual(response.status_code, 404)
# log in as an org administrator
self.login(self.admin)
response = self.url_get('unicef', url)
self.assertEqual(response.status_code, 200)
# log in as a user
self.login(self.user1)
response = self.url_get('unicef', url)
self.assertEqual(response.status_code, 200)
# submit with no fields entered
response = self.url_post('unicef', url, dict())
self.assertEqual(response.status_code, 200)
self.assertFormError(
response, 'form', 'full_name',
'This field is required.')
self.assertFormError(
response, 'form', 'email',
'This field is required.')
# submit with all required fields entered
data = dict(full_name="Morris", email="mo2@trac.com")
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302)
# check updated user and profile
user = User.objects.get(pk=self.user1.pk)
self.assertEqual(user.profile.full_name, "Morris")
self.assertEqual(user.email, "mo2@trac.com")
self.assertEqual(user.username, "mo2@trac.com")
self.assertEqual(list(user.regions.all()), [self.region1])
# submit with all required fields entered and password fields
old_password_hash = user.password
data = {
'full_name': "Morris",
'email': "mo2@trac.com",
'new_password': "Qwerty123",
'confirm_password': "Qwerty123",
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302)
# check password has been changed
user = User.objects.get(pk=self.user1.pk)
self.assertNotEqual(user.password, old_password_hash)
# check when user is being forced to change their password
old_password_hash = user.password
self.user1.profile.change_password = True
self.user1.profile.save()
# submit without password
data = dict(full_name="Morris", email="mo2@trac.com")
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 200)
self.assertFormError(
response, 'form', 'password',
'This field is required.')
# submit again with password but no confirmation
data = {
'full_name': "Morris",
'email': "mo2@trac.com",
'password': "Qwerty123",
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 200)
self.assertFormError(
response, 'form', 'confirm_password',
"Passwords don't match.")
# submit again with password and confirmation
data = {
'full_name': "Morris",
'email': "mo2@trac.com",
'password': "Qwerty123",
'confirm_password': "Qwerty123",
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302)
# check password has changed and no longer has to be changed
user = User.objects.get(pk=self.user1.pk)
self.assertFalse(user.profile.change_password)
self.assertNotEqual(user.password, old_password_hash)
class DashUserCRUDLTest(TracProDataTest):
def test_login(self):
url = reverse('users.user_login')
# login without org subdomain
response = self.url_post(None, url, {
'username': 'sam@unicef.org',
'password': 'sam@unicef.org',
})
self.assertRedirects(
response, 'http://testserver/',
fetch_redirect_response=False)
# login with org subdomain
response = self.url_post('unicef', url, {
'username': 'sam@unicef.org',
'password': 'sam@unicef.org',
})
self.assertRedirects(
response, 'http://unicef.testserver/',
fetch_redirect_response=False)
avg_line_length: 36.324561 | max_line_length: 94 | alphanum_fraction: 0.594482
content_no_comment:
from __future__ import absolute_import, unicode_literals
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from tracpro.test.cases import TracProDataTest
class ManageUserCreateTest(TracProDataTest):
def test_create_as_non_superuser(self):
url = reverse('profiles.admin_create')
self.login(self.admin)
response = self.url_post('unicef', url, dict())
self.assertEqual(response.status_code, 302, response)
self.assertIn('login', response['Location'])
def test_create_with_fields_missing(self):
url = reverse('profiles.admin_create')
self.login(self.superuser)
response = self.url_post('unicef', url, dict())
self.assertEqual(response.status_code, 200, response)
error_dict = response.context['form'].errors
self.assertEqual(4, len(error_dict), repr(error_dict))
self.assertFormError(
response, 'form', 'full_name',
'This field is required.')
self.assertFormError(
response, 'form', 'email',
'This field is required.')
self.assertFormError(
response, 'form', 'password',
'This field is required.')
self.assertFormError(
response, 'form', '__all__',
'Email address already taken.'
)
def test_create_successfully(self):
url = reverse('profiles.admin_create')
self.login(self.superuser)
data = {
'full_name': "Mo Polls",
'email': "mo@trac.com",
'password': "abc123xy",
'confirm_password': "abc123xy",
'is_active': True,
'is_superuser': False,
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302, response)
user = User.objects.get(email='mo@trac.com')
self.assertEqual(user.profile.full_name, 'Mo Polls')
self.assertTrue(user.is_active)
self.assertFalse(user.is_superuser)
self.assertEqual(user, authenticate(username=user.username, password="abc123xy"))
def test_create_superuser(self):
url = reverse('profiles.admin_create')
self.login(self.superuser)
data = {
'full_name': "Mo Polls",
'email': "mo@trac.com",
'password': "abc123xy",
'confirm_password': "abc123xy",
'is_active': True,
'is_superuser': True,
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302, response)
user = User.objects.get(email='mo@trac.com')
self.assertEqual(user.profile.full_name, 'Mo Polls')
self.assertTrue(user.is_active)
self.assertTrue(user.is_superuser)
class ManageUserUpdateTest(TracProDataTest):
def test_update_as_non_superuser(self):
self.login(self.admin)
url = reverse('profiles.admin_update', args=[self.user1.pk])
response = self.url_post('unicef', url, dict())
self.assertEqual(response.status_code, 302)
self.assertIn('login', response['Location'])
def test_update(self):
self.login(self.superuser)
url = reverse('profiles.admin_update', args=[self.user1.pk])
data = {
'full_name': "Mo Polls",
'email': "mo@trac.com",
'new_password': "abc123xy",
'confirm_password': "abc123xy",
'is_active': False,
'is_superuser': True,
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302)
user = User.objects.get(email='mo@trac.com')
self.assertEqual(user.profile.full_name, "Mo Polls")
self.assertFalse(user.is_active)
self.assertTrue(user.is_superuser)
self.assertEqual(user, authenticate(username=user.username, password="abc123xy"))
data = {
'full_name': "Mo Polls",
'email': "mo@trac.com",
'is_active': True,
'is_superuser': False,
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302)
user = User.objects.get(email='mo@trac.com')
self.assertEqual(user.profile.full_name, "Mo Polls")
self.assertTrue(user.is_active)
self.assertFalse(user.is_superuser)
self.assertEqual(user, authenticate(username=user.username, password="abc123xy"))
class UserCRUDLTest(TracProDataTest):
def test_create(self):
url = reverse('profiles.user_create')
self.login(self.admin)
response = self.url_post('unicef', url, dict())
self.assertEqual(response.status_code, 200)
self.assertFormError(
response, 'form', 'full_name',
'This field is required.')
self.assertFormError(
response, 'form', 'email',
'This field is required.')
self.assertFormError(
response, 'form', 'password',
'This field is required.')
data = {
'full_name': "Mo Polls",
'email': "mo@trac.com",
'password': "123",
'confirm_password': "123",
}
response = self.url_post('unicef', url, data)
self.assertFormError(
response, 'form', 'password',
"Ensure this value has at least 8 characters (it has 3).")
data = {
'full_name': "Mo Polls",
'email': "mo@trac.com",
'password': "Qwerty123",
'confirm_password': "123",
}
response = self.url_post('unicef', url, data)
self.assertFormError(
response, 'form', 'confirm_password',
"Passwords don't match.")
# submit again with valid password and confirmation
data = {
'full_name': "Mo Polls",
'email': "mo@trac.com",
'password': "Qwerty123",
'confirm_password': "Qwerty123",
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302)
# check new user and profile
user = User.objects.get(email="mo@trac.com")
self.assertEqual(user.profile.full_name, "Mo Polls")
self.assertEqual(user.email, "mo@trac.com")
self.assertEqual(user.username, "mo@trac.com")
# try again with same email address
data = {
'full_name': "Mo Polls II",
'email': "mo@trac.com",
'password': "Qwerty123",
'confirm_password': "Qwerty123",
}
response = self.url_post('unicef', url, data)
self.assertFormError(
response, 'form', None,
"Email address already taken.")
def test_update(self):
url = reverse('profiles.user_update', args=[self.user1.pk])
# log in as an org administrator
self.login(self.admin)
response = self.url_get('unicef', url)
self.assertEqual(response.status_code, 200)
# can assign to any org region
self.assertEqual(len(response.context['form'].fields['regions'].choices), 3)
# submit with no fields entered
response = self.url_post('unicef', url, dict())
self.assertEqual(response.status_code, 200)
self.assertFormError(
response, 'form', 'full_name',
'This field is required.')
self.assertFormError(
response, 'form', 'email',
'This field is required.')
# submit with all fields entered
data = {
'full_name': "Morris",
'email': "mo2@chat.com",
'regions': [self.region3.pk],
'is_active': True,
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302)
# check updated user and profile
user = User.objects.get(pk=self.user1.pk)
self.assertEqual(user.profile.full_name, "Morris")
self.assertEqual(user.email, "mo2@chat.com")
self.assertEqual(user.username, "mo2@chat.com")
self.assertEqual(list(user.regions.all()), [self.region3])
# submit again for good measure
data = {
'full_name': "Morris",
'email': "mo2@chat.com",
'regions': [self.region3.pk],
'is_active': True,
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302)
# try giving user someone else's email address
data = {
'full_name': "Morris",
'email': "eric@nyaruka.com",
'password': "Qwerty123",
'confirm_password': "Qwerty123",
}
response = self.url_post('unicef', url, data)
self.assertFormError(
response, 'form', None,
"Email address already taken.")
data = {
'full_name': "Morris",
'email': "mo2@chat.com",
'regions': [],
'is_active': False,
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302)
user = User.objects.get(pk=self.user1.pk)
self.assertFalse(user.is_active)
def test_read(self):
self.login(self.admin)
response = self.url_get(
'unicef', reverse('profiles.user_read', args=[self.admin.pk]))
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.context['edit_button_url'],
reverse('profiles.user_self'))
response = self.url_get(
'unicef', reverse('profiles.user_read', args=[self.user1.pk]))
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.context['edit_button_url'],
reverse('profiles.user_update', args=[self.user1.pk]))
# try to view user from other org
response = self.url_get(
'unicef', reverse('profiles.user_read', args=[self.user3.pk]))
self.assertEqual(response.status_code, 404)
# log in as a user
self.login(self.user1)
# view other user's profile
response = self.url_get(
'unicef', reverse('profiles.user_read', args=[self.admin.pk]))
self.assertEqual(response.status_code, 200)
self.assertIsNone(response.context['edit_button_url'])
def test_list(self):
url = reverse('profiles.user_list')
response = self.url_get('unicef', url)
self.assertLoginRedirect(response, 'unicef', url)
self.login(self.user1)
response = self.url_get('unicef', url)
self.assertLoginRedirect(response, 'unicef', url)
self.login(self.admin)
response = self.url_get('unicef', url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['object_list']), 2)
def test_self(self):
url = reverse('profiles.user_self')
response = self.url_get('unicef', url)
self.assertLoginRedirect(response, 'unicef', url)
self.login(self.superuser)
response = self.url_get('unicef', url)
self.assertEqual(response.status_code, 404)
# log in as an org administrator
self.login(self.admin)
response = self.url_get('unicef', url)
self.assertEqual(response.status_code, 200)
# log in as a user
self.login(self.user1)
response = self.url_get('unicef', url)
self.assertEqual(response.status_code, 200)
# submit with no fields entered
response = self.url_post('unicef', url, dict())
self.assertEqual(response.status_code, 200)
self.assertFormError(
response, 'form', 'full_name',
'This field is required.')
self.assertFormError(
response, 'form', 'email',
'This field is required.')
# submit with all required fields entered
data = dict(full_name="Morris", email="mo2@trac.com")
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302)
# check updated user and profile
user = User.objects.get(pk=self.user1.pk)
self.assertEqual(user.profile.full_name, "Morris")
self.assertEqual(user.email, "mo2@trac.com")
self.assertEqual(user.username, "mo2@trac.com")
self.assertEqual(list(user.regions.all()), [self.region1])
# submit with all required fields entered and password fields
old_password_hash = user.password
data = {
'full_name': "Morris",
'email': "mo2@trac.com",
'new_password': "Qwerty123",
'confirm_password': "Qwerty123",
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302)
# check password has been changed
user = User.objects.get(pk=self.user1.pk)
self.assertNotEqual(user.password, old_password_hash)
# check when user is being forced to change their password
old_password_hash = user.password
self.user1.profile.change_password = True
self.user1.profile.save()
# submit without password
data = dict(full_name="Morris", email="mo2@trac.com")
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 200)
self.assertFormError(
response, 'form', 'password',
'This field is required.')
# submit again with password but no confirmation
data = {
'full_name': "Morris",
'email': "mo2@trac.com",
'password': "Qwerty123",
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 200)
self.assertFormError(
response, 'form', 'confirm_password',
"Passwords don't match.")
data = {
'full_name': "Morris",
'email': "mo2@trac.com",
'password': "Qwerty123",
'confirm_password': "Qwerty123",
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302)
user = User.objects.get(pk=self.user1.pk)
self.assertFalse(user.profile.change_password)
self.assertNotEqual(user.password, old_password_hash)
class DashUserCRUDLTest(TracProDataTest):
def test_login(self):
url = reverse('users.user_login')
response = self.url_post(None, url, {
'username': 'sam@unicef.org',
'password': 'sam@unicef.org',
})
self.assertRedirects(
response, 'http://testserver/',
fetch_redirect_response=False)
response = self.url_post('unicef', url, {
'username': 'sam@unicef.org',
'password': 'sam@unicef.org',
})
self.assertRedirects(
response, 'http://unicef.testserver/',
fetch_redirect_response=False)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 7907371dca369689eba216a8a443d51a9ba2bc03 | size: 8,408 | ext: py | lang: Python
max_stars_repo_path: ml_editor/ml_editor.py | max_stars_repo_name: VestiDev/ml-powered-applications-2020-book | max_stars_repo_head_hexsha: 4dcfdeb42cdce47406985dcbf8a0533cc086cd20 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 542 | max_stars_repo_stars_event_min_datetime: 2019-06-11T20:15:11.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-30T00:30:05.000Z
max_issues_repo_path: ml_editor/ml_editor.py | max_issues_repo_name: VestiDev/ml-powered-applications-2020-book | max_issues_repo_head_hexsha: 4dcfdeb42cdce47406985dcbf8a0533cc086cd20 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 84 | max_issues_repo_issues_event_min_datetime: 2020-06-18T13:32:05.000Z | max_issues_repo_issues_event_max_datetime: 2021-08-02T13:18:27.000Z
max_forks_repo_path: ml_editor/ml_editor.py | max_forks_repo_name: VestiDev/ml-powered-applications-2020-book | max_forks_repo_head_hexsha: 4dcfdeb42cdce47406985dcbf8a0533cc086cd20 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 180 | max_forks_repo_forks_event_min_datetime: 2019-04-15T01:47:32.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-13T13:58:04.000Z
content:
import argparse
import logging
import sys
import pyphen
import nltk
pyphen.language_fallback("en_US")
logger = logging.getLogger()
logger.setLevel(logging.INFO)
console_out = logging.StreamHandler(sys.stdout)
console_out.setLevel(logging.DEBUG)
logger.addHandler(console_out)
def parse_arguments():
"""
Simple argument parser for the command line
:return: The text to be edited
"""
parser = argparse.ArgumentParser(description="Receive text to be edited")
parser.add_argument("text", metavar="input text", type=str)
args = parser.parse_args()
return args.text
def clean_input(text):
"""
Text sanitization function
:param text: User input text
:return: Sanitized text, without non ascii characters
"""
# To keep things simple at the start, let's only keep ASCII characters
return str(text.encode().decode("ascii", errors="ignore"))
def preprocess_input(text):
"""
    Tokenizes text that has been sanitized
:param text: Sanitized text
:return: Text ready to be fed to analysis, by having sentences and words tokenized
"""
sentences = nltk.sent_tokenize(text)
tokens = [nltk.word_tokenize(sentence) for sentence in sentences]
return tokens
def compute_flesch_reading_ease(total_syllables, total_words, total_sentences):
"""
Computes readability score from summary statistics
:param total_syllables: number of syllables in input text
:param total_words: number of words in input text
:param total_sentences: number of sentences in input text
:return: A readability score: the lower the score, the more complex the text is deemed to be
"""
return (
206.85
- 1.015 * (total_words / total_sentences)
- 84.6 * (total_syllables / total_words)
)
def get_reading_level_from_flesch(flesch_score):
"""
Thresholds taken from https://en.wikipedia.org/wiki/Flesch%E2%80%93Kincaid_readability_tests
:param flesch_score:
:return: A reading level and difficulty for a given flesch score
"""
if flesch_score < 30:
return "Very difficult to read"
elif flesch_score < 50:
return "Difficult to read"
elif flesch_score < 60:
return "Fairly difficult to read"
elif flesch_score < 70:
return "Plain English"
elif flesch_score < 80:
return "Fairly easy to read"
elif flesch_score < 90:
return "Easy to read"
else:
return "Very easy to read"
def compute_average_word_length(tokens):
"""
Calculate word length for a sentence
:param tokens: a list of words
:return: The average length of words in this list
"""
word_lengths = [len(word) for word in tokens]
return sum(word_lengths) / len(word_lengths)
def compute_total_average_word_length(sentence_list):
"""
Calculate average word length for multiple sentences
:param sentence_list: a list of sentences, each being a list of words
:return: The average length of words in this list of sentences
"""
lengths = [compute_average_word_length(tokens) for tokens in sentence_list]
return sum(lengths) / len(lengths)
def compute_total_unique_words_fraction(sentence_list):
"""
    Compute fraction of unique words
:param sentence_list: a list of sentences, each being a list of words
:return: the fraction of unique words in the sentences
"""
all_words = [word for word_list in sentence_list for word in word_list]
unique_words = set(all_words)
return len(unique_words) / len(all_words)
def count_word_usage(tokens, word_list):
"""
Counts occurrences of a given list of words
:param tokens: a list of tokens for one sentence
:param word_list: a list of words to search for
:return: the number of times the words appear in the list
"""
return len([word for word in tokens if word.lower() in word_list])
def count_word_syllables(word):
"""
Count syllables in a word
:param word: a one word string
:return: the number of syllables according to pyphen
"""
dic = pyphen.Pyphen(lang="en_US")
# this returns our word, with hyphens ("-") inserted in between each syllable
hyphenated = dic.inserted(word)
return len(hyphenated.split("-"))
def count_sentence_syllables(tokens):
"""
Count syllables in a sentence
:param tokens: a list of words and potentially punctuation
:return: the number of syllables in the sentence
"""
# Our tokenizer leaves punctuation as a separate word, so we filter for it here
punctuation = ".,!?/"
return sum(
[
count_word_syllables(word)
for word in tokens
if word not in punctuation
]
)
def count_total_syllables(sentence_list):
"""
Count syllables in a list of sentences
:param sentence_list: a list of sentences, each being a list of words
:return: the number of syllables in the sentences
"""
return sum(
[count_sentence_syllables(sentence) for sentence in sentence_list]
)
def count_words_per_sentence(sentence_tokens):
"""
Count words in a sentence
:param sentence_tokens: a list of words and potentially punctuation
:return: the number of words in the sentence
"""
punctuation = ".,!?/"
return len([word for word in sentence_tokens if word not in punctuation])
def count_total_words(sentence_list):
"""
Count words in a list of sentences
:param sentence_list: a list of sentences, each being a list of words
:return: the number of words in the sentences
"""
return sum(
[count_words_per_sentence(sentence) for sentence in sentence_list]
)
def get_suggestions(sentence_list):
"""
Returns a string containing our suggestions
:param sentence_list: a list of sentences, each being a list of words
:return: suggestions to improve the input
"""
told_said_usage = sum(
(count_word_usage(tokens, ["told", "said"]) for tokens in sentence_list)
)
but_and_usage = sum(
(count_word_usage(tokens, ["but", "and"]) for tokens in sentence_list)
)
wh_adverbs_usage = sum(
(
count_word_usage(
tokens,
[
"when",
"where",
"why",
"whence",
"whereby",
"wherein",
"whereupon",
],
)
for tokens in sentence_list
)
)
result_str = ""
adverb_usage = "Adverb usage: %s told/said, %s but/and, %s wh adverbs" % (
told_said_usage,
but_and_usage,
wh_adverbs_usage,
)
result_str += adverb_usage
average_word_length = compute_total_average_word_length(sentence_list)
unique_words_fraction = compute_total_unique_words_fraction(sentence_list)
word_stats = "Average word length %.2f, fraction of unique words %.2f" % (
average_word_length,
unique_words_fraction,
)
# Using HTML break to later display on a webapp
result_str += "<br/>"
result_str += word_stats
number_of_syllables = count_total_syllables(sentence_list)
number_of_words = count_total_words(sentence_list)
number_of_sentences = len(sentence_list)
syllable_counts = "%d syllables, %d words, %d sentences" % (
number_of_syllables,
number_of_words,
number_of_sentences,
)
result_str += "<br/>"
result_str += syllable_counts
flesch_score = compute_flesch_reading_ease(
number_of_syllables, number_of_words, number_of_sentences
)
flesch = "%d syllables, %.2f flesch score: %s" % (
number_of_syllables,
flesch_score,
get_reading_level_from_flesch(flesch_score),
)
result_str += "<br/>"
result_str += flesch
return result_str
def get_recommendations_from_input(txt):
"""
Cleans, preprocesses, and generates heuristic suggestion for input string
:param txt: Input text
:return: Suggestions for a given text input
"""
processed = clean_input(txt)
tokenized_sentences = preprocess_input(processed)
suggestions = get_suggestions(tokenized_sentences)
return suggestions
if __name__ == "__main__":
input_text = parse_arguments()
print(get_recommendations_from_input(input_text))
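# Example invocation (hypothetical input text), using the single positional
# "text" argument defined in parse_arguments():
#   python ml_editor.py "This is a sample sentence that we would like to analyze."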
avg_line_length: 30.244604 | max_line_length: 96 | alphanum_fraction: 0.673525
content_no_comment:
import argparse
import logging
import sys
import pyphen
import nltk
pyphen.language_fallback("en_US")
logger = logging.getLogger()
logger.setLevel(logging.INFO)
console_out = logging.StreamHandler(sys.stdout)
console_out.setLevel(logging.DEBUG)
logger.addHandler(console_out)
def parse_arguments():
parser = argparse.ArgumentParser(description="Receive text to be edited")
parser.add_argument("text", metavar="input text", type=str)
args = parser.parse_args()
return args.text
def clean_input(text):
return str(text.encode().decode("ascii", errors="ignore"))
def preprocess_input(text):
sentences = nltk.sent_tokenize(text)
tokens = [nltk.word_tokenize(sentence) for sentence in sentences]
return tokens
def compute_flesch_reading_ease(total_syllables, total_words, total_sentences):
return (
206.85
- 1.015 * (total_words / total_sentences)
- 84.6 * (total_syllables / total_words)
)
def get_reading_level_from_flesch(flesch_score):
if flesch_score < 30:
return "Very difficult to read"
elif flesch_score < 50:
return "Difficult to read"
elif flesch_score < 60:
return "Fairly difficult to read"
elif flesch_score < 70:
return "Plain English"
elif flesch_score < 80:
return "Fairly easy to read"
elif flesch_score < 90:
return "Easy to read"
else:
return "Very easy to read"
def compute_average_word_length(tokens):
word_lengths = [len(word) for word in tokens]
return sum(word_lengths) / len(word_lengths)
def compute_total_average_word_length(sentence_list):
lengths = [compute_average_word_length(tokens) for tokens in sentence_list]
return sum(lengths) / len(lengths)
def compute_total_unique_words_fraction(sentence_list):
all_words = [word for word_list in sentence_list for word in word_list]
unique_words = set(all_words)
return len(unique_words) / len(all_words)
def count_word_usage(tokens, word_list):
return len([word for word in tokens if word.lower() in word_list])
def count_word_syllables(word):
dic = pyphen.Pyphen(lang="en_US")
# this returns our word, with hyphens ("-") inserted in between each syllable
hyphenated = dic.inserted(word)
return len(hyphenated.split("-"))
def count_sentence_syllables(tokens):
# Our tokenizer leaves punctuation as a separate word, so we filter for it here
punctuation = ".,!?/"
return sum(
[
count_word_syllables(word)
for word in tokens
if word not in punctuation
]
)
def count_total_syllables(sentence_list):
return sum(
[count_sentence_syllables(sentence) for sentence in sentence_list]
)
def count_words_per_sentence(sentence_tokens):
punctuation = ".,!?/"
return len([word for word in sentence_tokens if word not in punctuation])
def count_total_words(sentence_list):
return sum(
[count_words_per_sentence(sentence) for sentence in sentence_list]
)
def get_suggestions(sentence_list):
told_said_usage = sum(
(count_word_usage(tokens, ["told", "said"]) for tokens in sentence_list)
)
but_and_usage = sum(
(count_word_usage(tokens, ["but", "and"]) for tokens in sentence_list)
)
wh_adverbs_usage = sum(
(
count_word_usage(
tokens,
[
"when",
"where",
"why",
"whence",
"whereby",
"wherein",
"whereupon",
],
)
for tokens in sentence_list
)
)
result_str = ""
adverb_usage = "Adverb usage: %s told/said, %s but/and, %s wh adverbs" % (
told_said_usage,
but_and_usage,
wh_adverbs_usage,
)
result_str += adverb_usage
average_word_length = compute_total_average_word_length(sentence_list)
unique_words_fraction = compute_total_unique_words_fraction(sentence_list)
word_stats = "Average word length %.2f, fraction of unique words %.2f" % (
average_word_length,
unique_words_fraction,
)
# Using HTML break to later display on a webapp
result_str += "<br/>"
result_str += word_stats
number_of_syllables = count_total_syllables(sentence_list)
number_of_words = count_total_words(sentence_list)
number_of_sentences = len(sentence_list)
syllable_counts = "%d syllables, %d words, %d sentences" % (
number_of_syllables,
number_of_words,
number_of_sentences,
)
result_str += "<br/>"
result_str += syllable_counts
flesch_score = compute_flesch_reading_ease(
number_of_syllables, number_of_words, number_of_sentences
)
flesch = "%d syllables, %.2f flesch score: %s" % (
number_of_syllables,
flesch_score,
get_reading_level_from_flesch(flesch_score),
)
result_str += "<br/>"
result_str += flesch
return result_str
def get_recommendations_from_input(txt):
processed = clean_input(txt)
tokenized_sentences = preprocess_input(processed)
suggestions = get_suggestions(tokenized_sentences)
return suggestions
if __name__ == "__main__":
input_text = parse_arguments()
print(get_recommendations_from_input(input_text))
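# Hypothetical usage sketch (clean_input, preprocess_input and parse_arguments are
# assumed to be defined earlier in this file):
#
#     text = "She told me the results, but I found the report hard to read."
#     print(get_recommendations_from_input(text))
#
# The result is a single "<br/>"-separated string summarising adverb usage, word
# statistics and the Flesch reading-ease level.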
| true
| true
|
790737c2a75aeb7a3a2829aa5090512eaf708739
| 21,588
|
py
|
Python
|
pychron/dvc/meta_repo.py
|
WiscAr/pychron
|
8d335d53ba7a5fc70760d9a7cb60540ad169ae84
|
[
"Apache-2.0"
] | null | null | null |
pychron/dvc/meta_repo.py
|
WiscAr/pychron
|
8d335d53ba7a5fc70760d9a7cb60540ad169ae84
|
[
"Apache-2.0"
] | 80
|
2018-07-17T20:10:20.000Z
|
2021-08-17T15:38:24.000Z
|
pychron/dvc/meta_repo.py
|
UManPychron/pychron
|
b84c9fd70072f9cbda30abe2c471e64fe3dd75d8
|
[
"Apache-2.0"
] | null | null | null |
# ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import os
import shutil
from datetime import datetime
from traits.api import Bool
from uncertainties import ufloat
from pychron.core.helpers.datetime_tools import ISO_FORMAT_STR
from pychron.core.helpers.filetools import glob_list_directory, add_extension, \
list_directory
from pychron.dvc import dvc_dump, dvc_load, repository_path, list_frozen_productions
from pychron.dvc.meta_object import IrradiationGeometry, Chronology, Production, cached, Gains, LoadGeometry, \
MetaObjectException
from pychron.git_archive.repo_manager import GitRepoManager
from pychron.paths import paths, r_mkdir
from pychron.pychron_constants import INTERFERENCE_KEYS, RATIO_KEYS, DEFAULT_MONITOR_NAME, DATE_FORMAT, NULL_STR
# ============= enthought library imports =======================
def irradiation_geometry(name):
p = os.path.join(paths.meta_root, 'irradiation_holders', add_extension(name))
return IrradiationGeometry(p)
def irradiation_geometry_holes(name):
geom = irradiation_geometry(name)
return geom.holes
def irradiation_chronology(name, allow_null=False):
p = os.path.join(paths.meta_root, name, 'chronology.txt')
return Chronology(p, allow_null=allow_null)
def dump_chronology(path, doses):
if doses is None:
doses = []
with open(path, 'w') as wfile:
for p, s, e in doses:
if not isinstance(s, str):
s = s.strftime(ISO_FORMAT_STR)
            if not isinstance(e, str):
                e = e.strftime(ISO_FORMAT_STR)
if not isinstance(p, str):
p = '{:0.3f}'.format(p)
line = '{},{},{}\n'.format(p, s, e)
wfile.write(line)
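# Illustrative example (hypothetical values): each dose tuple (1.0, start, end) is
# written as one CSV line, e.g. "1.000,2016-06-01 17:00:00,2016-06-01 18:00:00";
# the exact timestamp layout is whatever ISO_FORMAT_STR specifies.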
def gain_path(name):
root = os.path.join(paths.meta_root, 'spectrometers')
if not os.path.isdir(root):
os.mkdir(root)
p = os.path.join(root, add_extension('{}.gain'.format(name), '.json'))
return p
def get_frozen_productions(repo):
prods = {}
for name, path in list_frozen_productions(repo):
prods[name] = Production(path)
return prods
def get_frozen_flux(repo, irradiation):
path = repository_path(repo, '{}.json'.format(irradiation))
fd = {}
if path:
fd = dvc_load(path)
for fi in fd.values():
fi['j'] = ufloat(*fi['j'], tag='J')
return fd
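# Illustrative example (hypothetical data): if the frozen flux file contains
# {"A1": {"j": [0.0004, 4e-09]}}, get_frozen_flux returns it with each "j" replaced
# by ufloat(0.0004, 4e-09) tagged 'J', ready for error propagation with the
# uncertainties package.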
class MetaRepo(GitRepoManager):
clear_cache = Bool
def get_monitor_info(self, irrad, level):
age, decay = NULL_STR, NULL_STR
positions = self._get_level_positions(irrad, level)
        # assume all positions have the same monitor_age/decay constant. Not strictly true.
        # There is some potential ambiguity but it will not be resolved now 8/26/18.
if positions:
position = positions[0]
opt = position.get('options')
if opt:
age = position.get('monitor_age', NULL_STR)
decayd = position.get('decay_constants')
if decayd:
decay = decayd.get('lambda_k_total', NULL_STR)
return str(age), str(decay)
def add_unstaged(self, *args, **kw):
super(MetaRepo, self).add_unstaged(self.path, **kw)
def save_gains(self, ms, gains_dict):
p = gain_path(ms)
dvc_dump(gains_dict, p)
if self.add_paths(p):
self.commit('Updated gains')
def update_script(self, rootname, name, path_or_blob):
self._update_text(os.path.join('scripts', rootname.lower()), name, path_or_blob)
def update_experiment_queue(self, rootname, name, path_or_blob):
self._update_text(os.path.join('experiments', rootname.lower()), name, path_or_blob)
def update_level_production(self, irrad, name, prname, note=None):
prname = prname.replace(' ', '_')
pathname = add_extension(prname, '.json')
src = os.path.join(paths.meta_root, irrad, 'productions', pathname)
if os.path.isfile(src):
self.update_productions(irrad, name, prname, note=note)
else:
            self.warning_dialog('Invalid production name {}'.format(prname))
def update_level_monitor(self, irradiation, level, monitor_name, monitor_material, monitor_age, lambda_k):
path = self.get_level_path(irradiation, level)
obj = dvc_load(path)
positions = self._get_level_positions(irradiation, level)
options = {'monitor_name': monitor_name,
'monitor_material': monitor_material,
'monitor_age': monitor_age}
decay_constants = {'lambda_k_total': lambda_k, 'lambda_k_total_error': 0}
for p in positions:
p['options'] = options
p['decay_constants'] = decay_constants
obj['positions'] = positions
dvc_dump(obj, path)
def add_production_to_irradiation(self, irrad, name, params, add=True, commit=False):
self.debug('adding production {} to irradiation={}'.format(name, irrad))
p = os.path.join(paths.meta_root, irrad, 'productions', add_extension(name, '.json'))
prod = Production(p, new=not os.path.isfile(p))
prod.update(params)
prod.dump()
if add:
self.add(p, commit=commit)
def add_production(self, irrad, name, obj, commit=False, add=True):
p = self.get_production(irrad, name, force=True)
p.attrs = attrs = INTERFERENCE_KEYS + RATIO_KEYS
kef = lambda x: '{}_err'.format(x)
if obj:
def values():
return ((k, getattr(obj, k), kef(k), getattr(obj, kef(k))) for k in attrs)
else:
def values():
return ((k, 0, kef(k), 0) for k in attrs)
for k, v, ke, e in values():
setattr(p, k, v)
setattr(p, ke, e)
p.dump()
if add:
self.add(p.path, commit=commit)
def update_production(self, prod, irradiation=None):
ip = self.get_production(prod.name)
self.debug('saving production {}'.format(prod.name))
params = prod.get_params()
for k, v in params.items():
self.debug('setting {}={}'.format(k, v))
setattr(ip, k, v)
ip.note = prod.note
self.add(ip.path, commit=False)
self.commit('updated production {}'.format(prod.name))
def update_productions(self, irrad, level, production, note=None, add=True):
p = os.path.join(paths.meta_root, irrad, 'productions.json')
obj = dvc_load(p)
        obj['note'] = str(note) if note else ''
if level in obj:
if obj[level] != production:
self.debug('setting production to irrad={}, level={}, prod={}'.format(irrad, level, production))
obj[level] = production
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
else:
obj[level] = production
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
def set_identifier(self, irradiation, level, pos, identifier):
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
positions = self._get_level_positions(irradiation, level)
d = next((p for p in positions if p['position'] == pos), None)
if d:
d['identifier'] = identifier
jd['positions'] = positions
dvc_dump(jd, p)
self.add(p, commit=False)
def get_level_path(self, irrad, level):
return os.path.join(paths.meta_root, irrad, '{}.json'.format(level))
def add_level(self, irrad, level, add=True):
p = self.get_level_path(irrad, level)
lv = dict(z=0, positions=[])
dvc_dump(lv, p)
if add:
self.add(p, commit=False)
def add_chronology(self, irrad, doses, add=True):
p = os.path.join(paths.meta_root, irrad, 'chronology.txt')
dump_chronology(p, doses)
if add:
self.add(p, commit=False)
def add_irradiation(self, name):
p = os.path.join(paths.meta_root, name)
if not os.path.isdir(p):
os.mkdir(p)
def add_position(self, irradiation, level, pos, add=True):
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
if isinstance(jd, list):
positions = jd
z = 0
else:
positions = jd.get('positions', [])
z = jd.get('z', 0)
pd = next((p for p in positions if p['position'] == pos), None)
if pd is None:
positions.append({'position': pos, 'decay_constants': {}})
dvc_dump({'z': z, 'positions': positions}, p)
if add:
self.add(p, commit=False)
def add_irradiation_geometry_file(self, path):
try:
holder = IrradiationGeometry(path)
if not holder.holes:
raise BaseException
except BaseException:
self.warning_dialog('Invalid Irradiation Geometry file. Failed to import')
return
self.smart_pull()
root = os.path.join(paths.meta_root, 'irradiation_holders')
if not os.path.isdir(root):
os.mkdir(root)
name = os.path.basename(path)
dest = os.path.join(root, name)
shutil.copyfile(path, dest)
self.add(dest, commit=False)
self.commit('added irradiation geometry file {}'.format(name))
self.push()
self.information_dialog('Irradiation Geometry "{}" added'.format(name))
# p = os.path.join(root, add_extension(name))
# def add_irradiation_holder(self, name, blob, commit=False, overwrite=False, add=True):
# root = os.path.join(paths.meta_root, 'irradiation_holders')
# if not os.path.isdir(root):
# os.mkdir(root)
# p = os.path.join(root, add_extension(name))
#
# if not os.path.isfile(p) or overwrite:
# with open(p, 'w') as wfile:
# holes = list(iter_geom(blob))
# n = len(holes)
# wfile.write('{},0.0175\n'.format(n))
# for idx, (x, y, r) in holes:
# wfile.write('{:0.4f},{:0.4f},{:0.4f}\n'.format(x, y, r))
# if add:
# self.add(p, commit=commit)
def get_load_holders(self):
p = os.path.join(paths.meta_root, 'load_holders')
return list_directory(p, extension='.txt', remove_extension=True)
def add_load_holder(self, name, path_or_txt, commit=False, add=True):
p = os.path.join(paths.meta_root, 'load_holders', name)
if os.path.isfile(path_or_txt):
shutil.copyfile(path_or_txt, p)
else:
with open(p, 'w') as wfile:
wfile.write(path_or_txt)
if add:
self.add(p, commit=commit)
def update_level_z(self, irradiation, level, z):
p = self.get_level_path(irradiation, level)
obj = dvc_load(p)
try:
add = obj['z'] != z
obj['z'] = z
except TypeError:
obj = {'z': z, 'positions': obj}
add = True
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
def remove_irradiation_position(self, irradiation, level, hole):
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
if jd:
if isinstance(jd, list):
positions = jd
z = 0
else:
positions = jd['positions']
z = jd['z']
npositions = [ji for ji in positions if not ji['position'] == hole]
obj = {'z': z, 'positions': npositions}
dvc_dump(obj, p)
self.add(p, commit=False)
def new_flux_positions(self, irradiation, level, positions, add=True):
p = self.get_level_path(irradiation, level)
obj = {'positions': positions, 'z': 0}
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
def update_fluxes(self, irradiation, level, j, e, add=True):
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
if isinstance(jd, list):
positions = jd
else:
positions = jd.get('positions')
if positions:
for ip in positions:
ip['j'] = j
ip['j_err'] = e
dvc_dump(jd, p)
if add:
self.add(p, commit=False)
def update_flux(self, irradiation, level, pos, identifier, j, e, mj, me, decay=None,
position_jerr=None,
analyses=None, options=None, add=True):
if options is None:
options = {}
if decay is None:
decay = {}
if analyses is None:
analyses = []
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
if isinstance(jd, list):
positions = jd
z = 0
else:
positions = jd.get('positions', [])
z = jd.get('z', 0)
npos = {'position': pos, 'j': j, 'j_err': e,
'mean_j': mj, 'mean_j_err': me,
'position_jerr': position_jerr,
'decay_constants': decay,
'identifier': identifier,
'options': options,
'analyses': [{'uuid': ai.uuid,
'record_id': ai.record_id,
'is_omitted': ai.is_omitted()}
for ai in analyses]}
if positions:
added = any((ji['position'] == pos for ji in positions))
npositions = [ji if ji['position'] != pos else npos for ji in positions]
if not added:
npositions.append(npos)
else:
npositions = [npos]
obj = {'z': z, 'positions': npositions}
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
def update_chronology(self, name, doses):
p = os.path.join(paths.meta_root, name, 'chronology.txt')
dump_chronology(p, doses)
self.add(p, commit=False)
def get_irradiation_holder_names(self):
return glob_list_directory(os.path.join(paths.meta_root, 'irradiation_holders'),
extension='.txt',
remove_extension=True)
def get_cocktail_irradiation(self):
"""
example cocktail.json
{
"chronology": "2016-06-01 17:00:00",
"j": 4e-4,
"j_err": 4e-9
}
:return:
"""
p = os.path.join(paths.meta_root, 'cocktail.json')
ret = dvc_load(p)
nret = {}
if ret:
lines = ['1.0, {}, {}'.format(ret['chronology'], ret['chronology'])]
c = Chronology.from_lines(lines)
nret['chronology'] = c
nret['flux'] = ufloat(ret['j'], ret['j_err'])
return nret
def get_default_productions(self):
p = os.path.join(paths.meta_root, 'reactors.json')
if not os.path.isfile(p):
with open(p, 'w') as wfile:
from pychron.file_defaults import REACTORS_DEFAULT
wfile.write(REACTORS_DEFAULT)
return dvc_load(p)
def get_flux_positions(self, irradiation, level):
positions = self._get_level_positions(irradiation, level)
return positions
def get_flux(self, irradiation, level, position):
positions = self.get_flux_positions(irradiation, level)
return self.get_flux_from_positions(position, positions)
def get_flux_from_positions(self, position, positions):
j, je, pe, lambda_k = 0, 0, 0, None
monitor_name, monitor_material, monitor_age = DEFAULT_MONITOR_NAME, 'sanidine', ufloat(28.201, 0)
if positions:
pos = next((p for p in positions if p['position'] == position), None)
if pos:
j, je, pe = pos.get('j', 0), pos.get('j_err', 0), pos.get('position_jerr', 0)
dc = pos.get('decay_constants')
if dc:
# this was a temporary fix and likely can be removed
if isinstance(dc, float):
v, e = dc, 0
else:
v, e = dc.get('lambda_k_total', 0), dc.get('lambda_k_total_error', 0)
lambda_k = ufloat(v, e)
mon = pos.get('monitor')
if mon:
monitor_name = mon.get('name', DEFAULT_MONITOR_NAME)
sa = mon.get('age', 28.201)
se = mon.get('error', 0)
monitor_age = ufloat(sa, se, tag='monitor_age')
monitor_material = mon.get('material', 'sanidine')
fd = {'j': ufloat(j, je, tag='J'),
'position_jerr': pe,
'lambda_k': lambda_k,
'monitor_name': monitor_name,
'monitor_material': monitor_material,
'monitor_age': monitor_age}
return fd
def get_gains(self, name):
g = self.get_gain_obj(name)
return g.gains
def save_sensitivities(self, sens):
ps = []
for k, v in sens.items():
root = os.path.join(paths.meta_root, 'spectrometers')
p = os.path.join(root, add_extension('{}.sens'.format(k), '.json'))
dvc_dump(v, p)
ps.append(p)
if self.add_paths(ps):
self.commit('Updated sensitivity')
def get_sensitivities(self):
specs = {}
root = os.path.join(paths.meta_root, 'spectrometers')
for p in list_directory(root):
if p.endswith('.sens.json'):
name = p.split('.')[0]
p = os.path.join(root, p)
obj = dvc_load(p)
for r in obj:
if r['create_date']:
r['create_date'] = datetime.strptime(r['create_date'], DATE_FORMAT)
specs[name] = obj
return specs
def get_sensitivity(self, name):
sens = self.get_sensitivities()
spec = sens.get(name)
v = 1
if spec:
# get most recent sensitivity
record = spec[-1]
v = record.get('sensitivity', 1)
return v
@cached('clear_cache')
def get_gain_obj(self, name, **kw):
p = gain_path(name)
return Gains(p)
# @cached('clear_cache')
def get_production(self, irrad, level, allow_null=False, **kw):
path = os.path.join(paths.meta_root, irrad, 'productions.json')
obj = dvc_load(path)
pname = obj.get(level, '')
p = os.path.join(paths.meta_root, irrad, 'productions', add_extension(pname, ext='.json'))
ip = Production(p, allow_null=allow_null)
# print 'new production id={}, name={}, irrad={}, level={}'.format(id(ip), pname, irrad, level)
return pname, ip
# @cached('clear_cache')
def get_chronology(self, name, allow_null=False, **kw):
chron = None
try:
chron = irradiation_chronology(name, allow_null=allow_null)
if self.application:
chron.use_irradiation_endtime = self.application.get_boolean_preference(
'pychron.arar.constants.use_irradiation_endtime', False)
except MetaObjectException:
if name != 'NoIrradiation':
self.warning('Could not locate the irradiation chronology "{}"'.format(name))
return chron
@cached('clear_cache')
def get_irradiation_holder_holes(self, name, **kw):
return irradiation_geometry_holes(name)
@cached('clear_cache')
def get_load_holder_holes(self, name, **kw):
p = os.path.join(paths.meta_root, 'load_holders', add_extension(name))
holder = LoadGeometry(p)
return holder.holes
@property
def sensitivity_path(self):
return os.path.join(paths.meta_root, 'sensitivity.json')
# private
def _get_level_positions(self, irrad, level):
p = self.get_level_path(irrad, level)
obj = dvc_load(p)
if isinstance(obj, list):
positions = obj
else:
positions = obj.get('positions', [])
return positions
def _update_text(self, tag, name, path_or_blob):
if not name:
self.debug('cannot update text with no name. tag={} name={}'.format(tag, name))
return
root = os.path.join(paths.meta_root, tag)
if not os.path.isdir(root):
r_mkdir(root)
p = os.path.join(root, name)
if os.path.isfile(path_or_blob):
shutil.copyfile(path_or_blob, p)
else:
with open(p, 'w') as wfile:
wfile.write(path_or_blob)
self.add(p, commit=False)
# ============= EOF =============================================
| 34.158228
| 116
| 0.564712
|
import os
import shutil
from datetime import datetime
from traits.api import Bool
from uncertainties import ufloat
from pychron.core.helpers.datetime_tools import ISO_FORMAT_STR
from pychron.core.helpers.filetools import glob_list_directory, add_extension, \
list_directory
from pychron.dvc import dvc_dump, dvc_load, repository_path, list_frozen_productions
from pychron.dvc.meta_object import IrradiationGeometry, Chronology, Production, cached, Gains, LoadGeometry, \
MetaObjectException
from pychron.git_archive.repo_manager import GitRepoManager
from pychron.paths import paths, r_mkdir
from pychron.pychron_constants import INTERFERENCE_KEYS, RATIO_KEYS, DEFAULT_MONITOR_NAME, DATE_FORMAT, NULL_STR
def irradiation_geometry(name):
p = os.path.join(paths.meta_root, 'irradiation_holders', add_extension(name))
return IrradiationGeometry(p)
def irradiation_geometry_holes(name):
geom = irradiation_geometry(name)
return geom.holes
def irradiation_chronology(name, allow_null=False):
p = os.path.join(paths.meta_root, name, 'chronology.txt')
return Chronology(p, allow_null=allow_null)
def dump_chronology(path, doses):
if doses is None:
doses = []
with open(path, 'w') as wfile:
for p, s, e in doses:
if not isinstance(s, str):
s = s.strftime(ISO_FORMAT_STR)
            if not isinstance(e, str):
                e = e.strftime(ISO_FORMAT_STR)
if not isinstance(p, str):
p = '{:0.3f}'.format(p)
line = '{},{},{}\n'.format(p, s, e)
wfile.write(line)
def gain_path(name):
root = os.path.join(paths.meta_root, 'spectrometers')
if not os.path.isdir(root):
os.mkdir(root)
p = os.path.join(root, add_extension('{}.gain'.format(name), '.json'))
return p
def get_frozen_productions(repo):
prods = {}
for name, path in list_frozen_productions(repo):
prods[name] = Production(path)
return prods
def get_frozen_flux(repo, irradiation):
path = repository_path(repo, '{}.json'.format(irradiation))
fd = {}
if path:
fd = dvc_load(path)
for fi in fd.values():
fi['j'] = ufloat(*fi['j'], tag='J')
return fd
class MetaRepo(GitRepoManager):
clear_cache = Bool
def get_monitor_info(self, irrad, level):
age, decay = NULL_STR, NULL_STR
positions = self._get_level_positions(irrad, level)
if positions:
position = positions[0]
opt = position.get('options')
if opt:
age = position.get('monitor_age', NULL_STR)
decayd = position.get('decay_constants')
if decayd:
decay = decayd.get('lambda_k_total', NULL_STR)
return str(age), str(decay)
def add_unstaged(self, *args, **kw):
super(MetaRepo, self).add_unstaged(self.path, **kw)
def save_gains(self, ms, gains_dict):
p = gain_path(ms)
dvc_dump(gains_dict, p)
if self.add_paths(p):
self.commit('Updated gains')
def update_script(self, rootname, name, path_or_blob):
self._update_text(os.path.join('scripts', rootname.lower()), name, path_or_blob)
def update_experiment_queue(self, rootname, name, path_or_blob):
self._update_text(os.path.join('experiments', rootname.lower()), name, path_or_blob)
def update_level_production(self, irrad, name, prname, note=None):
prname = prname.replace(' ', '_')
pathname = add_extension(prname, '.json')
src = os.path.join(paths.meta_root, irrad, 'productions', pathname)
if os.path.isfile(src):
self.update_productions(irrad, name, prname, note=note)
else:
            self.warning_dialog('Invalid production name {}'.format(prname))
def update_level_monitor(self, irradiation, level, monitor_name, monitor_material, monitor_age, lambda_k):
path = self.get_level_path(irradiation, level)
obj = dvc_load(path)
positions = self._get_level_positions(irradiation, level)
options = {'monitor_name': monitor_name,
'monitor_material': monitor_material,
'monitor_age': monitor_age}
decay_constants = {'lambda_k_total': lambda_k, 'lambda_k_total_error': 0}
for p in positions:
p['options'] = options
p['decay_constants'] = decay_constants
obj['positions'] = positions
dvc_dump(obj, path)
def add_production_to_irradiation(self, irrad, name, params, add=True, commit=False):
self.debug('adding production {} to irradiation={}'.format(name, irrad))
p = os.path.join(paths.meta_root, irrad, 'productions', add_extension(name, '.json'))
prod = Production(p, new=not os.path.isfile(p))
prod.update(params)
prod.dump()
if add:
self.add(p, commit=commit)
def add_production(self, irrad, name, obj, commit=False, add=True):
p = self.get_production(irrad, name, force=True)
p.attrs = attrs = INTERFERENCE_KEYS + RATIO_KEYS
kef = lambda x: '{}_err'.format(x)
if obj:
def values():
return ((k, getattr(obj, k), kef(k), getattr(obj, kef(k))) for k in attrs)
else:
def values():
return ((k, 0, kef(k), 0) for k in attrs)
for k, v, ke, e in values():
setattr(p, k, v)
setattr(p, ke, e)
p.dump()
if add:
self.add(p.path, commit=commit)
def update_production(self, prod, irradiation=None):
ip = self.get_production(prod.name)
self.debug('saving production {}'.format(prod.name))
params = prod.get_params()
for k, v in params.items():
self.debug('setting {}={}'.format(k, v))
setattr(ip, k, v)
ip.note = prod.note
self.add(ip.path, commit=False)
self.commit('updated production {}'.format(prod.name))
def update_productions(self, irrad, level, production, note=None, add=True):
p = os.path.join(paths.meta_root, irrad, 'productions.json')
obj = dvc_load(p)
        obj['note'] = str(note) if note else ''
if level in obj:
if obj[level] != production:
self.debug('setting production to irrad={}, level={}, prod={}'.format(irrad, level, production))
obj[level] = production
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
else:
obj[level] = production
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
def set_identifier(self, irradiation, level, pos, identifier):
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
positions = self._get_level_positions(irradiation, level)
d = next((p for p in positions if p['position'] == pos), None)
if d:
d['identifier'] = identifier
jd['positions'] = positions
dvc_dump(jd, p)
self.add(p, commit=False)
def get_level_path(self, irrad, level):
return os.path.join(paths.meta_root, irrad, '{}.json'.format(level))
def add_level(self, irrad, level, add=True):
p = self.get_level_path(irrad, level)
lv = dict(z=0, positions=[])
dvc_dump(lv, p)
if add:
self.add(p, commit=False)
def add_chronology(self, irrad, doses, add=True):
p = os.path.join(paths.meta_root, irrad, 'chronology.txt')
dump_chronology(p, doses)
if add:
self.add(p, commit=False)
def add_irradiation(self, name):
p = os.path.join(paths.meta_root, name)
if not os.path.isdir(p):
os.mkdir(p)
def add_position(self, irradiation, level, pos, add=True):
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
if isinstance(jd, list):
positions = jd
z = 0
else:
positions = jd.get('positions', [])
z = jd.get('z', 0)
pd = next((p for p in positions if p['position'] == pos), None)
if pd is None:
positions.append({'position': pos, 'decay_constants': {}})
dvc_dump({'z': z, 'positions': positions}, p)
if add:
self.add(p, commit=False)
def add_irradiation_geometry_file(self, path):
try:
holder = IrradiationGeometry(path)
if not holder.holes:
raise BaseException
except BaseException:
self.warning_dialog('Invalid Irradiation Geometry file. Failed to import')
return
self.smart_pull()
root = os.path.join(paths.meta_root, 'irradiation_holders')
if not os.path.isdir(root):
os.mkdir(root)
name = os.path.basename(path)
dest = os.path.join(root, name)
shutil.copyfile(path, dest)
self.add(dest, commit=False)
self.commit('added irradiation geometry file {}'.format(name))
self.push()
self.information_dialog('Irradiation Geometry "{}" added'.format(name))
def get_load_holders(self):
p = os.path.join(paths.meta_root, 'load_holders')
return list_directory(p, extension='.txt', remove_extension=True)
def add_load_holder(self, name, path_or_txt, commit=False, add=True):
p = os.path.join(paths.meta_root, 'load_holders', name)
if os.path.isfile(path_or_txt):
shutil.copyfile(path_or_txt, p)
else:
with open(p, 'w') as wfile:
wfile.write(path_or_txt)
if add:
self.add(p, commit=commit)
def update_level_z(self, irradiation, level, z):
p = self.get_level_path(irradiation, level)
obj = dvc_load(p)
try:
add = obj['z'] != z
obj['z'] = z
except TypeError:
obj = {'z': z, 'positions': obj}
add = True
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
def remove_irradiation_position(self, irradiation, level, hole):
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
if jd:
if isinstance(jd, list):
positions = jd
z = 0
else:
positions = jd['positions']
z = jd['z']
npositions = [ji for ji in positions if not ji['position'] == hole]
obj = {'z': z, 'positions': npositions}
dvc_dump(obj, p)
self.add(p, commit=False)
def new_flux_positions(self, irradiation, level, positions, add=True):
p = self.get_level_path(irradiation, level)
obj = {'positions': positions, 'z': 0}
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
def update_fluxes(self, irradiation, level, j, e, add=True):
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
if isinstance(jd, list):
positions = jd
else:
positions = jd.get('positions')
if positions:
for ip in positions:
ip['j'] = j
ip['j_err'] = e
dvc_dump(jd, p)
if add:
self.add(p, commit=False)
def update_flux(self, irradiation, level, pos, identifier, j, e, mj, me, decay=None,
position_jerr=None,
analyses=None, options=None, add=True):
if options is None:
options = {}
if decay is None:
decay = {}
if analyses is None:
analyses = []
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
if isinstance(jd, list):
positions = jd
z = 0
else:
positions = jd.get('positions', [])
z = jd.get('z', 0)
npos = {'position': pos, 'j': j, 'j_err': e,
'mean_j': mj, 'mean_j_err': me,
'position_jerr': position_jerr,
'decay_constants': decay,
'identifier': identifier,
'options': options,
'analyses': [{'uuid': ai.uuid,
'record_id': ai.record_id,
'is_omitted': ai.is_omitted()}
for ai in analyses]}
if positions:
added = any((ji['position'] == pos for ji in positions))
npositions = [ji if ji['position'] != pos else npos for ji in positions]
if not added:
npositions.append(npos)
else:
npositions = [npos]
obj = {'z': z, 'positions': npositions}
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
def update_chronology(self, name, doses):
p = os.path.join(paths.meta_root, name, 'chronology.txt')
dump_chronology(p, doses)
self.add(p, commit=False)
def get_irradiation_holder_names(self):
return glob_list_directory(os.path.join(paths.meta_root, 'irradiation_holders'),
extension='.txt',
remove_extension=True)
def get_cocktail_irradiation(self):
p = os.path.join(paths.meta_root, 'cocktail.json')
ret = dvc_load(p)
nret = {}
if ret:
lines = ['1.0, {}, {}'.format(ret['chronology'], ret['chronology'])]
c = Chronology.from_lines(lines)
nret['chronology'] = c
nret['flux'] = ufloat(ret['j'], ret['j_err'])
return nret
def get_default_productions(self):
p = os.path.join(paths.meta_root, 'reactors.json')
if not os.path.isfile(p):
with open(p, 'w') as wfile:
from pychron.file_defaults import REACTORS_DEFAULT
wfile.write(REACTORS_DEFAULT)
return dvc_load(p)
def get_flux_positions(self, irradiation, level):
positions = self._get_level_positions(irradiation, level)
return positions
def get_flux(self, irradiation, level, position):
positions = self.get_flux_positions(irradiation, level)
return self.get_flux_from_positions(position, positions)
def get_flux_from_positions(self, position, positions):
j, je, pe, lambda_k = 0, 0, 0, None
monitor_name, monitor_material, monitor_age = DEFAULT_MONITOR_NAME, 'sanidine', ufloat(28.201, 0)
if positions:
pos = next((p for p in positions if p['position'] == position), None)
if pos:
j, je, pe = pos.get('j', 0), pos.get('j_err', 0), pos.get('position_jerr', 0)
dc = pos.get('decay_constants')
if dc:
if isinstance(dc, float):
v, e = dc, 0
else:
v, e = dc.get('lambda_k_total', 0), dc.get('lambda_k_total_error', 0)
lambda_k = ufloat(v, e)
mon = pos.get('monitor')
if mon:
monitor_name = mon.get('name', DEFAULT_MONITOR_NAME)
sa = mon.get('age', 28.201)
se = mon.get('error', 0)
monitor_age = ufloat(sa, se, tag='monitor_age')
monitor_material = mon.get('material', 'sanidine')
fd = {'j': ufloat(j, je, tag='J'),
'position_jerr': pe,
'lambda_k': lambda_k,
'monitor_name': monitor_name,
'monitor_material': monitor_material,
'monitor_age': monitor_age}
return fd
def get_gains(self, name):
g = self.get_gain_obj(name)
return g.gains
def save_sensitivities(self, sens):
ps = []
for k, v in sens.items():
root = os.path.join(paths.meta_root, 'spectrometers')
p = os.path.join(root, add_extension('{}.sens'.format(k), '.json'))
dvc_dump(v, p)
ps.append(p)
if self.add_paths(ps):
self.commit('Updated sensitivity')
def get_sensitivities(self):
specs = {}
root = os.path.join(paths.meta_root, 'spectrometers')
for p in list_directory(root):
if p.endswith('.sens.json'):
name = p.split('.')[0]
p = os.path.join(root, p)
obj = dvc_load(p)
for r in obj:
if r['create_date']:
r['create_date'] = datetime.strptime(r['create_date'], DATE_FORMAT)
specs[name] = obj
return specs
def get_sensitivity(self, name):
sens = self.get_sensitivities()
spec = sens.get(name)
v = 1
if spec:
record = spec[-1]
v = record.get('sensitivity', 1)
return v
@cached('clear_cache')
def get_gain_obj(self, name, **kw):
p = gain_path(name)
return Gains(p)
def get_production(self, irrad, level, allow_null=False, **kw):
path = os.path.join(paths.meta_root, irrad, 'productions.json')
obj = dvc_load(path)
pname = obj.get(level, '')
p = os.path.join(paths.meta_root, irrad, 'productions', add_extension(pname, ext='.json'))
ip = Production(p, allow_null=allow_null)
return pname, ip
def get_chronology(self, name, allow_null=False, **kw):
chron = None
try:
chron = irradiation_chronology(name, allow_null=allow_null)
if self.application:
chron.use_irradiation_endtime = self.application.get_boolean_preference(
'pychron.arar.constants.use_irradiation_endtime', False)
except MetaObjectException:
if name != 'NoIrradiation':
self.warning('Could not locate the irradiation chronology "{}"'.format(name))
return chron
@cached('clear_cache')
def get_irradiation_holder_holes(self, name, **kw):
return irradiation_geometry_holes(name)
@cached('clear_cache')
def get_load_holder_holes(self, name, **kw):
p = os.path.join(paths.meta_root, 'load_holders', add_extension(name))
holder = LoadGeometry(p)
return holder.holes
@property
def sensitivity_path(self):
return os.path.join(paths.meta_root, 'sensitivity.json')
def _get_level_positions(self, irrad, level):
p = self.get_level_path(irrad, level)
obj = dvc_load(p)
if isinstance(obj, list):
positions = obj
else:
positions = obj.get('positions', [])
return positions
def _update_text(self, tag, name, path_or_blob):
if not name:
self.debug('cannot update text with no name. tag={} name={}'.format(tag, name))
return
root = os.path.join(paths.meta_root, tag)
if not os.path.isdir(root):
r_mkdir(root)
p = os.path.join(root, name)
if os.path.isfile(path_or_blob):
shutil.copyfile(path_or_blob, p)
else:
with open(p, 'w') as wfile:
wfile.write(path_or_blob)
self.add(p, commit=False)
| true
| true
|
790737db1381878cbc695a4d2c9de9929b8ed5ff
| 16,969
|
py
|
Python
|
tests/test_ec2/test_tags.py
|
monty16597/moto
|
cf2e6fc6dffedc34aa9b70d820f8f20908a52a8f
|
[
"Apache-2.0"
] | null | null | null |
tests/test_ec2/test_tags.py
|
monty16597/moto
|
cf2e6fc6dffedc34aa9b70d820f8f20908a52a8f
|
[
"Apache-2.0"
] | null | null | null |
tests/test_ec2/test_tags.py
|
monty16597/moto
|
cf2e6fc6dffedc34aa9b70d820f8f20908a52a8f
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
import pytest
import itertools
import boto
import boto3
from botocore.exceptions import ClientError
from boto.exception import EC2ResponseError
from boto.ec2.instance import Reservation
import sure # noqa
from moto import mock_ec2_deprecated, mock_ec2
from tests import EXAMPLE_AMI_ID
@mock_ec2_deprecated
def test_add_tag():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
with pytest.raises(EC2ResponseError) as ex:
instance.add_tag("a key", "some value", dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set"
)
instance.add_tag("a key", "some value")
chain = itertools.chain.from_iterable
existing_instances = list(
chain([res.instances for res in conn.get_all_reservations()])
)
existing_instances.should.have.length_of(1)
existing_instance = existing_instances[0]
existing_instance.tags["a key"].should.equal("some value")
@mock_ec2_deprecated
def test_remove_tag():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.add_tag("a key", "some value")
tags = conn.get_all_tags()
tag = tags[0]
tag.name.should.equal("a key")
tag.value.should.equal("some value")
with pytest.raises(EC2ResponseError) as ex:
instance.remove_tag("a key", dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the DeleteTags operation: Request would have succeeded, but DryRun flag is set"
)
instance.remove_tag("a key")
conn.get_all_tags().should.have.length_of(0)
instance.add_tag("a key", "some value")
conn.get_all_tags().should.have.length_of(1)
instance.remove_tag("a key", "some value")
@mock_ec2_deprecated
def test_get_all_tags():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.add_tag("a key", "some value")
tags = conn.get_all_tags()
tag = tags[0]
tag.name.should.equal("a key")
tag.value.should.equal("some value")
@mock_ec2_deprecated
def test_get_all_tags_with_special_characters():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.add_tag("a key", "some<> value")
tags = conn.get_all_tags()
tag = tags[0]
tag.name.should.equal("a key")
tag.value.should.equal("some<> value")
@mock_ec2_deprecated
def test_create_tags():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
tag_dict = {
"a key": "some value",
"another key": "some other value",
"blank key": "",
}
with pytest.raises(EC2ResponseError) as ex:
conn.create_tags(instance.id, tag_dict, dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set"
)
conn.create_tags(instance.id, tag_dict)
tags = conn.get_all_tags()
set([key for key in tag_dict]).should.equal(set([tag.name for tag in tags]))
set([tag_dict[key] for key in tag_dict]).should.equal(
set([tag.value for tag in tags])
)
@mock_ec2_deprecated
def test_tag_limit_exceeded():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
tag_dict = {}
for i in range(51):
tag_dict["{0:02d}".format(i + 1)] = ""
with pytest.raises(EC2ResponseError) as cm:
conn.create_tags(instance.id, tag_dict)
cm.value.code.should.equal("TagLimitExceeded")
cm.value.status.should.equal(400)
# cm.value.request_id.should_not.be.none
instance.add_tag("a key", "a value")
with pytest.raises(EC2ResponseError) as cm:
conn.create_tags(instance.id, tag_dict)
cm.value.code.should.equal("TagLimitExceeded")
cm.value.status.should.equal(400)
# cm.value.request_id.should_not.be.none
tags = conn.get_all_tags()
tag = tags[0]
tags.should.have.length_of(1)
tag.name.should.equal("a key")
tag.value.should.equal("a value")
@mock_ec2_deprecated
def test_invalid_parameter_tag_null():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
with pytest.raises(EC2ResponseError) as cm:
instance.add_tag("a key", None)
cm.value.code.should.equal("InvalidParameterValue")
cm.value.status.should.equal(400)
# cm.value.request_id.should_not.be.none
@mock_ec2_deprecated
def test_invalid_id():
conn = boto.connect_ec2("the_key", "the_secret")
with pytest.raises(EC2ResponseError) as cm:
conn.create_tags("ami-blah", {"key": "tag"})
cm.value.code.should.equal("InvalidID")
cm.value.status.should.equal(400)
# cm.value.request_id.should_not.be.none
with pytest.raises(EC2ResponseError) as cm:
conn.create_tags("blah-blah", {"key": "tag"})
cm.value.code.should.equal("InvalidID")
cm.value.status.should.equal(400)
# cm.value.request_id.should_not.be.none
@mock_ec2_deprecated
def test_get_all_tags_resource_id_filter():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.add_tag("an instance key", "some value")
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
image = conn.get_image(image_id)
image.add_tag("an image key", "some value")
tags = conn.get_all_tags(filters={"resource-id": instance.id})
tag = tags[0]
tags.should.have.length_of(1)
tag.res_id.should.equal(instance.id)
tag.res_type.should.equal("instance")
tag.name.should.equal("an instance key")
tag.value.should.equal("some value")
tags = conn.get_all_tags(filters={"resource-id": image_id})
tag = tags[0]
tags.should.have.length_of(1)
tag.res_id.should.equal(image_id)
tag.res_type.should.equal("image")
tag.name.should.equal("an image key")
tag.value.should.equal("some value")
@mock_ec2_deprecated
def test_get_all_tags_resource_type_filter():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.add_tag("an instance key", "some value")
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
image = conn.get_image(image_id)
image.add_tag("an image key", "some value")
tags = conn.get_all_tags(filters={"resource-type": "instance"})
tag = tags[0]
tags.should.have.length_of(1)
tag.res_id.should.equal(instance.id)
tag.res_type.should.equal("instance")
tag.name.should.equal("an instance key")
tag.value.should.equal("some value")
tags = conn.get_all_tags(filters={"resource-type": "image"})
tag = tags[0]
tags.should.have.length_of(1)
tag.res_id.should.equal(image_id)
tag.res_type.should.equal("image")
tag.name.should.equal("an image key")
tag.value.should.equal("some value")
@mock_ec2_deprecated
def test_get_all_tags_key_filter():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.add_tag("an instance key", "some value")
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
image = conn.get_image(image_id)
image.add_tag("an image key", "some value")
tags = conn.get_all_tags(filters={"key": "an instance key"})
tag = tags[0]
tags.should.have.length_of(1)
tag.res_id.should.equal(instance.id)
tag.res_type.should.equal("instance")
tag.name.should.equal("an instance key")
tag.value.should.equal("some value")
@mock_ec2_deprecated
def test_get_all_tags_value_filter():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.add_tag("an instance key", "some value")
reservation_b = conn.run_instances(EXAMPLE_AMI_ID)
instance_b = reservation_b.instances[0]
instance_b.add_tag("an instance key", "some other value")
reservation_c = conn.run_instances(EXAMPLE_AMI_ID)
instance_c = reservation_c.instances[0]
instance_c.add_tag("an instance key", "other value*")
reservation_d = conn.run_instances(EXAMPLE_AMI_ID)
instance_d = reservation_d.instances[0]
instance_d.add_tag("an instance key", "other value**")
reservation_e = conn.run_instances(EXAMPLE_AMI_ID)
instance_e = reservation_e.instances[0]
instance_e.add_tag("an instance key", "other value*?")
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
image = conn.get_image(image_id)
image.add_tag("an image key", "some value")
tags = conn.get_all_tags(filters={"value": "some value"})
tags.should.have.length_of(2)
tags = conn.get_all_tags(filters={"value": "some*value"})
tags.should.have.length_of(3)
tags = conn.get_all_tags(filters={"value": "*some*value"})
tags.should.have.length_of(3)
tags = conn.get_all_tags(filters={"value": "*some*value*"})
tags.should.have.length_of(3)
tags = conn.get_all_tags(filters={"value": r"*value\*"})
tags.should.have.length_of(1)
tags = conn.get_all_tags(filters={"value": r"*value\*\*"})
tags.should.have.length_of(1)
tags = conn.get_all_tags(filters={"value": r"*value\*\?"})
tags.should.have.length_of(1)
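# Illustrative note (not part of the original tests): EC2 tag filters treat "*" and
# "?" as wildcards, so the escaped patterns above (e.g. r"*value\*") match a literal
# trailing asterisk, which is why each escaped filter matches exactly one of the
# tags created in this test.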
@mock_ec2_deprecated
def test_retrieved_instances_must_contain_their_tags():
tag_key = "Tag name"
tag_value = "Tag value"
tags_to_be_set = {tag_key: tag_value}
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
reservation.should.be.a(Reservation)
reservation.instances.should.have.length_of(1)
instance = reservation.instances[0]
reservations = conn.get_all_reservations()
reservations.should.have.length_of(1)
reservations[0].id.should.equal(reservation.id)
instances = reservations[0].instances
instances.should.have.length_of(1)
instances[0].id.should.equal(instance.id)
conn.create_tags([instance.id], tags_to_be_set)
reservations = conn.get_all_reservations()
instance = reservations[0].instances[0]
retrieved_tags = instance.tags
# Cleanup of instance
conn.terminate_instances([instances[0].id])
# Check whether tag is present with correct value
retrieved_tags[tag_key].should.equal(tag_value)
@mock_ec2_deprecated
def test_retrieved_volumes_must_contain_their_tags():
tag_key = "Tag name"
tag_value = "Tag value"
tags_to_be_set = {tag_key: tag_value}
conn = boto.connect_ec2("the_key", "the_secret")
volume = conn.create_volume(80, "us-east-1a")
all_volumes = conn.get_all_volumes()
volume = all_volumes[0]
conn.create_tags([volume.id], tags_to_be_set)
# Fetch the volume again
all_volumes = conn.get_all_volumes()
volume = all_volumes[0]
retrieved_tags = volume.tags
volume.delete()
# Check whether tag is present with correct value
retrieved_tags[tag_key].should.equal(tag_value)
@mock_ec2_deprecated
def test_retrieved_snapshots_must_contain_their_tags():
tag_key = "Tag name"
tag_value = "Tag value"
tags_to_be_set = {tag_key: tag_value}
conn = boto.connect_ec2(
aws_access_key_id="the_key", aws_secret_access_key="the_secret"
)
volume = conn.create_volume(80, "eu-west-1a")
snapshot = conn.create_snapshot(volume.id)
conn.create_tags([snapshot.id], tags_to_be_set)
# Fetch the snapshot again
all_snapshots = conn.get_all_snapshots()
snapshot = [item for item in all_snapshots if item.id == snapshot.id][0]
retrieved_tags = snapshot.tags
conn.delete_snapshot(snapshot.id)
volume.delete()
# Check whether tag is present with correct value
retrieved_tags[tag_key].should.equal(tag_value)
@mock_ec2_deprecated
def test_filter_instances_by_wildcard_tags():
conn = boto.connect_ec2(
aws_access_key_id="the_key", aws_secret_access_key="the_secret"
)
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance_a = reservation.instances[0]
instance_a.add_tag("Key1", "Value1")
reservation_b = conn.run_instances(EXAMPLE_AMI_ID)
instance_b = reservation_b.instances[0]
instance_b.add_tag("Key1", "Value2")
reservations = conn.get_all_reservations(filters={"tag:Key1": "Value*"})
reservations.should.have.length_of(2)
reservations = conn.get_all_reservations(filters={"tag-key": "Key*"})
reservations.should.have.length_of(2)
reservations = conn.get_all_reservations(filters={"tag-value": "Value*"})
reservations.should.have.length_of(2)
@mock_ec2
def test_create_volume_with_tags():
client = boto3.client("ec2", "us-west-2")
response = client.create_volume(
AvailabilityZone="us-west-2",
Encrypted=False,
Size=40,
TagSpecifications=[
{
"ResourceType": "volume",
"Tags": [{"Key": "TEST_TAG", "Value": "TEST_VALUE"}],
}
],
)
assert response["Tags"][0]["Key"] == "TEST_TAG"
@mock_ec2
def test_create_snapshot_with_tags():
client = boto3.client("ec2", "us-west-2")
volume_id = client.create_volume(
AvailabilityZone="us-west-2",
Encrypted=False,
Size=40,
TagSpecifications=[
{
"ResourceType": "volume",
"Tags": [{"Key": "TEST_TAG", "Value": "TEST_VALUE"}],
}
],
)["VolumeId"]
snapshot = client.create_snapshot(
VolumeId=volume_id,
TagSpecifications=[
{
"ResourceType": "snapshot",
"Tags": [{"Key": "TEST_SNAPSHOT_TAG", "Value": "TEST_SNAPSHOT_VALUE"}],
}
],
)
expected_tags = [{"Key": "TEST_SNAPSHOT_TAG", "Value": "TEST_SNAPSHOT_VALUE"}]
assert snapshot["Tags"] == expected_tags
@mock_ec2
def test_create_tag_empty_resource():
# create ec2 client in us-west-1
client = boto3.client("ec2", region_name="us-west-1")
# create tag with empty resource
with pytest.raises(ClientError) as ex:
client.create_tags(Resources=[], Tags=[{"Key": "Value"}])
ex.value.response["Error"]["Code"].should.equal("MissingParameter")
ex.value.response["Error"]["Message"].should.equal(
"The request must contain the parameter resourceIdSet"
)
@mock_ec2
def test_delete_tag_empty_resource():
# create ec2 client in us-west-1
client = boto3.client("ec2", region_name="us-west-1")
# delete tag with empty resource
with pytest.raises(ClientError) as ex:
client.delete_tags(Resources=[], Tags=[{"Key": "Value"}])
ex.value.response["Error"]["Code"].should.equal("MissingParameter")
ex.value.response["Error"]["Message"].should.equal(
"The request must contain the parameter resourceIdSet"
)
@mock_ec2
def test_retrieve_resource_with_multiple_tags():
ec2 = boto3.resource("ec2", region_name="us-west-1")
blue, green = ec2.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=2, MaxCount=2)
ec2.create_tags(
Resources=[blue.instance_id],
Tags=[
{"Key": "environment", "Value": "blue"},
{"Key": "application", "Value": "api"},
],
)
ec2.create_tags(
Resources=[green.instance_id],
Tags=[
{"Key": "environment", "Value": "green"},
{"Key": "application", "Value": "api"},
],
)
green_instances = list(ec2.instances.filter(Filters=(get_filter("green"))))
green_instances.should.equal([green])
blue_instances = list(ec2.instances.filter(Filters=(get_filter("blue"))))
blue_instances.should.equal([blue])
def get_filter(color):
return [
{"Name": "tag-key", "Values": ["application"]},
{"Name": "tag-value", "Values": ["api"]},
{"Name": "tag-key", "Values": ["environment"]},
{"Name": "tag-value", "Values": [color]},
]
| 33.60198
| 137
| 0.687194
|
from __future__ import unicode_literals
import pytest
import itertools
import boto
import boto3
from botocore.exceptions import ClientError
from boto.exception import EC2ResponseError
from boto.ec2.instance import Reservation
import sure
from moto import mock_ec2_deprecated, mock_ec2
from tests import EXAMPLE_AMI_ID
@mock_ec2_deprecated
def test_add_tag():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
with pytest.raises(EC2ResponseError) as ex:
instance.add_tag("a key", "some value", dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set"
)
instance.add_tag("a key", "some value")
chain = itertools.chain.from_iterable
existing_instances = list(
chain([res.instances for res in conn.get_all_reservations()])
)
existing_instances.should.have.length_of(1)
existing_instance = existing_instances[0]
existing_instance.tags["a key"].should.equal("some value")
@mock_ec2_deprecated
def test_remove_tag():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.add_tag("a key", "some value")
tags = conn.get_all_tags()
tag = tags[0]
tag.name.should.equal("a key")
tag.value.should.equal("some value")
with pytest.raises(EC2ResponseError) as ex:
instance.remove_tag("a key", dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the DeleteTags operation: Request would have succeeded, but DryRun flag is set"
)
instance.remove_tag("a key")
conn.get_all_tags().should.have.length_of(0)
instance.add_tag("a key", "some value")
conn.get_all_tags().should.have.length_of(1)
instance.remove_tag("a key", "some value")
@mock_ec2_deprecated
def test_get_all_tags():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.add_tag("a key", "some value")
tags = conn.get_all_tags()
tag = tags[0]
tag.name.should.equal("a key")
tag.value.should.equal("some value")
@mock_ec2_deprecated
def test_get_all_tags_with_special_characters():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.add_tag("a key", "some<> value")
tags = conn.get_all_tags()
tag = tags[0]
tag.name.should.equal("a key")
tag.value.should.equal("some<> value")
@mock_ec2_deprecated
def test_create_tags():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
tag_dict = {
"a key": "some value",
"another key": "some other value",
"blank key": "",
}
with pytest.raises(EC2ResponseError) as ex:
conn.create_tags(instance.id, tag_dict, dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set"
)
conn.create_tags(instance.id, tag_dict)
tags = conn.get_all_tags()
set([key for key in tag_dict]).should.equal(set([tag.name for tag in tags]))
set([tag_dict[key] for key in tag_dict]).should.equal(
set([tag.value for tag in tags])
)
@mock_ec2_deprecated
def test_tag_limit_exceeded():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
tag_dict = {}
for i in range(51):
tag_dict["{0:02d}".format(i + 1)] = ""
with pytest.raises(EC2ResponseError) as cm:
conn.create_tags(instance.id, tag_dict)
cm.value.code.should.equal("TagLimitExceeded")
cm.value.status.should.equal(400)
instance.add_tag("a key", "a value")
with pytest.raises(EC2ResponseError) as cm:
conn.create_tags(instance.id, tag_dict)
cm.value.code.should.equal("TagLimitExceeded")
cm.value.status.should.equal(400)
tags = conn.get_all_tags()
tag = tags[0]
tags.should.have.length_of(1)
tag.name.should.equal("a key")
tag.value.should.equal("a value")
@mock_ec2_deprecated
def test_invalid_parameter_tag_null():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
with pytest.raises(EC2ResponseError) as cm:
instance.add_tag("a key", None)
cm.value.code.should.equal("InvalidParameterValue")
cm.value.status.should.equal(400)
@mock_ec2_deprecated
def test_invalid_id():
conn = boto.connect_ec2("the_key", "the_secret")
with pytest.raises(EC2ResponseError) as cm:
conn.create_tags("ami-blah", {"key": "tag"})
cm.value.code.should.equal("InvalidID")
cm.value.status.should.equal(400)
with pytest.raises(EC2ResponseError) as cm:
conn.create_tags("blah-blah", {"key": "tag"})
cm.value.code.should.equal("InvalidID")
cm.value.status.should.equal(400)
@mock_ec2_deprecated
def test_get_all_tags_resource_id_filter():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.add_tag("an instance key", "some value")
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
image = conn.get_image(image_id)
image.add_tag("an image key", "some value")
tags = conn.get_all_tags(filters={"resource-id": instance.id})
tag = tags[0]
tags.should.have.length_of(1)
tag.res_id.should.equal(instance.id)
tag.res_type.should.equal("instance")
tag.name.should.equal("an instance key")
tag.value.should.equal("some value")
tags = conn.get_all_tags(filters={"resource-id": image_id})
tag = tags[0]
tags.should.have.length_of(1)
tag.res_id.should.equal(image_id)
tag.res_type.should.equal("image")
tag.name.should.equal("an image key")
tag.value.should.equal("some value")
@mock_ec2_deprecated
def test_get_all_tags_resource_type_filter():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.add_tag("an instance key", "some value")
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
image = conn.get_image(image_id)
image.add_tag("an image key", "some value")
tags = conn.get_all_tags(filters={"resource-type": "instance"})
tag = tags[0]
tags.should.have.length_of(1)
tag.res_id.should.equal(instance.id)
tag.res_type.should.equal("instance")
tag.name.should.equal("an instance key")
tag.value.should.equal("some value")
tags = conn.get_all_tags(filters={"resource-type": "image"})
tag = tags[0]
tags.should.have.length_of(1)
tag.res_id.should.equal(image_id)
tag.res_type.should.equal("image")
tag.name.should.equal("an image key")
tag.value.should.equal("some value")
@mock_ec2_deprecated
def test_get_all_tags_key_filter():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.add_tag("an instance key", "some value")
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
image = conn.get_image(image_id)
image.add_tag("an image key", "some value")
tags = conn.get_all_tags(filters={"key": "an instance key"})
tag = tags[0]
tags.should.have.length_of(1)
tag.res_id.should.equal(instance.id)
tag.res_type.should.equal("instance")
tag.name.should.equal("an instance key")
tag.value.should.equal("some value")
@mock_ec2_deprecated
def test_get_all_tags_value_filter():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.add_tag("an instance key", "some value")
reservation_b = conn.run_instances(EXAMPLE_AMI_ID)
instance_b = reservation_b.instances[0]
instance_b.add_tag("an instance key", "some other value")
reservation_c = conn.run_instances(EXAMPLE_AMI_ID)
instance_c = reservation_c.instances[0]
instance_c.add_tag("an instance key", "other value*")
reservation_d = conn.run_instances(EXAMPLE_AMI_ID)
instance_d = reservation_d.instances[0]
instance_d.add_tag("an instance key", "other value**")
reservation_e = conn.run_instances(EXAMPLE_AMI_ID)
instance_e = reservation_e.instances[0]
instance_e.add_tag("an instance key", "other value*?")
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
image = conn.get_image(image_id)
image.add_tag("an image key", "some value")
tags = conn.get_all_tags(filters={"value": "some value"})
tags.should.have.length_of(2)
tags = conn.get_all_tags(filters={"value": "some*value"})
tags.should.have.length_of(3)
tags = conn.get_all_tags(filters={"value": "*some*value"})
tags.should.have.length_of(3)
tags = conn.get_all_tags(filters={"value": "*some*value*"})
tags.should.have.length_of(3)
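# In EC2 filter values "*" and "?" act as wildcards; a backslash escapes them,
# so r"*value\*" below only matches tag values that end in a literal "*".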
tags = conn.get_all_tags(filters={"value": r"*value\*"})
tags.should.have.length_of(1)
tags = conn.get_all_tags(filters={"value": r"*value\*\*"})
tags.should.have.length_of(1)
tags = conn.get_all_tags(filters={"value": r"*value\*\?"})
tags.should.have.length_of(1)
@mock_ec2_deprecated
def test_retrieved_instances_must_contain_their_tags():
tag_key = "Tag name"
tag_value = "Tag value"
tags_to_be_set = {tag_key: tag_value}
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
reservation.should.be.a(Reservation)
reservation.instances.should.have.length_of(1)
instance = reservation.instances[0]
reservations = conn.get_all_reservations()
reservations.should.have.length_of(1)
reservations[0].id.should.equal(reservation.id)
instances = reservations[0].instances
instances.should.have.length_of(1)
instances[0].id.should.equal(instance.id)
conn.create_tags([instance.id], tags_to_be_set)
reservations = conn.get_all_reservations()
instance = reservations[0].instances[0]
retrieved_tags = instance.tags
conn.terminate_instances([instances[0].id])
retrieved_tags[tag_key].should.equal(tag_value)
@mock_ec2_deprecated
def test_retrieved_volumes_must_contain_their_tags():
tag_key = "Tag name"
tag_value = "Tag value"
tags_to_be_set = {tag_key: tag_value}
conn = boto.connect_ec2("the_key", "the_secret")
volume = conn.create_volume(80, "us-east-1a")
all_volumes = conn.get_all_volumes()
volume = all_volumes[0]
conn.create_tags([volume.id], tags_to_be_set)
all_volumes = conn.get_all_volumes()
volume = all_volumes[0]
retrieved_tags = volume.tags
volume.delete()
retrieved_tags[tag_key].should.equal(tag_value)
@mock_ec2_deprecated
def test_retrieved_snapshots_must_contain_their_tags():
tag_key = "Tag name"
tag_value = "Tag value"
tags_to_be_set = {tag_key: tag_value}
conn = boto.connect_ec2(
aws_access_key_id="the_key", aws_secret_access_key="the_secret"
)
volume = conn.create_volume(80, "eu-west-1a")
snapshot = conn.create_snapshot(volume.id)
conn.create_tags([snapshot.id], tags_to_be_set)
all_snapshots = conn.get_all_snapshots()
snapshot = [item for item in all_snapshots if item.id == snapshot.id][0]
retrieved_tags = snapshot.tags
conn.delete_snapshot(snapshot.id)
volume.delete()
retrieved_tags[tag_key].should.equal(tag_value)
@mock_ec2_deprecated
def test_filter_instances_by_wildcard_tags():
conn = boto.connect_ec2(
aws_access_key_id="the_key", aws_secret_access_key="the_secret"
)
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance_a = reservation.instances[0]
instance_a.add_tag("Key1", "Value1")
reservation_b = conn.run_instances(EXAMPLE_AMI_ID)
instance_b = reservation_b.instances[0]
instance_b.add_tag("Key1", "Value2")
reservations = conn.get_all_reservations(filters={"tag:Key1": "Value*"})
reservations.should.have.length_of(2)
reservations = conn.get_all_reservations(filters={"tag-key": "Key*"})
reservations.should.have.length_of(2)
reservations = conn.get_all_reservations(filters={"tag-value": "Value*"})
reservations.should.have.length_of(2)
@mock_ec2
def test_create_volume_with_tags():
client = boto3.client("ec2", "us-west-2")
response = client.create_volume(
AvailabilityZone="us-west-2",
Encrypted=False,
Size=40,
TagSpecifications=[
{
"ResourceType": "volume",
"Tags": [{"Key": "TEST_TAG", "Value": "TEST_VALUE"}],
}
],
)
assert response["Tags"][0]["Key"] == "TEST_TAG"
@mock_ec2
def test_create_snapshot_with_tags():
client = boto3.client("ec2", "us-west-2")
volume_id = client.create_volume(
AvailabilityZone="us-west-2",
Encrypted=False,
Size=40,
TagSpecifications=[
{
"ResourceType": "volume",
"Tags": [{"Key": "TEST_TAG", "Value": "TEST_VALUE"}],
}
],
)["VolumeId"]
snapshot = client.create_snapshot(
VolumeId=volume_id,
TagSpecifications=[
{
"ResourceType": "snapshot",
"Tags": [{"Key": "TEST_SNAPSHOT_TAG", "Value": "TEST_SNAPSHOT_VALUE"}],
}
],
)
expected_tags = [{"Key": "TEST_SNAPSHOT_TAG", "Value": "TEST_SNAPSHOT_VALUE"}]
assert snapshot["Tags"] == expected_tags
@mock_ec2
def test_create_tag_empty_resource():
client = boto3.client("ec2", region_name="us-west-1")
with pytest.raises(ClientError) as ex:
client.create_tags(Resources=[], Tags=[{"Key": "Value"}])
ex.value.response["Error"]["Code"].should.equal("MissingParameter")
ex.value.response["Error"]["Message"].should.equal(
"The request must contain the parameter resourceIdSet"
)
@mock_ec2
def test_delete_tag_empty_resource():
client = boto3.client("ec2", region_name="us-west-1")
with pytest.raises(ClientError) as ex:
client.delete_tags(Resources=[], Tags=[{"Key": "Value"}])
ex.value.response["Error"]["Code"].should.equal("MissingParameter")
ex.value.response["Error"]["Message"].should.equal(
"The request must contain the parameter resourceIdSet"
)
@mock_ec2
def test_retrieve_resource_with_multiple_tags():
ec2 = boto3.resource("ec2", region_name="us-west-1")
blue, green = ec2.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=2, MaxCount=2)
ec2.create_tags(
Resources=[blue.instance_id],
Tags=[
{"Key": "environment", "Value": "blue"},
{"Key": "application", "Value": "api"},
],
)
ec2.create_tags(
Resources=[green.instance_id],
Tags=[
{"Key": "environment", "Value": "green"},
{"Key": "application", "Value": "api"},
],
)
green_instances = list(ec2.instances.filter(Filters=(get_filter("green"))))
green_instances.should.equal([green])
blue_instances = list(ec2.instances.filter(Filters=(get_filter("blue"))))
blue_instances.should.equal([blue])
def get_filter(color):
return [
{"Name": "tag-key", "Values": ["application"]},
{"Name": "tag-value", "Values": ["api"]},
{"Name": "tag-key", "Values": ["environment"]},
{"Name": "tag-value", "Values": [color]},
]
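# A minimal sketch (not part of the original suite) of how tag filter values
# combine: values inside a single filter are ORed, so any listed value matches.
# The tag key and values below are illustrative assumptions.
@mock_ec2
def test_tag_filter_value_combination_sketch():
    ec2 = boto3.resource("ec2", region_name="us-west-1")
    (instance,) = ec2.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1)
    ec2.create_tags(
        Resources=[instance.instance_id],
        Tags=[{"Key": "environment", "Value": "blue"}],
    )
    # Either listed value matches the instance (OR within one filter).
    matched = list(
        ec2.instances.filter(
            Filters=[{"Name": "tag:environment", "Values": ["blue", "green"]}]
        )
    )
    matched.should.have.length_of(1)
    # A value that was never tagged matches nothing.
    unmatched = list(
        ec2.instances.filter(
            Filters=[{"Name": "tag:environment", "Values": ["red"]}]
        )
    )
    unmatched.should.have.length_of(0)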
| true
| true
|
7907380598336fc0f952cc45b3a88965a64e1ccd
| 554
|
py
|
Python
|
opwen_email_server/constants/azure.py
|
tezzytezzy/opwen-cloudserver
|
c3ebfe93d778cd789ab3df25c4580eedc0ae9b4a
|
[
"Apache-2.0"
] | null | null | null |
opwen_email_server/constants/azure.py
|
tezzytezzy/opwen-cloudserver
|
c3ebfe93d778cd789ab3df25c4580eedc0ae9b4a
|
[
"Apache-2.0"
] | null | null | null |
opwen_email_server/constants/azure.py
|
tezzytezzy/opwen-cloudserver
|
c3ebfe93d778cd789ab3df25c4580eedc0ae9b4a
|
[
"Apache-2.0"
] | null | null | null |
from typing_extensions import Final # noqa: F401
CONTAINER_CLIENT_PACKAGES = 'compressedpackages' # type: Final
CONTAINER_EMAILS = 'emails' # type: Final
CONTAINER_MAILBOX = 'mailbox' # type: Final
CONTAINER_SENDGRID_MIME = 'sendgridinboundemails' # type: Final
TABLE_DOMAIN_X_DELIVERED = 'emaildomainxdelivered' # type: Final
TABLE_AUTH = 'clientsauth' # type: Final
QUEUE_CLIENT_PACKAGE = 'lokoleinboundemails' # type: Final
QUEUE_EMAIL_SEND = 'sengridoutboundemails' # type: Final
QUEUE_SENDGRID_MIME = 'sengridinboundemails' # type: Final
| 46.166667
| 65
| 0.785199
|
from typing_extensions import Final
CONTAINER_CLIENT_PACKAGES = 'compressedpackages'
CONTAINER_EMAILS = 'emails'
CONTAINER_MAILBOX = 'mailbox'
CONTAINER_SENDGRID_MIME = 'sendgridinboundemails'
TABLE_DOMAIN_X_DELIVERED = 'emaildomainxdelivered'
TABLE_AUTH = 'clientsauth'
QUEUE_CLIENT_PACKAGE = 'lokoleinboundemails'
QUEUE_EMAIL_SEND = 'sengridoutboundemails'
QUEUE_SENDGRID_MIME = 'sengridinboundemails'
| true
| true
|
7907385213894c5913d607029267c1e7a291cd60
| 6,336
|
py
|
Python
|
scripts/python/html2plaintext.py
|
suchowan/bookmarks
|
77ef0399f8b3ead2356b86f8a5010a56d4b66f4d
|
[
"CC0-1.0"
] | 2
|
2019-05-04T04:35:07.000Z
|
2019-10-24T03:55:00.000Z
|
scripts/python/html2plaintext.py
|
suchowan/bookmarks
|
77ef0399f8b3ead2356b86f8a5010a56d4b66f4d
|
[
"CC0-1.0"
] | null | null | null |
scripts/python/html2plaintext.py
|
suchowan/bookmarks
|
77ef0399f8b3ead2356b86f8a5010a56d4b66f4d
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# This script was written by Takashi SUGA on April-August 2017
# You may use and/or modify this file according to the license described in the MIT LICENSE.txt file https://raw.githubusercontent.com/suchowan/watson-api-client/master
"""『重要文抽出によるWebページ要約のためのHTMLテキスト分割』
http://harp.lib.hiroshima-u.ac.jp/hiroshima-cu/metadata/5532
を参考にした HTML テキスト化処理
"""
import codecs
import re
class Article:
# Character encodings are tried in this order
encodings = [
"utf-8",
"cp932",
"euc-jp",
"iso-2022-jp",
"latin_1"
]
# Regular expression for extracting block-level elements
block_level_tags = re.compile("(?i)</?(" + "|".join([
"address", "blockquote", "center", "dir", "div", "dl",
"fieldset", "form", "h[1-6]", "hr", "isindex", "menu",
"noframes", "noscript", "ol", "pre", "p", "table", "ul",
"dd", "dt", "frameset", "li", "tbody", "td", "tfoot",
"th", "thead", "tr"
]) + ")(>|[^a-z].*?>)")
def __init__(self, path):
print(path)
self.path = path
self.contents = self.get_contents()
# self.contents = self.get_title()
def get_contents(self):
for encoding in self.encodings:
try:
lines = codecs.open(self.path, 'r', encoding)
html = ' '.join(line.rstrip('\r\n') for line in lines)
return self.__get_contents_in_html(html)
except UnicodeDecodeError:
continue
print('Cannot detect encoding of ' + self.path)
return None
def __get_contents_in_html(self, html):
parts = re.split("(?i)<(?:body|frame).*?>", html, 1)
if len(parts) == 2:
head, body = parts
else:
print('Cannot split ' + self.path)
body = html
body = re.sub(r"(?i)<(script|style|select).*?>.*?</\1\s*>", " ", body)
body = re.sub(self.block_level_tags, ' _BLOCK_LEVEL_TAG_ ', body)
body = re.sub(r"(?i)<a\s.+?>", ' _ANCHOR_LEFT_TAG_ ', body)
body = re.sub("(?i)</a>", ' _ANCHOR_RIGHT_TAG_ ', body)
body = re.sub("(?i)<[/a-z].*?>", " ", body)
return re.sub(" +", " ", "".join(self.__get_contents_in_body(body)))
def __get_contents_in_body(self, body):
for block in body.split("_BLOCK_LEVEL_TAG_"):
yield from self.__get_contents_in_block(block)
def __get_contents_in_block(self, block):
self.in_sentence = False
for unit in block.split("。"):
yield from self.__get_contents_in_unit(unit)
if self.in_sentence:
yield '。\n'
def __get_contents_in_unit(self, unit):
image_link = "_ANCHOR_LEFT_TAG_ +_ANCHOR_RIGHT_TAG_"
unit = re.sub(image_link, " ", unit)
if re.match(r"^ *$", unit):
return
fragment_tag = "((?:_ANCHOR_LEFT_TAG_ .+?_ANCHOR_LEFT_TAG_ ){2,})"
for fragment in re.split(fragment_tag, unit):
yield from self.__get_contents_in_fragment(fragment)
def __get_contents_in_fragment(self, fragment):
fragment = re.sub("_ANCHOR_(LEFT|RIGHT)_TAG_", ' ', fragment)
if re.match(r"^ *$", fragment):
return
text_unit = TextUnit(fragment)
if text_unit.is_sentence():
# Sentence units end with “ 。”
if self.in_sentence:
yield '。\n'
yield text_unit.separated
yield ' 。\n'
self.in_sentence = False
else:
# Non-sentence units end with “―。”
# (Limitation) Unlike the paper, non-sentence units are only concatenated, not split
yield text_unit.separated
yield '―'
self.in_sentence = True
def get_title(self):
return self.path.split('/')[-1]
from janome.tokenizer import Tokenizer
from collections import defaultdict
import mojimoji
#import re
class TextUnit:
tokenizer = Tokenizer("user_dic.csv", udic_type="simpledic", udic_enc="utf8")
def __init__(self,fragment):
self.fragment = fragment
self.categories = defaultdict(int)
separated = []
for token in self.tokenizer.tokenize(self.preprocess(self.fragment)):
self.categories[self.categorize(token.part_of_speech)] += 1
separated.append(token.surface)
separated.append('')
self.separated = '/'.join(separated)
def categorize(self,part_of_speech):
if re.match("^名詞,(一般|代名詞|固有名詞|サ変接続|[^,]+語幹)", part_of_speech):
return '自立'
if re.match("^動詞", part_of_speech) and not re.match("サ変", part_of_speech):
return '自立'
if re.match("^形容詞,自立", part_of_speech):
return '自立'
if re.match("^助詞", part_of_speech):
return '助詞'
if re.match("^助動詞", part_of_speech):
return '助動詞'
return 'その他'
def is_sentence(self):
if self.categories['自立'] == 0:
return False
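# Heuristic from the referenced paper: a fragment counts as a sentence when at
# least three of the five ratio checks below hold.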
match = 0
if self.categories['自立'] >= 7:
match += 1
if 100 * self.categories['自立'] / sum(self.categories.values()) <= 64:
match += 1
if 100 * (self.categories['助詞'] + self.categories['助動詞']) / self.categories['自立'] >= 22:
# Following the paper, function words are interpreted as particles ∪ auxiliary verbs (differs from the usual definition)
match += 1
if 100 * self.categories['助詞'] / self.categories['自立'] >= 26:
match += 1
if 100 * self.categories['助動詞'] / self.categories['自立'] >= 6:
match += 1
return match >= 3
def preprocess(self, text):
text = re.sub("&[^;]+;", " ", text)
text = mojimoji.han_to_zen(text, digit=False)
text = re.sub('(\t | )+', " ", text)
return text
if __name__ == '__main__':
import glob
import os
path_pattern = '/home/samba/example/links/bookmarks.crawled/**/*.html'
# The converted plaintext is put as '/home/samba/example/links/bookmarks.plaintext/**/*.txt'
for path in glob.glob(path_pattern, recursive=True):
article = Article(path)
plaintext_path = re.sub("(?i)html?$", "txt", path.replace('.crawled', '.plaintext'))
plaintext_path = plaintext_path.replace('\\', '/')
plaintext_dir = re.sub("/[^/]+$", "", plaintext_path)
if not os.path.exists(plaintext_dir):
os.makedirs(plaintext_dir)
with codecs.open(plaintext_path, 'w', 'utf-8') as f:
f.write(article.contents)
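# Minimal single-file usage sketch (an assumption, not part of the batch
# conversion above):
#
#     article = Article('/path/to/page.html')
#     if article.contents is not None:
#         print(article.contents)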
| 36.413793
| 168
| 0.566288
|
import codecs
import re
class Article:
encodings = [
"utf-8",
"cp932",
"euc-jp",
"iso-2022-jp",
"latin_1"
]
block_level_tags = re.compile("(?i)</?(" + "|".join([
"address", "blockquote", "center", "dir", "div", "dl",
"fieldset", "form", "h[1-6]", "hr", "isindex", "menu",
"noframes", "noscript", "ol", "pre", "p", "table", "ul",
"dd", "dt", "frameset", "li", "tbody", "td", "tfoot",
"th", "thead", "tr"
]) + ")(>|[^a-z].*?>)")
def __init__(self, path):
print(path)
self.path = path
self.contents = self.get_contents()
def get_contents(self):
for encoding in self.encodings:
try:
lines = codecs.open(self.path, 'r', encoding)
html = ' '.join(line.rstrip('\r\n') for line in lines)
return self.__get_contents_in_html(html)
except UnicodeDecodeError:
continue
print('Cannot detect encoding of ' + self.path)
return None
def __get_contents_in_html(self, html):
parts = re.split("(?i)<(?:body|frame).*?>", html, 1)
if len(parts) == 2:
head, body = parts
else:
print('Cannot split ' + self.path)
body = html
body = re.sub(r"(?i)<(script|style|select).*?>.*?</\1\s*>", " ", body)
body = re.sub(self.block_level_tags, ' _BLOCK_LEVEL_TAG_ ', body)
body = re.sub(r"(?i)<a\s.+?>", ' _ANCHOR_LEFT_TAG_ ', body)
body = re.sub("(?i)</a>", ' _ANCHOR_RIGHT_TAG_ ', body)
body = re.sub("(?i)<[/a-z].*?>", " ", body)
return re.sub(" +", " ", "".join(self.__get_contents_in_body(body)))
def __get_contents_in_body(self, body):
for block in body.split("_BLOCK_LEVEL_TAG_"):
yield from self.__get_contents_in_block(block)
def __get_contents_in_block(self, block):
self.in_sentence = False
for unit in block.split("。"):
yield from self.__get_contents_in_unit(unit)
if self.in_sentence:
yield '。\n'
def __get_contents_in_unit(self, unit):
image_link = "_ANCHOR_LEFT_TAG_ +_ANCHOR_RIGHT_TAG_"
unit = re.sub(image_link, " ", unit)
if re.match(r"^ *$", unit):
return
fragment_tag = "((?:_ANCHOR_LEFT_TAG_ .+?_ANCHOR_LEFT_TAG_ ){2,})"
for fragment in re.split(fragment_tag, unit):
yield from self.__get_contents_in_fragment(fragment)
def __get_contents_in_fragment(self, fragment):
fragment = re.sub("_ANCHOR_(LEFT|RIGHT)_TAG_", ' ', fragment)
if re.match(r"^ *$", fragment):
return
text_unit = TextUnit(fragment)
if text_unit.is_sentence():
if self.in_sentence:
yield '。\n'
yield text_unit.separated
yield ' 。\n'
self.in_sentence = False
else:
yield text_unit.separated
yield '―'
self.in_sentence = True
def get_title(self):
return self.path.split('/')[-1]
from janome.tokenizer import Tokenizer
from collections import defaultdict
import mojimoji
class TextUnit:
tokenizer = Tokenizer("user_dic.csv", udic_type="simpledic", udic_enc="utf8")
def __init__(self,fragment):
self.fragment = fragment
self.categories = defaultdict(int)
separated = []
for token in self.tokenizer.tokenize(self.preprocess(self.fragment)):
self.categories[self.categorize(token.part_of_speech)] += 1
separated.append(token.surface)
separated.append('')
self.separated = '/'.join(separated)
def categorize(self,part_of_speech):
if re.match("^名詞,(一般|代名詞|固有名詞|サ変接続|[^,]+語幹)", part_of_speech):
return '自立'
if re.match("^動詞", part_of_speech) and not re.match("サ変", part_of_speech):
return '自立'
if re.match("^形容詞,自立", part_of_speech):
return '自立'
if re.match("^助詞", part_of_speech):
return '助詞'
if re.match("^助動詞", part_of_speech):
return '助動詞'
return 'その他'
def is_sentence(self):
if self.categories['自立'] == 0:
return False
match = 0
if self.categories['自立'] >= 7:
match += 1
if 100 * self.categories['自立'] / sum(self.categories.values()) <= 64:
match += 1
if 100 * (self.categories['助詞'] + self.categories['助動詞']) / self.categories['自立'] >= 22:
match += 1
if 100 * self.categories['助詞'] / self.categories['自立'] >= 26:
match += 1
if 100 * self.categories['助動詞'] / self.categories['自立'] >= 6:
match += 1
return match >= 3
def preprocess(self, text):
text = re.sub("&[^;]+;", " ", text)
text = mojimoji.han_to_zen(text, digit=False)
text = re.sub('(\t | )+', " ", text)
return text
if __name__ == '__main__':
import glob
import os
path_pattern = '/home/samba/example/links/bookmarks.crawled/**/*.html'
for path in glob.glob(path_pattern, recursive=True):
article = Article(path)
plaintext_path = re.sub("(?i)html?$", "txt", path.replace('.crawled', '.plaintext'))
plaintext_path = plaintext_path.replace('\\', '/')
plaintext_dir = re.sub("/[^/]+$", "", plaintext_path)
if not os.path.exists(plaintext_dir):
os.makedirs(plaintext_dir)
with codecs.open(plaintext_path, 'w', 'utf-8') as f:
f.write(article.contents)
| true
| true
|
79073923e7c772300a11c4ca95af346837d8cd04
| 30,193
|
py
|
Python
|
pandapower/converter/pypower/from_ppc.py
|
hmaschke/pandapower-1
|
2e93969050d3d468ce57f73d358e97fabc6e5141
|
[
"BSD-3-Clause"
] | 2
|
2019-11-01T11:01:41.000Z
|
2022-02-07T12:55:55.000Z
|
pandapower/converter/pypower/from_ppc.py
|
hmaschke/pandapower-1
|
2e93969050d3d468ce57f73d358e97fabc6e5141
|
[
"BSD-3-Clause"
] | null | null | null |
pandapower/converter/pypower/from_ppc.py
|
hmaschke/pandapower-1
|
2e93969050d3d468ce57f73d358e97fabc6e5141
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2022 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
from math import pi
from numpy import sign, nan, append, zeros, array, sqrt, where
from numpy import max as max_
from pandas import Series, DataFrame, concat
from pandapower.pypower.idx_gen import GEN_BUS, PMIN, PMAX, QMIN, QMAX, GEN_STATUS
from pandapower.pypower.idx_cost import COST, NCOST
from pandapower.pypower.idx_bus import BUS_I, BASE_KV
import pandapower as pp
try:
import pandaplan.core.pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
try:
from pypower import ppoption, runpf, runopf, rundcpf, rundcopf
ppopt = ppoption.ppoption(VERBOSE=0, OUT_ALL=0)
pypower_import = True
except ImportError:
pypower_import = False
ppc_elms = ["bus", "branch", "gen"]
def _create_costs(net, ppc, gen_lookup, type, idx):
if ppc['gencost'][idx, 0] == 1:
if not len(ppc['gencost'][idx, COST:]) == 2*ppc['gencost'][idx, NCOST]:
logger.error("In gencost line %s, the number n does not fit to the number of values" %
idx)
raise NotImplementedError
pp.create_pwl_cost(net, gen_lookup.element.at[idx],
gen_lookup.element_type.at[idx],
ppc['gencost'][idx, 4:], type)
elif ppc['gencost'][idx, 0] == 2:
ncost = ppc['gencost'][idx, NCOST]
if ncost == 1:
cp2 = 0
cp1 = 0
cp0 = ppc['gencost'][idx, COST]
elif ncost == 2:
cp2 = 0
cp1 = ppc['gencost'][idx, COST]
cp0 = ppc['gencost'][idx, COST + 1]
elif ncost == 3:
cp2 = ppc['gencost'][idx, COST]
cp1 = ppc['gencost'][idx, COST + 1]
cp0 = ppc['gencost'][idx, COST + 2]
elif ncost > 3:
logger.warning("The pandapower poly_cost table only supports up to 2nd order " +
"polynomials. The ppc higher order polynomials cannot be converted.")
cp2 = ppc['gencost'][idx, COST + ncost - 3]
cp1 = ppc['gencost'][idx, COST + ncost - 2]
cp0 = ppc['gencost'][idx, COST + ncost - 1]
else:
raise ValueError("'ncost' must be an positve integer but is " + str(ncost))
pp.create_poly_cost(net, gen_lookup.element.at[idx], gen_lookup.element_type.at[idx],
cp1_eur_per_mw=cp1, cp2_eur_per_mw2=cp2, cp0_eur=cp0)
else:
logger.info("Cost mode of gencost line %s is unknown." % idx)
def _gen_bus_info(ppc, idx_gen):
bus_name = int(ppc["gen"][idx_gen, GEN_BUS])
# assumption: there is only one bus with this bus_name:
idx_bus = int(where(ppc["bus"][:, BUS_I] == bus_name)[0][0])
current_bus_type = int(ppc["bus"][idx_bus, 1])
same_bus_gen_idx = where(ppc["gen"][:, GEN_BUS] == ppc["gen"][idx_gen, GEN_BUS])[0].astype(int)
same_bus_in_service_gen_idx = same_bus_gen_idx[where(ppc["gen"][same_bus_gen_idx, GEN_STATUS] > 0)]
first_same_bus_in_service_gen_idx = same_bus_in_service_gen_idx[0] if len(
same_bus_in_service_gen_idx) else None
last_same_bus_in_service_gen_idx = same_bus_in_service_gen_idx[-1] if len(
same_bus_in_service_gen_idx) else None
return current_bus_type, idx_bus, same_bus_gen_idx, first_same_bus_in_service_gen_idx, \
last_same_bus_in_service_gen_idx
def from_ppc(ppc, f_hz=50, validate_conversion=False, **kwargs):
"""
This function converts pypower case files to pandapower net structure.
INPUT:
**ppc** : The pypower case file.
OPTIONAL:
**f_hz** (float, 50) - The frequency of the network.
**validate_conversion** (bool, False) - If True, validate_from_ppc is run after conversion.
For running the validation, the ppc must already contain the pypower
powerflow results or pypower must be importable.
****kwargs** keyword arguments for validate_from_ppc if validate_conversion is True
OUTPUT:
**net** : pandapower net.
EXAMPLE:
import pandapower.converter as pc
from pypower import case4gs
ppc_net = case4gs.case4gs()
net = pc.from_ppc(ppc_net, f_hz=60)
"""
# --- catch common failures
if Series(ppc['bus'][:, BASE_KV] <= 0).any():
logger.info('There are non-positive baseKV values given in the pypower case file.')
# --- general_parameters
baseMVA = ppc['baseMVA'] # MVA
omega = pi * f_hz # 1/s
MAX_VAL = 99999.
net = pp.create_empty_network(f_hz=f_hz, sn_mva=baseMVA)
# --- bus data -> create buses, sgen, load, shunt
for i in range(len(ppc['bus'])):
# create buses
pp.create_bus(net, name=int(ppc['bus'][i, 0]), vn_kv=ppc['bus'][i, 9], type="b",
zone=ppc['bus'][i, 10], in_service=bool(ppc['bus'][i, 1] != 4),
max_vm_pu=ppc['bus'][i, 11], min_vm_pu=ppc['bus'][i, 12])
# create sgen, load
if ppc['bus'][i, 2] > 0:
pp.create_load(net, i, p_mw=ppc['bus'][i, 2], q_mvar=ppc['bus'][i, 3],
controllable=False)
elif ppc['bus'][i, 2] < 0:
pp.create_sgen(net, i, p_mw=-ppc['bus'][i, 2], q_mvar=-ppc['bus'][i, 3],
type="", controllable=False)
elif ppc['bus'][i, 3] != 0:
pp.create_load(net, i, p_mw=ppc['bus'][i, 2], q_mvar=ppc['bus'][i, 3],
controllable=False)
# create shunt
if ppc['bus'][i, 4] != 0 or ppc['bus'][i, 5] != 0:
pp.create_shunt(net, i, p_mw=ppc['bus'][i, 4],
q_mvar=-ppc['bus'][i, 5])
# unused data of ppc: Vm, Va (partwise: in ext_grid), zone
# --- gen data -> create ext_grid, gen, sgen
gen_lookup = DataFrame(nan, columns=['element', 'element_type'],
index=range(len(ppc['gen'][:, 0])))
# if in ppc is only one gen -> numpy initially uses one dim array -> change to two dim array
if len(ppc["gen"].shape) == 1:
ppc["gen"] = array(ppc["gen"], ndmin=2)
for i in range(len(ppc['gen'][:, 0])):
current_bus_type, current_bus_idx, same_bus_gen_idx, first_same_bus_in_service_gen_idx, \
last_same_bus_in_service_gen_idx = _gen_bus_info(ppc, i)
# create ext_grid
if current_bus_type == 3:
if i == first_same_bus_in_service_gen_idx:
gen_lookup.element.loc[i] = pp.create_ext_grid(
net, bus=current_bus_idx, vm_pu=ppc['gen'][last_same_bus_in_service_gen_idx, 5],
va_degree=ppc['bus'][current_bus_idx, 8], in_service=bool(ppc['gen'][i, 7] > 0),
max_p_mw=ppc['gen'][i, PMAX], min_p_mw=ppc['gen'][i, PMIN],
max_q_mvar=ppc['gen'][i, QMAX], min_q_mvar=ppc['gen'][i, QMIN])
gen_lookup.element_type.loc[i] = 'ext_grid'
if ppc['gen'][i, 4] > ppc['gen'][i, 3]:
logger.info('min_q_mvar of gen %d must be less than max_q_mvar but is not.' % i)
if -ppc['gen'][i, 9] < -ppc['gen'][i, 8]:
logger.info('max_p_mw of gen %d must be less than min_p_mw but is not.' % i)
else:
current_bus_type = 1
# create gen
elif current_bus_type == 2:
if i == first_same_bus_in_service_gen_idx:
gen_lookup.element.loc[i] = pp.create_gen(
net, bus=current_bus_idx, vm_pu=ppc['gen'][last_same_bus_in_service_gen_idx, 5],
p_mw=ppc['gen'][i, 1],
in_service=bool(ppc['gen'][i, 7] > 0), controllable=True,
max_p_mw=ppc['gen'][i, PMAX], min_p_mw=ppc['gen'][i, PMIN],
max_q_mvar=ppc['gen'][i, QMAX], min_q_mvar=ppc['gen'][i, QMIN])
gen_lookup.element_type.loc[i] = 'gen'
if ppc['gen'][i, 1] < 0:
logger.info('p_mw of gen %d must be less than zero but is not.' % i)
if ppc['gen'][i, 4] > ppc['gen'][i, 3]:
logger.info('min_q_mvar of gen %d must be less than max_q_mvar but is not.' % i)
if -ppc['gen'][i, 9] < -ppc['gen'][i, 8]:
logger.info('max_p_mw of gen %d must be less than min_p_mw but is not.' % i)
else:
current_bus_type = 1
# create sgen
if current_bus_type == 1:
gen_lookup.element.loc[i] = pp.create_sgen(
net, bus=current_bus_idx, p_mw=ppc['gen'][i, 1],
q_mvar=ppc['gen'][i, 2], type="", in_service=bool(ppc['gen'][i, 7] > 0),
max_p_mw=ppc['gen'][i, PMAX], min_p_mw=ppc['gen'][i, PMIN],
max_q_mvar=ppc['gen'][i, QMAX], min_q_mvar=ppc['gen'][i, QMIN],
controllable=True)
gen_lookup.element_type.loc[i] = 'sgen'
if ppc['gen'][i, 1] < 0:
logger.info('p_mw of sgen %d must be less than zero but is not.' % i)
if ppc['gen'][i, 4] > ppc['gen'][i, 3]:
logger.info('min_q_mvar of gen %d must be less than max_q_mvar but is not.' % i)
if -ppc['gen'][i, 9] < -ppc['gen'][i, 8]:
logger.info('max_p_mw of gen %d must be less than min_p_mw but is not.' % i)
# unused data of ppc: Vg (partwise: in ext_grid and gen), mBase, Pc1, Pc2, Qc1min, Qc1max,
# Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30,ramp_q, apf
# --- branch data -> create line, trafo
for i in range(len(ppc['branch'])):
from_bus = pp.get_element_index(net, 'bus', name=int(ppc['branch'][i, 0]))
to_bus = pp.get_element_index(net, 'bus', name=int(ppc['branch'][i, 1]))
from_vn_kv = ppc['bus'][from_bus, 9]
to_vn_kv = ppc['bus'][to_bus, 9]
if (from_vn_kv == to_vn_kv) & ((ppc['branch'][i, 8] == 0) | (ppc['branch'][i, 8] == 1)) & \
(ppc['branch'][i, 9] == 0): # create line
Zni = ppc['bus'][to_bus, 9]**2/baseMVA # ohm
max_i_ka = ppc['branch'][i, 5]/ppc['bus'][to_bus, 9]/sqrt(3)
if max_i_ka == 0.0:
max_i_ka = MAX_VAL
logger.debug("ppc branch rateA is zero -> Using MAX_VAL instead to calculate " +
"maximum branch flow")
pp.create_line_from_parameters(
net, from_bus=from_bus, to_bus=to_bus, length_km=1,
r_ohm_per_km=ppc['branch'][i, 2]*Zni, x_ohm_per_km=ppc['branch'][i, 3]*Zni,
c_nf_per_km=ppc['branch'][i, 4]/Zni/omega*1e9/2,
max_i_ka=max_i_ka, type='ol', max_loading_percent=100,
in_service=bool(ppc['branch'][i, 10]))
else: # create transformer
if from_vn_kv >= to_vn_kv:
hv_bus = from_bus
vn_hv_kv = from_vn_kv
lv_bus = to_bus
vn_lv_kv = to_vn_kv
tap_side = 'hv'
else:
hv_bus = to_bus
vn_hv_kv = to_vn_kv
lv_bus = from_bus
vn_lv_kv = from_vn_kv
tap_side = 'lv'
if from_vn_kv == to_vn_kv:
logger.warning('The pypower branch %d (from_bus, to_bus)=(%d, %d) is considered'
' as a transformer because of a ratio != 0 | 1 but it connects '
'the same voltage level', i, ppc['branch'][i, 0],
ppc['branch'][i, 1])
rk = ppc['branch'][i, 2]
xk = ppc['branch'][i, 3]
zk = (rk ** 2 + xk ** 2) ** 0.5
sn = ppc['branch'][i, 5]
if sn == 0.0:
sn = MAX_VAL
logger.debug("ppc branch rateA is zero -> Using MAX_VAL instead to calculate " +
"apparent power")
ratio_1 = 0 if ppc['branch'][i, 8] == 0 else (ppc['branch'][i, 8] - 1) * 100
i0_percent = -ppc['branch'][i, 4] * 100 * baseMVA / sn
if i0_percent < 0:
logger.info('A transformer always consumes reactive power (behaves inductively), but the '
'susceptance of pypower branch %d (from_bus, to_bus)=(%d, %d) is '
'positive.', i, ppc['branch'][i, 0], ppc['branch'][i, 1])
pp.create_transformer_from_parameters(
net, hv_bus=hv_bus, lv_bus=lv_bus, sn_mva=sn, vn_hv_kv=vn_hv_kv,
vn_lv_kv=vn_lv_kv, vk_percent=sign(xk) * zk * sn * 100 / baseMVA,
vkr_percent=rk * sn * 100 / baseMVA, max_loading_percent=100,
pfe_kw=0, i0_percent=i0_percent, shift_degree=ppc['branch'][i, 9],
tap_step_percent=abs(ratio_1), tap_pos=sign(ratio_1),
tap_side=tap_side, tap_neutral=0)
# unused data of ppc: rateB, rateC
# --- gencost -> create polynomial_cost, piecewise_cost
if 'gencost' in ppc:
if len(ppc['gencost'].shape) == 1:
# reshape gencost if only one gencost is given -> no indexError
ppc['gencost'] = ppc['gencost'].reshape((1, -1))
if ppc['gencost'].shape[0] <= gen_lookup.shape[0]:
idx_p = range(ppc['gencost'].shape[0])
idx_q = []
elif ppc['gencost'].shape[0] > gen_lookup.shape[0]:
idx_p = range(gen_lookup.shape[0])
idx_q = range(gen_lookup.shape[0], ppc['gencost'].shape[0])
if ppc['gencost'].shape[0] >= 2*gen_lookup.shape[0]:
idx_p = range(gen_lookup.shape[0])
idx_q = range(gen_lookup.shape[0], 2*gen_lookup.shape[0])
for idx in idx_p:
_create_costs(net, ppc, gen_lookup, 'p', idx)
for idx in idx_q:
_create_costs(net, ppc, gen_lookup, 'q', idx)
# areas are unconverted
if validate_conversion:
logger.setLevel(logging.DEBUG)
if not validate_from_ppc(ppc, net, **kwargs):
logger.error("Validation failed.")
net._options = {}
net._options["gen_lookup"] = gen_lookup
return net
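# Minimal usage sketch (an assumption beyond the docstring example above):
#
#     from pypower import case4gs
#     net = from_ppc(case4gs.case4gs(), f_hz=60)
#     pp.runpp(net, calculate_voltage_angles=True, trafo_model="pi")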
def _validate_diff_res(diff_res, max_diff_values):
to_iterate = set(max_diff_values.keys()) & {'gen_q_mvar', 'branch_p_mw', 'branch_q_mvar',
'gen_p_mw', 'bus_va_degree', 'bus_vm_pu'}
if not len(to_iterate):
logger.warning("There are no keys to validate.")
val = True
for i in to_iterate:
elm = i.split("_")[0]
sought = ["p", "q"] if elm != "bus" else ["vm", "va"]
col = int(array([0, 1])[[j in i for j in sought]][0]) if elm != "branch" else \
list(array([[0, 2], [1, 3]])[[j in i for j in sought]][0])
val &= bool(max_(abs(diff_res[elm][:, col])) < max_diff_values[i])
return val
def validate_from_ppc(ppc_net, net, pf_type="runpp", max_diff_values={
"bus_vm_pu": 1e-6, "bus_va_degree": 1e-5, "branch_p_mw": 1e-6, "branch_q_mvar": 1e-6,
"gen_p_mw": 1e-6, "gen_q_mvar": 1e-6}, run=True):
"""
This function validates the pypower case files to pandapower net structure conversion via a \
comparison of loadflow calculation results. (Hence the opf cost conversion is not validated.)
INPUT:
**ppc_net** - The pypower case file, which must already contain the pypower powerflow
results or pypower must be importable.
**net** - The pandapower network.
OPTIONAL:
**pf_type** ("runpp", string) - Type of validated power flow. Possible are ("runpp",
"rundcpp", "runopp", "rundcopp")
**max_diff_values** - Dict of maximal allowed difference values. The keys must be
'bus_vm_pu', 'bus_va_degree', 'branch_p_mw', 'branch_q_mvar', 'gen_p_mw' and
'gen_q_mvar' and the values floats.
**run** (True, bool or list of two bools) - changing the value to False avoids trying to run
(optimal) loadflows. Giving a list of two bools addresses first pypower and second
pandapower.
OUTPUT:
**conversion_success** - conversion_success is returned as False if pypower or pandapower
cannot calculate a powerflow or if the maximum allowed difference values
(max_diff_values) are exceeded.
EXAMPLE:
import pandapower.converter as pc
net = pc.from_ppc(ppc_net, f_hz=50)
conversion_success = pc.validate_from_ppc(ppc_net, net)
NOTE:
The user has to ensure that the loadflow results are already included in the provided \
ppc_net or that pypower is importable.
"""
# check in case of optimal powerflow comparison whether cost information exist
if "opp" in pf_type:
if not (len(net.polynomial_cost) | len(net.piecewise_linear_cost)):
if "gencost" in ppc_net:
if not len(ppc_net["gencost"]):
logger.debug('ppc and pandapower net do not include cost information.')
return True
else:
logger.error('The pandapower net does not include cost information.')
return False
else:
logger.debug('ppc and pandapower net do not include cost information.')
return True
# guarantee run parameter as list, for pypower and pandapower (optimal) powerflow run
run = [run, run] if isinstance(run, bool) else run
# --- check pypower powerflow success, if possible
if pypower_import and run[0]:
try:
if pf_type == "runpp":
ppc_net = runpf.runpf(ppc_net, ppopt)[0]
elif pf_type == "rundcpp":
ppc_net = rundcpf.rundcpf(ppc_net, ppopt)[0]
elif pf_type == "runopp":
ppc_net = runopf.runopf(ppc_net, ppopt)
elif pf_type == "rundcopp":
ppc_net = rundcopf.rundcopf(ppc_net, ppopt)
else:
raise ValueError("The pf_type %s is unknown" % pf_type)
except:
logger.debug("The pypower run did not work.")
ppc_success = True
if 'success' in ppc_net.keys():
if ppc_net['success'] != 1:
ppc_success = False
logger.error("The given ppc data indicates an unsuccessful pypower powerflow: " +
"'ppc_net['success'] != 1'")
if (ppc_net['branch'].shape[1] < 17):
ppc_success = False
logger.error("The shape of given ppc data indicates missing pypower powerflow results.")
# --- try to run a pandapower powerflow
if run[1]:
if pf_type == "runpp":
try:
pp.runpp(net, init="dc", calculate_voltage_angles=True, trafo_model="pi")
except pp.LoadflowNotConverged:
try:
pp.runpp(net, calculate_voltage_angles=True, init="flat", trafo_model="pi")
except pp.LoadflowNotConverged:
try:
pp.runpp(net, trafo_model="pi", calculate_voltage_angles=False)
if "bus_va_degree" in max_diff_values.keys():
max_diff_values["bus_va_degree"] = 1e2 if max_diff_values[
"bus_va_degree"] < 1e2 else max_diff_values["bus_va_degree"]
logger.info("voltage_angles could be calculated.")
except pp.LoadflowNotConverged:
logger.error('The pandapower powerflow does not converge.')
elif pf_type == "rundcpp":
try:
pp.rundcpp(net, trafo_model="pi")
except pp.LoadflowNotConverged:
logger.error('The pandapower dc powerflow does not converge.')
elif pf_type == "runopp":
try:
pp.runopp(net, init="flat", calculate_voltage_angles=True)
except pp.OPFNotConverged:
try:
pp.runopp(net, init="pf", calculate_voltage_angles=True)
except (pp.OPFNotConverged, pp.LoadflowNotConverged, KeyError):
try:
pp.runopp(net, init="flat", calculate_voltage_angles=False)
logger.info("voltage_angles could be calculated.")
if "bus_va_degree" in max_diff_values.keys():
max_diff_values["bus_va_degree"] = 1e2 if max_diff_values[
"bus_va_degree"] < 1e2 else max_diff_values["bus_va_degree"]
except pp.OPFNotConverged:
try:
pp.runopp(net, init="pf", calculate_voltage_angles=False)
if "bus_va_degree" in max_diff_values.keys():
max_diff_values["bus_va_degree"] = 1e2 if max_diff_values[
"bus_va_degree"] < 1e2 else max_diff_values["bus_va_degree"]
logger.info("voltage_angles could be calculated.")
except (pp.OPFNotConverged, pp.LoadflowNotConverged, KeyError):
logger.error('The pandapower optimal powerflow does not converge.')
elif pf_type == "rundcopp":
try:
pp.rundcopp(net)
except pp.LoadflowNotConverged:
logger.error('The pandapower dc optimal powerflow does not converge.')
else:
raise ValueError("The pf_type %s is unknown" % pf_type)
# --- prepare powerflow result comparison by reordering pp results as they are in ppc results
if not ppc_success:
return False
if "opp" in pf_type:
if not net.OPF_converged:
return False
elif not net.converged:
return False
# --- store pypower powerflow results
ppc_res = dict.fromkeys(ppc_elms)
ppc_res["branch"] = ppc_net['branch'][:, 13:17]
ppc_res["bus"] = ppc_net['bus'][:, 7:9]
ppc_res["gen"] = ppc_net['gen'][:, 1:3]
# --- pandapower bus result table
pp_res = dict.fromkeys(ppc_elms)
pp_res["bus"] = array(net.res_bus.sort_index()[['vm_pu', 'va_degree']])
# --- pandapower gen result table
pp_res["gen"] = zeros([1, 2])
# consideration of parallel generators via storing how much generators have been considered
# each node
# if in ppc is only one gen -> numpy initially uses one dim array -> change to two dim array
if len(ppc_net["gen"].shape) == 1:
ppc_net["gen"] = array(ppc_net["gen"], ndmin=2)
GENS = DataFrame(ppc_net['gen'][:, [0]].astype(int))
GEN_uniq = GENS.drop_duplicates()
already_used_gen = Series(zeros(GEN_uniq.shape[0]).astype(int),
index=[int(v) for v in GEN_uniq.values])
change_q_compare = []
for i, j in GENS.iterrows():
current_bus_type, current_bus_idx, same_bus_gen_idx, first_same_bus_in_service_gen_idx, \
last_same_bus_in_service_gen_idx = _gen_bus_info(ppc_net, i)
if current_bus_type == 3 and i == first_same_bus_in_service_gen_idx:
pp_res["gen"] = append(pp_res["gen"], array(net.res_ext_grid[
net.ext_grid.bus == current_bus_idx][['p_mw', 'q_mvar']]).reshape((1, 2)), 0)
elif current_bus_type == 2 and i == first_same_bus_in_service_gen_idx:
pp_res["gen"] = append(pp_res["gen"], array(net.res_gen[
net.gen.bus == current_bus_idx][['p_mw', 'q_mvar']]).reshape((1, 2)), 0)
else:
pp_res["gen"] = append(pp_res["gen"], array(net.res_sgen[
net.sgen.bus == current_bus_idx][['p_mw', 'q_mvar']])[
already_used_gen.at[int(j)]].reshape((1, 2)), 0)
already_used_gen.at[int(j)] += 1
change_q_compare += [int(j)]
pp_res["gen"] = pp_res["gen"][1:, :] # delete initial zero row
# --- pandapower branch result table
pp_res["branch"] = zeros([1, 4])
# consideration of parallel branches via storing how often branches were considered
# each node-to-node-connection
try:
init1 = concat([net.line.from_bus, net.line.to_bus], axis=1,
sort=True).drop_duplicates()
init2 = concat([net.trafo.hv_bus, net.trafo.lv_bus], axis=1,
sort=True).drop_duplicates()
except TypeError:
# legacy pandas < 0.21
init1 = concat([net.line.from_bus, net.line.to_bus], axis=1).drop_duplicates()
init2 = concat([net.trafo.hv_bus, net.trafo.lv_bus], axis=1).drop_duplicates()
init1['hv_bus'] = nan
init1['lv_bus'] = nan
init2['from_bus'] = nan
init2['to_bus'] = nan
try:
already_used_branches = concat([init1, init2], axis=0, sort=True)
except TypeError:
# pandas < 0.21 legacy
already_used_branches = concat([init1, init2], axis=0)
already_used_branches['number'] = zeros([already_used_branches.shape[0], 1]).astype(int)
BRANCHES = DataFrame(ppc_net['branch'][:, [0, 1, 8, 9]])
for i in BRANCHES.index:
from_bus = pp.get_element_index(net, 'bus', name=int(ppc_net['branch'][i, 0]))
to_bus = pp.get_element_index(net, 'bus', name=int(ppc_net['branch'][i, 1]))
from_vn_kv = ppc_net['bus'][from_bus, 9]
to_vn_kv = ppc_net['bus'][to_bus, 9]
ratio = BRANCHES[2].at[i]
angle = BRANCHES[3].at[i]
# from line results
if (from_vn_kv == to_vn_kv) & ((ratio == 0) | (ratio == 1)) & (angle == 0):
pp_res["branch"] = append(pp_res["branch"], array(net.res_line[
(net.line.from_bus == from_bus) &
(net.line.to_bus == to_bus)]
[['p_from_mw', 'q_from_mvar', 'p_to_mw', 'q_to_mvar']])[
int(already_used_branches.number.loc[
(already_used_branches.from_bus == from_bus) &
(already_used_branches.to_bus == to_bus)].values)].reshape(1, 4), 0)
already_used_branches.number.loc[(already_used_branches.from_bus == from_bus) &
(already_used_branches.to_bus == to_bus)] += 1
# from trafo results
else:
if from_vn_kv >= to_vn_kv:
pp_res["branch"] = append(pp_res["branch"], array(net.res_trafo[
(net.trafo.hv_bus == from_bus) &
(net.trafo.lv_bus == to_bus)]
[['p_hv_mw', 'q_hv_mvar', 'p_lv_mw', 'q_lv_mvar']])[
int(already_used_branches.number.loc[
(already_used_branches.hv_bus == from_bus) &
(already_used_branches.lv_bus == to_bus)].values)].reshape(1, 4), 0)
already_used_branches.number.loc[(already_used_branches.hv_bus == from_bus) &
(already_used_branches.lv_bus == to_bus)] += 1
else: # switch hv-lv-connection of pypower connection buses
pp_res["branch"] = append(pp_res["branch"], array(net.res_trafo[
(net.trafo.hv_bus == to_bus) &
(net.trafo.lv_bus == from_bus)]
[['p_lv_mw', 'q_lv_mvar', 'p_hv_mw', 'q_hv_mvar']])[
int(already_used_branches.number.loc[
(already_used_branches.hv_bus == to_bus) &
(already_used_branches.lv_bus == from_bus)].values)].reshape(1, 4), 0)
already_used_branches.number.loc[
(already_used_branches.hv_bus == to_bus) &
(already_used_branches.lv_bus == from_bus)] += 1
pp_res["branch"] = pp_res["branch"][1:, :] # delete initial zero row
# --- do the powerflow result comparison
diff_res = dict.fromkeys(ppc_elms)
diff_res["bus"] = ppc_res["bus"] - pp_res["bus"]
diff_res["bus"][:, 1] -= diff_res["bus"][0, 1] # remove va_degree offset
diff_res["branch"] = ppc_res["branch"] - pp_res["branch"]
diff_res["gen"] = ppc_res["gen"] - pp_res["gen"]
# comparison of buses with several generator units only as q sum
for i in GEN_uniq.loc[GEN_uniq[0].isin(change_q_compare)].index:
next_is = GEN_uniq.index[GEN_uniq.index > i]
if len(next_is) > 0:
next_i = next_is[0]
else:
next_i = GENS.index[-1] + 1
if (next_i - i) > 1:
diff_res["gen"][i:next_i, 1] = sum(diff_res["gen"][i:next_i, 1])
# logger info
logger.debug("Maximum voltage magnitude difference between pypower and pandapower: "
"%.2e pu" % max_(abs(diff_res["bus"][:, 0])))
logger.debug("Maximum voltage angle difference between pypower and pandapower: "
"%.2e degree" % max_(abs(diff_res["bus"][:, 1])))
logger.debug("Maximum branch flow active power difference between pypower and pandapower: "
"%.2e MW" % max_(abs(diff_res["branch"][:, [0, 2]])))
logger.debug("Maximum branch flow reactive power difference between pypower and "
"pandapower: %.2e MVAr" % max_(abs(diff_res["branch"][:, [1, 3]])))
logger.debug("Maximum active power generation difference between pypower and pandapower: "
"%.2e MW" % max_(abs(diff_res["gen"][:, 0])))
logger.debug("Maximum reactive power generation difference between pypower and pandapower: "
"%.2e MVAr" % max_(abs(diff_res["gen"][:, 1])))
if _validate_diff_res(diff_res, {"bus_vm_pu": 1e-3, "bus_va_degree": 1e-3, "branch_p_mw": 1e-6,
"branch_q_mvar": 1e-6}) and \
(max_(abs(diff_res["gen"])) > 1e-1).any():
logger.debug("The active/reactive power generation difference possibly results "
"because of a pypower error. Please validate "
"the results via pypower loadflow.") # this occurs e.g. at ppc case9
# give a return
if isinstance(max_diff_values, dict):
return _validate_diff_res(diff_res, max_diff_values)
else:
logger.debug("'max_diff_values' must be a dict.")
| 48.93517
| 103
| 0.568509
|
from math import pi
from numpy import sign, nan, append, zeros, array, sqrt, where
from numpy import max as max_
from pandas import Series, DataFrame, concat
from pandapower.pypower.idx_gen import GEN_BUS, PMIN, PMAX, QMIN, QMAX, GEN_STATUS
from pandapower.pypower.idx_cost import COST, NCOST
from pandapower.pypower.idx_bus import BUS_I, BASE_KV
import pandapower as pp
try:
import pandaplan.core.pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
try:
from pypower import ppoption, runpf, runopf, rundcpf, rundcopf
ppopt = ppoption.ppoption(VERBOSE=0, OUT_ALL=0)
pypower_import = True
except ImportError:
pypower_import = False
ppc_elms = ["bus", "branch", "gen"]
def _create_costs(net, ppc, gen_lookup, type, idx):
if ppc['gencost'][idx, 0] == 1:
if not len(ppc['gencost'][idx, COST:]) == 2*ppc['gencost'][idx, NCOST]:
logger.error("In gencost line %s, the number n does not fit to the number of values" %
idx)
raise NotImplementedError
pp.create_pwl_cost(net, gen_lookup.element.at[idx],
gen_lookup.element_type.at[idx],
ppc['gencost'][idx, 4:], type)
elif ppc['gencost'][idx, 0] == 2:
ncost = ppc['gencost'][idx, NCOST]
if ncost == 1:
cp2 = 0
cp1 = 0
cp0 = ppc['gencost'][idx, COST]
elif ncost == 2:
cp2 = 0
cp1 = ppc['gencost'][idx, COST]
cp0 = ppc['gencost'][idx, COST + 1]
elif ncost == 3:
cp2 = ppc['gencost'][idx, COST]
cp1 = ppc['gencost'][idx, COST + 1]
cp0 = ppc['gencost'][idx, COST + 2]
elif ncost > 3:
logger.warning("The pandapower poly_cost table only supports up to 2nd order " +
"polynomials. The ppc higher order polynomials cannot be converted.")
cp2 = ppc['gencost'][idx, COST + ncost - 3]
cp1 = ppc['gencost'][idx, COST + ncost - 2]
cp0 = ppc['gencost'][idx, COST + ncost - 1]
else:
raise ValueError("'ncost' must be an positve integer but is " + str(ncost))
pp.create_poly_cost(net, gen_lookup.element.at[idx], gen_lookup.element_type.at[idx],
cp1_eur_per_mw=cp1, cp2_eur_per_mw2=cp2, cp0_eur=cp0)
else:
logger.info("Cost mode of gencost line %s is unknown." % idx)
def _gen_bus_info(ppc, idx_gen):
bus_name = int(ppc["gen"][idx_gen, GEN_BUS])
idx_bus = int(where(ppc["bus"][:, BUS_I] == bus_name)[0][0])
current_bus_type = int(ppc["bus"][idx_bus, 1])
same_bus_gen_idx = where(ppc["gen"][:, GEN_BUS] == ppc["gen"][idx_gen, GEN_BUS])[0].astype(int)
same_bus_in_service_gen_idx = same_bus_gen_idx[where(ppc["gen"][same_bus_gen_idx, GEN_STATUS] > 0)]
first_same_bus_in_service_gen_idx = same_bus_in_service_gen_idx[0] if len(
same_bus_in_service_gen_idx) else None
last_same_bus_in_service_gen_idx = same_bus_in_service_gen_idx[-1] if len(
same_bus_in_service_gen_idx) else None
return current_bus_type, idx_bus, same_bus_gen_idx, first_same_bus_in_service_gen_idx, \
last_same_bus_in_service_gen_idx
def from_ppc(ppc, f_hz=50, validate_conversion=False, **kwargs):
if Series(ppc['bus'][:, BASE_KV] <= 0).any():
logger.info('There are non-positive baseKV values given in the pypower case file.')
baseMVA = ppc['baseMVA']
omega = pi * f_hz
MAX_VAL = 99999.
net = pp.create_empty_network(f_hz=f_hz, sn_mva=baseMVA)
for i in range(len(ppc['bus'])):
pp.create_bus(net, name=int(ppc['bus'][i, 0]), vn_kv=ppc['bus'][i, 9], type="b",
zone=ppc['bus'][i, 10], in_service=bool(ppc['bus'][i, 1] != 4),
max_vm_pu=ppc['bus'][i, 11], min_vm_pu=ppc['bus'][i, 12])
if ppc['bus'][i, 2] > 0:
pp.create_load(net, i, p_mw=ppc['bus'][i, 2], q_mvar=ppc['bus'][i, 3],
controllable=False)
elif ppc['bus'][i, 2] < 0:
pp.create_sgen(net, i, p_mw=-ppc['bus'][i, 2], q_mvar=-ppc['bus'][i, 3],
type="", controllable=False)
elif ppc['bus'][i, 3] != 0:
pp.create_load(net, i, p_mw=ppc['bus'][i, 2], q_mvar=ppc['bus'][i, 3],
controllable=False)
if ppc['bus'][i, 4] != 0 or ppc['bus'][i, 5] != 0:
pp.create_shunt(net, i, p_mw=ppc['bus'][i, 4],
q_mvar=-ppc['bus'][i, 5])
gen_lookup = DataFrame(nan, columns=['element', 'element_type'],
index=range(len(ppc['gen'][:, 0])))
if len(ppc["gen"].shape) == 1:
ppc["gen"] = array(ppc["gen"], ndmin=2)
for i in range(len(ppc['gen'][:, 0])):
current_bus_type, current_bus_idx, same_bus_gen_idx, first_same_bus_in_service_gen_idx, \
last_same_bus_in_service_gen_idx = _gen_bus_info(ppc, i)
if current_bus_type == 3:
if i == first_same_bus_in_service_gen_idx:
gen_lookup.element.loc[i] = pp.create_ext_grid(
net, bus=current_bus_idx, vm_pu=ppc['gen'][last_same_bus_in_service_gen_idx, 5],
va_degree=ppc['bus'][current_bus_idx, 8], in_service=bool(ppc['gen'][i, 7] > 0),
max_p_mw=ppc['gen'][i, PMAX], min_p_mw=ppc['gen'][i, PMIN],
max_q_mvar=ppc['gen'][i, QMAX], min_q_mvar=ppc['gen'][i, QMIN])
gen_lookup.element_type.loc[i] = 'ext_grid'
if ppc['gen'][i, 4] > ppc['gen'][i, 3]:
logger.info('min_q_mvar of gen %d must be less than max_q_mvar but is not.' % i)
if -ppc['gen'][i, 9] < -ppc['gen'][i, 8]:
logger.info('max_p_mw of gen %d must be less than min_p_mw but is not.' % i)
else:
current_bus_type = 1
elif current_bus_type == 2:
if i == first_same_bus_in_service_gen_idx:
gen_lookup.element.loc[i] = pp.create_gen(
net, bus=current_bus_idx, vm_pu=ppc['gen'][last_same_bus_in_service_gen_idx, 5],
p_mw=ppc['gen'][i, 1],
in_service=bool(ppc['gen'][i, 7] > 0), controllable=True,
max_p_mw=ppc['gen'][i, PMAX], min_p_mw=ppc['gen'][i, PMIN],
max_q_mvar=ppc['gen'][i, QMAX], min_q_mvar=ppc['gen'][i, QMIN])
gen_lookup.element_type.loc[i] = 'gen'
if ppc['gen'][i, 1] < 0:
logger.info('p_mw of gen %d must be less than zero but is not.' % i)
if ppc['gen'][i, 4] > ppc['gen'][i, 3]:
logger.info('min_q_mvar of gen %d must be less than max_q_mvar but is not.' % i)
if -ppc['gen'][i, 9] < -ppc['gen'][i, 8]:
logger.info('max_p_mw of gen %d must be less than min_p_mw but is not.' % i)
else:
current_bus_type = 1
if current_bus_type == 1:
gen_lookup.element.loc[i] = pp.create_sgen(
net, bus=current_bus_idx, p_mw=ppc['gen'][i, 1],
q_mvar=ppc['gen'][i, 2], type="", in_service=bool(ppc['gen'][i, 7] > 0),
max_p_mw=ppc['gen'][i, PMAX], min_p_mw=ppc['gen'][i, PMIN],
max_q_mvar=ppc['gen'][i, QMAX], min_q_mvar=ppc['gen'][i, QMIN],
controllable=True)
gen_lookup.element_type.loc[i] = 'sgen'
if ppc['gen'][i, 1] < 0:
logger.info('p_mw of sgen %d must be less than zero but is not.' % i)
if ppc['gen'][i, 4] > ppc['gen'][i, 3]:
logger.info('min_q_mvar of gen %d must be less than max_q_mvar but is not.' % i)
if -ppc['gen'][i, 9] < -ppc['gen'][i, 8]:
logger.info('max_p_mw of gen %d must be less than min_p_mw but is not.' % i)
for i in range(len(ppc['branch'])):
from_bus = pp.get_element_index(net, 'bus', name=int(ppc['branch'][i, 0]))
to_bus = pp.get_element_index(net, 'bus', name=int(ppc['branch'][i, 1]))
from_vn_kv = ppc['bus'][from_bus, 9]
to_vn_kv = ppc['bus'][to_bus, 9]
if (from_vn_kv == to_vn_kv) & ((ppc['branch'][i, 8] == 0) | (ppc['branch'][i, 8] == 1)) & \
(ppc['branch'][i, 9] == 0):
Zni = ppc['bus'][to_bus, 9]**2/baseMVA
max_i_ka = ppc['branch'][i, 5]/ppc['bus'][to_bus, 9]/sqrt(3)
if max_i_ka == 0.0:
max_i_ka = MAX_VAL
logger.debug("ppc branch rateA is zero -> Using MAX_VAL instead to calculate " +
"maximum branch flow")
pp.create_line_from_parameters(
net, from_bus=from_bus, to_bus=to_bus, length_km=1,
r_ohm_per_km=ppc['branch'][i, 2]*Zni, x_ohm_per_km=ppc['branch'][i, 3]*Zni,
c_nf_per_km=ppc['branch'][i, 4]/Zni/omega*1e9/2,
max_i_ka=max_i_ka, type='ol', max_loading_percent=100,
in_service=bool(ppc['branch'][i, 10]))
else:
if from_vn_kv >= to_vn_kv:
hv_bus = from_bus
vn_hv_kv = from_vn_kv
lv_bus = to_bus
vn_lv_kv = to_vn_kv
tap_side = 'hv'
else:
hv_bus = to_bus
vn_hv_kv = to_vn_kv
lv_bus = from_bus
vn_lv_kv = from_vn_kv
tap_side = 'lv'
if from_vn_kv == to_vn_kv:
logger.warning('The pypower branch %d (from_bus, to_bus)=(%d, %d) is considered'
' as a transformer because of a ratio != 0 | 1 but it connects '
'the same voltage level', i, ppc['branch'][i, 0],
ppc['branch'][i, 1])
rk = ppc['branch'][i, 2]
xk = ppc['branch'][i, 3]
zk = (rk ** 2 + xk ** 2) ** 0.5
sn = ppc['branch'][i, 5]
if sn == 0.0:
sn = MAX_VAL
logger.debug("ppc branch rateA is zero -> Using MAX_VAL instead to calculate " +
"apparent power")
ratio_1 = 0 if ppc['branch'][i, 8] == 0 else (ppc['branch'][i, 8] - 1) * 100
i0_percent = -ppc['branch'][i, 4] * 100 * baseMVA / sn
if i0_percent < 0:
logger.info('A transformer always consumes reactive power (behaves inductively), but the '
'susceptance of pypower branch %d (from_bus, to_bus)=(%d, %d) is '
'positive.', i, ppc['branch'][i, 0], ppc['branch'][i, 1])
pp.create_transformer_from_parameters(
net, hv_bus=hv_bus, lv_bus=lv_bus, sn_mva=sn, vn_hv_kv=vn_hv_kv,
vn_lv_kv=vn_lv_kv, vk_percent=sign(xk) * zk * sn * 100 / baseMVA,
vkr_percent=rk * sn * 100 / baseMVA, max_loading_percent=100,
pfe_kw=0, i0_percent=i0_percent, shift_degree=ppc['branch'][i, 9],
tap_step_percent=abs(ratio_1), tap_pos=sign(ratio_1),
tap_side=tap_side, tap_neutral=0)
if 'gencost' in ppc:
if len(ppc['gencost'].shape) == 1:
ppc['gencost'] = ppc['gencost'].reshape((1, -1))
if ppc['gencost'].shape[0] <= gen_lookup.shape[0]:
idx_p = range(ppc['gencost'].shape[0])
idx_q = []
elif ppc['gencost'].shape[0] > gen_lookup.shape[0]:
idx_p = range(gen_lookup.shape[0])
idx_q = range(gen_lookup.shape[0], ppc['gencost'].shape[0])
if ppc['gencost'].shape[0] >= 2*gen_lookup.shape[0]:
idx_p = range(gen_lookup.shape[0])
idx_q = range(gen_lookup.shape[0], 2*gen_lookup.shape[0])
for idx in idx_p:
_create_costs(net, ppc, gen_lookup, 'p', idx)
for idx in idx_q:
_create_costs(net, ppc, gen_lookup, 'q', idx)
if validate_conversion:
logger.setLevel(logging.DEBUG)
if not validate_from_ppc(ppc, net, **kwargs):
logger.error("Validation failed.")
net._options = {}
net._options["gen_lookup"] = gen_lookup
return net
def _validate_diff_res(diff_res, max_diff_values):
to_iterate = set(max_diff_values.keys()) & {'gen_q_mvar', 'branch_p_mw', 'branch_q_mvar',
'gen_p_mw', 'bus_va_degree', 'bus_vm_pu'}
if not len(to_iterate):
logger.warning("There are no keys to validate.")
val = True
for i in to_iterate:
elm = i.split("_")[0]
sought = ["p", "q"] if elm != "bus" else ["vm", "va"]
col = int(array([0, 1])[[j in i for j in sought]][0]) if elm != "branch" else \
list(array([[0, 2], [1, 3]])[[j in i for j in sought]][0])
val &= bool(max_(abs(diff_res[elm][:, col])) < max_diff_values[i])
return val
def validate_from_ppc(ppc_net, net, pf_type="runpp", max_diff_values={
"bus_vm_pu": 1e-6, "bus_va_degree": 1e-5, "branch_p_mw": 1e-6, "branch_q_mvar": 1e-6,
"gen_p_mw": 1e-6, "gen_q_mvar": 1e-6}, run=True):
if "opp" in pf_type:
if not (len(net.polynomial_cost) | len(net.piecewise_linear_cost)):
if "gencost" in ppc_net:
if not len(ppc_net["gencost"]):
logger.debug('ppc and pandapower net do not include cost information.')
return True
else:
logger.error('The pandapower net does not include cost information.')
return False
else:
logger.debug('ppc and pandapower net do not include cost information.')
return True
run = [run, run] if isinstance(run, bool) else run
if pypower_import and run[0]:
try:
if pf_type == "runpp":
ppc_net = runpf.runpf(ppc_net, ppopt)[0]
elif pf_type == "rundcpp":
ppc_net = rundcpf.rundcpf(ppc_net, ppopt)[0]
elif pf_type == "runopp":
ppc_net = runopf.runopf(ppc_net, ppopt)
elif pf_type == "rundcopp":
ppc_net = rundcopf.rundcopf(ppc_net, ppopt)
else:
raise ValueError("The pf_type %s is unknown" % pf_type)
except:
logger.debug("The pypower run did not work.")
ppc_success = True
if 'success' in ppc_net.keys():
if ppc_net['success'] != 1:
ppc_success = False
logger.error("The given ppc data indicates an unsuccessful pypower powerflow: " +
"'ppc_net['success'] != 1'")
if (ppc_net['branch'].shape[1] < 17):
ppc_success = False
logger.error("The shape of given ppc data indicates missing pypower powerflow results.")
if run[1]:
if pf_type == "runpp":
try:
pp.runpp(net, init="dc", calculate_voltage_angles=True, trafo_model="pi")
except pp.LoadflowNotConverged:
try:
pp.runpp(net, calculate_voltage_angles=True, init="flat", trafo_model="pi")
except pp.LoadflowNotConverged:
try:
pp.runpp(net, trafo_model="pi", calculate_voltage_angles=False)
if "bus_va_degree" in max_diff_values.keys():
max_diff_values["bus_va_degree"] = 1e2 if max_diff_values[
"bus_va_degree"] < 1e2 else max_diff_values["bus_va_degree"]
logger.info("voltage_angles could be calculated.")
except pp.LoadflowNotConverged:
logger.error('The pandapower powerflow does not converge.')
elif pf_type == "rundcpp":
try:
pp.rundcpp(net, trafo_model="pi")
except pp.LoadflowNotConverged:
logger.error('The pandapower dc powerflow does not converge.')
elif pf_type == "runopp":
try:
pp.runopp(net, init="flat", calculate_voltage_angles=True)
except pp.OPFNotConverged:
try:
pp.runopp(net, init="pf", calculate_voltage_angles=True)
except (pp.OPFNotConverged, pp.LoadflowNotConverged, KeyError):
try:
pp.runopp(net, init="flat", calculate_voltage_angles=False)
logger.info("voltage_angles could be calculated.")
if "bus_va_degree" in max_diff_values.keys():
max_diff_values["bus_va_degree"] = 1e2 if max_diff_values[
"bus_va_degree"] < 1e2 else max_diff_values["bus_va_degree"]
except pp.OPFNotConverged:
try:
pp.runopp(net, init="pf", calculate_voltage_angles=False)
if "bus_va_degree" in max_diff_values.keys():
max_diff_values["bus_va_degree"] = 1e2 if max_diff_values[
"bus_va_degree"] < 1e2 else max_diff_values["bus_va_degree"]
logger.info("voltage_angles could be calculated.")
except (pp.OPFNotConverged, pp.LoadflowNotConverged, KeyError):
logger.error('The pandapower optimal powerflow does not converge.')
elif pf_type == "rundcopp":
try:
pp.rundcopp(net)
except pp.LoadflowNotConverged:
logger.error('The pandapower dc optimal powerflow does not converge.')
else:
raise ValueError("The pf_type %s is unknown" % pf_type)
if not ppc_success:
return False
if "opp" in pf_type:
if not net.OPF_converged:
return False
elif not net.converged:
return False
ppc_res = dict.fromkeys(ppc_elms)
ppc_res["branch"] = ppc_net['branch'][:, 13:17]
ppc_res["bus"] = ppc_net['bus'][:, 7:9]
ppc_res["gen"] = ppc_net['gen'][:, 1:3]
pp_res = dict.fromkeys(ppc_elms)
pp_res["bus"] = array(net.res_bus.sort_index()[['vm_pu', 'va_degree']])
pp_res["gen"] = zeros([1, 2])
if len(ppc_net["gen"].shape) == 1:
ppc_net["gen"] = array(ppc_net["gen"], ndmin=2)
GENS = DataFrame(ppc_net['gen'][:, [0]].astype(int))
GEN_uniq = GENS.drop_duplicates()
already_used_gen = Series(zeros(GEN_uniq.shape[0]).astype(int),
index=[int(v) for v in GEN_uniq.values])
change_q_compare = []
for i, j in GENS.iterrows():
current_bus_type, current_bus_idx, same_bus_gen_idx, first_same_bus_in_service_gen_idx, \
last_same_bus_in_service_gen_idx = _gen_bus_info(ppc_net, i)
if current_bus_type == 3 and i == first_same_bus_in_service_gen_idx:
pp_res["gen"] = append(pp_res["gen"], array(net.res_ext_grid[
net.ext_grid.bus == current_bus_idx][['p_mw', 'q_mvar']]).reshape((1, 2)), 0)
elif current_bus_type == 2 and i == first_same_bus_in_service_gen_idx:
pp_res["gen"] = append(pp_res["gen"], array(net.res_gen[
net.gen.bus == current_bus_idx][['p_mw', 'q_mvar']]).reshape((1, 2)), 0)
else:
pp_res["gen"] = append(pp_res["gen"], array(net.res_sgen[
net.sgen.bus == current_bus_idx][['p_mw', 'q_mvar']])[
already_used_gen.at[int(j)]].reshape((1, 2)), 0)
already_used_gen.at[int(j)] += 1
change_q_compare += [int(j)]
pp_res["gen"] = pp_res["gen"][1:, :]
pp_res["branch"] = zeros([1, 4])
try:
init1 = concat([net.line.from_bus, net.line.to_bus], axis=1,
sort=True).drop_duplicates()
init2 = concat([net.trafo.hv_bus, net.trafo.lv_bus], axis=1,
sort=True).drop_duplicates()
except TypeError:
init1 = concat([net.line.from_bus, net.line.to_bus], axis=1).drop_duplicates()
init2 = concat([net.trafo.hv_bus, net.trafo.lv_bus], axis=1).drop_duplicates()
init1['hv_bus'] = nan
init1['lv_bus'] = nan
init2['from_bus'] = nan
init2['to_bus'] = nan
try:
already_used_branches = concat([init1, init2], axis=0, sort=True)
except TypeError:
already_used_branches = concat([init1, init2], axis=0)
already_used_branches['number'] = zeros([already_used_branches.shape[0], 1]).astype(int)
BRANCHES = DataFrame(ppc_net['branch'][:, [0, 1, 8, 9]])
for i in BRANCHES.index:
from_bus = pp.get_element_index(net, 'bus', name=int(ppc_net['branch'][i, 0]))
to_bus = pp.get_element_index(net, 'bus', name=int(ppc_net['branch'][i, 1]))
from_vn_kv = ppc_net['bus'][from_bus, 9]
to_vn_kv = ppc_net['bus'][to_bus, 9]
ratio = BRANCHES[2].at[i]
angle = BRANCHES[3].at[i]
if (from_vn_kv == to_vn_kv) & ((ratio == 0) | (ratio == 1)) & (angle == 0):
pp_res["branch"] = append(pp_res["branch"], array(net.res_line[
(net.line.from_bus == from_bus) &
(net.line.to_bus == to_bus)]
[['p_from_mw', 'q_from_mvar', 'p_to_mw', 'q_to_mvar']])[
int(already_used_branches.number.loc[
(already_used_branches.from_bus == from_bus) &
(already_used_branches.to_bus == to_bus)].values)].reshape(1, 4), 0)
already_used_branches.number.loc[(already_used_branches.from_bus == from_bus) &
(already_used_branches.to_bus == to_bus)] += 1
else:
if from_vn_kv >= to_vn_kv:
pp_res["branch"] = append(pp_res["branch"], array(net.res_trafo[
(net.trafo.hv_bus == from_bus) &
(net.trafo.lv_bus == to_bus)]
[['p_hv_mw', 'q_hv_mvar', 'p_lv_mw', 'q_lv_mvar']])[
int(already_used_branches.number.loc[
(already_used_branches.hv_bus == from_bus) &
(already_used_branches.lv_bus == to_bus)].values)].reshape(1, 4), 0)
already_used_branches.number.loc[(already_used_branches.hv_bus == from_bus) &
(already_used_branches.lv_bus == to_bus)] += 1
else:
pp_res["branch"] = append(pp_res["branch"], array(net.res_trafo[
(net.trafo.hv_bus == to_bus) &
(net.trafo.lv_bus == from_bus)]
[['p_lv_mw', 'q_lv_mvar', 'p_hv_mw', 'q_hv_mvar']])[
int(already_used_branches.number.loc[
(already_used_branches.hv_bus == to_bus) &
(already_used_branches.lv_bus == from_bus)].values)].reshape(1, 4), 0)
already_used_branches.number.loc[
(already_used_branches.hv_bus == to_bus) &
(already_used_branches.lv_bus == from_bus)] += 1
pp_res["branch"] = pp_res["branch"][1:, :]
diff_res = dict.fromkeys(ppc_elms)
diff_res["bus"] = ppc_res["bus"] - pp_res["bus"]
diff_res["bus"][:, 1] -= diff_res["bus"][0, 1]
diff_res["branch"] = ppc_res["branch"] - pp_res["branch"]
diff_res["gen"] = ppc_res["gen"] - pp_res["gen"]
for i in GEN_uniq.loc[GEN_uniq[0].isin(change_q_compare)].index:
next_is = GEN_uniq.index[GEN_uniq.index > i]
if len(next_is) > 0:
next_i = next_is[0]
else:
next_i = GENS.index[-1] + 1
if (next_i - i) > 1:
diff_res["gen"][i:next_i, 1] = sum(diff_res["gen"][i:next_i, 1])
logger.debug("Maximum voltage magnitude difference between pypower and pandapower: "
"%.2e pu" % max_(abs(diff_res["bus"][:, 0])))
logger.debug("Maximum voltage angle difference between pypower and pandapower: "
"%.2e degree" % max_(abs(diff_res["bus"][:, 1])))
logger.debug("Maximum branch flow active power difference between pypower and pandapower: "
"%.2e MW" % max_(abs(diff_res["branch"][:, [0, 2]])))
logger.debug("Maximum branch flow reactive power difference between pypower and "
"pandapower: %.2e MVAr" % max_(abs(diff_res["branch"][:, [1, 3]])))
logger.debug("Maximum active power generation difference between pypower and pandapower: "
"%.2e MW" % max_(abs(diff_res["gen"][:, 0])))
logger.debug("Maximum reactive power generation difference between pypower and pandapower: "
"%.2e MVAr" % max_(abs(diff_res["gen"][:, 1])))
if _validate_diff_res(diff_res, {"bus_vm_pu": 1e-3, "bus_va_degree": 1e-3, "branch_p_mw": 1e-6,
"branch_q_mvar": 1e-6}) and \
(max_(abs(diff_res["gen"])) > 1e-1).any():
logger.debug("The active/reactive power generation difference possibly results "
"because of a pypower error. Please validate "
"the results via pypower loadflow.")
if isinstance(max_diff_values, dict):
return _validate_diff_res(diff_res, max_diff_values)
else:
logger.debug("'max_diff_values' must be a dict.")
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 79073a669782801e093872305d757595730f469c | size: 1,278 | ext: py | lang: Python
path: src/stack-hci/azext_stack_hci/generated/commands.py
repo: Mannan2812/azure-cli-extensions @ e2b34efe23795f6db9c59100534a40f0813c3d95 | licenses: ["MIT"]
stars: 207 (2017-11-29 to 2022-03-31) | issues: 4,061 (2017-10-27 to 2022-03-31) | forks: 802 (2017-10-11 to 2022-03-31)
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=line-too-long
from azure.cli.core.commands import CliCommandType
def load_command_table(self, _):
from azext_stack_hci.generated._client_factory import cf_cluster
stack_hci_cluster = CliCommandType(
operations_tmpl='azext_stack_hci.vendored_sdks.azurestackhci.operations._cluster_operations#ClusterOperations.{}',
client_factory=cf_cluster)
with self.command_group('stack-hci cluster', stack_hci_cluster, client_factory=cf_cluster) as g:
g.custom_command('list', 'stack_hci_cluster_list')
g.custom_show_command('show', 'stack_hci_cluster_show')
g.custom_command('create', 'stack_hci_cluster_create')
g.custom_command('update', 'stack_hci_cluster_update')
g.custom_command('delete', 'stack_hci_cluster_delete', confirmation=True)
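# ---------------------------------------------------------------------------
# Illustrative CLI calls for the command group registered above. The resource
# group and cluster names are made up, and argument names may differ from the
# generated params (check `az stack-hci cluster list --help`):
#   az stack-hci cluster list --resource-group MyResourceGroup
#   az stack-hci cluster show --resource-group MyResourceGroup --name MyCluster
#   az stack-hci cluster delete --resource-group MyResourceGroup --name MyCluster
# ---------------------------------------------------------------------------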
avg_line_length: 45.642857 | max_line_length: 122 | alphanum_fraction: 0.676056
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 79073b2ef124ed61d465d23155f6a62b5a72fc2d | size: 2,074 | ext: py | lang: Python
path: setup.py
repo: abcsFrederick/HistomicsUI @ 335702b8e00e39cd733c212f894d4ef4c6a8b140 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
import os
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
def prerelease_local_scheme(version):
"""
Return local scheme version unless building on master in CircleCI.
This function returns the local scheme version number
(e.g. 0.0.0.dev<N>+g<HASH>) unless building on CircleCI for a
pre-release in which case it ignores the hash and produces a
PEP440 compliant pre-release version number (e.g. 0.0.0.dev<N>).
"""
from setuptools_scm.version import get_local_node_and_date
if os.getenv('CIRCLE_BRANCH') in ('master', ):
return ''
else:
return get_local_node_and_date(version)
setup(
name='histomicsui',
use_scm_version={'local_scheme': prerelease_local_scheme},
setup_requires=['setuptools-scm'],
description='Organize, visualize, and analyze histology images.',
author='Kitware, Inc.',
author_email='kitware@kitware.com',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
install_requires=[
'girder-large-image-annotation>=1.4.2',
'girder-slicer-cli-web[girder]>=1.2.0',
'girder-worker[girder]>=0.6.0',
'celery>=4.4.0rc5',
],
license='Apache Software License 2.0',
long_description=readme,
long_description_content_type='text/x-rst',
include_package_data=True,
keywords='girder-plugin, histomicsui',
packages=find_packages(exclude=['test', 'test.*']),
url='https://github.com/DigitalSlideArchive/histomicsui',
zip_safe=False,
python_requires='>=3.6',
entry_points={
'girder.plugin': [
'histomicsui = histomicsui:GirderPlugin'
]
},
)
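# ---------------------------------------------------------------------------
# Illustrative install/build steps for the package defined above. These are
# assumptions about a typical Girder 3 plugin workflow, not part of this file:
#   pip install .
#   girder build        # rebuild the Girder web client with the plugin
#   girder serve
# ---------------------------------------------------------------------------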
avg_line_length: 32.920635 | max_line_length: 70 | alphanum_fraction: 0.653809
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 79073b63e912a29549a90c5cea1a691c5c4ac18f | size: 7,277 | ext: py | lang: Python
path: src/helpers/conversation.py
repo: R-Mielamud/Telegram_BooksDelivery @ 0745e60a4541f38fba8ac378185aff558ec95147 | licenses: ["MIT"]
stars: 2 (2020-12-27 to 2021-06-18) | issues: null | forks: 1 (2021-12-13 to 2021-12-13)
from helpers.language import estr
ACTION_TEXT = "text"
ACTION_TEXT_QUESTION = "text_question"
ACTION_LIST_QUESTION = "list_question"
ACTION_YES_NO_QUESTION = "yesno_question"
ACTION_CHOICES_QUESTION = "choices_question"
ID = "id"
NO = "no"
YES = "yes"
TEXT = "text"
ON_NO = "on_no"
ON_YES = "on_yes"
ACTION = "action"
CHOICES = "choices"
ON_CHOICES = "on_choices"
MAX_ANSWERS = "max_answers"
STOP_COMMAND = "stop_command"
START_NUMBER = "start_number"
CONVERSATION = "conversation"
ON_INVALID_CHOICE = "on_invalid_choice"
class Result:
def __init__(self, text, skip=False):
self.text = text
self.skip = skip
class StoppableDict:
def __init__(self, data={}, stopped=False):
self.data = data
self.stopped = stopped
def get(self, key):
return self.data.get(key)
def set(self, key, value):
self.data[key] = value
def toggle_stop(self, value=None):
if value is None:
value = not self.stopped
self.stopped = value
class Level:
def __init__(self, questions, index=-1):
self.questions = questions
self.index = index
def incr(self):
self.index += 1
def should_reset(self):
return len(self.questions) == self.index + 1
def get_next_question(self):
self.incr()
if len(self.questions) > self.index:
return self.questions[self.index]
class Levels:
def __init__(self, initial=[]):
self.levels = initial
@property
def level(self):
last_index = len(self.levels) - 1
return self.levels[last_index]
def reset_last(self):
if len(self.levels) > 1:
return self.levels.pop()
def change_level(self, level):
self.levels.append(level)
def get_next_question(self):
question = self.level.get_next_question()
if question is not None:
return question
if self.reset_last() is None:
return None
return self.get_next_question()
class Conversation:
def __init__(self, manifest, default_answers={}):
self._manifest = manifest[CONVERSATION]
self._stop_command = manifest.get(STOP_COMMAND)
self._answers = StoppableDict(default_answers)
keys = list(default_answers.keys())
if len(keys) == 0:
self._current_question = None
self._levels = Levels([Level(self._manifest)])
else:
qid = keys[len(keys) - 1]
result = self._get_question_by_id(self._manifest, qid)
self._levels = result["levels"]
self._current_question = result["item"]
@property
def answers(self):
return self._answers
def _must_stop(self, prev_answer):
return estr(prev_answer, self._stop_command)
def _get_question_by_id(self, level_list, qid, prev_levels=None):
level = Level(level_list)
if prev_levels is not None:
prev_levels.change_level(level)
else:
prev_levels = Levels([level])
for item in level_list:
prev_levels.level.incr()
if type(item) == dict:
if item.get(ID) == qid and item.get(ACTION):
return {"levels": prev_levels, "item": item}
else:
for key in item:
if key == ON_NO or key == ON_YES:
result = self._get_question_by_id(item[key], qid, prev_levels)
if result is not None:
return result
elif key == ON_CHOICES:
for choice in item[key]:
result = self._get_question_by_id(item[key][choice], qid, prev_levels)
if result is not None:
return result
def get_next_question(self, prev_answer=None):
prev_question = self._current_question
if self._stop_command and (self._must_stop(prev_answer) or self._answers.stopped):
self._answers.toggle_stop(True)
return None
if prev_question:
if prev_question[ACTION] == ACTION_TEXT_QUESTION:
self._answers.set(prev_question[ID], prev_answer)
elif prev_question[ACTION] == ACTION_YES_NO_QUESTION:
yes = estr(prev_answer, prev_question[YES])
no = estr(prev_answer, prev_question[NO])
if not (yes or no):
return Result(prev_question[ON_INVALID_CHOICE])
self._answers.set(prev_question[ID], yes)
                level = Level(prev_question[ON_YES] if yes else prev_question[ON_NO])
                self._levels.change_level(level)
elif prev_question[ACTION] == ACTION_CHOICES_QUESTION:
choice_id = prev_question[CHOICES].get(prev_answer)
if choice_id is None:
return Result(prev_question[ON_INVALID_CHOICE])
self._answers.set(prev_question[ID], choice_id)
level = Level(prev_question[ON_CHOICES][choice_id])
self._levels.change_level(level)
elif prev_question[ACTION] == ACTION_LIST_QUESTION:
if not estr(prev_answer, prev_question[STOP_COMMAND]):
answers = self._answers.get(prev_question[ID])
if answers is None:
answers = []
self._answers.set(prev_question[ID], [])
if prev_answer:
self._answers.set(prev_question[ID], [*answers, prev_answer])
answers.append(prev_answer)
count = len(answers)
max_answers = prev_question[MAX_ANSWERS]
if count < max_answers:
text = "{}{}".format(self._current_question[TEXT], self._current_question[START_NUMBER] + count)
return Result(text)
elif prev_question[ACTION] == ACTION_TEXT:
self._answers.set(prev_question[ID], True)
self._current_question = self._levels.get_next_question()
if self._current_question is not None:
text = None
if self._current_question[ACTION] != ACTION_LIST_QUESTION:
text = self._current_question[TEXT]
else:
text = "{}{}".format(self._current_question[TEXT], self._current_question[START_NUMBER])
self._answers.set(self._current_question[ID], None)
return Result(text, self._current_question[ACTION] == ACTION_TEXT)
class ConversationsStorage:
def __init__(self):
self.conversations = {}
def add(self, cid, *args, **kwargs):
conversation = Conversation(*args, **kwargs)
self.conversations[cid] = conversation
return conversation
def get(self, cid):
return self.conversations.get(cid)
def set(self, cid, conversation):
self.conversations[cid] = conversation
def remove(self, cid):
return self.conversations.pop(cid, None)
def exists(self, cid):
conversation = self.get(cid)
return bool(conversation)
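# ---------------------------------------------------------------------------
# Illustrative manifest and driver loop for the classes above. The field
# layout is inferred from the constants at the top of this module; the
# question texts and the "/stop" command are made-up examples.
#
#   manifest = {
#       "stop_command": "/stop",
#       "conversation": [
#           {"id": "name", "action": "text_question",
#            "text": "What is your name?"},
#           {"id": "confirm", "action": "yesno_question",
#            "text": "Confirm the order?", "yes": "yes", "no": "no",
#            "on_invalid_choice": "Please answer yes or no.",
#            "on_yes": [{"id": "done", "action": "text", "text": "Thanks!"}],
#            "on_no": []},
#       ],
#   }
#   conv = Conversation(manifest)
#   step = conv.get_next_question()          # -> Result("What is your name?")
#   step = conv.get_next_question("Alice")   # -> Result("Confirm the order?")
# ---------------------------------------------------------------------------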
avg_line_length: 32.342222 | max_line_length: 120 | alphanum_fraction: 0.588292
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 79073bd634d16949460f21e178b7a7148d81e4da | size: 11,320 | ext: py | lang: Python
path: tfx/dsl/components/common/importer.py
repo: stjordanis/tfx @ 4749388de03230361f2b7b733a657b3bc18b4152 | licenses: ["Apache-2.0"]
stars: 1 (2019-10-02 to 2019-10-02) | issues: null | forks: null
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX Importer definition."""
from typing import Any, Dict, List, Optional, Type, Union
import absl
from tfx import types
from tfx.dsl.components.base import base_driver
from tfx.dsl.components.base import base_node
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.types import channel_utils
from tfx.utils import doc_controls
from ml_metadata.proto import metadata_store_pb2
# Constant to access importer importing result from importer output dict.
IMPORT_RESULT_KEY = 'result'
# Constant to access artifact uri from importer exec_properties dict.
SOURCE_URI_KEY = 'artifact_uri'
# Constant to access re-import option from importer exec_properties dict.
REIMPORT_OPTION_KEY = 'reimport'
def _set_artifact_properties(artifact: types.Artifact,
properties: Optional[Dict[str, Any]],
custom_properties: Optional[Dict[str, Any]]):
"""Sets properties and custom_properties to the given artifact."""
if properties is not None:
for key, value in properties.items():
setattr(artifact, key, value)
if custom_properties is not None:
for key, value in custom_properties.items():
if isinstance(value, int):
artifact.set_int_custom_property(key, value)
elif isinstance(value, (str, bytes)):
artifact.set_string_custom_property(key, value)
else:
raise NotImplementedError(
f'Unexpected custom_property value type:{type(value)}')
def _prepare_artifact(
metadata_handler: metadata.Metadata,
uri: str,
properties: Dict[str, Any],
custom_properties: Dict[str, Any],
reimport: bool, output_artifact_class: Type[types.Artifact],
mlmd_artifact_type: Optional[metadata_store_pb2.ArtifactType]
) -> types.Artifact:
"""Prepares the Importer's output artifact.
If there is already an artifact in MLMD with the same URI and properties /
custom properties, that artifact will be reused unless the `reimport`
argument is set to True.
Args:
metadata_handler: The handler of MLMD.
uri: The uri of the artifact.
properties: The properties of the artifact, given as a dictionary from
string keys to integer / string values. Must conform to the declared
properties of the destination channel's output type.
custom_properties: The custom properties of the artifact, given as a
dictionary from string keys to integer / string values.
reimport: If set to True, will register a new artifact even if it already
exists in the database.
output_artifact_class: The class of the output artifact.
mlmd_artifact_type: The MLMD artifact type of the Artifact to be created.
Returns:
An Artifact object representing the imported artifact.
"""
absl.logging.info(
'Processing source uri: %s, properties: %s, custom_properties: %s' %
(uri, properties, custom_properties))
# Check types of custom properties.
for key, value in custom_properties.items():
if not isinstance(value, (int, str, bytes)):
raise ValueError(
('Custom property value for key %r must be a string or integer '
'(got %r instead)') % (key, value))
unfiltered_previous_artifacts = metadata_handler.get_artifacts_by_uri(
uri)
# Only consider previous artifacts as candidates to reuse, if the properties
# of the imported artifact match those of the existing artifact.
previous_artifacts = []
for candidate_mlmd_artifact in unfiltered_previous_artifacts:
is_candidate = True
candidate_artifact = output_artifact_class(mlmd_artifact_type)
candidate_artifact.set_mlmd_artifact(candidate_mlmd_artifact)
for key, value in properties.items():
if getattr(candidate_artifact, key) != value:
is_candidate = False
break
for key, value in custom_properties.items():
if isinstance(value, int):
if candidate_artifact.get_int_custom_property(key) != value:
is_candidate = False
break
elif isinstance(value, (str, bytes)):
if candidate_artifact.get_string_custom_property(key) != value:
is_candidate = False
break
if is_candidate:
previous_artifacts.append(candidate_mlmd_artifact)
result = output_artifact_class(mlmd_artifact_type)
result.uri = uri
_set_artifact_properties(result, properties, custom_properties)
# If a registered artifact has the same uri and properties and the user does
# not explicitly ask for reimport, reuse that artifact.
if bool(previous_artifacts) and not reimport:
absl.logging.info('Reusing existing artifact')
result.set_mlmd_artifact(max(previous_artifacts, key=lambda m: m.id))
return result
def generate_output_dict(
metadata_handler: metadata.Metadata,
uri: str,
properties: Dict[str, Any],
custom_properties: Dict[str, Any],
reimport: bool,
output_artifact_class: Type[types.Artifact],
mlmd_artifact_type: Optional[metadata_store_pb2.ArtifactType] = None
) -> Dict[str, List[types.Artifact]]:
"""Generates importer's output dict.
If there is already an artifact in MLMD with the same URI and properties /
custom properties, that artifact will be reused unless the `reimport`
argument is set to True.
Args:
metadata_handler: The handler of MLMD.
uri: The uri of the artifact.
properties: The properties of the artifact, given as a dictionary from
string keys to integer / string values. Must conform to the declared
properties of the destination channel's output type.
custom_properties: The custom properties of the artifact, given as a
dictionary from string keys to integer / string values.
reimport: If set to True, will register a new artifact even if it already
exists in the database.
output_artifact_class: The class of the output artifact.
mlmd_artifact_type: The MLMD artifact type of the Artifact to be created.
Returns:
a dictionary with the only key `result` whose value is the Artifact.
"""
return {
IMPORT_RESULT_KEY: [
_prepare_artifact(
metadata_handler,
uri=uri,
properties=properties,
custom_properties=custom_properties,
output_artifact_class=output_artifact_class,
mlmd_artifact_type=mlmd_artifact_type,
reimport=reimport)
]
}
class ImporterDriver(base_driver.BaseDriver):
"""Driver for Importer."""
def pre_execution(
self,
input_dict: Dict[str, types.Channel],
output_dict: Dict[str, types.Channel],
exec_properties: Dict[str, Any],
driver_args: data_types.DriverArgs,
pipeline_info: data_types.PipelineInfo,
component_info: data_types.ComponentInfo,
) -> data_types.ExecutionDecision:
# Registers contexts and execution.
contexts = self._metadata_handler.register_pipeline_contexts_if_not_exists(
pipeline_info)
execution = self._metadata_handler.register_execution(
exec_properties=exec_properties,
pipeline_info=pipeline_info,
component_info=component_info,
contexts=contexts)
# Create imported artifacts.
output_channel = output_dict[IMPORT_RESULT_KEY]
output_artifacts = generate_output_dict(
self._metadata_handler,
uri=exec_properties[SOURCE_URI_KEY],
properties=output_channel.additional_properties,
custom_properties=output_channel.additional_custom_properties,
reimport=exec_properties[REIMPORT_OPTION_KEY],
output_artifact_class=output_channel.type)
# Update execution with imported artifacts.
self._metadata_handler.update_execution(
execution=execution,
component_info=component_info,
output_artifacts=output_artifacts,
execution_state=metadata.EXECUTION_STATE_CACHED,
contexts=contexts)
output_dict[IMPORT_RESULT_KEY] = channel_utils.as_channel(
output_artifacts[IMPORT_RESULT_KEY])
return data_types.ExecutionDecision(
input_dict={},
output_dict=output_artifacts,
exec_properties=exec_properties,
execution_id=execution.id,
use_cached_results=False)
class Importer(base_node.BaseNode):
"""Definition for TFX Importer.
The Importer is a special TFX node which registers an external resource into
MLMD so that downstream nodes can use the registered artifact as an input.
Here is an example to use the Importer:
```
importer = Importer(
source_uri='uri/to/schema',
artifact_type=standard_artifacts.Schema,
reimport=False).with_id('import_schema')
schema_gen = SchemaGen(
fixed_schema=importer.outputs['result'],
examples=...)
```
"""
def __init__(self,
source_uri: str,
artifact_type: Type[types.Artifact],
reimport: Optional[bool] = False,
properties: Optional[Dict[str, Union[str, int]]] = None,
custom_properties: Optional[Dict[str, Union[str, int]]] = None):
"""Init function for the Importer.
Args:
source_uri: the URI of the resource that needs to be registered.
artifact_type: the type of the artifact to import.
reimport: whether or not to re-import as a new artifact if the URI has
been imported in before.
properties: Dictionary of properties for the imported Artifact. These
properties should be ones declared for the given artifact_type (see the
PROPERTIES attribute of the definition of the type for details).
custom_properties: Dictionary of custom properties for the imported
Artifact. These properties should be of type Text or int.
"""
self._source_uri = source_uri
self._reimport = reimport
artifact = artifact_type()
_set_artifact_properties(artifact, properties, custom_properties)
# TODO(b/161490287): remove static artifacts.
self._output_dict = {
IMPORT_RESULT_KEY:
types.Channel(
type=artifact_type,
additional_properties=properties,
additional_custom_properties=custom_properties).set_artifacts(
[artifact])
}
super().__init__(driver_class=ImporterDriver)
@property
@doc_controls.do_not_generate_docs
def inputs(self) -> Dict[str, Any]:
return {}
@property
def outputs(self) -> Dict[str, Any]:
"""Output Channel dict that contains imported artifacts."""
return self._output_dict
@property
@doc_controls.do_not_generate_docs
def exec_properties(self) -> Dict[str, Any]:
return {
SOURCE_URI_KEY: self._source_uri,
REIMPORT_OPTION_KEY: int(self._reimport),
}
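# ---------------------------------------------------------------------------
# Illustrative use of `properties` / `custom_properties` with the Importer
# above. The URI and the custom property keys are made-up examples; property
# keys must match the artifact type's declared PROPERTIES:
#
#   from tfx.types import standard_artifacts
#
#   schema_importer = Importer(
#       source_uri='gs://my-bucket/schemas/v3',
#       artifact_type=standard_artifacts.Schema,
#       reimport=False,
#       custom_properties={'origin': 'manual-import', 'revision': 3},
#   ).with_id('schema_importer')
#   # downstream components consume schema_importer.outputs['result']
# ---------------------------------------------------------------------------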
avg_line_length: 37.607973 | max_line_length: 79 | alphanum_fraction: 0.716343
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 79073c21ec44a19981f5782d5aa2bbc772dd1e48 | size: 4,542 | ext: py | lang: Python
path: data/unaligned_dataset.py
repo: sinhaharsh/pytorch-CycleGAN-and-pix2pix @ 7a38c79f4344c954dd28d041c82c121c92465d3d | licenses: ["BSD-3-Clause"]
stars: 1 (2021-03-29 to 2021-03-29) | issues: null | forks: null
import os.path
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image
import random
import h5py
import numpy as np
from skimage.transform import resize as skResize
from util.util import normalize, adaptive_instance_normalization
class UnalignedDataset(BaseDataset):
"""
This dataset class can load unaligned/unpaired datasets.
It requires two directories to host training images from domain A '/path/to/data/trainA'
and from domain B '/path/to/data/trainB' respectively.
You can train the model with the dataset flag '--dataroot /path/to/data'.
Similarly, you need to prepare two directories:
'/path/to/data/testA' and '/path/to/data/testB' during test time.
"""
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A') # create a path '/path/to/data/trainA'
self.dir_B = os.path.join(opt.dataroot_B, opt.phase + 'B') # create a path '/path/to/data/trainB'
self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size)) # load images from '/path/to/data/trainA'
self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size)) # load images from '/path/to/data/trainB'
self.A_size = len(self.A_paths) # get the size of dataset A
self.B_size = len(self.B_paths) # get the size of dataset B
btoA = self.opt.direction == 'BtoA'
input_nc = self.opt.output_nc if btoA else self.opt.input_nc # get the number of channels of input image
output_nc = self.opt.input_nc if btoA else self.opt.output_nc # get the number of channels of output image
self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1))
self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1))
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index (int) -- a random integer for data indexing
Returns a dictionary that contains A, B, A_paths and B_paths
A (tensor) -- an image in the input domain
B (tensor) -- its corresponding image in the target domain
A_paths (str) -- image paths
B_paths (str) -- image paths
"""
        A_path = self.A_paths[index % self.A_size]  # make sure index is within the range
        if self.opt.serial_batches:   # use a fixed, index-aligned pairing for domain B
            index_B = index % self.B_size
        else:   # randomize the index for domain B to avoid fixed pairs.
index_B = random.randint(0, self.B_size - 1)
B_path = self.B_paths[index_B]
A_img = np.array(Image.open(A_path).convert('RGB'))
A_img = self.stack(A_img)
        # Domain B is loaded via the HSI (HDF5) loader below; for ordinary RGB
        # images, load B_path with PIL instead (as done for A_img above).
try:
B_img = self.hsi_loader(B_path)
except KeyError:
print(B_path)
B = normalize(B_img, max_=4096)
A = normalize(A_img, max_=1)
A = adaptive_instance_normalization(A, B)
del A_img, B_img
return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path}
def __len__(self):
"""Return the total number of images in the dataset.
As we have two datasets with potentially different number of images,
we take a maximum of
"""
return max(self.A_size, self.B_size)
def stack(self, img, resize=True):
_R = img[:,:,0]
_G = img[:,:,1]
_B = img[:,:,2]
R_img = np.stack((_R,)*10, axis=2)
G_img = np.stack((_G,)*10, axis=2)
B_img = np.stack((_B,)*11, axis=2)
hsi_img = np.concatenate((B_img, G_img, R_img), axis=2)
hsi_img = self.resize(hsi_img)
hsi_img = np.einsum('abc->cab', hsi_img)
return hsi_img
def resize(self, img):
img = skResize(img, (self.opt.crop_size, self.opt.crop_size))
return img
def hsi_loader(self, path):
with h5py.File(path, 'r') as f:
d = np.array(f['data'])
hs_data = np.einsum('abc -> cab',self.resize(d))
#print('Inside hsi loader, {0}'.format(np.shape(hs_data)))
return hs_data
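# ---------------------------------------------------------------------------
# Illustrative helper for producing an HDF5 file that hsi_loader() above can
# read. The 31-band shape matches the 10+10+11 channel stacking in stack();
# the file name and image size are assumptions.
#
#   import h5py, numpy as np
#   with h5py.File('sample_hsi.h5', 'w') as f:
#       f.create_dataset('data', data=np.random.rand(256, 256, 31).astype('float32'))
# ---------------------------------------------------------------------------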
avg_line_length: 41.290909 | max_line_length: 122 | alphanum_fraction: 0.625055
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 79073c3b152de45bc8a13a44580f13646ff977c7 | size: 25,633 | ext: py | lang: Python
path: util/dvsim/SimCfg.py
repo: courageheart/opentitan @ 6d88a441d8f3aa9d948dd8fef66c6fb47527bf5a | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
r"""
Class describing simulation configuration object
"""
import os
import shutil
import subprocess
import sys
from collections import OrderedDict
import logging as log
from tabulate import tabulate
from Deploy import CompileSim, CovAnalyze, CovMerge, CovReport, RunTest, Deploy
from FlowCfg import FlowCfg
from Modes import BuildModes, Modes, Regressions, RunModes, Tests
from testplanner import testplan_utils
from utils import VERBOSE, find_and_substitute_wildcards
def pick_dump_format(fmts):
'''Choose a supported wave dumping format
fmts is a list of formats that the chosen tool supports. Return the first
that we think is possible (e.g. not fsdb if Verdi is not installed).
'''
assert fmts
fmt = fmts[0]
if fmt == 'fsdb' and not shutil.which('verdi'):
return pick_dump_format(fmts[1:])
return fmt
def resolve_dump_format(tool, dump):
'''Decide on the correct dumping format
This is called after reading the config file. tool is the chosen tool,
which will always have been resolved by this point. waves is a boolean
which determines whether waves should be dumped at all (from the --waves
argument). dump is the dumping format chosen on the command line or None.
'''
assert tool is not None
SUPPORTED_DUMP_FMTS = {
'vcs': ['fsdb', 'vpd'],
'xcelium': ['fsdb', 'shm', 'vpd']
}
# Look up which dumping formats the tool supports
fmts = SUPPORTED_DUMP_FMTS.get(tool)
if dump is not None:
# If the user has specified their preferred dumping format, use it. As
# a sanity check, error out if the chosen tool doesn't support the
# format, but only if we know about the tool. If not, we'll just assume
# they know what they're doing.
if fmts is not None and dump not in fmts:
log.error('Chosen tool ({}) does not support wave '
'dumping format {!r}.'
.format(tool, dump))
sys.exit(1)
return dump
# If the user hasn't specified a dumping format, but has asked for waves,
# we need to decide on a format for them. If fmts is None, we don't know
# about this tool. Maybe it's a new simulator, in which case, default to
# VPD and hope for the best.
if not fmts:
return 'vpd'
return pick_dump_format(fmts)
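# Illustrative behaviour of resolve_dump_format() (assumes Verdi is not on
# PATH; the 'questa' entry is a made-up unknown tool):
#   resolve_dump_format('vcs', None)       -> 'vpd'  (fsdb skipped, no verdi)
#   resolve_dump_format('xcelium', 'shm')  -> 'shm'  (user choice honoured)
#   resolve_dump_format('questa', None)    -> 'vpd'  (unknown tool, default)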
class SimCfg(FlowCfg):
"""Simulation configuration object
A simulation configuration class holds key information required for building a DV
regression framework.
"""
def __init__(self, flow_cfg_file, proj_root, args):
super().__init__(flow_cfg_file, proj_root, args)
# Options set from command line
self.tool = args.tool
self.build_opts = []
self.build_opts.extend(args.build_opts)
self.en_build_modes = args.build_modes.copy()
self.run_opts = []
self.run_opts.extend(args.run_opts)
self.en_run_modes = []
self.en_run_modes.extend(args.run_modes)
self.build_unique = args.build_unique
self.build_only = args.build_only
self.run_only = args.run_only
self.reseed_ovrd = args.reseed
self.reseed_multiplier = args.reseed_multiplier
self.waves = args.waves
self.max_waves = args.max_waves
self.cov = args.cov
self.cov_merge_previous = args.cov_merge_previous
self.profile = args.profile or '(cfg uses profile without --profile)'
self.xprop_off = args.xprop_off
self.no_rerun = args.no_rerun
self.verbosity = "{" + args.verbosity + "}"
self.verbose = args.verbose
self.dry_run = args.dry_run
self.map_full_testplan = args.map_full_testplan
# Disable cov if --build-only is passed.
if self.build_only:
self.cov = False
# Set default sim modes for unpacking
if self.waves is True:
self.en_build_modes.append("waves")
if self.cov is True:
self.en_build_modes.append("cov")
if args.profile is not None:
self.en_build_modes.append("profile")
if self.xprop_off is not True:
self.en_build_modes.append("xprop")
# Options built from cfg_file files
self.project = ""
self.flow = ""
self.flow_makefile = ""
self.build_dir = ""
self.run_dir = ""
self.sw_build_dir = ""
self.pass_patterns = []
self.fail_patterns = []
self.name = ""
self.dut = ""
self.tb = ""
self.testplan = ""
self.fusesoc_core = ""
self.ral_spec = ""
self.build_modes = []
self.run_modes = []
self.regressions = []
# Options from tools - for building and running tests
self.build_cmd = ""
self.flist_gen_cmd = ""
self.flist_gen_opts = []
self.flist_file = ""
self.run_cmd = ""
# Generated data structures
self.links = {}
self.build_list = []
self.run_list = []
self.cov_merge_deploy = None
self.cov_report_deploy = None
self.results_summary = OrderedDict()
# If is_master_cfg is set, then each cfg will have its own cov_deploy.
# Maintain an array of those in cov_deploys.
self.cov_deploys = []
# Parse the cfg_file file tree
self.parse_flow_cfg(flow_cfg_file)
self._post_parse_flow_cfg()
# Choose a dump format now. Note that this has to happen after parsing
# the configuration format because our choice might depend on the
# chosen tool.
self.dump_fmt = (resolve_dump_format(self.tool, args.dump)
if self.waves else 'none')
# If build_unique is set, then add current timestamp to uniquify it
if self.build_unique:
self.build_dir += "_" + self.timestamp
# Process overrides before substituting the wildcards.
self._process_overrides()
# Make substitutions, while ignoring the following wildcards
# TODO: Find a way to set these in sim cfg instead
ignored_wildcards = [
"build_mode", "index", "test", "seed", "uvm_test", "uvm_test_seq",
"cov_db_dirs", "sw_test", "sw_test_is_prebuilt", "sw_build_device"
]
self.__dict__ = find_and_substitute_wildcards(self.__dict__,
self.__dict__,
ignored_wildcards,
self.is_master_cfg)
# Set the title for simulation results.
self.results_title = self.name.upper() + " Simulation Results"
# Stuff below only pertains to individual cfg (not master cfg)
# or individual selected cfgs (if select_cfgs is configured via command line)
# TODO: find a better way to support select_cfgs
if not self.is_master_cfg and (not self.select_cfgs or
self.name in self.select_cfgs):
# If self.tool is None at this point, there was no --tool argument on
# the command line, and there is no default tool set in the config
# file. That's ok if this is a master config (where the
# sub-configurations can choose tools themselves), but not otherwise.
if self.tool is None:
log.error('Config file does not specify a default tool, '
'and there was no --tool argument on the command line.')
sys.exit(1)
# Print info:
log.info("[scratch_dir]: [%s]: [%s]", self.name, self.scratch_path)
# Set directories with links for ease of debug / triage.
self.links = {
"D": self.scratch_path + "/" + "dispatched",
"P": self.scratch_path + "/" + "passed",
"F": self.scratch_path + "/" + "failed",
"K": self.scratch_path + "/" + "killed"
}
# Use the default build mode for tests that do not specify it
if not hasattr(self, "build_mode"):
setattr(self, "build_mode", "default")
self._process_exports()
# Create objects from raw dicts - build_modes, sim_modes, run_modes,
# tests and regressions, only if not a master cfg obj
self._create_objects()
# Post init checks
self.__post_init__()
def __post_init__(self):
# Run some post init checks
super().__post_init__()
def kill(self):
'''kill running processes and jobs gracefully
'''
super().kill()
for item in self.cov_deploys:
item.kill()
# Purge the output directories. This operates on self.
def _purge(self):
if self.scratch_path:
try:
log.info("Purging scratch path %s", self.scratch_path)
os.system("/bin/rm -rf " + self.scratch_path)
except IOError:
log.error('Failed to purge scratch directory %s',
self.scratch_path)
def _create_objects(self):
# Create build and run modes objects
self.build_modes = Modes.create_modes(BuildModes, self.build_modes)
self.run_modes = Modes.create_modes(RunModes, self.run_modes)
# Walk through build modes enabled on the CLI and append the opts
for en_build_mode in self.en_build_modes:
build_mode_obj = Modes.find_mode(en_build_mode, self.build_modes)
if build_mode_obj is not None:
self.build_opts.extend(build_mode_obj.build_opts)
self.run_opts.extend(build_mode_obj.run_opts)
else:
log.error(
"Mode \"%s\" enabled on the the command line is not defined",
en_build_mode)
sys.exit(1)
# Walk through run modes enabled on the CLI and append the opts
for en_run_mode in self.en_run_modes:
run_mode_obj = Modes.find_mode(en_run_mode, self.run_modes)
if run_mode_obj is not None:
self.run_opts.extend(run_mode_obj.run_opts)
else:
log.error(
"Mode \"%s\" enabled on the the command line is not defined",
en_run_mode)
sys.exit(1)
# Create tests from given list of items
tests = Tests.create_tests(getattr(self, "tests"), self)
setattr(self, "tests", tests)
# Regressions
# Parse testplan if provided.
if self.testplan != "":
self.testplan = testplan_utils.parse_testplan(self.testplan)
# Extract tests in each milestone and add them as regression target.
self.regressions.extend(self.testplan.get_milestone_regressions())
# Create regressions
regressions = Regressions.create_regressions(
getattr(self, "regressions"), self, tests)
setattr(self, "regressions", regressions)
def _print_list(self):
for list_item in self.list_items:
log.info("---- List of %s in %s ----", list_item, self.name)
if hasattr(self, list_item):
items = getattr(self, list_item)
for item in items:
log.info(item)
else:
log.error("Item %s does not exist!", list_item)
def _create_build_and_run_list(self):
# Walk through the list of items to run and create the build and run
# objects.
# Allow multiple regressions to run as long as the do not enable
# sim_modes or run_modes
def get_overlapping_tests(tests, run_list_names):
overlapping_tests = []
for test in tests:
if test.name in run_list_names:
overlapping_tests.append(test)
return overlapping_tests
def prune_items(items, marked_items):
pruned_items = []
for item in items:
if item not in marked_items:
pruned_items.append(item)
return pruned_items
# Check if there are items to run
if self.items == []:
log.error(
"No items provided for running this simulation / regression")
sys.exit(1)
items_list = self.items
run_list_names = []
marked_items = []
# Process regressions first
for regression in self.regressions:
if regression.name in items_list:
overlapping_tests = get_overlapping_tests(
regression.tests, run_list_names)
if overlapping_tests != []:
log.error(
"Regression \"%s\" added for run contains tests that overlap with "
"other regressions added. This can result in conflicting "
"build / run_opts to be set causing unexpected results.",
regression.name)
sys.exit(1)
self.run_list.extend(regression.tests)
# Merge regression's build and run opts with its tests and their
# build_modes
regression.merge_regression_opts()
run_list_names.extend(regression.test_names)
marked_items.append(regression.name)
items_list = prune_items(items_list, marked_items)
# Process individual tests
for test in self.tests:
if test.name in items_list:
overlapping_tests = get_overlapping_tests([test],
run_list_names)
if overlapping_tests == []:
self.run_list.append(test)
run_list_names.append(test.name)
marked_items.append(test.name)
items_list = prune_items(items_list, marked_items)
# Merge the global build and run opts
Tests.merge_global_opts(self.run_list, self.build_opts, self.run_opts)
# Check if all items have been processed
if items_list != []:
log.error(
"The items %s added for run were not found in \n%s!\n "
"Use the --list switch to see a list of available "
"tests / regressions.", items_list, self.flow_cfg_file)
# Process reseed override and create the build_list
build_list_names = []
for test in self.run_list:
# Override reseed if available.
if self.reseed_ovrd is not None:
test.reseed = self.reseed_ovrd
# Apply reseed multiplier if set on the command line.
test.reseed *= self.reseed_multiplier
# Create the unique set of builds needed.
if test.build_mode.name not in build_list_names:
self.build_list.append(test.build_mode)
build_list_names.append(test.build_mode.name)
def _create_dirs(self):
'''Create initial set of directories
'''
# Invoking system calls has a performance penalty.
# Construct a single command line chained with '&&' to invoke
# the system call only once, rather than multiple times.
create_link_dirs_cmd = ""
for link in self.links.keys():
create_link_dirs_cmd += "/bin/rm -rf " + self.links[link] + " && "
create_link_dirs_cmd += "mkdir -p " + self.links[link] + " && "
create_link_dirs_cmd += " true"
try:
os.system(create_link_dirs_cmd)
except IOError:
log.error("Error when running the cmd \"%s\"",
create_link_dirs_cmd)
sys.exit(1)
def _create_deploy_objects(self):
'''Create deploy objects from the build and run lists.
'''
# Create the build and run list first
self._create_build_and_run_list()
builds = []
build_map = {}
for build in self.build_list:
item = CompileSim(build, self)
builds.append(item)
build_map[build] = item
runs = []
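# Create one RunTest item per seed of each test; unless build-only, attach it to its build's sub list.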
for test in self.run_list:
for num in range(test.reseed):
item = RunTest(num, test, self)
if self.build_only is False:
build_map[test.build_mode].sub.append(item)
runs.append(item)
self.builds = builds
self.runs = runs
if self.run_only is True:
self.deploy = runs
else:
self.deploy = builds
# Create cov_merge and cov_report objects
if self.cov:
self.cov_merge_deploy = CovMerge(self)
self.cov_report_deploy = CovReport(self)
# Generate reports only if merge was successful; add it as a dependency
# of merge.
self.cov_merge_deploy.sub.append(self.cov_report_deploy)
# Create initial set of directories before kicking off the regression.
self._create_dirs()
def create_deploy_objects(self):
'''Public facing API for _create_deploy_objects().
'''
super().create_deploy_objects()
# Also, create cov_deploys
if self.cov:
for item in self.cfgs:
if item.cov:
self.cov_deploys.append(item.cov_merge_deploy)
# deploy additional commands as needed. We do this separately for coverage
# since that needs to happen at the end.
def deploy_objects(self):
'''This is a public facing API, so we use "self.cfgs" instead of self.
'''
# Invoke the base class method to run the regression.
super().deploy_objects()
# If coverage is enabled, then deploy the coverage tasks.
if self.cov:
Deploy.deploy(self.cov_deploys)
def _cov_analyze(self):
'''Use the last regression coverage data to open up the GUI tool to
analyze the coverage.
'''
cov_analyze_deploy = CovAnalyze(self)
self.deploy = [cov_analyze_deploy]
def cov_analyze(self):
'''Public facing API for analyzing coverage.
'''
for item in self.cfgs:
item._cov_analyze()
def _gen_results(self):
'''
The function is called after the regression has completed. It collates the
status of all run targets and generates a dict. It parses the testplan and
maps the generated result to the testplan entries to generate a final table
(list). It also prints the full list of failures for debug / triage. If cov
is enabled, then the summary coverage report is also generated. The final
result is in markdown format.
'''
# TODO: add support for html
def retrieve_result(name, results):
for item in results:
if name == item["name"]:
return item
return None
def gen_results_sub(items, results, fail_msgs):
'''
Generate the results table from the test runs (builds are ignored).
The table has 3 columns - name, passing and total as a list of dicts.
This is populated for all tests. The number of passing and total is
in reference to the number of iterations or reseeds for that test.
This list of dicts is directly consumed by the Testplan::results_table
method for testplan mapping / annotation.
'''
for item in items:
if item.status == "F":
fail_msgs += item.fail_msg
# Generate results table for runs.
if item.target == "run":
result = retrieve_result(item.name, results)
if result is None:
result = {"name": item.name, "passing": 0, "total": 0}
results.append(result)
if item.status == "P":
result["passing"] += 1
result["total"] += 1
(results, fail_msgs) = gen_results_sub(item.sub, results,
fail_msgs)
return (results, fail_msgs)
regr_results = []
fail_msgs = ""
deployed_items = self.deploy
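# Include the coverage merge job so its status is collated along with the runs.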
if self.cov:
deployed_items.append(self.cov_merge_deploy)
(regr_results, fail_msgs) = gen_results_sub(deployed_items,
regr_results, fail_msgs)
# Add title if there are indeed failures
if fail_msgs != "":
fail_msgs = "\n## List of Failures\n" + fail_msgs
self.errors_seen = True
# Generate results table for runs.
results_str = "## " + self.results_title + "\n"
results_str += "### " + self.timestamp_long + "\n"
# Add path to testplan.
if hasattr(self, "testplan_doc_path"):
testplan = "https://" + self.doc_server + '/' + getattr(
self, "testplan_doc_path")
else:
testplan = "https://" + self.doc_server + '/' + self.rel_path
testplan = testplan.replace("/dv", "/doc/dv_plan/#testplan")
results_str += "### [Testplan](" + testplan + ")\n"
results_str += "### Simulator: " + self.tool.upper() + "\n\n"
if regr_results == []:
results_str += "No results to display.\n"
else:
# TODO: check if testplan is not null?
# Map regr results to the testplan entries.
results_str += self.testplan.results_table(
regr_results=regr_results,
map_full_testplan=self.map_full_testplan)
results_str += "\n"
self.results_summary = self.testplan.results_summary
# Append coverage results if coverage was enabled.
if self.cov:
if self.cov_report_deploy.status == "P":
results_str += "\n## Coverage Results\n"
# Link the dashboard page using "cov_report_page" value.
if hasattr(self, "cov_report_page"):
results_str += "\n### [Coverage Dashboard]"
results_str += "({})\n\n".format(
getattr(self, "cov_report_page"))
results_str += self.cov_report_deploy.cov_results
self.results_summary[
"Coverage"] = self.cov_report_deploy.cov_total
else:
self.results_summary["Coverage"] = "--"
# Append a link to the detailed results page to the block name.
self.results_summary["Name"] = self._get_results_page_link(
self.results_summary["Name"])
# Append failures for triage
self.results_md = results_str + fail_msgs
results_str += fail_msgs
# Write results to the scratch area
results_file = self.scratch_path + "/results_" + self.timestamp + ".md"
with open(results_file, 'w') as f:
f.write(self.results_md)
# Return only the tables
log.info("[results page]: [%s] [%s]", self.name, results_file)
return results_str
def gen_results_summary(self):
# The summary table columns (Name, Passing, Total, Pass Rate, plus Coverage when enabled) come from each SimCfg.results_summary
header = ["Name", "Passing", "Total", "Pass Rate"]
if self.cov:
header.append('Coverage')
table = [header]
colalign = ("center", ) * len(header)
for item in self.cfgs:
row = []
for title in item.results_summary:
row.append(item.results_summary[title])
if row == []:
continue
table.append(row)
self.results_summary_md = "## " + self.results_title + " (Summary)\n"
self.results_summary_md += "### " + self.timestamp_long + "\n"
self.results_summary_md += tabulate(table,
headers="firstrow",
tablefmt="pipe",
colalign=colalign)
print(self.results_summary_md)
return self.results_summary_md
def _publish_results(self):
'''Publish coverage results to the opentitan web server.'''
super()._publish_results()
if self.cov:
results_server_dir_url = self.results_server_dir.replace(
self.results_server_prefix, self.results_server_url_prefix)
log.info("Publishing coverage results to %s",
results_server_dir_url)
cmd = (self.results_server_cmd + " -m cp -R " +
self.cov_report_deploy.cov_report_dir + " " + self.results_server_dir)
try:
cmd_output = subprocess.run(args=cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
log.log(VERBOSE, cmd_output.stdout.decode("utf-8"))
except Exception as e:
log.error("%s: Failed to publish results:\n\"%s\"", e,
str(cmd))
| 39.314417
| 91
| 0.576132
|
import os
import shutil
import subprocess
import sys
from collections import OrderedDict
import logging as log
from tabulate import tabulate
from Deploy import CompileSim, CovAnalyze, CovMerge, CovReport, RunTest, Deploy
from FlowCfg import FlowCfg
from Modes import BuildModes, Modes, Regressions, RunModes, Tests
from testplanner import testplan_utils
from utils import VERBOSE, find_and_substitute_wildcards
def pick_dump_format(fmts):
assert fmts
fmt = fmts[0]
if fmt == 'fsdb' and not shutil.which('verdi'):
return pick_dump_format(fmts[1:])
return fmt
def resolve_dump_format(tool, dump):
assert tool is not None
SUPPORTED_DUMP_FMTS = {
'vcs': ['fsdb', 'vpd'],
'xcelium': ['fsdb', 'shm', 'vpd']
}
fmts = SUPPORTED_DUMP_FMTS.get(tool)
if dump is not None:
if fmts is not None and dump not in fmts:
log.error('Chosen tool ({}) does not support wave '
'dumping format {!r}.'
.format(tool, dump))
sys.exit(1)
return dump
if not fmts:
return 'vpd'
return pick_dump_format(fmts)
class SimCfg(FlowCfg):
def __init__(self, flow_cfg_file, proj_root, args):
super().__init__(flow_cfg_file, proj_root, args)
self.tool = args.tool
self.build_opts = []
self.build_opts.extend(args.build_opts)
self.en_build_modes = args.build_modes.copy()
self.run_opts = []
self.run_opts.extend(args.run_opts)
self.en_run_modes = []
self.en_run_modes.extend(args.run_modes)
self.build_unique = args.build_unique
self.build_only = args.build_only
self.run_only = args.run_only
self.reseed_ovrd = args.reseed
self.reseed_multiplier = args.reseed_multiplier
self.waves = args.waves
self.max_waves = args.max_waves
self.cov = args.cov
self.cov_merge_previous = args.cov_merge_previous
self.profile = args.profile or '(cfg uses profile without --profile)'
self.xprop_off = args.xprop_off
self.no_rerun = args.no_rerun
self.verbosity = "{" + args.verbosity + "}"
self.verbose = args.verbose
self.dry_run = args.dry_run
self.map_full_testplan = args.map_full_testplan
if self.build_only:
self.cov = False
if self.waves is True:
self.en_build_modes.append("waves")
if self.cov is True:
self.en_build_modes.append("cov")
if args.profile is not None:
self.en_build_modes.append("profile")
if self.xprop_off is not True:
self.en_build_modes.append("xprop")
self.project = ""
self.flow = ""
self.flow_makefile = ""
self.build_dir = ""
self.run_dir = ""
self.sw_build_dir = ""
self.pass_patterns = []
self.fail_patterns = []
self.name = ""
self.dut = ""
self.tb = ""
self.testplan = ""
self.fusesoc_core = ""
self.ral_spec = ""
self.build_modes = []
self.run_modes = []
self.regressions = []
self.build_cmd = ""
self.flist_gen_cmd = ""
self.flist_gen_opts = []
self.flist_file = ""
self.run_cmd = ""
self.links = {}
self.build_list = []
self.run_list = []
self.cov_merge_deploy = None
self.cov_report_deploy = None
self.results_summary = OrderedDict()
self.cov_deploys = []
self.parse_flow_cfg(flow_cfg_file)
self._post_parse_flow_cfg()
self.dump_fmt = (resolve_dump_format(self.tool, args.dump)
if self.waves else 'none')
if self.build_unique:
self.build_dir += "_" + self.timestamp
self._process_overrides()
ignored_wildcards = [
"build_mode", "index", "test", "seed", "uvm_test", "uvm_test_seq",
"cov_db_dirs", "sw_test", "sw_test_is_prebuilt", "sw_build_device"
]
self.__dict__ = find_and_substitute_wildcards(self.__dict__,
self.__dict__,
ignored_wildcards,
self.is_master_cfg)
self.results_title = self.name.upper() + " Simulation Results"
if not self.is_master_cfg and (not self.select_cfgs or
self.name in self.select_cfgs):
if self.tool is None:
log.error('Config file does not specify a default tool, '
'and there was no --tool argument on the command line.')
sys.exit(1)
# Print info:
log.info("[scratch_dir]: [%s]: [%s]", self.name, self.scratch_path)
# Set directories with links for ease of debug / triage.
self.links = {
"D": self.scratch_path + "/" + "dispatched",
"P": self.scratch_path + "/" + "passed",
"F": self.scratch_path + "/" + "failed",
"K": self.scratch_path + "/" + "killed"
}
# Use the default build mode for tests that do not specify it
if not hasattr(self, "build_mode"):
setattr(self, "build_mode", "default")
self._process_exports()
# Create objects from raw dicts - build_modes, sim_modes, run_modes,
# tests and regressions, only if not a master cfg obj
self._create_objects()
# Post init checks
self.__post_init__()
def __post_init__(self):
# Run some post init checks
super().__post_init__()
def kill(self):
super().kill()
for item in self.cov_deploys:
item.kill()
# Purge the output directories. This operates on self.
def _purge(self):
if self.scratch_path:
try:
log.info("Purging scratch path %s", self.scratch_path)
os.system("/bin/rm -rf " + self.scratch_path)
except IOError:
log.error('Failed to purge scratch directory %s',
self.scratch_path)
def _create_objects(self):
# Create build and run modes objects
self.build_modes = Modes.create_modes(BuildModes, self.build_modes)
self.run_modes = Modes.create_modes(RunModes, self.run_modes)
# Walk through build modes enabled on the CLI and append the opts
for en_build_mode in self.en_build_modes:
build_mode_obj = Modes.find_mode(en_build_mode, self.build_modes)
if build_mode_obj is not None:
self.build_opts.extend(build_mode_obj.build_opts)
self.run_opts.extend(build_mode_obj.run_opts)
else:
log.error(
"Mode \"%s\" enabled on the command line is not defined",
en_build_mode)
sys.exit(1)
# Walk through run modes enabled on the CLI and append the opts
for en_run_mode in self.en_run_modes:
run_mode_obj = Modes.find_mode(en_run_mode, self.run_modes)
if run_mode_obj is not None:
self.run_opts.extend(run_mode_obj.run_opts)
else:
log.error(
"Mode \"%s\" enabled on the command line is not defined",
en_run_mode)
sys.exit(1)
# Create tests from given list of items
tests = Tests.create_tests(getattr(self, "tests"), self)
setattr(self, "tests", tests)
# Regressions
# Parse testplan if provided.
if self.testplan != "":
self.testplan = testplan_utils.parse_testplan(self.testplan)
# Extract tests in each milestone and add them as regression target.
self.regressions.extend(self.testplan.get_milestone_regressions())
# Create regressions
regressions = Regressions.create_regressions(
getattr(self, "regressions"), self, tests)
setattr(self, "regressions", regressions)
def _print_list(self):
for list_item in self.list_items:
log.info("---- List of %s in %s ----", list_item, self.name)
if hasattr(self, list_item):
items = getattr(self, list_item)
for item in items:
log.info(item)
else:
log.error("Item %s does not exist!", list_item)
def _create_build_and_run_list(self):
# Walk through the list of items to run and create the build and run
# objects.
# Allow multiple regressions to run as long as they do not enable
# sim_modes or run_modes
def get_overlapping_tests(tests, run_list_names):
overlapping_tests = []
for test in tests:
if test.name in run_list_names:
overlapping_tests.append(test)
return overlapping_tests
def prune_items(items, marked_items):
pruned_items = []
for item in items:
if item not in marked_items:
pruned_items.append(item)
return pruned_items
# Check if there are items to run
if self.items == []:
log.error(
"No items provided for running this simulation / regression")
sys.exit(1)
items_list = self.items
run_list_names = []
marked_items = []
# Process regressions first
for regression in self.regressions:
if regression.name in items_list:
overlapping_tests = get_overlapping_tests(
regression.tests, run_list_names)
if overlapping_tests != []:
log.error(
"Regression \"%s\" added for run contains tests that overlap with "
"other regressions added. This can result in conflicting "
"build / run_opts to be set causing unexpected results.",
regression.name)
sys.exit(1)
self.run_list.extend(regression.tests)
# Merge regression's build and run opts with its tests and their build_modes
regression.merge_regression_opts()
run_list_names.extend(regression.test_names)
marked_items.append(regression.name)
items_list = prune_items(items_list, marked_items)
for test in self.tests:
if test.name in items_list:
overlapping_tests = get_overlapping_tests([test],
run_list_names)
if overlapping_tests == []:
self.run_list.append(test)
run_list_names.append(test.name)
marked_items.append(test.name)
items_list = prune_items(items_list, marked_items)
Tests.merge_global_opts(self.run_list, self.build_opts, self.run_opts)
if items_list != []:
log.error(
"The items %s added for run were not found in \n%s!\n "
"Use the --list switch to see a list of available "
"tests / regressions.", items_list, self.flow_cfg_file)
build_list_names = []
for test in self.run_list:
if self.reseed_ovrd is not None:
test.reseed = self.reseed_ovrd
test.reseed *= self.reseed_multiplier
if test.build_mode.name not in build_list_names:
self.build_list.append(test.build_mode)
build_list_names.append(test.build_mode.name)
def _create_dirs(self):
create_link_dirs_cmd = ""
for link in self.links.keys():
create_link_dirs_cmd += "/bin/rm -rf " + self.links[link] + " && "
create_link_dirs_cmd += "mkdir -p " + self.links[link] + " && "
create_link_dirs_cmd += " true"
try:
os.system(create_link_dirs_cmd)
except IOError:
log.error("Error when running the cmd \"%s\"",
create_link_dirs_cmd)
sys.exit(1)
def _create_deploy_objects(self):
self._create_build_and_run_list()
builds = []
build_map = {}
for build in self.build_list:
item = CompileSim(build, self)
builds.append(item)
build_map[build] = item
runs = []
for test in self.run_list:
for num in range(test.reseed):
item = RunTest(num, test, self)
if self.build_only is False:
build_map[test.build_mode].sub.append(item)
runs.append(item)
self.builds = builds
self.runs = runs
if self.run_only is True:
self.deploy = runs
else:
self.deploy = builds
if self.cov:
self.cov_merge_deploy = CovMerge(self)
self.cov_report_deploy = CovReport(self)
self.cov_merge_deploy.sub.append(self.cov_report_deploy)
self._create_dirs()
def create_deploy_objects(self):
super().create_deploy_objects()
if self.cov:
for item in self.cfgs:
if item.cov:
self.cov_deploys.append(item.cov_merge_deploy)
def deploy_objects(self):
super().deploy_objects()
if self.cov:
Deploy.deploy(self.cov_deploys)
def _cov_analyze(self):
cov_analyze_deploy = CovAnalyze(self)
self.deploy = [cov_analyze_deploy]
def cov_analyze(self):
for item in self.cfgs:
item._cov_analyze()
def _gen_results(self):
def retrieve_result(name, results):
for item in results:
if name == item["name"]:
return item
return None
def gen_results_sub(items, results, fail_msgs):
for item in items:
if item.status == "F":
fail_msgs += item.fail_msg
if item.target == "run":
result = retrieve_result(item.name, results)
if result is None:
result = {"name": item.name, "passing": 0, "total": 0}
results.append(result)
if item.status == "P":
result["passing"] += 1
result["total"] += 1
(results, fail_msgs) = gen_results_sub(item.sub, results,
fail_msgs)
return (results, fail_msgs)
regr_results = []
fail_msgs = ""
deployed_items = self.deploy
if self.cov:
deployed_items.append(self.cov_merge_deploy)
(regr_results, fail_msgs) = gen_results_sub(deployed_items,
regr_results, fail_msgs)
if fail_msgs != "":
fail_msgs = "\n## List of Failures\n" + fail_msgs
self.errors_seen = True
results_str = "## " + self.results_title + "\n"
results_str += "### " + self.timestamp_long + "\n"
if hasattr(self, "testplan_doc_path"):
testplan = "https://" + self.doc_server + '/' + getattr(
self, "testplan_doc_path")
else:
testplan = "https://" + self.doc_server + '/' + self.rel_path
testplan = testplan.replace("/dv", "/doc/dv_plan/#testplan")
results_str += "### [Testplan](" + testplan + ")\n"
results_str += "### Simulator: " + self.tool.upper() + "\n\n"
if regr_results == []:
results_str += "No results to display.\n"
else:
results_str += self.testplan.results_table(
regr_results=regr_results,
map_full_testplan=self.map_full_testplan)
results_str += "\n"
self.results_summary = self.testplan.results_summary
if self.cov:
if self.cov_report_deploy.status == "P":
results_str += "\n## Coverage Results\n"
if hasattr(self, "cov_report_page"):
results_str += "\n### [Coverage Dashboard]"
results_str += "({})\n\n".format(
getattr(self, "cov_report_page"))
results_str += self.cov_report_deploy.cov_results
self.results_summary[
"Coverage"] = self.cov_report_deploy.cov_total
else:
self.results_summary["Coverage"] = "--"
self.results_summary["Name"] = self._get_results_page_link(
self.results_summary["Name"])
self.results_md = results_str + fail_msgs
results_str += fail_msgs
results_file = self.scratch_path + "/results_" + self.timestamp + ".md"
with open(results_file, 'w') as f:
f.write(self.results_md)
log.info("[results page]: [%s] [%s]", self.name, results_file)
return results_str
def gen_results_summary(self):
header = ["Name", "Passing", "Total", "Pass Rate"]
if self.cov:
header.append('Coverage')
table = [header]
colalign = ("center", ) * len(header)
for item in self.cfgs:
row = []
for title in item.results_summary:
row.append(item.results_summary[title])
if row == []:
continue
table.append(row)
self.results_summary_md = "## " + self.results_title + " (Summary)\n"
self.results_summary_md += "### " + self.timestamp_long + "\n"
self.results_summary_md += tabulate(table,
headers="firstrow",
tablefmt="pipe",
colalign=colalign)
print(self.results_summary_md)
return self.results_summary_md
def _publish_results(self):
super()._publish_results()
if self.cov:
results_server_dir_url = self.results_server_dir.replace(
self.results_server_prefix, self.results_server_url_prefix)
log.info("Publishing coverage results to %s",
results_server_dir_url)
cmd = (self.results_server_cmd + " -m cp -R " +
self.cov_report_deploy.cov_report_dir + " " + self.results_server_dir)
try:
cmd_output = subprocess.run(args=cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
log.log(VERBOSE, cmd_output.stdout.decode("utf-8"))
except Exception as e:
log.error("%s: Failed to publish results:\n\"%s\"", e,
str(cmd))
| true
| true
|
79073d8868d789aecb12c3135ea15d4fafab4d85
| 7,610
|
py
|
Python
|
rmgpy/molecule/converterTest.py
|
mbprend/RMG-Py
|
29e111d683f2daa0b376417be60e76b32ce8a993
|
[
"MIT"
] | null | null | null |
rmgpy/molecule/converterTest.py
|
mbprend/RMG-Py
|
29e111d683f2daa0b376417be60e76b32ce8a993
|
[
"MIT"
] | null | null | null |
rmgpy/molecule/converterTest.py
|
mbprend/RMG-Py
|
29e111d683f2daa0b376417be60e76b32ce8a993
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2019 Prof. William H. Green (whgreen@mit.edu), #
# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
"""
This module contains unit tests for the converter module.
"""
import unittest
from rmgpy.molecule.converter import debug_rdkit_mol, to_rdkit_mol, from_rdkit_mol, to_ob_mol, from_ob_mol
from rmgpy.molecule.molecule import Molecule
class RDKitTest(unittest.TestCase):
def test_debugger(self):
"""Test the debug_rdkit_mol(rdmol) function doesn't crash
We can't really test it in the unit testing framework, because
that already captures and redirects standard output, and that
conflicts with the function, but this checks it doesn't crash.
"""
import rdkit.Chem
import logging
rdmol = rdkit.Chem.MolFromSmiles('CCC')
message = debug_rdkit_mol(rdmol, level=logging.INFO)
self.assertIsNotNone(message)
def test_lone_pair_retention(self):
"""Test that we don't lose any lone pairs on round trip RDKit conversion."""
mol = Molecule().from_adjacency_list("""
1 C u0 p0 c0 {2,D} {3,S} {4,S}
2 O u0 p2 c0 {1,D}
3 H u0 p0 c0 {1,S}
4 H u0 p0 c0 {1,S}
""")
rdmol = to_rdkit_mol(mol)
mol2 = from_rdkit_mol(Molecule(), rdmol)
self.assertTrue(mol.is_isomorphic(mol2))
def test_atom_mapping_1(self):
"""Test that to_rdkit_mol returns correct indices and atom mappings."""
bond_order_dict = {'SINGLE': 1, 'DOUBLE': 2, 'TRIPLE': 3, 'AROMATIC': 1.5}
mol = Molecule().from_smiles('C1CCC=C1C=O')
rdkitmol, rd_atom_indices = to_rdkit_mol(mol, remove_h=False, return_mapping=True)
for atom in mol.atoms:
# Check that all atoms are found in mapping
self.assertTrue(atom in rd_atom_indices)
# Check that all bonds are in rdkitmol with correct mapping and order
for connected_atom, bond in atom.bonds.items():
bond_type = str(rdkitmol.GetBondBetweenAtoms(rd_atom_indices[atom],
rd_atom_indices[connected_atom]).GetBondType())
rdkit_bond_order = bond_order_dict[bond_type]
self.assertEqual(bond.order, rdkit_bond_order)
# Test for remove_h = True
rdkitmol2, rd_atom_indices2 = to_rdkit_mol(mol, remove_h=True, return_mapping=True)
for atom in mol.atoms:
# Check that all non-hydrogen atoms are found in mapping
if atom.symbol != 'H':
self.assertTrue(atom in rd_atom_indices2)
# Check that all bonds connected to non-hydrogen have the correct mapping and order
for connected_atom, bond in atom.bonds.items():
if connected_atom.symbol != 'H':
bond_type = str(rdkitmol2.GetBondBetweenAtoms(rd_atom_indices2[atom],
rd_atom_indices2[connected_atom]).GetBondType())
rdkit_bond_order = bond_order_dict[bond_type]
self.assertEqual(bond.order, rdkit_bond_order)
def test_atom_mapping_2(self):
"""Test that to_rdkit_mol returns correct indices and atom mappings when hydrogens are removed."""
adjlist = """
1 H u0 p0 c0 {2,S}
2 C u0 p0 c0 {1,S} {3,S} {4,S} {5,S}
3 H u0 p0 c0 {2,S}
4 H u0 p0 c0 {2,S}
5 O u0 p2 c0 {2,S} {6,S}
6 H u0 p0 c0 {5,S}
"""
mol = Molecule().from_adjacency_list(adjlist)
rdkitmol, rd_atom_indices = to_rdkit_mol(mol, remove_h=True, return_mapping=True)
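# With remove_h=True, only heavy (non-hydrogen) atoms should appear in the returned mapping.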
heavy_atoms = [at for at in mol.atoms if at.number != 1]
for at1 in heavy_atoms:
for at2 in heavy_atoms:
if mol.has_bond(at1, at2):
try:
rdkitmol.GetBondBetweenAtoms(rd_atom_indices[at1], rd_atom_indices[at2])
except RuntimeError:
self.fail("RDKit failed to find the bond between the original atoms!")
class ConverterTest(unittest.TestCase):
def setUp(self):
"""Function run before each test in this class."""
self.test_mols = [
Molecule().from_smiles('C'),
Molecule().from_smiles('O'),
Molecule().from_smiles('N'),
Molecule().from_smiles('S'),
Molecule().from_smiles('[CH2]C'),
Molecule().from_smiles('[CH]C'),
Molecule().from_smiles('C=CC=C'),
Molecule().from_smiles('C#C[CH2]'),
Molecule().from_smiles('c1ccccc1'),
Molecule().from_smiles('[13CH3]C'),
Molecule().from_smiles('O=CCO').generate_h_bonded_structures()[0],
]
self.test_Hbond_free_mol = Molecule().from_smiles('O=CCO')
def test_rdkit_round_trip(self):
"""Test conversion to and from RDKitMol"""
for mol in self.test_mols:
rdkit_mol = to_rdkit_mol(mol)
new_mol = from_rdkit_mol(Molecule(), rdkit_mol)
self.assertTrue(mol.is_isomorphic(new_mol) or self.test_Hbond_free_mol.is_isomorphic(new_mol))
self.assertEqual(mol.get_element_count(), new_mol.get_element_count())
def test_ob_round_trip(self):
"""Test conversion to and from OBMol"""
for mol in self.test_mols:
ob_mol = to_ob_mol(mol)
new_mol = from_ob_mol(Molecule(), ob_mol)
self.assertTrue(mol.is_isomorphic(new_mol) or self.test_Hbond_free_mol.is_isomorphic(new_mol))
self.assertEqual(mol.get_element_count(), new_mol.get_element_count())
| 48.782051
| 118
| 0.574376
| true
| true
|
|
79073ec29185452124100779ea022dcfedd7983c
| 3,624
|
py
|
Python
|
Resources/books/deep_learning_time_series_forecasting/code/chapter_14/03_cnn_forecast_model.py
|
gdepalma93/bright-athlete-academy
|
54ba0cc6633637c1bd6d90120153e04b981244bf
|
[
"MIT"
] | null | null | null |
Resources/books/deep_learning_time_series_forecasting/code/chapter_14/03_cnn_forecast_model.py
|
gdepalma93/bright-athlete-academy
|
54ba0cc6633637c1bd6d90120153e04b981244bf
|
[
"MIT"
] | null | null | null |
Resources/books/deep_learning_time_series_forecasting/code/chapter_14/03_cnn_forecast_model.py
|
gdepalma93/bright-athlete-academy
|
54ba0cc6633637c1bd6d90120153e04b981244bf
|
[
"MIT"
] | null | null | null |
# evaluate cnn for monthly car sales dataset
from math import sqrt
from numpy import array
from numpy import mean
from numpy import std
from pandas import DataFrame
from pandas import concat
from pandas import read_csv
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from matplotlib import pyplot
# split a univariate dataset into train/test sets
def train_test_split(data, n_test):
return data[:-n_test], data[-n_test:]
# transform list into supervised learning format
def series_to_supervised(data, n_in, n_out=1):
df = DataFrame(data)
cols = list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
# put it all together
agg = concat(cols, axis=1)
# drop rows with NaN values
agg.dropna(inplace=True)
return agg.values
# root mean squared error or rmse
def measure_rmse(actual, predicted):
return sqrt(mean_squared_error(actual, predicted))
# fit a model
def model_fit(train, config):
# unpack config
n_input, n_filters, n_kernel, n_epochs, n_batch = config
# prepare data
data = series_to_supervised(train, n_input)
train_x, train_y = data[:, :-1], data[:, -1]
train_x = train_x.reshape((train_x.shape[0], train_x.shape[1], 1))
# define model
model = Sequential()
model.add(Conv1D(n_filters, n_kernel, activation='relu', input_shape=(n_input, 1)))
model.add(Conv1D(n_filters, n_kernel, activation='relu'))
model.add(MaxPooling1D())
model.add(Flatten())
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
# fit
model.fit(train_x, train_y, epochs=n_epochs, batch_size=n_batch, verbose=0)
return model
# forecast with a pre-fit model
def model_predict(model, history, config):
# unpack config
n_input, _, _, _, _ = config
# prepare data
x_input = array(history[-n_input:]).reshape((1, n_input, 1))
# forecast
yhat = model.predict(x_input, verbose=0)
return yhat[0]
# walk-forward validation for univariate data
def walk_forward_validation(data, n_test, cfg):
predictions = list()
# split dataset
train, test = train_test_split(data, n_test)
# fit model
model = model_fit(train, cfg)
# seed history with training dataset
history = [x for x in train]
# step over each time-step in the test set
for i in range(len(test)):
# fit model and make forecast for history
yhat = model_predict(model, history, cfg)
# store forecast in list of predictions
predictions.append(yhat)
# add actual observation to history for the next loop
history.append(test[i])
# estimate prediction error
error = measure_rmse(test, predictions)
print(' > %.3f' % error)
return error
# repeat evaluation of a config
def repeat_evaluate(data, config, n_test, n_repeats=30):
# fit and evaluate the model n times
scores = [walk_forward_validation(data, n_test, config) for _ in range(n_repeats)]
return scores
# summarize model performance
def summarize_scores(name, scores):
# print a summary
scores_m, score_std = mean(scores), std(scores)
print('%s: %.3f RMSE (+/- %.3f)' % (name, scores_m, score_std))
# box and whisker plot
pyplot.boxplot(scores)
pyplot.show()
series = read_csv('monthly-car-sales.csv', header=0, index_col=0)
data = series.values
# data split
n_test = 12
# define config
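# config order matches model_fit's unpacking: [n_input, n_filters, n_kernel, n_epochs, n_batch]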
config = [36, 256, 3, 100, 100]
# repeat evaluation of the config
scores = repeat_evaluate(data, config, n_test)
# summarize scores
summarize_scores('cnn', scores)
| 30.974359
| 84
| 0.74117
|
from math import sqrt
from numpy import array
from numpy import mean
from numpy import std
from pandas import DataFrame
from pandas import concat
from pandas import read_csv
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from matplotlib import pyplot
def train_test_split(data, n_test):
return data[:-n_test], data[-n_test:]
def series_to_supervised(data, n_in, n_out=1):
df = DataFrame(data)
cols = list()
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
for i in range(0, n_out):
cols.append(df.shift(-i))
agg = concat(cols, axis=1)
agg.dropna(inplace=True)
return agg.values
def measure_rmse(actual, predicted):
return sqrt(mean_squared_error(actual, predicted))
def model_fit(train, config):
n_input, n_filters, n_kernel, n_epochs, n_batch = config
data = series_to_supervised(train, n_input)
train_x, train_y = data[:, :-1], data[:, -1]
train_x = train_x.reshape((train_x.shape[0], train_x.shape[1], 1))
model = Sequential()
model.add(Conv1D(n_filters, n_kernel, activation='relu', input_shape=(n_input, 1)))
model.add(Conv1D(n_filters, n_kernel, activation='relu'))
model.add(MaxPooling1D())
model.add(Flatten())
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
model.fit(train_x, train_y, epochs=n_epochs, batch_size=n_batch, verbose=0)
return model
def model_predict(model, history, config):
n_input, _, _, _, _ = config
x_input = array(history[-n_input:]).reshape((1, n_input, 1))
yhat = model.predict(x_input, verbose=0)
return yhat[0]
def walk_forward_validation(data, n_test, cfg):
predictions = list()
train, test = train_test_split(data, n_test)
model = model_fit(train, cfg)
history = [x for x in train]
for i in range(len(test)):
yhat = model_predict(model, history, cfg)
predictions.append(yhat)
history.append(test[i])
error = measure_rmse(test, predictions)
print(' > %.3f' % error)
return error
def repeat_evaluate(data, config, n_test, n_repeats=30):
scores = [walk_forward_validation(data, n_test, config) for _ in range(n_repeats)]
return scores
def summarize_scores(name, scores):
scores_m, score_std = mean(scores), std(scores)
print('%s: %.3f RMSE (+/- %.3f)' % (name, scores_m, score_std))
pyplot.boxplot(scores)
pyplot.show()
series = read_csv('monthly-car-sales.csv', header=0, index_col=0)
data = series.values
n_test = 12
config = [36, 256, 3, 100, 100]
scores = repeat_evaluate(data, config, n_test)
summarize_scores('cnn', scores)
| true
| true
|
79073eef787b92302619125fa418bcfd0045d283
| 2,947
|
py
|
Python
|
UnityBuilder.py
|
christian-stockinger/UnityBuilder
|
d1a893eeb561c49475f3b2a0bde21c6d6b468153
|
[
"MIT"
] | 4
|
2019-10-10T05:50:04.000Z
|
2022-02-08T06:26:14.000Z
|
UnityBuilder.py
|
christian-stockinger/UnityBuilder
|
d1a893eeb561c49475f3b2a0bde21c6d6b468153
|
[
"MIT"
] | 2
|
2018-11-21T12:52:41.000Z
|
2018-11-21T12:55:46.000Z
|
UnityBuilder.py
|
christian-stockinger/UnityBuilder
|
d1a893eeb561c49475f3b2a0bde21c6d6b468153
|
[
"MIT"
] | 3
|
2019-05-29T14:03:31.000Z
|
2022-02-08T06:26:18.000Z
|
import platform
import subprocess
import sys
from optparse import OptionParser
from util import fileLogger
from util import logger
def parse_start_arguments():
parser = OptionParser()
parser.add_option("--unityPath", dest="UnityPath", default=True, help="Path to Unity application")
parser.add_option("--projectPath", dest="ProjectPath", default=True, help="Path to Unity Project")
parser.add_option("--logPath", dest="LogPath", default=True, help="Path to Unity Log File")
parser.add_option("-e", "--executionMessage", dest="ExecutionMethod", default=True, help="Execution method after Unity started completely")
parser.add_option("-t", "--target", dest="Target", help="Build Target of the Build")
parser.add_option("--noTimer", dest="NoTimer", action='store_true', help="no timestamp should be displayed")
(options, args) = parser.parse_args()
return options
def detect_os():
operation_system = platform.system()
LOGGER.info("Detected " + operation_system + " as operating system")
return operation_system
options = parse_start_arguments()
LOGGER = logger.Logger(options.NoTimer)
os = detect_os()
def start_unity_build_command():
LOGGER.info("Start Unity Build")
try:
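# Assemble the headless Unity batch-mode build command from the CLI options.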
build_command = options.UnityPath + " -projectPath " + options.ProjectPath + \
" -logfile " + options.LogPath + \
" -buildTarget " + options.Target + \
" -quit " \
"-batchmode " \
"-nographics " \
"-executeMethod " + options.ExecutionMethod
if os != "Windows":
process = subprocess.Popen(build_command, shell=True, stdout=subprocess.PIPE)
process.wait()
else:
subprocess.call(build_command)
except subprocess.CalledProcessError as e:
sys.exit(e.returncode)
def cleanup_unity_process():
try:
LOGGER.info("Cleaning up Unity process")
if os == "Windows":
subprocess.call(r'TASKKILL /F /IM Unity.exe', stderr=subprocess.PIPE)
except subprocess.CalledProcessError as error:
LOGGER.warn("Couldn't kill unity " + str(error))
def cleanup_old_logfile():
try:
open(options.LogPath, 'w').close()
LOGGER.info("old log cleared")
except FileNotFoundError:
LOGGER.info("No old log file was found")
try:
LOGGER.log("DEBUG", "Starting with arguments: " + str(options))
LOGGER.info("Cleaning old logfile")
cleanup_old_logfile()
LOGGER.info("Read logfile tailing")
logfile = fileLogger.ContinuousFileLogger(options.LogPath, options.NoTimer)
logfile.start()
LOGGER.info("Start unity")
start_unity_build_command()
LOGGER.info("Cleanup Processes")
cleanup_unity_process()
LOGGER.info("Cleanup logger")
logfile.stop()
except Exception as e:
LOGGER.error("Failed to start a thread" + str(e))
| 34.267442
| 141
| 0.657279
|
import platform
import subprocess
import sys
from optparse import OptionParser
from util import fileLogger
from util import logger
def parse_start_arguments():
parser = OptionParser()
parser.add_option("--unityPath", dest="UnityPath", default=True, help="Path to Unity application")
parser.add_option("--projectPath", dest="ProjectPath", default=True, help="Path to Unity Project")
parser.add_option("--logPath", dest="LogPath", default=True, help="Path to Unity Log File")
parser.add_option("-e", "--executionMessage", dest="ExecutionMethod", default=True, help="Execution method after Unity started completely")
parser.add_option("-t", "--target", dest="Target", help="Build Target of the Build")
parser.add_option("--noTimer", dest="NoTimer", action='store_true', help="no timestamp should be displayed")
(options, args) = parser.parse_args()
return options
def detect_os():
operation_system = platform.system()
LOGGER.info("Detected " + operation_system + " as operating system")
return operation_system
options = parse_start_arguments()
LOGGER = logger.Logger(options.NoTimer)
os = detect_os()
def start_unity_build_command():
LOGGER.info("Start Unity Build")
try:
build_command = options.UnityPath + " -projectPath " + options.ProjectPath + \
" -logfile " + options.LogPath + \
" -buildTarget " + options.Target + \
" -quit " \
"-batchmode " \
"-nographics " \
"-executeMethod " + options.ExecutionMethod
if os != "Windows":
process = subprocess.Popen(build_command, shell=True, stdout=subprocess.PIPE)
process.wait()
else:
subprocess.call(build_command)
except subprocess.CalledProcessError as e:
sys.exit(e.returncode)
def cleanup_unity_process():
try:
LOGGER.info("Cleaning up Unity process")
if os == "Windows":
subprocess.call(r'TASKKILL /F /IM Unity.exe', stderr=subprocess.PIPE)
except subprocess.CalledProcessError as error:
LOGGER.warn("Couldn't kill unity " + str(error))
def cleanup_old_logfile():
try:
open(options.LogPath, 'w').close()
LOGGER.info("old log cleared")
except FileNotFoundError:
LOGGER.info("No old log file was found")
try:
LOGGER.log("DEBUG", "Starting with arguments: " + str(options))
LOGGER.info("Cleaning old logfile")
cleanup_old_logfile()
LOGGER.info("Read logfile tailing")
logfile = fileLogger.ContinuousFileLogger(options.LogPath, options.NoTimer)
logfile.start()
LOGGER.info("Start unity")
start_unity_build_command()
LOGGER.info("Cleanup Processes")
cleanup_unity_process()
LOGGER.info("Cleanup logger")
logfile.stop()
except Exception as e:
LOGGER.error("Failed to start a thread" + str(e))
| true
| true
|
79073ef939a5f6202ae68182d6333ac36ac8c3c0
| 3,298
|
py
|
Python
|
mitmirror/presenters/controllers/users/update_user_controller.py
|
Claayton/mitmirror-api
|
a78ec3aa84aa3685a26bfaf5e1ba2a3f0f8405d1
|
[
"MIT"
] | null | null | null |
mitmirror/presenters/controllers/users/update_user_controller.py
|
Claayton/mitmirror-api
|
a78ec3aa84aa3685a26bfaf5e1ba2a3f0f8405d1
|
[
"MIT"
] | 1
|
2021-10-09T20:42:03.000Z
|
2021-10-09T20:42:03.000Z
|
mitmirror/presenters/controllers/users/update_user_controller.py
|
Claayton/mitmirror-api
|
a78ec3aa84aa3685a26bfaf5e1ba2a3f0f8405d1
|
[
"MIT"
] | null | null | null |
"""Controller for UpdateUser"""
from typing import Type, Optional
from datetime import datetime
from mitmirror.domain.usecases import UpdateUserInterface
from mitmirror.domain.models import User
from mitmirror.presenters.interfaces import ControllerInterface
from mitmirror.presenters.helpers import HttpRequest, HttpResponse
from mitmirror.errors import (
HttpBadRequestError,
DefaultError,
HttpNotFound,
HttpUnprocessableEntity,
)
class UpdateUserController(ControllerInterface):
"""Controller for the UpdateUser use case"""
def __init__(self, usecase: Type[UpdateUserInterface]) -> None:
self.__usecase = usecase
def handler(
self, param: Optional[any] = None, http_request: Type[HttpRequest] = None
) -> HttpResponse:
"""Method that invokes the use case"""
response = None
if not param:
raise HttpBadRequestError(
message="Essa requisiçao exige o seguinte parametro: <int:user_id>, error!"
)
if not str(param).isnumeric():
raise HttpUnprocessableEntity(
message="O parametro <user_id> deve ser do tipo inteiro, error!"
)
try:
response = None
if not http_request.body:
raise DefaultError(type_error=400)
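# Pull the optional update fields from the request body (missing keys default to None).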
name = http_request.body.get("name", None)
email = http_request.body.get("email", None)
username = http_request.body.get("username", None)
password = http_request.body.get("password", None)
response = self.__usecase.update(
user_id=param,
name=name,
email=email,
username=username,
password=password,
)
return self.__format_response(response["data"])
except DefaultError as error:
if error.type_error == 400:
raise HttpBadRequestError(
message="Esta requisicao precisa dos seguintes parametros:\
<str:name>, <str:email>, <str:username>, <any:password>, error!"
) from error
if error.type_error == 404:
raise HttpNotFound(message="Usuario nao encontrado, error!") from error
raise error
except Exception as error:
raise error
@classmethod
def __format_response(cls, response_method: Type[User]) -> HttpResponse:
"""Format the response"""
response = {
"message": "Informacoes do usuario atualizadas com sucesso!",
"data": {
"id": response_method.id,
"name": response_method.name,
"email": response_method.email,
"username": response_method.username,
"password_hash": "Nao mostramos isso aqui!",
"secundary_id": response_method.secundary_id,
"is_staff": response_method.is_staff,
"is_active_user": response_method.is_active_user,
"last_login": datetime.isoformat(response_method.last_login),
"date_joined": datetime.isoformat(response_method.date_joined),
},
}
return HttpResponse(status_code=200, body=response)
| 31.409524
| 91
| 0.604912
|
from typing import Type, Optional
from datetime import datetime
from mitmirror.domain.usecases import UpdateUserInterface
from mitmirror.domain.models import User
from mitmirror.presenters.interfaces import ControllerInterface
from mitmirror.presenters.helpers import HttpRequest, HttpResponse
from mitmirror.errors import (
HttpBadRequestError,
DefaultError,
HttpNotFound,
HttpUnprocessableEntity,
)
class UpdateUserController(ControllerInterface):
def __init__(self, usecase: Type[UpdateUserInterface]) -> None:
self.__usecase = usecase
def handler(
self, param: Optional[any] = None, http_request: Type[HttpRequest] = None
) -> HttpResponse:
response = None
if not param:
raise HttpBadRequestError(
message="Essa requisiçao exige o seguinte parametro: <int:user_id>, error!"
)
if not str(param).isnumeric():
raise HttpUnprocessableEntity(
message="O parametro <user_id> deve ser do tipo inteiro, error!"
)
try:
response = None
if not http_request.body:
raise DefaultError(type_error=400)
name = http_request.body.get("name", None)
email = http_request.body.get("email", None)
username = http_request.body.get("username", None)
password = http_request.body.get("password", None)
response = self.__usecase.update(
user_id=param,
name=name,
email=email,
username=username,
password=password,
)
return self.__format_response(response["data"])
except DefaultError as error:
if error.type_error == 400:
raise HttpBadRequestError(
message="Esta requisicao precisa dos seguintes parametros:\
<str:name>, <str:email>, <str:username>, <any:password>, error!"
) from error
if error.type_error == 404:
raise HttpNotFound(message="Usuario nao encontrado, error!") from error
raise error
except Exception as error:
raise error
@classmethod
def __format_response(cls, response_method: Type[User]) -> HttpResponse:
response = {
"message": "Informacoes do usuario atualizadas com sucesso!",
"data": {
"id": response_method.id,
"name": response_method.name,
"email": response_method.email,
"username": response_method.username,
"password_hash": "Nao mostramos isso aqui!",
"secundary_id": response_method.secundary_id,
"is_staff": response_method.is_staff,
"is_active_user": response_method.is_active_user,
"last_login": datetime.isoformat(response_method.last_login),
"date_joined": datetime.isoformat(response_method.date_joined),
},
}
return HttpResponse(status_code=200, body=response)
| true
| true
|
79073f3f97130d31fc71762545122d48683a6122
| 24,686
|
py
|
Python
|
datacube/index/_datasets.py
|
cronosnull/agdc-v2
|
596923779d3650c47a6b43276b3369a5ec619158
|
[
"Apache-2.0"
] | 1
|
2015-08-24T18:16:41.000Z
|
2015-08-24T18:16:41.000Z
|
datacube/index/_datasets.py
|
cronosnull/agdc-v2
|
596923779d3650c47a6b43276b3369a5ec619158
|
[
"Apache-2.0"
] | null | null | null |
datacube/index/_datasets.py
|
cronosnull/agdc-v2
|
596923779d3650c47a6b43276b3369a5ec619158
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
"""
API for dataset indexing, access and search.
"""
from __future__ import absolute_import
import logging
from cachetools.func import lru_cache
from datacube import compat
from datacube.model import Dataset, DatasetType, MetadataType
from datacube.utils import InvalidDocException, check_doc_unchanged, jsonify_document, get_doc_changes, contains
from . import fields
from .exceptions import DuplicateRecordError, UnknownFieldError
_LOG = logging.getLogger(__name__)
class MetadataTypeResource(object):
def __init__(self, db):
"""
:type db: datacube.index.postgres._api.PostgresDb
"""
self._db = db
def add(self, definition, allow_table_lock=False):
"""
:type definition: dict
:param allow_table_lock:
Allow an exclusive lock to be taken on the table while creating the indexes.
This will halt other user's requests until completed.
If false, creation will be slightly slower and cannot be done in a transaction.
:rtype: datacube.model.MetadataType
"""
# This column duplication is getting out of hand:
MetadataType.validate(definition)
name = definition['name']
existing = self._db.get_metadata_type_by_name(name)
if existing:
# They've passed us the same one again. Make sure it matches what is stored.
# TODO: Support for adding/updating search fields?
check_doc_unchanged(
existing.definition,
definition,
'Metadata Type {}'.format(name)
)
else:
self._db.add_metadata_type(
name=name,
definition=definition,
concurrently=not allow_table_lock
)
return self.get_by_name(name)
@lru_cache()
def get(self, id_):
"""
:rtype: datacube.model.MetadataType
"""
return self._make(self._db.get_metadata_type(id_))
@lru_cache()
def get_by_name(self, name):
"""
:rtype: datacube.model.MetadataType
"""
record = self._db.get_metadata_type_by_name(name)
if not record:
return None
return self._make(record)
def check_field_indexes(self, allow_table_lock=False, rebuild_all=False):
"""
Create or replace per-field indexes and views.
:param allow_table_lock:
Allow an exclusive lock to be taken on the table while creating the indexes.
This will halt other user's requests until completed.
If false, creation will be slightly slower and cannot be done in a transaction.
"""
self._db.check_dynamic_fields(concurrently=not allow_table_lock, rebuild_all=rebuild_all)
def _make_many(self, query_rows):
return (self._make(c) for c in query_rows)
def _make(self, query_row):
"""
:rtype list[datacube.model.MetadataType]
"""
definition = query_row['definition']
dataset_ = definition['dataset']
return MetadataType(
query_row['name'],
dataset_,
dataset_search_fields=self._db.get_dataset_fields(query_row),
id_=query_row['id']
)
class DatasetTypeResource(object):
"""
:type _db: datacube.index.postgres._api.PostgresDb
:type metadata_type_resource: MetadataTypeResource
"""
def __init__(self, db, metadata_type_resource):
"""
:type db: datacube.index.postgres._api.PostgresDb
:type metadata_type_resource: MetadataTypeResource
"""
self._db = db
self.metadata_type_resource = metadata_type_resource
def from_doc(self, definition):
"""
Create a Product from its definitions
:param dict definition: product definition document
:rtype: datacube.model.DatasetType
"""
# This column duplication is getting out of hand:
DatasetType.validate(definition)
metadata_type = definition['metadata_type']
# They either specified the name of a metadata type, or specified a metadata type.
# Is it a name?
if isinstance(metadata_type, compat.string_types):
metadata_type = self.metadata_type_resource.get_by_name(metadata_type)
else:
# Otherwise they embedded a document, add it if needed:
metadata_type = self.metadata_type_resource.add(metadata_type, allow_table_lock=False)
if not metadata_type:
raise InvalidDocException('Unknown metadata type: %r' % definition['metadata_type'])
return DatasetType(metadata_type, definition)
def add(self, type_):
"""
Add a Product
:param datacube.model.DatasetType type_: Product to add
:rtype: datacube.model.DatasetType
"""
DatasetType.validate(type_.definition)
existing = self._db.get_dataset_type_by_name(type_.name)
if existing:
# TODO: Support for adding/updating match rules?
# They've passed us the same collection again. Make sure it matches what is stored.
check_doc_unchanged(
existing.definition,
jsonify_document(type_.definition),
'Dataset type {}'.format(type_.name)
)
else:
self._db.add_dataset_type(
name=type_.name,
metadata=type_.metadata_doc,
metadata_type_id=type_.metadata_type.id,
definition=type_.definition
)
return self.get_by_name(type_.name)
def update(self, type_, allow_unsafe_updates=False):
"""
Update a product. Unsafe changes will throw a ValueError by default.
(An unsafe change is anything that may potentially make the product
incompatible with existing datasets of that type)
:param datacube.model.DatasetType type_: Product to add
:param allow_unsafe_updates bool: Allow unsafe changes. Use with caution.
:rtype: datacube.model.DatasetType
"""
DatasetType.validate(type_.definition)
existing = self._db.get_dataset_type_by_name(type_.name)
if not existing:
raise ValueError('Unknown product %s, cannot update – did you intend to add it?' % type_.name)
def handle_unsafe(msg):
if not allow_unsafe_updates:
raise ValueError(msg)
else:
_LOG.warning("Ignoring %s", msg)
# We'll probably want to use offsets in the future (ie. nested dicts), not just keys, but for now this suffices.
safe_keys_to_change = ('description', 'metadata')
doc_changes = get_doc_changes(existing.definition, jsonify_document(type_.definition))
for offset, old_value, new_value in doc_changes:
_LOG.info('Changing %s %s: %r -> %r', type_.name, '.'.join(offset), old_value, new_value)
key_name = offset[0]
if key_name not in safe_keys_to_change:
handle_unsafe('Potentially unsafe update: changing %r of product definition.' % key_name)
# You can safely make the match rules looser but not tighter.
if key_name == 'metadata':
# Tightening them could exclude datasets already matched to the product.
# (which would make search results wrong)
if not contains(old_value, new_value, case_sensitive=True):
handle_unsafe('Unsafe update: new product match rules are not a superset of old ones.')
if doc_changes:
_LOG.info("Updating product %s", type_.name)
self._db.update_dataset_type(
name=type_.name,
metadata=type_.metadata_doc,
metadata_type_id=type_.metadata_type.id,
definition=type_.definition
)
# Clear our local cache. Note that other users may still have
# cached copies for the duration of their connections.
self.get_by_name.cache_clear()
self.get.cache_clear()
else:
_LOG.info("No changes detected for product %s", type_.name)
def update_document(self, definition, allow_unsafe_update=False):
"""
Update a Product using its definition
:param dict definition: product definition document
:rtype: datacube.model.DatasetType
"""
type_ = self.from_doc(definition)
return self.update(type_, allow_unsafe_updates=allow_unsafe_update)
def add_document(self, definition):
"""
Add a Product using its definition
:param dict definition: product definition document
:rtype: datacube.model.DatasetType
"""
type_ = self.from_doc(definition)
return self.add(type_)
@lru_cache()
def get(self, id_):
"""
Retrieve Product by id
:param int id_: id of the Product
:rtype: datacube.model.DatasetType
"""
return self._make(self._db.get_dataset_type(id_))
@lru_cache()
def get_by_name(self, name):
"""
Retrieve Product by name
:param str name: name of the Product
:rtype: datacube.model.DatasetType
"""
result = self._db.get_dataset_type_by_name(name)
if not result:
return None
return self._make(result)
def get_with_fields(self, field_names):
"""
Return dataset types that have all the given fields.
:param tuple[str] field_names:
:rtype: __generator[DatasetType]
"""
for type_ in self.get_all():
for name in field_names:
if name not in type_.metadata_type.dataset_fields:
break
else:
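# for-else: the inner loop finished without break, so this type has every requested field.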
yield type_
def search(self, **query):
"""
Return dataset types that match all the given query fields.
:param dict query:
:rtype: __generator[DatasetType]
"""
for type_, q in self.search_robust(**query):
if not q:
yield type_
def search_robust(self, **query):
"""
Return dataset types matching the match-able query fields, along with a dict of the remaining un-matchable fields.
:param dict query:
:rtype: __generator[(DatasetType, dict)]
"""
for type_ in self.get_all():
q = query.copy()
if q.pop('product', type_.name) != type_.name:
continue
if q.pop('metadata_type', type_.metadata_type.name) != type_.metadata_type.name:
continue
for key, value in list(q.items()):
try:
exprs = fields.to_expressions(type_.metadata_type.dataset_fields.get, **{key: value})
except UnknownFieldError as e:
break
try:
if all(expr.evaluate(type_.metadata_doc) for expr in exprs):
q.pop(key)
else:
break
except (AttributeError, KeyError, ValueError) as e:
continue
else:
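# for-else: no break occurred, so all matchable fields matched; q holds the un-matchable leftovers.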
yield type_, q
def get_all(self):
"""
Retrieve all Products
:rtype: iter[datacube.model.DatasetType]
"""
return (self._make(record) for record in self._db.get_all_dataset_types())
def _make_many(self, query_rows):
return (self._make(c) for c in query_rows)
def _make(self, query_row):
"""
:rtype datacube.model.DatasetType
"""
return DatasetType(
definition=query_row['definition'],
metadata_type=self.metadata_type_resource.get(query_row['metadata_type_ref']),
id_=query_row['id'],
)
class DatasetResource(object):
"""
:type _db: datacube.index.postgres._api.PostgresDb
:type types: datacube.index._datasets.DatasetTypeResource
"""
def __init__(self, db, dataset_type_resource):
"""
:type db: datacube.index.postgres._api.PostgresDb
:type dataset_type_resource: datacube.index._datasets.DatasetTypeResource
"""
self._db = db
self.types = dataset_type_resource
def get(self, id_, include_sources=False):
"""
Get dataset by id
:param uuid id_: id of the dataset to retrieve
:param bool include_sources: get the full provenance graph?
:rtype: datacube.model.Dataset
"""
if not include_sources:
return self._make(self._db.get_dataset(id_), full_info=True)
datasets = {result['id']: (self._make(result, full_info=True), result)
for result in self._db.get_dataset_sources(id_)}
for dataset, result in datasets.values():
dataset.metadata_doc['lineage']['source_datasets'] = {
classifier: datasets[str(source)][0].metadata_doc
for source, classifier in zip(result['sources'], result['classes']) if source
}
dataset.sources = {
classifier: datasets[str(source)][0]
for source, classifier in zip(result['sources'], result['classes']) if source
}
return datasets[id_][0]
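# Usage sketch (illustrative only; ``dataset_resource`` and ``some_uuid`` are
# placeholder names): with ``include_sources=True`` the returned Dataset carries
# its provenance, so source datasets are reachable via ``dataset.sources`` keyed
# by classifier.
#
#     dataset = dataset_resource.get(some_uuid, include_sources=True)
#     for classifier, source in dataset.sources.items():
#         print(classifier, source.id)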
def get_derived(self, id_):
"""
Get derived datasets
:param uuid id_: dataset id
:rtype: list[datacube.model.Dataset]
"""
return [self._make(result) for result in self._db.get_derived_datasets(id_)]
def has(self, dataset):
"""
Have we already indexed this dataset?
:param datacube.model.Dataset dataset: dataset to check
:rtype: bool
"""
return self._db.contains_dataset(dataset.id)
def add(self, dataset, skip_sources=False):
"""
Ensure a dataset is in the index. Add it if not present.
:param datacube.model.Dataset dataset: dataset to add
:param bool skip_sources: don't attempt to index sources (use when sources are already indexed)
:rtype: datacube.model.Dataset
"""
if not skip_sources:
for source in dataset.sources.values():
self.add(source)
was_inserted = False
sources_tmp = dataset.type.dataset_reader(dataset.metadata_doc).sources
dataset.type.dataset_reader(dataset.metadata_doc).sources = {}
try:
_LOG.info('Indexing %s', dataset.id)
with self._db.begin() as transaction:
try:
was_inserted = transaction.insert_dataset(dataset.metadata_doc, dataset.id, dataset.type.id)
for classifier, source_dataset in dataset.sources.items():
transaction.insert_dataset_source(classifier, dataset.id, source_dataset.id)
# try to update location in the same transaction as insertion.
# if insertion fails we'll try updating location later
# if insertion succeeds the location bit can't possibly fail
if dataset.local_uri:
transaction.ensure_dataset_location(dataset.id, dataset.local_uri)
except DuplicateRecordError as e:
_LOG.warning(str(e))
if not was_inserted:
existing = self.get(dataset.id)
if existing:
check_doc_unchanged(
existing.metadata_doc,
jsonify_document(dataset.metadata_doc),
'Dataset {}'.format(dataset.id)
)
# reinsert attempt? try updating the location
if dataset.local_uri:
try:
self._db.ensure_dataset_location(dataset.id, dataset.local_uri)
except DuplicateRecordError as e:
_LOG.warning(str(e))
finally:
dataset.type.dataset_reader(dataset.metadata_doc).sources = sources_tmp
return dataset
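# Indexing sketch (illustrative only; ``dataset_resource`` and ``derived_dataset``
# are placeholder names): ``add`` indexes the provenance tree first, so a derived
# dataset can be added in one call; pass ``skip_sources=True`` when the sources
# are already known to be indexed.
#
#     dataset_resource.add(derived_dataset)                     # indexes sources, then the dataset
#     dataset_resource.add(derived_dataset, skip_sources=True)  # sources assumed present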
def archive(self, ids):
"""
Mark datasets as archived
:param list[uuid] ids: list of dataset ids to archive
"""
with self._db.begin() as transaction:
for id_ in ids:
transaction.archive_dataset(id_)
def restore(self, ids):
"""
Mark datasets as not archived
:param list[uuid] ids: list of dataset ids to restore
"""
with self._db.begin() as transaction:
for id_ in ids:
transaction.restore_dataset(id_)
def get_field_names(self, type_name=None):
"""
:param str type_name:
:rtype: __generator[str]
"""
if type_name is None:
types = self.types.get_all()
else:
types = [self.types.get_by_name(type_name)]
for type_ in types:
for name in type_.metadata_type.dataset_fields:
yield name
def get_locations(self, dataset):
"""
:param datacube.model.Dataset dataset: dataset
:rtype: list[str]
"""
return self._db.get_locations(dataset.id)
def _make(self, dataset_res, full_info=False):
"""
:rtype datacube.model.Dataset
:param bool full_info: Include all available fields
"""
return Dataset(
self.types.get(dataset_res.dataset_type_ref),
dataset_res.metadata,
dataset_res.local_uri,
indexed_by=dataset_res.added_by if full_info else None,
indexed_time=dataset_res.added if full_info else None
)
def _make_many(self, query_result):
"""
:rtype list[datacube.model.Dataset]
"""
return (self._make(dataset) for dataset in query_result)
def search_by_metadata(self, metadata):
"""
Perform a search using arbitrary metadata, returning results as Dataset objects.
Caution – slow! This will usually not use indexes.
:param dict metadata:
:rtype: list[datacube.model.Dataset]
"""
return self._make_many(self._db.search_datasets_by_metadata(metadata))
def search(self, **query):
"""
Perform a search, returning results as Dataset objects.
:param dict[str,str|float|datacube.model.Range] query:
:rtype: __generator[datacube.model.Dataset]
"""
for dataset_type, datasets in self._do_search_by_product(query):
for dataset in self._make_many(datasets):
yield dataset
def search_by_product(self, **query):
"""
Perform a search, returning datasets grouped by product type.
:param dict[str,str|float|datacube.model.Range] query:
:rtype: __generator[(datacube.model.DatasetType, __generator[datacube.model.Dataset])]
"""
for dataset_type, datasets in self._do_search_by_product(query):
yield dataset_type, self._make_many(datasets)
def count(self, **query):
"""
Perform a search, returning count of results.
:param dict[str,str|float|datacube.model.Range] query:
:rtype: int
"""
# This may be optimised into one query in the future.
result = 0
for product_type, count in self._do_count_by_product(query):
result += count
return result
def count_by_product(self, **query):
"""
Perform a search, returning a count for each matching product type.
:param dict[str,str|float|datacube.model.Range] query:
:returns: Sequence of (product, count)
:rtype: __generator[(datacube.model.DatasetType, int)]
"""
return self._do_count_by_product(query)
def count_by_product_through_time(self, period, **query):
"""
Perform a search, returning counts for each product grouped in time slices
of the given period.
:param dict[str,str|float|datacube.model.Range] query:
:param str period: Time range for each slice: '1 month', '1 day' etc.
:returns: For each matching product type, a list of time ranges and their count.
:rtype: __generator[(datacube.model.DatasetType, list[(datetime.datetime, datetime.datetime), int)]]
"""
return self._do_time_count(period, query)
def count_product_through_time(self, period, **query):
"""
Perform a search, returning counts for a single product grouped in time slices
of the given period.
Will raise an error if the search terms match more than one product.
:param dict[str,str|float|datacube.model.Range] query:
:param str period: Time range for each slice: '1 month', '1 day' etc.
:returns: For each matching product type, a list of time ranges and their count.
:rtype: list[(str, list[(datetime.datetime, datetime.datetime), int)]]
"""
return next(self._do_time_count(period, query, ensure_single=True))[1]
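# Counting sketch (illustrative only): time-sliced counts require a 'time' range
# in the query. ``Range`` refers to datacube.model.Range as used in the signatures
# above; the datetimes and the product name are placeholders, and each result entry
# pairs a time range with its count, as described in the docstrings above.
#
#     counts = dataset_resource.count_by_product_through_time(
#         '1 month',
#         time=Range(datetime(2015, 1, 1), datetime(2016, 1, 1)),
#         product='ls8_nbar_albers',
#     )
#     for product, slices in counts:
#         for time_range, n in slices:
#             print(product.name, time_range, n)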
def _get_dataset_types(self, q):
types = set()
if 'product' in q.keys():
types.add(self.types.get_by_name(q['product']))
else:
# Otherwise search any metadata type that has all the given search fields.
types = self.types.get_with_fields(tuple(q.keys()))
if not types:
raise ValueError('No type of dataset has fields: %r', tuple(q.keys()))
return types
def _get_product_queries(self, query):
for dataset_type, q in self.types.search_robust(**query):
q['dataset_type_id'] = dataset_type.id
yield q, dataset_type
def _do_search_by_product(self, query, return_fields=False, with_source_ids=False):
for q, dataset_type in self._get_product_queries(query):
dataset_fields = dataset_type.metadata_type.dataset_fields
query_exprs = tuple(fields.to_expressions(dataset_fields.get, **q))
select_fields = None
if return_fields:
select_fields = tuple(dataset_fields.values())
yield (dataset_type,
self._db.search_datasets(
query_exprs,
select_fields=select_fields,
with_source_ids=with_source_ids
))
def _do_count_by_product(self, query):
for q, dataset_type in self._get_product_queries(query):
dataset_fields = dataset_type.metadata_type.dataset_fields
query_exprs = tuple(fields.to_expressions(dataset_fields.get, **q))
count = self._db.count_datasets(query_exprs)
if count > 0:
yield dataset_type, count
def _do_time_count(self, period, query, ensure_single=False):
if 'time' not in query:
raise ValueError('Counting through time requires a "time" range query argument')
query = dict(query)
start, end = query['time']
del query['time']
product_quries = list(self._get_product_queries(query))
if ensure_single:
if len(product_quries) == 0:
raise ValueError('No products match search terms: %r' % query)
if len(product_quries) > 1:
raise ValueError('Multiple products match single query search: %r' %
([dt.name for q, dt in product_quries],))
for q, dataset_type in product_quries:
dataset_fields = dataset_type.metadata_type.dataset_fields
query_exprs = tuple(fields.to_expressions(dataset_fields.get, **q))
yield dataset_type, list(self._db.count_datasets_through_time(
start,
end,
period,
dataset_fields.get('time'),
query_exprs
))
def search_summaries(self, **query):
"""
Perform a search, returning just the search fields of each dataset.
:param dict[str,str|float|datacube.model.Range] query:
:rtype: dict
"""
for dataset_type, results in self._do_search_by_product(query, return_fields=True):
for columns in results:
yield dict(columns)
def search_eager(self, **query):
"""
Perform a search, returning results as Dataset objects.
:param dict[str,str|float|datacube.model.Range] query:
:rtype: list[datacube.model.Dataset]
"""
return list(self.search(**query))
| 36.196481
| 120
| 0.606741
|
from __future__ import absolute_import
import logging
from cachetools.func import lru_cache
from datacube import compat
from datacube.model import Dataset, DatasetType, MetadataType
from datacube.utils import InvalidDocException, check_doc_unchanged, jsonify_document, get_doc_changes, contains
from . import fields
from .exceptions import DuplicateRecordError, UnknownFieldError
_LOG = logging.getLogger(__name__)
class MetadataTypeResource(object):
def __init__(self, db):
self._db = db
def add(self, definition, allow_table_lock=False):
MetadataType.validate(definition)
name = definition['name']
existing = self._db.get_metadata_type_by_name(name)
if existing:
# TODO: Support for adding/updating search fields?
check_doc_unchanged(
existing.definition,
definition,
'Metadata Type {}'.format(name)
)
else:
self._db.add_metadata_type(
name=name,
definition=definition,
concurrently=not allow_table_lock
)
return self.get_by_name(name)
@lru_cache()
def get(self, id_):
return self._make(self._db.get_metadata_type(id_))
@lru_cache()
def get_by_name(self, name):
record = self._db.get_metadata_type_by_name(name)
if not record:
return None
return self._make(record)
def check_field_indexes(self, allow_table_lock=False, rebuild_all=False):
self._db.check_dynamic_fields(concurrently=not allow_table_lock, rebuild_all=rebuild_all)
def _make_many(self, query_rows):
return (self._make(c) for c in query_rows)
def _make(self, query_row):
definition = query_row['definition']
dataset_ = definition['dataset']
return MetadataType(
query_row['name'],
dataset_,
dataset_search_fields=self._db.get_dataset_fields(query_row),
id_=query_row['id']
)
class DatasetTypeResource(object):
def __init__(self, db, metadata_type_resource):
self._db = db
self.metadata_type_resource = metadata_type_resource
def from_doc(self, definition):
# This column duplication is getting out of hand:
DatasetType.validate(definition)
metadata_type = definition['metadata_type']
# They either specified the name of a metadata type, or specified a metadata type.
# Is it a name?
if isinstance(metadata_type, compat.string_types):
metadata_type = self.metadata_type_resource.get_by_name(metadata_type)
else:
# Otherwise they embedded a document, add it if needed:
metadata_type = self.metadata_type_resource.add(metadata_type, allow_table_lock=False)
if not metadata_type:
raise InvalidDocException('Unknown metadata type: %r' % definition['metadata_type'])
return DatasetType(metadata_type, definition)
def add(self, type_):
DatasetType.validate(type_.definition)
existing = self._db.get_dataset_type_by_name(type_.name)
if existing:
# TODO: Support for adding/updating match rules?
# They've passed us the same collection again. Make sure it matches what is stored.
check_doc_unchanged(
existing.definition,
jsonify_document(type_.definition),
'Dataset type {}'.format(type_.name)
)
else:
self._db.add_dataset_type(
name=type_.name,
metadata=type_.metadata_doc,
metadata_type_id=type_.metadata_type.id,
definition=type_.definition
)
return self.get_by_name(type_.name)
def update(self, type_, allow_unsafe_updates=False):
DatasetType.validate(type_.definition)
existing = self._db.get_dataset_type_by_name(type_.name)
if not existing:
raise ValueError('Unknown product %s, cannot update – did you intend to add it?' % type_.name)
def handle_unsafe(msg):
if not allow_unsafe_updates:
raise ValueError(msg)
else:
_LOG.warning("Ignoring %s", msg)
safe_keys_to_change = ('description', 'metadata')
doc_changes = get_doc_changes(existing.definition, jsonify_document(type_.definition))
for offset, old_value, new_value in doc_changes:
_LOG.info('Changing %s %s: %r -> %r', type_.name, '.'.join(offset), old_value, new_value)
key_name = offset[0]
if key_name not in safe_keys_to_change:
handle_unsafe('Potentially unsafe update: changing %r of product definition.' % key_name)
# You can safely make the match rules looser but not tighter.
if key_name == 'metadata':
# Tightening them could exclude datasets already matched to the product.
# (which would make search results wrong)
if not contains(old_value, new_value, case_sensitive=True):
handle_unsafe('Unsafe update: new product match rules are not a superset of old ones.')
if doc_changes:
_LOG.info("Updating product %s", type_.name)
self._db.update_dataset_type(
name=type_.name,
metadata=type_.metadata_doc,
metadata_type_id=type_.metadata_type.id,
definition=type_.definition
)
# Clear our local cache. Note that other users may still have
# cached copies for the duration of their connections.
self.get_by_name.cache_clear()
self.get.cache_clear()
else:
_LOG.info("No changes detected for product %s", type_.name)
def update_document(self, definition, allow_unsafe_update=False):
type_ = self.from_doc(definition)
return self.update(type_, allow_unsafe_updates=allow_unsafe_update)
def add_document(self, definition):
type_ = self.from_doc(definition)
return self.add(type_)
@lru_cache()
def get(self, id_):
return self._make(self._db.get_dataset_type(id_))
@lru_cache()
def get_by_name(self, name):
result = self._db.get_dataset_type_by_name(name)
if not result:
return None
return self._make(result)
def get_with_fields(self, field_names):
for type_ in self.get_all():
for name in field_names:
if name not in type_.metadata_type.dataset_fields:
break
else:
yield type_
def search(self, **query):
for type_, q in self.search_robust(**query):
if not q:
yield type_
def search_robust(self, **query):
for type_ in self.get_all():
q = query.copy()
if q.pop('product', type_.name) != type_.name:
continue
if q.pop('metadata_type', type_.metadata_type.name) != type_.metadata_type.name:
continue
for key, value in list(q.items()):
try:
exprs = fields.to_expressions(type_.metadata_type.dataset_fields.get, **{key: value})
except UnknownFieldError as e:
break
try:
if all(expr.evaluate(type_.metadata_doc) for expr in exprs):
q.pop(key)
else:
break
except (AttributeError, KeyError, ValueError) as e:
continue
else:
yield type_, q
def get_all(self):
return (self._make(record) for record in self._db.get_all_dataset_types())
def _make_many(self, query_rows):
return (self._make(c) for c in query_rows)
def _make(self, query_row):
return DatasetType(
definition=query_row['definition'],
metadata_type=self.metadata_type_resource.get(query_row['metadata_type_ref']),
id_=query_row['id'],
)
class DatasetResource(object):
def __init__(self, db, dataset_type_resource):
self._db = db
self.types = dataset_type_resource
def get(self, id_, include_sources=False):
if not include_sources:
return self._make(self._db.get_dataset(id_), full_info=True)
datasets = {result['id']: (self._make(result, full_info=True), result)
for result in self._db.get_dataset_sources(id_)}
for dataset, result in datasets.values():
dataset.metadata_doc['lineage']['source_datasets'] = {
classifier: datasets[str(source)][0].metadata_doc
for source, classifier in zip(result['sources'], result['classes']) if source
}
dataset.sources = {
classifier: datasets[str(source)][0]
for source, classifier in zip(result['sources'], result['classes']) if source
}
return datasets[id_][0]
def get_derived(self, id_):
return [self._make(result) for result in self._db.get_derived_datasets(id_)]
def has(self, dataset):
return self._db.contains_dataset(dataset.id)
def add(self, dataset, skip_sources=False):
if not skip_sources:
for source in dataset.sources.values():
self.add(source)
was_inserted = False
sources_tmp = dataset.type.dataset_reader(dataset.metadata_doc).sources
dataset.type.dataset_reader(dataset.metadata_doc).sources = {}
try:
_LOG.info('Indexing %s', dataset.id)
with self._db.begin() as transaction:
try:
was_inserted = transaction.insert_dataset(dataset.metadata_doc, dataset.id, dataset.type.id)
for classifier, source_dataset in dataset.sources.items():
transaction.insert_dataset_source(classifier, dataset.id, source_dataset.id)
# try to update location in the same transaction as insertion.
# if insertion fails we'll try updating location later
if dataset.local_uri:
transaction.ensure_dataset_location(dataset.id, dataset.local_uri)
except DuplicateRecordError as e:
_LOG.warning(str(e))
if not was_inserted:
existing = self.get(dataset.id)
if existing:
check_doc_unchanged(
existing.metadata_doc,
jsonify_document(dataset.metadata_doc),
'Dataset {}'.format(dataset.id)
)
# reinsert attempt? try updating the location
if dataset.local_uri:
try:
self._db.ensure_dataset_location(dataset.id, dataset.local_uri)
except DuplicateRecordError as e:
_LOG.warning(str(e))
finally:
dataset.type.dataset_reader(dataset.metadata_doc).sources = sources_tmp
return dataset
def archive(self, ids):
with self._db.begin() as transaction:
for id_ in ids:
transaction.archive_dataset(id_)
def restore(self, ids):
with self._db.begin() as transaction:
for id_ in ids:
transaction.restore_dataset(id_)
def get_field_names(self, type_name=None):
if type_name is None:
types = self.types.get_all()
else:
types = [self.types.get_by_name(type_name)]
for type_ in types:
for name in type_.metadata_type.dataset_fields:
yield name
def get_locations(self, dataset):
return self._db.get_locations(dataset.id)
def _make(self, dataset_res, full_info=False):
return Dataset(
self.types.get(dataset_res.dataset_type_ref),
dataset_res.metadata,
dataset_res.local_uri,
indexed_by=dataset_res.added_by if full_info else None,
indexed_time=dataset_res.added if full_info else None
)
def _make_many(self, query_result):
return (self._make(dataset) for dataset in query_result)
def search_by_metadata(self, metadata):
return self._make_many(self._db.search_datasets_by_metadata(metadata))
def search(self, **query):
for dataset_type, datasets in self._do_search_by_product(query):
for dataset in self._make_many(datasets):
yield dataset
def search_by_product(self, **query):
for dataset_type, datasets in self._do_search_by_product(query):
yield dataset_type, self._make_many(datasets)
def count(self, **query):
# This may be optimised into one query in the future.
result = 0
for product_type, count in self._do_count_by_product(query):
result += count
return result
def count_by_product(self, **query):
return self._do_count_by_product(query)
def count_by_product_through_time(self, period, **query):
return self._do_time_count(period, query)
def count_product_through_time(self, period, **query):
return next(self._do_time_count(period, query, ensure_single=True))[1]
def _get_dataset_types(self, q):
types = set()
if 'product' in q.keys():
types.add(self.types.get_by_name(q['product']))
else:
# Otherwise search any metadata type that has all the given search fields.
types = self.types.get_with_fields(tuple(q.keys()))
if not types:
raise ValueError('No type of dataset has fields: %r', tuple(q.keys()))
return types
def _get_product_queries(self, query):
for dataset_type, q in self.types.search_robust(**query):
q['dataset_type_id'] = dataset_type.id
yield q, dataset_type
def _do_search_by_product(self, query, return_fields=False, with_source_ids=False):
for q, dataset_type in self._get_product_queries(query):
dataset_fields = dataset_type.metadata_type.dataset_fields
query_exprs = tuple(fields.to_expressions(dataset_fields.get, **q))
select_fields = None
if return_fields:
select_fields = tuple(dataset_fields.values())
yield (dataset_type,
self._db.search_datasets(
query_exprs,
select_fields=select_fields,
with_source_ids=with_source_ids
))
def _do_count_by_product(self, query):
for q, dataset_type in self._get_product_queries(query):
dataset_fields = dataset_type.metadata_type.dataset_fields
query_exprs = tuple(fields.to_expressions(dataset_fields.get, **q))
count = self._db.count_datasets(query_exprs)
if count > 0:
yield dataset_type, count
def _do_time_count(self, period, query, ensure_single=False):
if 'time' not in query:
raise ValueError('Counting through time requires a "time" range query argument')
query = dict(query)
start, end = query['time']
del query['time']
product_quries = list(self._get_product_queries(query))
if ensure_single:
if len(product_quries) == 0:
raise ValueError('No products match search terms: %r' % query)
if len(product_quries) > 1:
raise ValueError('Multiple products match single query search: %r' %
([dt.name for q, dt in product_quries],))
for q, dataset_type in product_quries:
dataset_fields = dataset_type.metadata_type.dataset_fields
query_exprs = tuple(fields.to_expressions(dataset_fields.get, **q))
yield dataset_type, list(self._db.count_datasets_through_time(
start,
end,
period,
dataset_fields.get('time'),
query_exprs
))
def search_summaries(self, **query):
for dataset_type, results in self._do_search_by_product(query, return_fields=True):
for columns in results:
yield dict(columns)
def search_eager(self, **query):
return list(self.search(**query))
| true
| true
|
79073ff5636a8feb04f6bf806bc00fe2c9cb0254
| 2,507
|
py
|
Python
|
xsdata/codegen/mixins.py
|
amal-khailtash/xsdata
|
f539ebf7ad9146ee5c0cad821c2ca5b2f4e8067e
|
[
"MIT"
] | null | null | null |
xsdata/codegen/mixins.py
|
amal-khailtash/xsdata
|
f539ebf7ad9146ee5c0cad821c2ca5b2f4e8067e
|
[
"MIT"
] | null | null | null |
xsdata/codegen/mixins.py
|
amal-khailtash/xsdata
|
f539ebf7ad9146ee5c0cad821c2ca5b2f4e8067e
|
[
"MIT"
] | null | null | null |
import abc
from abc import ABCMeta
from typing import Callable
from typing import Iterator
from typing import List
from typing import Optional
from xsdata.codegen.models import Attr
from xsdata.codegen.models import Class
from xsdata.models.config import GeneratorConfig
from xsdata.utils.constants import return_true
class ContainerInterface(abc.ABC):
"""Wrap a list of classes and expose a simple api for easy access and
process."""
__slots__ = ("config",)
def __init__(self, config: GeneratorConfig):
self.config = config
@abc.abstractmethod
def __iter__(self) -> Iterator[Class]:
"""Create an iterator for the class map values."""
@abc.abstractmethod
def find(self, qname: str, condition: Callable = return_true) -> Optional[Class]:
"""Search by qualified name for a specific class with an optional
condition callable."""
@abc.abstractmethod
def find_inner(self, source: Class, qname: str) -> Class:
"""Search by qualified name for a specific inner class or fail."""
@abc.abstractmethod
def add(self, item: Class):
"""Add class item to the container."""
@abc.abstractmethod
def extend(self, items: List[Class]):
"""Add a list of classes the container."""
@abc.abstractmethod
def reset(self, item: Class, qname: str):
"""Update the given class qualified name."""
class HandlerInterface(abc.ABC):
"""Class handler interface."""
__slots__ = ()
@abc.abstractmethod
def process(self, target: Class):
"""Process the given target class."""
class RelativeHandlerInterface(HandlerInterface, metaclass=ABCMeta):
"""Class handler interface with access to the complete classes
container."""
__slots__ = "container"
def __init__(self, container: ContainerInterface):
self.container = container
def base_attrs(self, target: Class) -> List[Attr]:
attrs: List[Attr] = []
for extension in target.extensions:
base = self.container.find(extension.type.qname)
assert base is not None
attrs.extend(base.attrs)
attrs.extend(self.base_attrs(base))
return attrs
class ContainerHandlerInterface(abc.ABC):
"""Class container."""
__slots__ = "container"
def __init__(self, container: ContainerInterface):
self.container = container
@abc.abstractmethod
def run(self):
"""Run the process for the whole container."""
| 27.25
| 85
| 0.67491
|
import abc
from abc import ABCMeta
from typing import Callable
from typing import Iterator
from typing import List
from typing import Optional
from xsdata.codegen.models import Attr
from xsdata.codegen.models import Class
from xsdata.models.config import GeneratorConfig
from xsdata.utils.constants import return_true
class ContainerInterface(abc.ABC):
__slots__ = ("config",)
def __init__(self, config: GeneratorConfig):
self.config = config
@abc.abstractmethod
def __iter__(self) -> Iterator[Class]:
@abc.abstractmethod
def find(self, qname: str, condition: Callable = return_true) -> Optional[Class]:
@abc.abstractmethod
def find_inner(self, source: Class, qname: str) -> Class:
@abc.abstractmethod
def add(self, item: Class):
@abc.abstractmethod
def extend(self, items: List[Class]):
@abc.abstractmethod
def reset(self, item: Class, qname: str):
class HandlerInterface(abc.ABC):
__slots__ = ()
@abc.abstractmethod
def process(self, target: Class):
class RelativeHandlerInterface(HandlerInterface, metaclass=ABCMeta):
__slots__ = "container"
def __init__(self, container: ContainerInterface):
self.container = container
def base_attrs(self, target: Class) -> List[Attr]:
attrs: List[Attr] = []
for extension in target.extensions:
base = self.container.find(extension.type.qname)
assert base is not None
attrs.extend(base.attrs)
attrs.extend(self.base_attrs(base))
return attrs
class ContainerHandlerInterface(abc.ABC):
__slots__ = "container"
def __init__(self, container: ContainerInterface):
self.container = container
@abc.abstractmethod
def run(self):
| true
| true
|
79074077683610c76ebeae6e8822c97a1740001e
| 3,640
|
py
|
Python
|
pycoin/networks/legacy_networks.py
|
prahaladbelavadi/pycoin-trial
|
afffd068654497e00e5e39d144819bdeaf1f8a55
|
[
"MIT"
] | 5
|
2017-12-15T13:40:50.000Z
|
2021-12-18T13:18:54.000Z
|
pycoin/networks/legacy_networks.py
|
prahaladbelavadi/pycoin-trial
|
afffd068654497e00e5e39d144819bdeaf1f8a55
|
[
"MIT"
] | 1
|
2018-08-06T03:48:14.000Z
|
2018-09-03T03:01:03.000Z
|
pycoin/networks/legacy_networks.py
|
prahaladbelavadi/pycoin-trial
|
afffd068654497e00e5e39d144819bdeaf1f8a55
|
[
"MIT"
] | 6
|
2018-08-24T18:49:47.000Z
|
2021-01-19T10:04:08.000Z
|
# this file is deprecated and will soon be folded into all.py
from collections import namedtuple
from pycoin.serialize import h2b
NetworkValues = namedtuple('NetworkValues',
('network_name', 'subnet_name', 'code', 'wif', 'address',
'pay_to_script', 'prv32', 'pub32'))
NETWORKS = (
# VIA viacoin mainnet : xprv/xpub
NetworkValues("Viacoin", "mainnet", "VIA", b'\xc7', b'\x47', b'\x21', h2b('0488ADE4'), h2b('0488B21E')),
# VIA viacoin testnet : tprv/tpub
NetworkValues("Viacoin", "testnet", "TVI", b'\xff', b'\x7f', b'\xc4', h2b('04358394'), h2b('043587CF')),
# FTC feathercoin mainnet : xprv/xpub
NetworkValues(
"Feathercoin", "mainnet", "FTC", b'\x8e', b'\x0e', b'\x60', h2b('0488ADE4'), h2b('0488B21E')),
# FTC feathercoin testnet : tprv/tpub
NetworkValues(
"Feathercoin", "testnet", "FTX", b'\xC1', b'\x41', b'\xc4', h2b('04358394'), h2b('043587CF')),
# DOGE Dogecoin mainnet : dogv/dogp
NetworkValues(
"Dogecoin", "mainnet", "DOGE", b'\x9e', b'\x1e', b'\x16', h2b("02FD3955"), h2b("02FD3929")),
# DOGE Dogecoin testnet : tgpv/tgub
NetworkValues(
"Dogecoin", "testnet", "XDT", b'\xf1', b'\x71', b'\xc4', h2b("0432a9a8"), h2b("0432a243")),
# BC BlackCoin mainnet : bcpv/bcpb
NetworkValues("Blackcoin", "mainnet", "BC", b'\x99', b'\x19', None, h2b("02cfbf60"), h2b("02cfbede")),
# DRK Dash mainnet : drkv/drkp
NetworkValues(
"Dash", "mainnet", "DASH", b'\xcc', b'\x4c', b'\x10', h2b("02fe52f8"), h2b("02fe52cc")),
# DRK Dash testnet : DRKV/DRKP
NetworkValues(
"Dash", "testnet", "tDASH", b'\xef', b'\x8c', b'\x13', h2b("3a8061a0"), h2b("3a805837")),
# MEC Megacoin mainnet : mecv/mecp
NetworkValues("Megacoin", "mainnet", "MEC", b'\xb2', b'\x32', None, h2b("03a04db7"), h2b("03a04d8b")),
NetworkValues(
"Myriadcoin", "mainnet", "MYR", b'\xb2', b'\x32', b'\x09', h2b('0488ADE4'), h2b('0488B21E')),
NetworkValues(
"Unobtanium", "mainnet", "UNO", b'\xe0', b'\x82', b'\x1e', h2b('0488ADE4'), h2b('0488B21E')),
# JBS Jumbucks mainnet : jprv/jpub
NetworkValues("Jumbucks", "mainnet", "JBS", b'\xab', b'\x2b', None, h2b('037a6460'), h2b('037a689a')),
# MZC Mazacoin mainnet: xprv/xpub
NetworkValues("Mazacoin", "mainnet", "MZC", b'\xe0', b'\x32', b'\9', h2b("0488ADE4"), h2b("0488B21E")),
NetworkValues(
"Riecoin", "mainnet", "RIC", b'\x80', b'\x3c', b'\x05', h2b('0488ADE4'), h2b('0488B21E')),
# DFC Defcoin mainnet: dfcv/dfcp
NetworkValues("DEFCOIN", "mainnet", "DFC", b'\x9e', b'\x1e', b'\5', h2b("02FA54D7"), h2b("02FA54AD")),
# FAI faircoin mainnet : xprv/xpub
NetworkValues(
"Faircoin", "mainnet", "FAI", b'\xdf', b'\x5f', b'\x24', h2b("0488ADE4"), h2b("0488B21E")),
# ARG argentum mainnet : xprv/xpub
NetworkValues("Argentum", "mainnet", "ARG", b'\x97', b'\x17', b'\5', h2b("0488ADE4"), h2b("0488B21E")),
# ZEC Zcash mainnet : xprv/xpub
NetworkValues("Zcash", "mainnet", "ZEC", b'\x80', b'\x1C\xB8',
b'\x1C\xBD', h2b("0488ADE4"), h2b("0488B21E")),
# BTCD BitcoinDark mainnet : xprv/xpub
NetworkValues("BitcoinDark", "mainnet", "BTCD", b'\x44', b'\x3C', b'\55', h2b('0488ADE4'), h2b('0488B21E')),
# DCR Decred mainnet : dprv/dpub
NetworkValues("Decred", "mainnet", "DCR", b'\x22\xDE', b'\x07\x3F', b'\x07\x1A', h2b('02FDA4E8'), h2b('02FDA926')),
# DCR Decred testnet : tprv/tpub
NetworkValues("Decred", "testnet", "DCRT", b'\x23\x0E', b'\x0F\x21', b'\x0E\x6C', h2b('04358397'), h2b('043587D1')),
)
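# Usage sketch (illustrative only, not part of the original file): the tuple is
# convenient to index by currency code, e.g.
#
#     by_code = {net.code: net for net in NETWORKS}
#     by_code["DOGE"].address   # -> b'\x1e', the Dogecoin mainnet address prefix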
| 42.325581
| 120
| 0.58956
|
from collections import namedtuple
from pycoin.serialize import h2b
NetworkValues = namedtuple('NetworkValues',
('network_name', 'subnet_name', 'code', 'wif', 'address',
'pay_to_script', 'prv32', 'pub32'))
NETWORKS = (
NetworkValues("Viacoin", "mainnet", "VIA", b'\xc7', b'\x47', b'\x21', h2b('0488ADE4'), h2b('0488B21E')),
NetworkValues("Viacoin", "testnet", "TVI", b'\xff', b'\x7f', b'\xc4', h2b('04358394'), h2b('043587CF')),
NetworkValues(
"Feathercoin", "mainnet", "FTC", b'\x8e', b'\x0e', b'\x60', h2b('0488ADE4'), h2b('0488B21E')),
NetworkValues(
"Feathercoin", "testnet", "FTX", b'\xC1', b'\x41', b'\xc4', h2b('04358394'), h2b('043587CF')),
NetworkValues(
"Dogecoin", "mainnet", "DOGE", b'\x9e', b'\x1e', b'\x16', h2b("02FD3955"), h2b("02FD3929")),
NetworkValues(
"Dogecoin", "testnet", "XDT", b'\xf1', b'\x71', b'\xc4', h2b("0432a9a8"), h2b("0432a243")),
NetworkValues("Blackcoin", "mainnet", "BC", b'\x99', b'\x19', None, h2b("02cfbf60"), h2b("02cfbede")),
NetworkValues(
"Dash", "mainnet", "DASH", b'\xcc', b'\x4c', b'\x10', h2b("02fe52f8"), h2b("02fe52cc")),
NetworkValues(
"Dash", "testnet", "tDASH", b'\xef', b'\x8c', b'\x13', h2b("3a8061a0"), h2b("3a805837")),
NetworkValues("Megacoin", "mainnet", "MEC", b'\xb2', b'\x32', None, h2b("03a04db7"), h2b("03a04d8b")),
NetworkValues(
"Myriadcoin", "mainnet", "MYR", b'\xb2', b'\x32', b'\x09', h2b('0488ADE4'), h2b('0488B21E')),
NetworkValues(
"Unobtanium", "mainnet", "UNO", b'\xe0', b'\x82', b'\x1e', h2b('0488ADE4'), h2b('0488B21E')),
NetworkValues("Jumbucks", "mainnet", "JBS", b'\xab', b'\x2b', None, h2b('037a6460'), h2b('037a689a')),
NetworkValues("Mazacoin", "mainnet", "MZC", b'\xe0', b'\x32', b'\9', h2b("0488ADE4"), h2b("0488B21E")),
NetworkValues(
"Riecoin", "mainnet", "RIC", b'\x80', b'\x3c', b'\x05', h2b('0488ADE4'), h2b('0488B21E')),
NetworkValues("DEFCOIN", "mainnet", "DFC", b'\x9e', b'\x1e', b'\5', h2b("02FA54D7"), h2b("02FA54AD")),
NetworkValues(
"Faircoin", "mainnet", "FAI", b'\xdf', b'\x5f', b'\x24', h2b("0488ADE4"), h2b("0488B21E")),
NetworkValues("Argentum", "mainnet", "ARG", b'\x97', b'\x17', b'\5', h2b("0488ADE4"), h2b("0488B21E")),
NetworkValues("Zcash", "mainnet", "ZEC", b'\x80', b'\x1C\xB8',
b'\x1C\xBD', h2b("0488ADE4"), h2b("0488B21E")),
NetworkValues("BitcoinDark", "mainnet", "BTCD", b'\x44', b'\x3C', b'\55', h2b('0488ADE4'), h2b('0488B21E')),
NetworkValues("Decred", "mainnet", "DCR", b'\x22\xDE', b'\x07\x3F', b'\x07\x1A', h2b('02FDA4E8'), h2b('02FDA926')),
NetworkValues("Decred", "testnet", "DCRT", b'\x23\x0E', b'\x0F\x21', b'\x0E\x6C', h2b('04358397'), h2b('043587D1')),
)
| true
| true
|
790740fe9c5967cb2af78fbf75d463a80292047d
| 23,446
|
py
|
Python
|
torchvision/models/detection/faster_rcnn.py
|
brianjo/vision
|
a8bde78130fd8c956780d85693d0f51912013732
|
[
"BSD-3-Clause"
] | 1
|
2022-03-08T14:11:12.000Z
|
2022-03-08T14:11:12.000Z
|
torchvision/models/detection/faster_rcnn.py
|
brianjo/vision
|
a8bde78130fd8c956780d85693d0f51912013732
|
[
"BSD-3-Clause"
] | null | null | null |
torchvision/models/detection/faster_rcnn.py
|
brianjo/vision
|
a8bde78130fd8c956780d85693d0f51912013732
|
[
"BSD-3-Clause"
] | null | null | null |
import torch.nn.functional as F
from torch import nn
from torchvision.ops import MultiScaleRoIAlign
from ..._internally_replaced_utils import load_state_dict_from_url
from ...ops import misc as misc_nn_ops
from ..mobilenetv3 import mobilenet_v3_large
from ..resnet import resnet50
from ._utils import overwrite_eps
from .anchor_utils import AnchorGenerator
from .backbone_utils import _resnet_fpn_extractor, _validate_trainable_layers, _mobilenet_extractor
from .generalized_rcnn import GeneralizedRCNN
from .roi_heads import RoIHeads
from .rpn import RPNHead, RegionProposalNetwork
from .transform import GeneralizedRCNNTransform
__all__ = [
"FasterRCNN",
"fasterrcnn_resnet50_fpn",
"fasterrcnn_mobilenet_v3_large_320_fpn",
"fasterrcnn_mobilenet_v3_large_fpn",
]
class FasterRCNN(GeneralizedRCNN):
"""
Implements Faster R-CNN.
The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each
image, and should be in 0-1 range. Different images can have different sizes.
The behavior of the model changes depending on whether it is in training or evaluation mode.
During training, the model expects both the input tensors, as well as a targets (list of dictionary),
containing:
- boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (Int64Tensor[N]): the class label for each ground-truth box
The model returns a Dict[Tensor] during training, containing the classification and regression
losses for both the RPN and the R-CNN.
During inference, the model requires only the input tensors, and returns the post-processed
predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as
follows:
- boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (Int64Tensor[N]): the predicted labels for each image
- scores (Tensor[N]): the scores of each prediction
Args:
backbone (nn.Module): the network used to compute the features for the model.
It should contain a out_channels attribute, which indicates the number of output
channels that each feature map has (and it should be the same for all feature maps).
The backbone should return a single Tensor or an OrderedDict[Tensor].
num_classes (int): number of output classes of the model (including the background).
If box_predictor is specified, num_classes should be None.
min_size (int): minimum size of the image to be rescaled before feeding it to the backbone
max_size (int): maximum size of the image to be rescaled before feeding it to the backbone
image_mean (Tuple[float, float, float]): mean values used for input normalization.
They are generally the mean values of the dataset on which the backbone has been trained
on
image_std (Tuple[float, float, float]): std values used for input normalization.
They are generally the std values of the dataset on which the backbone has been trained on
rpn_anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature
maps.
rpn_head (nn.Module): module that computes the objectness and regression deltas from the RPN
rpn_pre_nms_top_n_train (int): number of proposals to keep before applying NMS during training
rpn_pre_nms_top_n_test (int): number of proposals to keep before applying NMS during testing
rpn_post_nms_top_n_train (int): number of proposals to keep after applying NMS during training
rpn_post_nms_top_n_test (int): number of proposals to keep after applying NMS during testing
rpn_nms_thresh (float): NMS threshold used for postprocessing the RPN proposals
rpn_fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be
considered as positive during training of the RPN.
rpn_bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be
considered as negative during training of the RPN.
rpn_batch_size_per_image (int): number of anchors that are sampled during training of the RPN
for computing the loss
rpn_positive_fraction (float): proportion of positive anchors in a mini-batch during training
of the RPN
rpn_score_thresh (float): during inference, only return proposals with a classification score
greater than rpn_score_thresh
box_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in
the locations indicated by the bounding boxes
box_head (nn.Module): module that takes the cropped feature maps as input
box_predictor (nn.Module): module that takes the output of box_head and returns the
classification logits and box regression deltas.
box_score_thresh (float): during inference, only return proposals with a classification score
greater than box_score_thresh
box_nms_thresh (float): NMS threshold for the prediction head. Used during inference
box_detections_per_img (int): maximum number of detections per image, for all classes.
box_fg_iou_thresh (float): minimum IoU between the proposals and the GT box so that they can be
considered as positive during training of the classification head
box_bg_iou_thresh (float): maximum IoU between the proposals and the GT box so that they can be
considered as negative during training of the classification head
box_batch_size_per_image (int): number of proposals that are sampled during training of the
classification head
box_positive_fraction (float): proportion of positive proposals in a mini-batch during training
of the classification head
bbox_reg_weights (Tuple[float, float, float, float]): weights for the encoding/decoding of the
bounding boxes
Example::
>>> import torch
>>> import torchvision
>>> from torchvision.models.detection import FasterRCNN
>>> from torchvision.models.detection.rpn import AnchorGenerator
>>> # load a pre-trained model for classification and return
>>> # only the features
>>> backbone = torchvision.models.mobilenet_v2(pretrained=True).features
>>> # FasterRCNN needs to know the number of
>>> # output channels in a backbone. For mobilenet_v2, it's 1280
>>> # so we need to add it here
>>> backbone.out_channels = 1280
>>>
>>> # let's make the RPN generate 5 x 3 anchors per spatial
>>> # location, with 5 different sizes and 3 different aspect
>>> # ratios. We have a Tuple[Tuple[int]] because each feature
>>> # map could potentially have different sizes and
>>> # aspect ratios
>>> anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),
>>> aspect_ratios=((0.5, 1.0, 2.0),))
>>>
>>> # let's define which feature maps we will
>>> # use to perform the region of interest cropping, as well as
>>> # the size of the crop after rescaling.
>>> # if your backbone returns a Tensor, featmap_names is expected to
>>> # be ['0']. More generally, the backbone should return an
>>> # OrderedDict[Tensor], and in featmap_names you can choose which
>>> # feature maps to use.
>>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
>>> output_size=7,
>>> sampling_ratio=2)
>>>
>>> # put the pieces together inside a FasterRCNN model
>>> model = FasterRCNN(backbone,
>>> num_classes=2,
>>> rpn_anchor_generator=anchor_generator,
>>> box_roi_pool=roi_pooler)
>>> model.eval()
>>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
>>> predictions = model(x)
"""
def __init__(
self,
backbone,
num_classes=None,
# transform parameters
min_size=800,
max_size=1333,
image_mean=None,
image_std=None,
# RPN parameters
rpn_anchor_generator=None,
rpn_head=None,
rpn_pre_nms_top_n_train=2000,
rpn_pre_nms_top_n_test=1000,
rpn_post_nms_top_n_train=2000,
rpn_post_nms_top_n_test=1000,
rpn_nms_thresh=0.7,
rpn_fg_iou_thresh=0.7,
rpn_bg_iou_thresh=0.3,
rpn_batch_size_per_image=256,
rpn_positive_fraction=0.5,
rpn_score_thresh=0.0,
# Box parameters
box_roi_pool=None,
box_head=None,
box_predictor=None,
box_score_thresh=0.05,
box_nms_thresh=0.5,
box_detections_per_img=100,
box_fg_iou_thresh=0.5,
box_bg_iou_thresh=0.5,
box_batch_size_per_image=512,
box_positive_fraction=0.25,
bbox_reg_weights=None,
):
if not hasattr(backbone, "out_channels"):
raise ValueError(
"backbone should contain an attribute out_channels "
"specifying the number of output channels (assumed to be the "
"same for all the levels)"
)
assert isinstance(rpn_anchor_generator, (AnchorGenerator, type(None)))
assert isinstance(box_roi_pool, (MultiScaleRoIAlign, type(None)))
if num_classes is not None:
if box_predictor is not None:
raise ValueError("num_classes should be None when box_predictor is specified")
else:
if box_predictor is None:
raise ValueError("num_classes should not be None when box_predictor is not specified")
out_channels = backbone.out_channels
if rpn_anchor_generator is None:
anchor_sizes = ((32,), (64,), (128,), (256,), (512,))
aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
rpn_anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios)
if rpn_head is None:
rpn_head = RPNHead(out_channels, rpn_anchor_generator.num_anchors_per_location()[0])
rpn_pre_nms_top_n = dict(training=rpn_pre_nms_top_n_train, testing=rpn_pre_nms_top_n_test)
rpn_post_nms_top_n = dict(training=rpn_post_nms_top_n_train, testing=rpn_post_nms_top_n_test)
rpn = RegionProposalNetwork(
rpn_anchor_generator,
rpn_head,
rpn_fg_iou_thresh,
rpn_bg_iou_thresh,
rpn_batch_size_per_image,
rpn_positive_fraction,
rpn_pre_nms_top_n,
rpn_post_nms_top_n,
rpn_nms_thresh,
score_thresh=rpn_score_thresh,
)
if box_roi_pool is None:
box_roi_pool = MultiScaleRoIAlign(featmap_names=["0", "1", "2", "3"], output_size=7, sampling_ratio=2)
if box_head is None:
resolution = box_roi_pool.output_size[0]
representation_size = 1024
box_head = TwoMLPHead(out_channels * resolution ** 2, representation_size)
if box_predictor is None:
representation_size = 1024
box_predictor = FastRCNNPredictor(representation_size, num_classes)
roi_heads = RoIHeads(
# Box
box_roi_pool,
box_head,
box_predictor,
box_fg_iou_thresh,
box_bg_iou_thresh,
box_batch_size_per_image,
box_positive_fraction,
bbox_reg_weights,
box_score_thresh,
box_nms_thresh,
box_detections_per_img,
)
if image_mean is None:
image_mean = [0.485, 0.456, 0.406]
if image_std is None:
image_std = [0.229, 0.224, 0.225]
transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std)
super().__init__(backbone, rpn, roi_heads, transform)
class TwoMLPHead(nn.Module):
"""
Standard heads for FPN-based models
Args:
in_channels (int): number of input channels
representation_size (int): size of the intermediate representation
"""
def __init__(self, in_channels, representation_size):
super().__init__()
self.fc6 = nn.Linear(in_channels, representation_size)
self.fc7 = nn.Linear(representation_size, representation_size)
def forward(self, x):
x = x.flatten(start_dim=1)
x = F.relu(self.fc6(x))
x = F.relu(self.fc7(x))
return x
class FastRCNNPredictor(nn.Module):
"""
Standard classification + bounding box regression layers
for Fast R-CNN.
Args:
in_channels (int): number of input channels
num_classes (int): number of output classes (including background)
"""
def __init__(self, in_channels, num_classes):
super().__init__()
self.cls_score = nn.Linear(in_channels, num_classes)
self.bbox_pred = nn.Linear(in_channels, num_classes * 4)
def forward(self, x):
if x.dim() == 4:
assert list(x.shape[2:]) == [1, 1]
x = x.flatten(start_dim=1)
scores = self.cls_score(x)
bbox_deltas = self.bbox_pred(x)
return scores, bbox_deltas
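# Fine-tuning sketch (illustrative): a common way to adapt a pretrained detector to
# a new dataset is to swap in a fresh FastRCNNPredictor; ``num_classes`` below is a
# placeholder for your own class count (background included).
#
#     model = fasterrcnn_resnet50_fpn(pretrained=True)
#     in_features = model.roi_heads.box_predictor.cls_score.in_features
#     model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)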
model_urls = {
"fasterrcnn_resnet50_fpn_coco": "https://download.pytorch.org/models/fasterrcnn_resnet50_fpn_coco-258fb6c6.pth",
"fasterrcnn_mobilenet_v3_large_320_fpn_coco": "https://download.pytorch.org/models/fasterrcnn_mobilenet_v3_large_320_fpn-907ea3f9.pth",
"fasterrcnn_mobilenet_v3_large_fpn_coco": "https://download.pytorch.org/models/fasterrcnn_mobilenet_v3_large_fpn-fb6a3cc7.pth",
}
def fasterrcnn_resnet50_fpn(
pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, trainable_backbone_layers=None, **kwargs
):
"""
Constructs a Faster R-CNN model with a ResNet-50-FPN backbone.
Reference: `"Faster R-CNN: Towards Real-Time Object Detection with
Region Proposal Networks" <https://arxiv.org/abs/1506.01497>`_.
The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each
image, and should be in ``0-1`` range. Different images can have different sizes.
The behavior of the model changes depending on whether it is in training or evaluation mode.
During training, the model expects both the input tensors, as well as a targets (list of dictionary),
containing:
- boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (``Int64Tensor[N]``): the class label for each ground-truth box
The model returns a ``Dict[Tensor]`` during training, containing the classification and regression
losses for both the RPN and the R-CNN.
During inference, the model requires only the input tensors, and returns the post-processed
predictions as a ``List[Dict[Tensor]]``, one for each input image. The fields of the ``Dict`` are as
follows, where ``N`` is the number of detections:
- boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (``Int64Tensor[N]``): the predicted labels for each detection
- scores (``Tensor[N]``): the scores of each detection
For more details on the output, you may refer to :ref:`instance_seg_output`.
Faster R-CNN is exportable to ONNX for a fixed batch size with input images of fixed size.
Example::
>>> model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
>>> # For training
>>> images, boxes = torch.rand(4, 3, 600, 1200), torch.rand(4, 11, 4)
>>> boxes[:, :, 2:4] = boxes[:, :, 0:2] + boxes[:, :, 2:4]
>>> labels = torch.randint(1, 91, (4, 11))
>>> images = list(image for image in images)
>>> targets = []
>>> for i in range(len(images)):
>>> d = {}
>>> d['boxes'] = boxes[i]
>>> d['labels'] = labels[i]
>>> targets.append(d)
>>> output = model(images, targets)
>>> # For inference
>>> model.eval()
>>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
>>> predictions = model(x)
>>>
>>> # optionally, if you want to export the model to ONNX:
>>> torch.onnx.export(model, x, "faster_rcnn.onnx", opset_version = 11)
Args:
pretrained (bool): If True, returns a model pre-trained on COCO train2017
progress (bool): If True, displays a progress bar of the download to stderr
num_classes (int): number of output classes of the model (including the background)
pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet
trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block.
Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable. If ``None`` is
passed (the default) this value is set to 3.
"""
is_trained = pretrained or pretrained_backbone
trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 5, 3)
norm_layer = misc_nn_ops.FrozenBatchNorm2d if is_trained else nn.BatchNorm2d
if pretrained:
# no need to download the backbone if pretrained is set
pretrained_backbone = False
backbone = resnet50(pretrained=pretrained_backbone, progress=progress, norm_layer=norm_layer)
backbone = _resnet_fpn_extractor(backbone, trainable_backbone_layers)
model = FasterRCNN(backbone, num_classes, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls["fasterrcnn_resnet50_fpn_coco"], progress=progress)
model.load_state_dict(state_dict)
overwrite_eps(model, 0.0)
return model
def _fasterrcnn_mobilenet_v3_large_fpn(
weights_name,
pretrained=False,
progress=True,
num_classes=91,
pretrained_backbone=True,
trainable_backbone_layers=None,
**kwargs,
):
is_trained = pretrained or pretrained_backbone
trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 6, 3)
norm_layer = misc_nn_ops.FrozenBatchNorm2d if is_trained else nn.BatchNorm2d
if pretrained:
pretrained_backbone = False
backbone = mobilenet_v3_large(pretrained=pretrained_backbone, progress=progress, norm_layer=norm_layer)
backbone = _mobilenet_extractor(backbone, True, trainable_backbone_layers)
anchor_sizes = (
(
32,
64,
128,
256,
512,
),
) * 3
aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
model = FasterRCNN(
backbone, num_classes, rpn_anchor_generator=AnchorGenerator(anchor_sizes, aspect_ratios), **kwargs
)
if pretrained:
if model_urls.get(weights_name, None) is None:
raise ValueError(f"No checkpoint is available for model {weights_name}")
state_dict = load_state_dict_from_url(model_urls[weights_name], progress=progress)
model.load_state_dict(state_dict)
return model
def fasterrcnn_mobilenet_v3_large_320_fpn(
pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, trainable_backbone_layers=None, **kwargs
):
"""
Constructs a low resolution Faster R-CNN model with a MobileNetV3-Large FPN backbone tuned for mobile use-cases.
It works similarly to Faster R-CNN with ResNet-50 FPN backbone. See
:func:`~torchvision.models.detection.fasterrcnn_resnet50_fpn` for more
details.
Example::
>>> model = torchvision.models.detection.fasterrcnn_mobilenet_v3_large_320_fpn(pretrained=True)
>>> model.eval()
>>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
>>> predictions = model(x)
Args:
pretrained (bool): If True, returns a model pre-trained on COCO train2017
progress (bool): If True, displays a progress bar of the download to stderr
num_classes (int): number of output classes of the model (including the background)
pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet
trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block.
Valid values are between 0 and 6, with 6 meaning all backbone layers are trainable. If ``None`` is
passed (the default) this value is set to 3.
"""
weights_name = "fasterrcnn_mobilenet_v3_large_320_fpn_coco"
defaults = {
"min_size": 320,
"max_size": 640,
"rpn_pre_nms_top_n_test": 150,
"rpn_post_nms_top_n_test": 150,
"rpn_score_thresh": 0.05,
}
kwargs = {**defaults, **kwargs}
return _fasterrcnn_mobilenet_v3_large_fpn(
weights_name,
pretrained=pretrained,
progress=progress,
num_classes=num_classes,
pretrained_backbone=pretrained_backbone,
trainable_backbone_layers=trainable_backbone_layers,
**kwargs,
)
def fasterrcnn_mobilenet_v3_large_fpn(
pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, trainable_backbone_layers=None, **kwargs
):
"""
Constructs a high resolution Faster R-CNN model with a MobileNetV3-Large FPN backbone.
It works similarly to Faster R-CNN with ResNet-50 FPN backbone. See
:func:`~torchvision.models.detection.fasterrcnn_resnet50_fpn` for more
details.
Example::
>>> model = torchvision.models.detection.fasterrcnn_mobilenet_v3_large_fpn(pretrained=True)
>>> model.eval()
>>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
>>> predictions = model(x)
Args:
pretrained (bool): If True, returns a model pre-trained on COCO train2017
progress (bool): If True, displays a progress bar of the download to stderr
num_classes (int): number of output classes of the model (including the background)
pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet
trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block.
Valid values are between 0 and 6, with 6 meaning all backbone layers are trainable. If ``None`` is
passed (the default) this value is set to 3.
"""
weights_name = "fasterrcnn_mobilenet_v3_large_fpn_coco"
defaults = {
"rpn_score_thresh": 0.05,
}
kwargs = {**defaults, **kwargs}
return _fasterrcnn_mobilenet_v3_large_fpn(
weights_name,
pretrained=pretrained,
progress=progress,
num_classes=num_classes,
pretrained_backbone=pretrained_backbone,
trainable_backbone_layers=trainable_backbone_layers,
**kwargs,
)
| 44.154426
| 139
| 0.66425
|
import torch.nn.functional as F
from torch import nn
from torchvision.ops import MultiScaleRoIAlign
from ..._internally_replaced_utils import load_state_dict_from_url
from ...ops import misc as misc_nn_ops
from ..mobilenetv3 import mobilenet_v3_large
from ..resnet import resnet50
from ._utils import overwrite_eps
from .anchor_utils import AnchorGenerator
from .backbone_utils import _resnet_fpn_extractor, _validate_trainable_layers, _mobilenet_extractor
from .generalized_rcnn import GeneralizedRCNN
from .roi_heads import RoIHeads
from .rpn import RPNHead, RegionProposalNetwork
from .transform import GeneralizedRCNNTransform
__all__ = [
"FasterRCNN",
"fasterrcnn_resnet50_fpn",
"fasterrcnn_mobilenet_v3_large_320_fpn",
"fasterrcnn_mobilenet_v3_large_fpn",
]
class FasterRCNN(GeneralizedRCNN):
def __init__(
self,
backbone,
num_classes=None,
min_size=800,
max_size=1333,
image_mean=None,
image_std=None,
rpn_anchor_generator=None,
rpn_head=None,
rpn_pre_nms_top_n_train=2000,
rpn_pre_nms_top_n_test=1000,
rpn_post_nms_top_n_train=2000,
rpn_post_nms_top_n_test=1000,
rpn_nms_thresh=0.7,
rpn_fg_iou_thresh=0.7,
rpn_bg_iou_thresh=0.3,
rpn_batch_size_per_image=256,
rpn_positive_fraction=0.5,
rpn_score_thresh=0.0,
box_roi_pool=None,
box_head=None,
box_predictor=None,
box_score_thresh=0.05,
box_nms_thresh=0.5,
box_detections_per_img=100,
box_fg_iou_thresh=0.5,
box_bg_iou_thresh=0.5,
box_batch_size_per_image=512,
box_positive_fraction=0.25,
bbox_reg_weights=None,
):
if not hasattr(backbone, "out_channels"):
raise ValueError(
"backbone should contain an attribute out_channels "
"specifying the number of output channels (assumed to be the "
"same for all the levels)"
)
assert isinstance(rpn_anchor_generator, (AnchorGenerator, type(None)))
assert isinstance(box_roi_pool, (MultiScaleRoIAlign, type(None)))
if num_classes is not None:
if box_predictor is not None:
raise ValueError("num_classes should be None when box_predictor is specified")
else:
if box_predictor is None:
raise ValueError("num_classes should not be None when box_predictor is not specified")
out_channels = backbone.out_channels
if rpn_anchor_generator is None:
anchor_sizes = ((32,), (64,), (128,), (256,), (512,))
aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
rpn_anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios)
if rpn_head is None:
rpn_head = RPNHead(out_channels, rpn_anchor_generator.num_anchors_per_location()[0])
rpn_pre_nms_top_n = dict(training=rpn_pre_nms_top_n_train, testing=rpn_pre_nms_top_n_test)
rpn_post_nms_top_n = dict(training=rpn_post_nms_top_n_train, testing=rpn_post_nms_top_n_test)
rpn = RegionProposalNetwork(
rpn_anchor_generator,
rpn_head,
rpn_fg_iou_thresh,
rpn_bg_iou_thresh,
rpn_batch_size_per_image,
rpn_positive_fraction,
rpn_pre_nms_top_n,
rpn_post_nms_top_n,
rpn_nms_thresh,
score_thresh=rpn_score_thresh,
)
if box_roi_pool is None:
box_roi_pool = MultiScaleRoIAlign(featmap_names=["0", "1", "2", "3"], output_size=7, sampling_ratio=2)
if box_head is None:
resolution = box_roi_pool.output_size[0]
representation_size = 1024
box_head = TwoMLPHead(out_channels * resolution ** 2, representation_size)
if box_predictor is None:
representation_size = 1024
box_predictor = FastRCNNPredictor(representation_size, num_classes)
roi_heads = RoIHeads(
box_roi_pool,
box_head,
box_predictor,
box_fg_iou_thresh,
box_bg_iou_thresh,
box_batch_size_per_image,
box_positive_fraction,
bbox_reg_weights,
box_score_thresh,
box_nms_thresh,
box_detections_per_img,
)
if image_mean is None:
image_mean = [0.485, 0.456, 0.406]
if image_std is None:
image_std = [0.229, 0.224, 0.225]
transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std)
super().__init__(backbone, rpn, roi_heads, transform)
class TwoMLPHead(nn.Module):
def __init__(self, in_channels, representation_size):
super().__init__()
self.fc6 = nn.Linear(in_channels, representation_size)
self.fc7 = nn.Linear(representation_size, representation_size)
def forward(self, x):
x = x.flatten(start_dim=1)
x = F.relu(self.fc6(x))
x = F.relu(self.fc7(x))
return x
class FastRCNNPredictor(nn.Module):
def __init__(self, in_channels, num_classes):
super().__init__()
self.cls_score = nn.Linear(in_channels, num_classes)
self.bbox_pred = nn.Linear(in_channels, num_classes * 4)
def forward(self, x):
if x.dim() == 4:
assert list(x.shape[2:]) == [1, 1]
x = x.flatten(start_dim=1)
scores = self.cls_score(x)
bbox_deltas = self.bbox_pred(x)
return scores, bbox_deltas
model_urls = {
"fasterrcnn_resnet50_fpn_coco": "https://download.pytorch.org/models/fasterrcnn_resnet50_fpn_coco-258fb6c6.pth",
"fasterrcnn_mobilenet_v3_large_320_fpn_coco": "https://download.pytorch.org/models/fasterrcnn_mobilenet_v3_large_320_fpn-907ea3f9.pth",
"fasterrcnn_mobilenet_v3_large_fpn_coco": "https://download.pytorch.org/models/fasterrcnn_mobilenet_v3_large_fpn-fb6a3cc7.pth",
}
def fasterrcnn_resnet50_fpn(
pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, trainable_backbone_layers=None, **kwargs
):
is_trained = pretrained or pretrained_backbone
trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 5, 3)
norm_layer = misc_nn_ops.FrozenBatchNorm2d if is_trained else nn.BatchNorm2d
if pretrained:
pretrained_backbone = False
backbone = resnet50(pretrained=pretrained_backbone, progress=progress, norm_layer=norm_layer)
backbone = _resnet_fpn_extractor(backbone, trainable_backbone_layers)
model = FasterRCNN(backbone, num_classes, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls["fasterrcnn_resnet50_fpn_coco"], progress=progress)
model.load_state_dict(state_dict)
overwrite_eps(model, 0.0)
return model
def _fasterrcnn_mobilenet_v3_large_fpn(
weights_name,
pretrained=False,
progress=True,
num_classes=91,
pretrained_backbone=True,
trainable_backbone_layers=None,
**kwargs,
):
is_trained = pretrained or pretrained_backbone
trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 6, 3)
norm_layer = misc_nn_ops.FrozenBatchNorm2d if is_trained else nn.BatchNorm2d
if pretrained:
pretrained_backbone = False
backbone = mobilenet_v3_large(pretrained=pretrained_backbone, progress=progress, norm_layer=norm_layer)
backbone = _mobilenet_extractor(backbone, True, trainable_backbone_layers)
anchor_sizes = (
(
32,
64,
128,
256,
512,
),
) * 3
aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
model = FasterRCNN(
backbone, num_classes, rpn_anchor_generator=AnchorGenerator(anchor_sizes, aspect_ratios), **kwargs
)
if pretrained:
if model_urls.get(weights_name, None) is None:
raise ValueError(f"No checkpoint is available for model {weights_name}")
state_dict = load_state_dict_from_url(model_urls[weights_name], progress=progress)
model.load_state_dict(state_dict)
return model
def fasterrcnn_mobilenet_v3_large_320_fpn(
pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, trainable_backbone_layers=None, **kwargs
):
weights_name = "fasterrcnn_mobilenet_v3_large_320_fpn_coco"
defaults = {
"min_size": 320,
"max_size": 640,
"rpn_pre_nms_top_n_test": 150,
"rpn_post_nms_top_n_test": 150,
"rpn_score_thresh": 0.05,
}
kwargs = {**defaults, **kwargs}
return _fasterrcnn_mobilenet_v3_large_fpn(
weights_name,
pretrained=pretrained,
progress=progress,
num_classes=num_classes,
pretrained_backbone=pretrained_backbone,
trainable_backbone_layers=trainable_backbone_layers,
**kwargs,
)
def fasterrcnn_mobilenet_v3_large_fpn(
pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, trainable_backbone_layers=None, **kwargs
):
weights_name = "fasterrcnn_mobilenet_v3_large_fpn_coco"
defaults = {
"rpn_score_thresh": 0.05,
}
kwargs = {**defaults, **kwargs}
return _fasterrcnn_mobilenet_v3_large_fpn(
weights_name,
pretrained=pretrained,
progress=progress,
num_classes=num_classes,
pretrained_backbone=pretrained_backbone,
trainable_backbone_layers=trainable_backbone_layers,
**kwargs,
)
| true
| true
|
79074153b6a661b8cb263aaead1be99aac462d24
| 601
|
py
|
Python
|
src/js/components/Map/svg/makeimport.py
|
pdyxs/WhereTheHeartIs
|
0a1267550cd0f0d06e84fd91f41ef097921ca048
|
[
"MIT"
] | null | null | null |
src/js/components/Map/svg/makeimport.py
|
pdyxs/WhereTheHeartIs
|
0a1267550cd0f0d06e84fd91f41ef097921ca048
|
[
"MIT"
] | null | null | null |
src/js/components/Map/svg/makeimport.py
|
pdyxs/WhereTheHeartIs
|
0a1267550cd0f0d06e84fd91f41ef097921ca048
|
[
"MIT"
] | null | null | null |
import sys
import os
if len(sys.argv) > 1:
folder = sys.argv[1]
jsfile = open("./" + folder + ".js", "w+")
images = [f[:len(f)-4] for f in os.listdir("./" + folder) if f.endswith(".svg")]
varnames = []
for i in images:
        varname = "svg_" + i.replace('-', '_')
varnames.append(varname)
jsfile.write("import " + varname + " from './" + folder + "/" + i + ".svg';\n")
jsfile.write("\n")
jsfile.write("const " + folder + " = [" + ", ".join(varnames) + "];\n")
jsfile.write("\n")
jsfile.write("export default " + folder + ";")
jsfile.close()
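# Usage sketch (the folder and file names below are hypothetical, not from the
# original script): running
#
#     python makeimport.py icons
#
# scans ./icons for *.svg files and writes ./icons.js with one import line per file,
# e.g. "import svg_home_outline from './icons/home-outline.svg';", followed by a
# "const icons = [...];" array and a default export of that array.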
| 33.388889
| 87
| 0.515807
|
import sys
import os
if len(sys.argv) > 1:
folder = sys.argv[1]
jsfile = open("./" + folder + ".js", "w+")
images = [f[:len(f)-4] for f in os.listdir("./" + folder) if f.endswith(".svg")]
varnames = []
for i in images:
        varname = "svg_" + i.replace('-', '_')
varnames.append(varname)
jsfile.write("import " + varname + " from './" + folder + "/" + i + ".svg';\n")
jsfile.write("\n")
jsfile.write("const " + folder + " = [" + ", ".join(varnames) + "];\n")
jsfile.write("\n")
jsfile.write("export default " + folder + ";")
jsfile.close()
| true
| true
|
790741977248c62a95430223435d1b1457462410
| 641
|
py
|
Python
|
tracking/management/commands/harvest_dfes_feed.py
|
fahmidaward/resource_tracking
|
8531a01e8b5c5fb20dcb8bef11ab17d7b68f4624
|
[
"BSD-3-Clause"
] | null | null | null |
tracking/management/commands/harvest_dfes_feed.py
|
fahmidaward/resource_tracking
|
8531a01e8b5c5fb20dcb8bef11ab17d7b68f4624
|
[
"BSD-3-Clause"
] | null | null | null |
tracking/management/commands/harvest_dfes_feed.py
|
fahmidaward/resource_tracking
|
8531a01e8b5c5fb20dcb8bef11ab17d7b68f4624
|
[
"BSD-3-Clause"
] | null | null | null |
from tracking.harvest import save_dfes_avl
from django.core.management.base import BaseCommand
import logging
LOGGER = logging.getLogger('tracking_points')
class Command(BaseCommand):
help = "Runs harvest_tracking_email to harvest points"
def handle(self, *args, **options):
LOGGER.info('Harvesting DFES feed')
try:
            print("Harvested {} from DFES; created {}, updated {}, ignored {}; Earliest seen {}, Latest seen {}.".format(*save_dfes_avl()))
#LOGGER.info("Updated {} of {} scanned DFES devices".format(updated, num_records))
except Exception as e:
LOGGER.error(e)
| 32.05
| 140
| 0.673947
|
from tracking.harvest import save_dfes_avl
from django.core.management.base import BaseCommand
import logging
LOGGER = logging.getLogger('tracking_points')
class Command(BaseCommand):
help = "Runs harvest_tracking_email to harvest points"
def handle(self, *args, **options):
LOGGER.info('Harvesting DFES feed')
try:
            print("Harvested {} from DFES; created {}, updated {}, ignored {}; Earliest seen {}, Latest seen {}.".format(*save_dfes_avl()))
except Exception as e:
LOGGER.error(e)
| true
| true
|
790741cb1325bab36148da984a9533c55701a27d
| 1,366
|
py
|
Python
|
src/const.py
|
thales-ucas/wumpus
|
ac0f0e8520d367767eb15fd5e696ccc6b8e98098
|
[
"Apache-2.0"
] | null | null | null |
src/const.py
|
thales-ucas/wumpus
|
ac0f0e8520d367767eb15fd5e696ccc6b8e98098
|
[
"Apache-2.0"
] | null | null | null |
src/const.py
|
thales-ucas/wumpus
|
ac0f0e8520d367767eb15fd5e696ccc6b8e98098
|
[
"Apache-2.0"
] | null | null | null |
class Const:
"""
    Constants
"""
class ConstError(TypeError):pass
def __setattr__(self, name, value):
if name in self.__dict__:
raise self.ConstError("Can't rebind const (%s)" %name)
self.__dict__[name]=value
LAYOUT = Const()
"""
Layout
"""
LAYOUT.SCREEN_WIDTH = 500
LAYOUT.SCREEN_HEIGHT = 600
LAYOUT.SIZE = 4
LAYOUT.TERRAIN_X = 50
LAYOUT.TERRAIN_Y = 20
LAYOUT.TILE_WIDTH = 100
LAYOUT.TILE_HEIGHT = 90
LAYOUT.SCOREBOARD_X = 50
LAYOUT.SCOREBOARD_Y = 400
LAYOUT.POPUP_X = 100
LAYOUT.POPUP_Y = 400
LAYOUT.POPUP_WIDTH = 300
LAYOUT.POPUP_HEIGHT = 200
IMAGE = Const()
"""
Images
"""
IMAGE.TILE = "assets/tile.png"  # floor tile
IMAGE.MIST = "assets/mist.png"  # fog of war
IMAGE.HERO = "assets/hero.png"  # hero
IMAGE.MONSTER = "assets/monster.png"  # monster
IMAGE.PIT = "assets/pit.png"  # pit
IMAGE.GOLD = "assets/gold.png"  # gold
IMAGE.BREEZE = "assets/breeze.png"  # breeze
IMAGE.STRENCH = "assets/strench.png"  # stench
EVENT = Const()
"""
Events
"""
EVENT.GAME_OVER = "gameOver"  # game over
EVENT.GAME_CLEAR = "gameClear"  # stage cleared
EVENT.MONSTER_DEAD = "monsterDead"  # monster died
EVENT.HERO_WALK = "heroWalk"  # hero walks
EVENT.HERO_ATTACK = "heroAttack"  # hero attacks
EVENT.DANGER = "danger"  # danger encountered
ENCOUNTER = Const()
"""
Encounters
"""
ENCOUNTER.MONSTER = 21  # monster
ENCOUNTER.PIT = 22  # pit
ENCOUNTER.GOLD = 10  # gold
SCORE = Const()
"""
Score
"""
SCORE.WALK = -1  # walking
SCORE.WIN = 1000  # victory
SCORE.LOSE = -1000  # defeat
SCORE.ATTACK = -10  # attack
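# Illustrative note (not part of the original module): once assigned, these
# attributes cannot be rebound; for example
#
#     SCORE.WALK = -2   # raises Const.ConstError: Can't rebind const (WALK)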
| 20.088235
| 60
| 0.682284
|
class Const:
class ConstError(TypeError):pass
def __setattr__(self, name, value):
if name in self.__dict__:
raise self.ConstError("Can't rebind const (%s)" %name)
self.__dict__[name]=value
LAYOUT = Const()
LAYOUT.SCREEN_WIDTH = 500
LAYOUT.SCREEN_HEIGHT = 600
LAYOUT.SIZE = 4
LAYOUT.TERRAIN_X = 50
LAYOUT.TERRAIN_Y = 20
LAYOUT.TILE_WIDTH = 100
LAYOUT.TILE_HEIGHT = 90
LAYOUT.SCOREBOARD_X = 50
LAYOUT.SCOREBOARD_Y = 400
LAYOUT.POPUP_X = 100
LAYOUT.POPUP_Y = 400
LAYOUT.POPUP_WIDTH = 300
LAYOUT.POPUP_HEIGHT = 200
IMAGE = Const()
IMAGE.TILE = "assets/tile.png"  # floor tile
IMAGE.MIST = "assets/mist.png"  # fog of war
IMAGE.HERO = "assets/hero.png"  # hero
IMAGE.MONSTER = "assets/monster.png"  # monster
IMAGE.PIT = "assets/pit.png"  # pit
IMAGE.GOLD = "assets/gold.png"  # gold
IMAGE.BREEZE = "assets/breeze.png"  # breeze
IMAGE.STRENCH = "assets/strench.png"  # stench
EVENT = Const()
EVENT.GAME_OVER = "gameOver"  # game over
EVENT.GAME_CLEAR = "gameClear"  # stage cleared
EVENT.MONSTER_DEAD = "monsterDead"  # monster died
EVENT.HERO_WALK = "heroWalk"  # hero walks
EVENT.HERO_ATTACK = "heroAttack"  # hero attacks
EVENT.DANGER = "danger"  # danger encountered
ENCOUNTER = Const()
ENCOUNTER.MONSTER = 21  # monster
ENCOUNTER.PIT = 22  # pit
ENCOUNTER.GOLD = 10  # gold
SCORE = Const()
SCORE.WALK = -1  # walking
SCORE.WIN = 1000  # victory
SCORE.LOSE = -1000  # defeat
SCORE.ATTACK = -10  # attack
| true
| true
|
7907427be8f72396c3113eab88bebac73805bbb9
| 2,041
|
py
|
Python
|
utils/backup.py
|
carolinscholl/SORN
|
99f908c88265ecc26dad195b56bebfa12838591f
|
[
"MIT"
] | null | null | null |
utils/backup.py
|
carolinscholl/SORN
|
99f908c88265ecc26dad195b56bebfa12838591f
|
[
"MIT"
] | null | null | null |
utils/backup.py
|
carolinscholl/SORN
|
99f908c88265ecc26dad195b56bebfa12838591f
|
[
"MIT"
] | null | null | null |
"""Backup handler
This script is contains the backup handling functions.
"""
import os
import time
import pickle
import shutil
from shutil import ignore_patterns
import pypianoroll
import numpy as np
def backup_pickle(experiment, stats):
    '''
Back up handling function.
Arguments:
experiment -- Experiment object, contains the initial sorn parameters
stats -- bunch of stats stored during the simulation
'''
params = experiment.init_params
results_dir = experiment.results_dir
files_tosave = experiment.files_tosave
directory = ('backup/{}'.format(results_dir))
# creates a new directory for storing the results
# sleeps for a short time to avoid conflicts when running in parallel
time.sleep(np.random.rand())
for n_sim in range(1, 1000):
final_dir = '{}_{}/'.format(directory, str(n_sim))
if not os.path.exists(final_dir):
try:
os.makedirs(final_dir)
break
except:
pass
if 'params' in files_tosave:
with open(final_dir+'init_params.p', 'wb') as f:
pickle.dump(params, f)
if 'stats' in files_tosave:
# generate MIDI track if MusicTask
if hasattr(stats, 'track'):
stats.track.write(final_dir+'sample.mid')
# delete attributes that occupy a lot of memory space
if hasattr(stats, 'input_index_readout'):
del stats.input_index_readout
if hasattr(stats, 'input_readout'):
del stats.input_readout
if hasattr(stats, 'raster_readout'):
del stats.raster_readout
if hasattr(stats, 't_past'):
del stats.t_past
with open(final_dir+'stats.p', 'wb') as f:
pickle.dump(stats, f)
if 'scripts' in files_tosave:
# TODO: this should not need a '_'
for f in ['utils', 'common', results_dir.split('_')[0]]:
shutil.copytree(f, final_dir+f,
ignore=ignore_patterns('*.pyc', '*.git'))
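# Usage sketch (hypothetical objects, not part of the original module): given an
# `experiment` exposing init_params, results_dir and files_tosave, and a `stats`
# bunch, backup_pickle(experiment, stats) pickles them into a fresh
# backup/<results_dir>_<n>/ directory and, if requested, copies the source trees.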
| 30.014706
| 73
| 0.622244
|
import os
import time
import pickle
import shutil
from shutil import ignore_patterns
import pypianoroll
import numpy as np
def backup_pickle(experiment, stats):
params = experiment.init_params
results_dir = experiment.results_dir
files_tosave = experiment.files_tosave
directory = ('backup/{}'.format(results_dir))
time.sleep(np.random.rand())
for n_sim in range(1, 1000):
final_dir = '{}_{}/'.format(directory, str(n_sim))
if not os.path.exists(final_dir):
try:
os.makedirs(final_dir)
break
except:
pass
if 'params' in files_tosave:
with open(final_dir+'init_params.p', 'wb') as f:
pickle.dump(params, f)
if 'stats' in files_tosave:
if hasattr(stats, 'track'):
stats.track.write(final_dir+'sample.mid')
if hasattr(stats, 'input_index_readout'):
del stats.input_index_readout
if hasattr(stats, 'input_readout'):
del stats.input_readout
if hasattr(stats, 'raster_readout'):
del stats.raster_readout
if hasattr(stats, 't_past'):
del stats.t_past
with open(final_dir+'stats.p', 'wb') as f:
pickle.dump(stats, f)
if 'scripts' in files_tosave:
for f in ['utils', 'common', results_dir.split('_')[0]]:
shutil.copytree(f, final_dir+f,
ignore=ignore_patterns('*.pyc', '*.git'))
| true
| true
|
7907428d61716a5a24ac23ab8abac0f05f220c27
| 8,794
|
py
|
Python
|
vmware_nsx/services/lbaas/nsx_v3/v2/lb_driver_v2.py
|
yebinama/vmware-nsx
|
5f59ce8d4668c24e0f4f934898fb4b4e63f1c2f4
|
[
"Apache-2.0"
] | null | null | null |
vmware_nsx/services/lbaas/nsx_v3/v2/lb_driver_v2.py
|
yebinama/vmware-nsx
|
5f59ce8d4668c24e0f4f934898fb4b4e63f1c2f4
|
[
"Apache-2.0"
] | null | null | null |
vmware_nsx/services/lbaas/nsx_v3/v2/lb_driver_v2.py
|
yebinama/vmware-nsx
|
5f59ce8d4668c24e0f4f934898fb4b4e63f1c2f4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants as n_consts
from neutron_lib import exceptions as n_exc
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from vmware_nsx._i18n import _
from vmware_nsx.db import db as nsx_db
from vmware_nsx.services.lbaas import base_mgr
from vmware_nsx.services.lbaas import lb_helper
from vmware_nsx.services.lbaas import lb_translators
from vmware_nsx.services.lbaas.nsx_v3.implementation import healthmonitor_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import l7policy_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import l7rule_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import listener_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import loadbalancer_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import member_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import pool_mgr
from vmware_nsx.services.lbaas.octavia import constants as oct_const
LOG = logging.getLogger(__name__)
class NotImplementedManager(object):
"""Helper class to make any subclass of LoadBalancerBaseDriver explode if
it is missing any of the required object managers.
"""
def create(self, context, obj):
raise NotImplementedError()
def update(self, context, old_obj, obj):
raise NotImplementedError()
def delete(self, context, obj):
raise NotImplementedError()
class EdgeLoadbalancerDriverV2(base_mgr.LoadbalancerBaseManager):
@log_helpers.log_method_call
def __init__(self):
super(EdgeLoadbalancerDriverV2, self).__init__()
# Init all LBaaS objects
# Note(asarfaty): self.lbv2_driver is not yet defined at init time
# so lambda is used to retrieve it later.
self.loadbalancer = lb_helper.LBaaSNSXObjectManagerWrapper(
"loadbalancer",
loadbalancer_mgr.EdgeLoadBalancerManagerFromDict(),
lb_translators.lb_loadbalancer_obj_to_dict,
lambda: self.lbv2_driver.load_balancer)
self.listener = lb_helper.LBaaSNSXObjectManagerWrapper(
"listener",
listener_mgr.EdgeListenerManagerFromDict(),
lb_translators.lb_listener_obj_to_dict,
lambda: self.lbv2_driver.listener)
self.pool = lb_helper.LBaaSNSXObjectManagerWrapper(
"pool",
pool_mgr.EdgePoolManagerFromDict(),
lb_translators.lb_pool_obj_to_dict,
lambda: self.lbv2_driver.pool)
self.member = lb_helper.LBaaSNSXObjectManagerWrapper(
"member",
member_mgr.EdgeMemberManagerFromDict(),
lb_translators.lb_member_obj_to_dict,
lambda: self.lbv2_driver.member)
self.healthmonitor = lb_helper.LBaaSNSXObjectManagerWrapper(
"healthmonitor",
healthmonitor_mgr.EdgeHealthMonitorManagerFromDict(),
lb_translators.lb_hm_obj_to_dict,
lambda: self.lbv2_driver.health_monitor)
self.l7policy = lb_helper.LBaaSNSXObjectManagerWrapper(
"l7policy",
l7policy_mgr.EdgeL7PolicyManagerFromDict(),
lb_translators.lb_l7policy_obj_to_dict,
lambda: self.lbv2_driver.l7policy)
self.l7rule = lb_helper.LBaaSNSXObjectManagerWrapper(
"l7rule",
l7rule_mgr.EdgeL7RuleManagerFromDict(),
lb_translators.lb_l7rule_obj_to_dict,
lambda: self.lbv2_driver.l7rule)
self._subscribe_router_delete_callback()
def _subscribe_router_delete_callback(self):
# Check if there is any LB attachment for the NSX router.
# This callback is subscribed here to prevent router/GW/interface
# deletion if it still has LB service attached to it.
        # Note(asarfaty): Those callbacks are used by Octavia as well even
# though they are bound only here
registry.subscribe(self._check_lb_service_on_router,
resources.ROUTER, events.BEFORE_DELETE)
registry.subscribe(self._check_lb_service_on_router,
resources.ROUTER_GATEWAY, events.BEFORE_DELETE)
registry.subscribe(self._check_lb_service_on_router_interface,
resources.ROUTER_INTERFACE, events.BEFORE_DELETE)
def _unsubscribe_router_delete_callback(self):
registry.unsubscribe(self._check_lb_service_on_router,
resources.ROUTER, events.BEFORE_DELETE)
registry.unsubscribe(self._check_lb_service_on_router,
resources.ROUTER_GATEWAY, events.BEFORE_DELETE)
registry.unsubscribe(self._check_lb_service_on_router_interface,
resources.ROUTER_INTERFACE, events.BEFORE_DELETE)
def _get_lb_ports(self, context, subnet_ids):
dev_owner_v2 = n_consts.DEVICE_OWNER_LOADBALANCERV2
dev_owner_oct = oct_const.DEVICE_OWNER_OCTAVIA
filters = {'device_owner': [dev_owner_v2, dev_owner_oct],
'fixed_ips': {'subnet_id': subnet_ids}}
return self.loadbalancer.core_plugin.get_ports(
context, filters=filters)
def _check_lb_service_on_router(self, resource, event, trigger,
payload=None):
"""Prevent removing a router GW or deleting a router used by LB"""
router_id = payload.resource_id
context = payload.context
nsx_router_id = nsx_db.get_nsx_router_id(context.session,
router_id)
if not nsx_router_id:
# Skip non-v3 routers (could be a V router in case of TVD plugin)
return
nsxlib = self.loadbalancer.core_plugin.nsxlib
service_client = nsxlib.load_balancer.service
# Check if there is any lb service on nsx router
lb_service = service_client.get_router_lb_service(nsx_router_id)
if lb_service:
msg = _('Cannot delete a %s as it still has lb service '
'attachment') % resource
raise n_exc.BadRequest(resource='lbaas-lb', msg=msg)
# Also check if there are any loadbalancers attached to this router
# subnets
core_plugin = self.loadbalancer.core_plugin
router_subnets = core_plugin._load_router_subnet_cidrs_from_db(
context.elevated(), router_id)
subnet_ids = [subnet['id'] for subnet in router_subnets]
if subnet_ids and self._get_lb_ports(context.elevated(), subnet_ids):
            msg = (_('Cannot delete a %s as it is used by a loadbalancer') %
resource)
raise n_exc.BadRequest(resource='lbaas-lb', msg=msg)
def _check_lb_service_on_router_interface(
self, resource, event, trigger, payload=None):
# Prevent removing the interface of an LB subnet from a router
router_id = payload.resource_id
subnet_id = payload.metadata.get('subnet_id')
if not router_id or not subnet_id:
return
nsx_router_id = nsx_db.get_nsx_router_id(payload.context.session,
router_id)
if not nsx_router_id:
# Skip non-v3 routers (could be a V router in case of TVD plugin)
return
# get LB ports and check if any loadbalancer is using this subnet
if self._get_lb_ports(payload.context.elevated(), [subnet_id]):
            msg = _('Cannot delete a router interface as it is used by a '
'loadbalancer')
raise n_exc.BadRequest(resource='lbaas-lb', msg=msg)
class DummyLoadbalancerDriverV2(object):
@log_helpers.log_method_call
def __init__(self):
self.loadbalancer = NotImplementedManager()
self.listener = NotImplementedManager()
self.pool = NotImplementedManager()
self.member = NotImplementedManager()
self.health_monitor = NotImplementedManager()
self.l7policy = NotImplementedManager()
self.l7rule = NotImplementedManager()
| 44.190955
| 78
| 0.690243
|
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants as n_consts
from neutron_lib import exceptions as n_exc
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from vmware_nsx._i18n import _
from vmware_nsx.db import db as nsx_db
from vmware_nsx.services.lbaas import base_mgr
from vmware_nsx.services.lbaas import lb_helper
from vmware_nsx.services.lbaas import lb_translators
from vmware_nsx.services.lbaas.nsx_v3.implementation import healthmonitor_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import l7policy_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import l7rule_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import listener_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import loadbalancer_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import member_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import pool_mgr
from vmware_nsx.services.lbaas.octavia import constants as oct_const
LOG = logging.getLogger(__name__)
class NotImplementedManager(object):
def create(self, context, obj):
raise NotImplementedError()
def update(self, context, old_obj, obj):
raise NotImplementedError()
def delete(self, context, obj):
raise NotImplementedError()
class EdgeLoadbalancerDriverV2(base_mgr.LoadbalancerBaseManager):
@log_helpers.log_method_call
def __init__(self):
super(EdgeLoadbalancerDriverV2, self).__init__()
self.loadbalancer = lb_helper.LBaaSNSXObjectManagerWrapper(
"loadbalancer",
loadbalancer_mgr.EdgeLoadBalancerManagerFromDict(),
lb_translators.lb_loadbalancer_obj_to_dict,
lambda: self.lbv2_driver.load_balancer)
self.listener = lb_helper.LBaaSNSXObjectManagerWrapper(
"listener",
listener_mgr.EdgeListenerManagerFromDict(),
lb_translators.lb_listener_obj_to_dict,
lambda: self.lbv2_driver.listener)
self.pool = lb_helper.LBaaSNSXObjectManagerWrapper(
"pool",
pool_mgr.EdgePoolManagerFromDict(),
lb_translators.lb_pool_obj_to_dict,
lambda: self.lbv2_driver.pool)
self.member = lb_helper.LBaaSNSXObjectManagerWrapper(
"member",
member_mgr.EdgeMemberManagerFromDict(),
lb_translators.lb_member_obj_to_dict,
lambda: self.lbv2_driver.member)
self.healthmonitor = lb_helper.LBaaSNSXObjectManagerWrapper(
"healthmonitor",
healthmonitor_mgr.EdgeHealthMonitorManagerFromDict(),
lb_translators.lb_hm_obj_to_dict,
lambda: self.lbv2_driver.health_monitor)
self.l7policy = lb_helper.LBaaSNSXObjectManagerWrapper(
"l7policy",
l7policy_mgr.EdgeL7PolicyManagerFromDict(),
lb_translators.lb_l7policy_obj_to_dict,
lambda: self.lbv2_driver.l7policy)
self.l7rule = lb_helper.LBaaSNSXObjectManagerWrapper(
"l7rule",
l7rule_mgr.EdgeL7RuleManagerFromDict(),
lb_translators.lb_l7rule_obj_to_dict,
lambda: self.lbv2_driver.l7rule)
self._subscribe_router_delete_callback()
def _subscribe_router_delete_callback(self):
registry.subscribe(self._check_lb_service_on_router,
resources.ROUTER, events.BEFORE_DELETE)
registry.subscribe(self._check_lb_service_on_router,
resources.ROUTER_GATEWAY, events.BEFORE_DELETE)
registry.subscribe(self._check_lb_service_on_router_interface,
resources.ROUTER_INTERFACE, events.BEFORE_DELETE)
def _unsubscribe_router_delete_callback(self):
registry.unsubscribe(self._check_lb_service_on_router,
resources.ROUTER, events.BEFORE_DELETE)
registry.unsubscribe(self._check_lb_service_on_router,
resources.ROUTER_GATEWAY, events.BEFORE_DELETE)
registry.unsubscribe(self._check_lb_service_on_router_interface,
resources.ROUTER_INTERFACE, events.BEFORE_DELETE)
def _get_lb_ports(self, context, subnet_ids):
dev_owner_v2 = n_consts.DEVICE_OWNER_LOADBALANCERV2
dev_owner_oct = oct_const.DEVICE_OWNER_OCTAVIA
filters = {'device_owner': [dev_owner_v2, dev_owner_oct],
'fixed_ips': {'subnet_id': subnet_ids}}
return self.loadbalancer.core_plugin.get_ports(
context, filters=filters)
def _check_lb_service_on_router(self, resource, event, trigger,
payload=None):
router_id = payload.resource_id
context = payload.context
nsx_router_id = nsx_db.get_nsx_router_id(context.session,
router_id)
if not nsx_router_id:
return
nsxlib = self.loadbalancer.core_plugin.nsxlib
service_client = nsxlib.load_balancer.service
lb_service = service_client.get_router_lb_service(nsx_router_id)
if lb_service:
msg = _('Cannot delete a %s as it still has lb service '
'attachment') % resource
raise n_exc.BadRequest(resource='lbaas-lb', msg=msg)
core_plugin = self.loadbalancer.core_plugin
router_subnets = core_plugin._load_router_subnet_cidrs_from_db(
context.elevated(), router_id)
subnet_ids = [subnet['id'] for subnet in router_subnets]
if subnet_ids and self._get_lb_ports(context.elevated(), subnet_ids):
            msg = (_('Cannot delete a %s as it is used by a loadbalancer') %
resource)
raise n_exc.BadRequest(resource='lbaas-lb', msg=msg)
def _check_lb_service_on_router_interface(
self, resource, event, trigger, payload=None):
router_id = payload.resource_id
subnet_id = payload.metadata.get('subnet_id')
if not router_id or not subnet_id:
return
nsx_router_id = nsx_db.get_nsx_router_id(payload.context.session,
router_id)
if not nsx_router_id:
return
if self._get_lb_ports(payload.context.elevated(), [subnet_id]):
            msg = _('Cannot delete a router interface as it is used by a '
'loadbalancer')
raise n_exc.BadRequest(resource='lbaas-lb', msg=msg)
class DummyLoadbalancerDriverV2(object):
@log_helpers.log_method_call
def __init__(self):
self.loadbalancer = NotImplementedManager()
self.listener = NotImplementedManager()
self.pool = NotImplementedManager()
self.member = NotImplementedManager()
self.health_monitor = NotImplementedManager()
self.l7policy = NotImplementedManager()
self.l7rule = NotImplementedManager()
| true
| true
|
79074355b1847676439e6af59824fc3de8401d0f
| 6,995
|
py
|
Python
|
scripts/train_cd.py
|
monocilindro/torchsat
|
5ac62e1aa9fee1d7a5a4a58914c128cf8e18cc09
|
[
"MIT"
] | 316
|
2019-08-14T11:56:13.000Z
|
2022-03-31T06:15:50.000Z
|
scripts/train_cd.py
|
monocilindro/torchsat
|
5ac62e1aa9fee1d7a5a4a58914c128cf8e18cc09
|
[
"MIT"
] | 8
|
2019-10-07T20:16:08.000Z
|
2021-09-03T18:09:20.000Z
|
scripts/train_cd.py
|
monocilindro/torchsat
|
5ac62e1aa9fee1d7a5a4a58914c128cf8e18cc09
|
[
"MIT"
] | 49
|
2019-08-14T11:55:22.000Z
|
2022-01-31T16:43:41.000Z
|
import argparse
import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from ignite.metrics import IoU, Precision, Recall
import torchsat.transforms.transforms_cd as T
from torchsat.datasets.folder import ChangeDetectionDataset
from torchsat.models import FC_EF, FC_Siam_Conc, FC_Siam_Diff
def train_one_epoch(epoch, dataloader, model, criterion, optimizer, device, writer):
print('train epoch {}'.format(epoch))
model.train()
for idx, (pre_img, post_img, targets) in enumerate(dataloader):
pre_img, post_img, targets = pre_img.to(device), post_img.to(device), targets.to(device)
outputs = model(pre_img, post_img)
loss = criterion(outputs, targets)
optimizer.zero_grad()
loss.backward()
optimizer.step()
print('train-epoch:{} [{}/{}], loss: {:5.3}'.format(epoch, idx+1, len(dataloader), loss.item()))
writer.add_scalar('train/loss', loss.item(), len(dataloader)*epoch+idx)
def evalidation(epoch, dataloader, model, criterion, device, writer, tb_test_imgs):
print('\neval epoch {}'.format(epoch))
model.eval()
recall = Recall(lambda x: (x[0], x[1]))
precision = Precision(lambda x: (x[0], x[1]))
mean_recall = []
mean_precision = []
mean_loss = []
with torch.no_grad():
for idx, (pre_img, post_img, targets) in enumerate(dataloader):
pre_img, post_img, targets = pre_img.to(device), post_img.to(device), targets.to(device)
outputs = model(pre_img, post_img)
loss = criterion(outputs, targets)
preds = outputs.argmax(1)
precision.update((preds, targets))
recall.update((preds, targets))
mean_loss.append(loss.item())
mean_recall.append(recall.compute().item())
mean_precision.append(precision.compute().item())
# print('val-epoch:{} [{}/{}], loss: {:5.3}'.format(epoch, idx + 1, len(dataloader), loss.item()))
writer.add_scalar('test/loss', loss.item(), len(dataloader) * epoch + idx)
if idx < tb_test_imgs:
writer.add_image('test/pre', pre_img[0], idx)
writer.add_image('test/post', post_img[0], idx)
                writer.add_image('test/label', targets[0], idx, dataformats='HW')
                writer.add_image('test/pred', preds[0], idx, dataformats='HW')
mean_precision, mean_recall = np.array(mean_precision).mean(), np.array(mean_recall).mean()
f1 = mean_precision * mean_recall * 2 / (mean_precision + mean_recall + 1e-20)
print('precision: {:07.5}, recall: {:07.5}, f1: {:07.5}\n'.format(mean_precision, mean_recall, f1))
writer.add_scalar('test/epoch-loss', np.array(mean_loss).mean(), epoch)
writer.add_scalar('test/f1', f1, epoch)
writer.add_scalar('test/precision', mean_precision, epoch)
writer.add_scalar('test/recall', mean_recall, epoch)
def load_data(traindir, valdir, **kwargs):
"""generate the train and val dataloader, you can change this for your specific task
Args:
traindir (str): train dataset dir
valdir (str): validation dataset dir
Returns:
tuple: the train dataset and validation dataset
"""
train_transform = T.Compose([
T.RandomCrop(512),
T.RandomHorizontalFlip(),
T.RandomVerticalFlip(),
T.ToTensor(),
T.Normalize(),
])
val_transform = T.Compose([
T.ToTensor(),
T.Normalize(),
])
dataset_train = ChangeDetectionDataset(traindir, extentions=kwargs['extensions'], transforms=train_transform, )
dataset_val = ChangeDetectionDataset(valdir, extentions=kwargs['extensions'], transforms=val_transform)
return dataset_train, dataset_val
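# Hedged example call (the paths are placeholders, not from the original script):
#
#     train_ds, val_ds = load_data('data/train', 'data/val', extensions=['jpg', 'png'])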
def main(args):
torch.backends.cudnn.benchmark = True
device = torch.device('cuda' if args.device == 'cuda' else 'cpu')
# dataset and dataloader
train_data, val_data = load_data(args.train_path, args.val_path, extensions=args.extensions)
train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=True)
val_loader = DataLoader(val_data, batch_size=1, shuffle=False)
# model
# model = get_model(args.model, args.num_classes, pretrained=args.pretrained)
# model = FC_EF(num_classes=args.num_classes)
model = FC_Siam_Diff(num_classes=args.num_classes)
model.to(device)
if args.resume:
model.load_state_dict(torch.load(args.resume, map_location=device))
# TODO: resume learning rate
# loss
criterion = nn.CrossEntropyLoss().to(device)
    # criterion = nn.BCELoss()  # alternative loss; unused (outputs are per-class logits, targets are class indices)
# optim and lr scheduler
optimizer = optim.Adam(model.parameters(), lr=args.lr)
lr_scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=10, T_mult=1, eta_min=1e-8)
# lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
writer = SummaryWriter(args.ckp_dir)
for epoch in range(args.epochs):
writer.add_scalar('train/lr', lr_scheduler.get_lr()[0], epoch)
train_one_epoch(epoch, train_loader, model, criterion, optimizer, device, writer)
evalidation(epoch, val_loader, model, criterion, device, writer, args.tb_test_imgs)
lr_scheduler.step()
if epoch % 2 == 0:
torch.save(model.state_dict(), os.path.join(args.ckp_dir, 'cd_epoch_{}.pth'.format(epoch)))
def parse_args():
parser = argparse.ArgumentParser(description='TorchSat Change Detection Training Script')
parser.add_argument('--train-path', help='train dataset path')
parser.add_argument('--val-path', help='validate dataset path')
parser.add_argument('--extensions', nargs='+', default='jpg', help='the train image extension')
parser.add_argument('--model', default="unet34", help='model name. default, unet34')
parser.add_argument('--pretrained', default=True, help='use ImageNet pretrained params')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--num-classes', default=3, type=int, help='num of classes')
parser.add_argument('--in-channels', default=3, type=int, help='input image channels')
parser.add_argument('--device', default='cpu', help='device')
parser.add_argument('-b', '--batch-size', default=16, type=int, help='batch size')
parser.add_argument('--epochs', default=90, type=int, help='epochs')
parser.add_argument('--lr', default=0.01, type=float, help='initial learning rate')
parser.add_argument('--print-freq', default=10, type=int, help='print frequency')
parser.add_argument('--ckp-dir', default='./', help='path to save checkpoint')
parser.add_argument('--tb-test-imgs', default=10, help='the num of test image show in tensorboard')
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main(args)
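# Hedged invocation sketch (paths and values are placeholders, not from the original
# script):
#
#     python train_cd.py --train-path data/train --val-path data/val \
#         --extensions jpg png --device cuda -b 8 --ckp-dir runs/cd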
| 42.393939
| 115
| 0.674482
|
import argparse
import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from ignite.metrics import IoU, Precision, Recall
import torchsat.transforms.transforms_cd as T
from torchsat.datasets.folder import ChangeDetectionDataset
from torchsat.models import FC_EF, FC_Siam_Conc, FC_Siam_Diff
def train_one_epoch(epoch, dataloader, model, criterion, optimizer, device, writer):
print('train epoch {}'.format(epoch))
model.train()
for idx, (pre_img, post_img, targets) in enumerate(dataloader):
pre_img, post_img, targets = pre_img.to(device), post_img.to(device), targets.to(device)
outputs = model(pre_img, post_img)
loss = criterion(outputs, targets)
optimizer.zero_grad()
loss.backward()
optimizer.step()
print('train-epoch:{} [{}/{}], loss: {:5.3}'.format(epoch, idx+1, len(dataloader), loss.item()))
writer.add_scalar('train/loss', loss.item(), len(dataloader)*epoch+idx)
def evalidation(epoch, dataloader, model, criterion, device, writer, tb_test_imgs):
print('\neval epoch {}'.format(epoch))
model.eval()
recall = Recall(lambda x: (x[0], x[1]))
precision = Precision(lambda x: (x[0], x[1]))
mean_recall = []
mean_precision = []
mean_loss = []
with torch.no_grad():
for idx, (pre_img, post_img, targets) in enumerate(dataloader):
pre_img, post_img, targets = pre_img.to(device), post_img.to(device), targets.to(device)
outputs = model(pre_img, post_img)
loss = criterion(outputs, targets)
preds = outputs.argmax(1)
precision.update((preds, targets))
recall.update((preds, targets))
mean_loss.append(loss.item())
mean_recall.append(recall.compute().item())
mean_precision.append(precision.compute().item())
writer.add_scalar('test/loss', loss.item(), len(dataloader) * epoch + idx)
if idx < tb_test_imgs:
writer.add_image('test/pre', pre_img[0], idx)
writer.add_image('test/post', post_img[0], idx)
                writer.add_image('test/label', targets[0], idx, dataformats='HW')
                writer.add_image('test/pred', preds[0], idx, dataformats='HW')
mean_precision, mean_recall = np.array(mean_precision).mean(), np.array(mean_recall).mean()
f1 = mean_precision * mean_recall * 2 / (mean_precision + mean_recall + 1e-20)
print('precision: {:07.5}, recall: {:07.5}, f1: {:07.5}\n'.format(mean_precision, mean_recall, f1))
writer.add_scalar('test/epoch-loss', np.array(mean_loss).mean(), epoch)
writer.add_scalar('test/f1', f1, epoch)
writer.add_scalar('test/precision', mean_precision, epoch)
writer.add_scalar('test/recall', mean_recall, epoch)
def load_data(traindir, valdir, **kwargs):
train_transform = T.Compose([
T.RandomCrop(512),
T.RandomHorizontalFlip(),
T.RandomVerticalFlip(),
T.ToTensor(),
T.Normalize(),
])
val_transform = T.Compose([
T.ToTensor(),
T.Normalize(),
])
dataset_train = ChangeDetectionDataset(traindir, extentions=kwargs['extensions'], transforms=train_transform, )
dataset_val = ChangeDetectionDataset(valdir, extentions=kwargs['extensions'], transforms=val_transform)
return dataset_train, dataset_val
def main(args):
torch.backends.cudnn.benchmark = True
device = torch.device('cuda' if args.device == 'cuda' else 'cpu')
train_data, val_data = load_data(args.train_path, args.val_path, extensions=args.extensions)
train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=True)
val_loader = DataLoader(val_data, batch_size=1, shuffle=False)
model = FC_Siam_Diff(num_classes=args.num_classes)
model.to(device)
if args.resume:
model.load_state_dict(torch.load(args.resume, map_location=device))
criterion = nn.CrossEntropyLoss().to(device)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
lr_scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=10, T_mult=1, eta_min=1e-8)
writer = SummaryWriter(args.ckp_dir)
for epoch in range(args.epochs):
writer.add_scalar('train/lr', lr_scheduler.get_lr()[0], epoch)
train_one_epoch(epoch, train_loader, model, criterion, optimizer, device, writer)
evalidation(epoch, val_loader, model, criterion, device, writer, args.tb_test_imgs)
lr_scheduler.step()
if epoch % 2 == 0:
torch.save(model.state_dict(), os.path.join(args.ckp_dir, 'cd_epoch_{}.pth'.format(epoch)))
def parse_args():
parser = argparse.ArgumentParser(description='TorchSat Change Detection Training Script')
parser.add_argument('--train-path', help='train dataset path')
parser.add_argument('--val-path', help='validate dataset path')
parser.add_argument('--extensions', nargs='+', default='jpg', help='the train image extension')
parser.add_argument('--model', default="unet34", help='model name. default, unet34')
parser.add_argument('--pretrained', default=True, help='use ImageNet pretrained params')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--num-classes', default=3, type=int, help='num of classes')
parser.add_argument('--in-channels', default=3, type=int, help='input image channels')
parser.add_argument('--device', default='cpu', help='device')
parser.add_argument('-b', '--batch-size', default=16, type=int, help='batch size')
parser.add_argument('--epochs', default=90, type=int, help='epochs')
parser.add_argument('--lr', default=0.01, type=float, help='initial learning rate')
parser.add_argument('--print-freq', default=10, type=int, help='print frequency')
parser.add_argument('--ckp-dir', default='./', help='path to save checkpoint')
parser.add_argument('--tb-test-imgs', default=10, help='the num of test image show in tensorboard')
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main(args)
| true
| true
|
7907441971f9015080aed7fa9f07895318277f60
| 694
|
py
|
Python
|
Beijin-Tuiwen/Python/gen_data.py
|
Paradise02/Interviews
|
5ac16a3e7b2a8335c9cdc9821773370ebdb42e41
|
[
"MIT"
] | null | null | null |
Beijin-Tuiwen/Python/gen_data.py
|
Paradise02/Interviews
|
5ac16a3e7b2a8335c9cdc9821773370ebdb42e41
|
[
"MIT"
] | null | null | null |
Beijin-Tuiwen/Python/gen_data.py
|
Paradise02/Interviews
|
5ac16a3e7b2a8335c9cdc9821773370ebdb42e41
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import sys
import datetime
import random
def main(n):
start = -86400 * 365 * 20
end = 86400 * 365
filename = 'testdata-' + str(n) + '.txt'
with open(filename, 'w') as fp:
now = datetime.datetime.now()
for i in range(n):
d = datetime.timedelta(seconds=random.randint(start, end))
nd = now + d
fp.write(nd.strftime("%d/%m/%Y %H:%M:%S") + '\n')
print('generate finish {}\n'.format(filename))
if __name__ == '__main__':
if not(len(sys.argv) == 2 and sys.argv[1].isdigit()):
        print('bad input, argument must be a number\n')
exit()
n = int(sys.argv[1])
main(n)
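# Usage sketch (not part of the original script): `python gen_data.py 1000` writes
# testdata-1000.txt with 1000 random "%d/%m/%Y %H:%M:%S" timestamps drawn from
# roughly twenty years in the past up to one year in the future.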
| 26.692308
| 70
| 0.573487
|
from __future__ import print_function
import sys
import datetime
import random
def main(n):
start = -86400 * 365 * 20
end = 86400 * 365
filename = 'testdata-' + str(n) + '.txt'
with open(filename, 'w') as fp:
now = datetime.datetime.now()
for i in range(n):
d = datetime.timedelta(seconds=random.randint(start, end))
nd = now + d
fp.write(nd.strftime("%d/%m/%Y %H:%M:%S") + '\n')
print('generate finish {}\n'.format(filename))
if __name__ == '__main__':
if not(len(sys.argv) == 2 and sys.argv[1].isdigit()):
        print('bad input, argument must be a number\n')
exit()
n = int(sys.argv[1])
main(n)
| true
| true
|
790744d91f2dc3fb98ec2c16dca15372d174634c
| 12,353
|
py
|
Python
|
devito/ir/support/vector.py
|
rhodrin/devito
|
cd1ae745272eb0315aa1c36038a3174f1817e0d0
|
[
"MIT"
] | null | null | null |
devito/ir/support/vector.py
|
rhodrin/devito
|
cd1ae745272eb0315aa1c36038a3174f1817e0d0
|
[
"MIT"
] | null | null | null |
devito/ir/support/vector.py
|
rhodrin/devito
|
cd1ae745272eb0315aa1c36038a3174f1817e0d0
|
[
"MIT"
] | null | null | null |
from collections import OrderedDict
from sympy import Basic, true
from devito.tools import as_tuple, is_integer, memoized_meth
from devito.types import Dimension
__all__ = ['Vector', 'LabeledVector', 'vmin', 'vmax']
class Vector(tuple):
"""
A representation of an object in Z^n.
The elements of a Vector can be integers or generic SymPy expressions.
Notes
-----
1) Vector-scalar comparison
If a comparison between a vector and a non-vector is attempted, then the
non-vector is promoted to a vector; if this is not possible, an exception
is raised. This is handy because it turns a vector-scalar comparison into
a vector-vector comparison with the scalar broadcasted to all vector entries.
For example: ::
(3, 4, 5) > 4 => (3, 4, 5) > (4, 4, 4) => False
2) Comparing Vector entries when these are SymPy expressions
When we compare two symbolic (SymPy expressions) entries, it might not be
possible to determine the truth value of the relation. For example, the
truth value of `3*i < 4*j` cannot be determined (unless some information
about `i` and `j` is available). In some cases, however, the comparison is
feasible; for example, `i + 4 < i` is definitely False. A sufficient condition
for two Vectors to be comparable is that their pair-wise indices are affine
functions of the same variables, with identical coefficient.
If the Vector is instantiated passing the keyword argument ``smart = True``,
some manipulation will be attempted to infer the truth value of a non-trivial
symbolic relation. This increases the cost of the comparison, while potentially
being ineffective, so use it judiciously. By default, ``smart = False``.
Raises
------
TypeError
If two Vectors cannot be compared, e.g. due to incomparable symbolic entries.
"""
def __new__(cls, *items, smart=False):
if not all(is_integer(i) or isinstance(i, Basic) for i in items):
raise TypeError("Illegal Vector element type")
obj = super(Vector, cls).__new__(cls, items)
obj.smart = smart
return obj
def _asvector(relax=False):
def __asvector(func):
def wrapper(self, other):
if not isinstance(other, Vector):
try:
other = Vector(*other)
except TypeError:
# Not iterable
other = Vector(*(as_tuple(other)*len(self)))
if relax is False and len(self) != len(other):
raise TypeError("Cannot operate with Vectors of different rank")
return func(self, other)
return wrapper
return __asvector
def __hash__(self):
return super(Vector, self).__hash__()
@_asvector()
def __add__(self, other):
return Vector(*[i + j for i, j in zip(self, other)], smart=self.smart)
@_asvector()
def __radd__(self, other):
return self + other
@_asvector()
def __sub__(self, other):
return Vector(*[i - j for i, j in zip(self, other)], smart=self.smart)
@_asvector()
def __rsub__(self, other):
return self - other
@_asvector(relax=True)
def __eq__(self, other):
return super(Vector, self).__eq__(other)
@_asvector(relax=True)
def __ne__(self, other):
return super(Vector, self).__ne__(other)
@_asvector()
def __lt__(self, other):
# This might raise an exception if the distance between the i-th entry
# of `self` and `other` isn't integer, but rather a generic expression
# not comparable to 0. However, the implementation is "smart", in the
# sense that it will return as soon as the first two comparable entries
# (i.e., such that their distance is a non-zero integer) are found
for i in self.distance(other):
try:
val = int(i)
if val < 0:
return True
elif val > 0:
return False
except TypeError:
if self.smart:
if (i < 0) == true:
return True
elif (i <= 0) == true:
# If `i` can assume the value 0 in at least one case, then
# definitely `i < 0` is generally False, so __lt__ must
# return False
return False
elif (i >= 0) == true:
return False
raise TypeError("Non-comparable index functions")
return False
@_asvector()
def __gt__(self, other):
return other.__lt__(self)
@_asvector()
def __le__(self, other):
if self.__eq__(other):
return True
# We cannot simply resort to `__lt__` as it might happen that:
# * v0 < v1 --> False
# * v0 == v1 --> False
# But
# * v0 <= v1 --> True
#
# For example, take `v0 = (a + 2)` and `v1 = (2)`; if `a` is attached
# the property that definitely `a >= 0`, then surely `v1 <= v0`, even
# though it can't be assumed anything about `v1 < 0` and `v1 == v0`
for i in self.distance(other):
try:
val = int(i)
if val < 0:
return True
elif val > 0:
return False
except TypeError:
if self.smart:
if (i < 0) == true:
return True
elif (i <= 0) == true:
continue
elif (i > 0) == true:
return False
elif (i >= 0) == true:
# See analogous considerations in __lt__
return False
raise TypeError("Non-comparable index functions")
# Note: unlike `__lt__`, if we end up here, then *it is* <=. For example,
# with `v0` and `v1` as above, we would get here
return True
@_asvector()
def __ge__(self, other):
return other.__le__(self)
def __getitem__(self, key):
ret = super(Vector, self).__getitem__(key)
return Vector(*ret, smart=self.smart) if isinstance(key, slice) else ret
def __repr__(self):
return "(%s)" % ','.join(str(i) for i in self)
@property
def rank(self):
return len(self)
@property
def sum(self):
return sum(self)
@property
def is_constant(self):
return all(is_integer(i) for i in self)
def distance(self, other):
"""
Compute the distance from ``self`` to ``other``.
The distance is a reflexive, transitive, and anti-symmetric relation,
which establishes a total ordering amongst Vectors.
The distance is a function [Vector x Vector --> D]. D is a tuple of length
equal to the Vector ``rank``. The i-th entry of D, D_i, indicates whether
the i-th component of ``self``, self_i, precedes (< 0), equals (== 0), or
succeeds (> 0) the i-th component of ``other``, other_i.
In particular, the *absolute value* of D_i represents the number of
integer points that exist between self_i and sink_i.
Examples
--------
| 3 | | 1 | | 2 |
source = | 2 | , sink = | 4 | , distance => | -2 |
| 1 | | 5 | | -4 |
There are 2, 2, and 4 points between [3-2], [2-4], and [1-5], respectively.
"""
return self - other
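    # Illustrative behaviour (comments only, not in the original source), echoing the
    # class docstring: Vector(3, 4, 5) > 4 broadcasts the scalar to (4, 4, 4) and
    # evaluates to False; Vector(3, 4, 5) >= Vector(3, 4, 4) is True; comparing
    # symbolic entries such as 3*i and 4*j raises TypeError unless smart=True can
    # settle the relation.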
class LabeledVector(Vector):
"""
A Vector that associates a Dimension to each element.
"""
def __new__(cls, items=None):
try:
labels, values = zip(*items)
except (ValueError, TypeError):
labels, values = (), ()
if not all(isinstance(i, Dimension) for i in labels):
raise ValueError("All labels must be of type Dimension, got [%s]"
% ','.join(i.__class__.__name__ for i in labels))
obj = super(LabeledVector, cls).__new__(cls, *values)
obj.labels = labels
return obj
@classmethod
def transpose(cls, *vectors):
"""
Transpose a matrix represented as an iterable of homogeneous LabeledVectors.
"""
if len(vectors) == 0:
return LabeledVector()
if not all(isinstance(v, LabeledVector) for v in vectors):
raise ValueError("All items must be of type LabeledVector, got [%s]"
% ','.join(i.__class__.__name__ for i in vectors))
T = OrderedDict()
for v in vectors:
for l, i in zip(v.labels, v):
T.setdefault(l, []).append(i)
return tuple((l, Vector(*i)) for l, i in T.items())
def __repr__(self):
return "(%s)" % ','.join('%s:%s' % (l, i) for l, i in zip(self.labels, self))
def __hash__(self):
return hash((tuple(self), self.labels))
def __eq__(self, other):
if isinstance(other, LabeledVector) and self.labels != other.labels:
raise TypeError("Cannot compare due to mismatching `labels`")
return super(LabeledVector, self).__eq__(other)
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if isinstance(other, LabeledVector) and self.labels != other.labels:
raise TypeError("Cannot compare due to mismatching `labels`")
return super(LabeledVector, self).__lt__(other)
def __gt__(self, other):
return other.__lt__(self)
def __ge__(self, other):
return self.__eq__(other) or self.__gt__(other)
def __le__(self, other):
return self.__eq__(other) or self.__lt__(other)
def __getitem__(self, index):
if isinstance(index, (slice, int)):
return super(LabeledVector, self).__getitem__(index)
elif isinstance(index, Dimension):
for d in index._defines:
if d in self.labels:
i = self.labels.index(d)
return super(LabeledVector, self).__getitem__(i)
return None
else:
raise TypeError("Indices must be integers, slices, or Dimensions, not %s"
% type(index))
def fromlabel(self, label, v=None):
return self[label] if label in self.labels else v
def items(self):
return zip(self.labels, self)
@memoized_meth
def distance(self, other):
"""
Compute the distance from ``self`` to ``other``.
Parameters
----------
other : LabeledVector
The LabeledVector from which the distance is computed.
"""
if not isinstance(other, LabeledVector):
raise TypeError("Cannot compute distance from obj of type %s", type(other))
if self.labels != other.labels:
raise TypeError("Cannot compute distance due to mismatching `labels`")
return LabeledVector(list(zip(self.labels, self - other)))
# Utility functions
def vmin(*vectors):
"""
Retrieve the minimum out of an iterable of Vectors.
Raises
------
TypeError
If there are two incomparable Vectors.
ValueError
If an empty sequence is supplied
"""
if not all(isinstance(i, Vector) for i in vectors):
raise TypeError("Expected an iterable of Vectors")
if len(vectors) == 0:
raise ValueError("min() arg is an empty sequence")
ret = vectors[0]
for i in vectors[1:]:
if i < ret or i <= ret:
ret = i
return ret
def vmax(*vectors):
"""
Retrieve the maximum out of an iterable of Vectors.
Raises
------
TypeError
If there are two incomparable Vectors.
ValueError
If an empty sequence is supplied
"""
if not all(isinstance(i, Vector) for i in vectors):
raise TypeError("Expected an iterable of Vectors")
if len(vectors) == 0:
        raise ValueError("max() arg is an empty sequence")
ret = vectors[0]
for i in vectors[1:]:
if i > ret or i >= ret:
ret = i
return ret
| 34.409471
| 87
| 0.567393
|
from collections import OrderedDict
from sympy import Basic, true
from devito.tools import as_tuple, is_integer, memoized_meth
from devito.types import Dimension
__all__ = ['Vector', 'LabeledVector', 'vmin', 'vmax']
class Vector(tuple):
def __new__(cls, *items, smart=False):
if not all(is_integer(i) or isinstance(i, Basic) for i in items):
raise TypeError("Illegal Vector element type")
obj = super(Vector, cls).__new__(cls, items)
obj.smart = smart
return obj
def _asvector(relax=False):
def __asvector(func):
def wrapper(self, other):
if not isinstance(other, Vector):
try:
other = Vector(*other)
except TypeError:
other = Vector(*(as_tuple(other)*len(self)))
if relax is False and len(self) != len(other):
raise TypeError("Cannot operate with Vectors of different rank")
return func(self, other)
return wrapper
return __asvector
def __hash__(self):
return super(Vector, self).__hash__()
@_asvector()
def __add__(self, other):
return Vector(*[i + j for i, j in zip(self, other)], smart=self.smart)
@_asvector()
def __radd__(self, other):
return self + other
@_asvector()
def __sub__(self, other):
return Vector(*[i - j for i, j in zip(self, other)], smart=self.smart)
@_asvector()
def __rsub__(self, other):
return self - other
@_asvector(relax=True)
def __eq__(self, other):
return super(Vector, self).__eq__(other)
@_asvector(relax=True)
def __ne__(self, other):
return super(Vector, self).__ne__(other)
@_asvector()
def __lt__(self, other):
# not comparable to 0. However, the implementation is "smart", in the
# sense that it will return as soon as the first two comparable entries
# (i.e., such that their distance is a non-zero integer) are found
for i in self.distance(other):
try:
val = int(i)
if val < 0:
return True
elif val > 0:
return False
except TypeError:
if self.smart:
if (i < 0) == true:
return True
elif (i <= 0) == true:
# If `i` can assume the value 0 in at least one case, then
# definitely `i < 0` is generally False, so __lt__ must
# return False
return False
elif (i >= 0) == true:
return False
raise TypeError("Non-comparable index functions")
return False
@_asvector()
def __gt__(self, other):
return other.__lt__(self)
@_asvector()
def __le__(self, other):
if self.__eq__(other):
return True
# We cannot simply resort to `__lt__` as it might happen that:
# * v0 < v1 --> False
# * v0 == v1 --> False
# But
# * v0 <= v1 --> True
#
# For example, take `v0 = (a + 2)` and `v1 = (2)`; if `a` is attached
# the property that definitely `a >= 0`, then surely `v1 <= v0`, even
# though it can't be assumed anything about `v1 < 0` and `v1 == v0`
for i in self.distance(other):
try:
val = int(i)
if val < 0:
return True
elif val > 0:
return False
except TypeError:
if self.smart:
if (i < 0) == true:
return True
elif (i <= 0) == true:
continue
elif (i > 0) == true:
return False
elif (i >= 0) == true:
return False
raise TypeError("Non-comparable index functions")
return True
@_asvector()
def __ge__(self, other):
return other.__le__(self)
def __getitem__(self, key):
ret = super(Vector, self).__getitem__(key)
return Vector(*ret, smart=self.smart) if isinstance(key, slice) else ret
def __repr__(self):
return "(%s)" % ','.join(str(i) for i in self)
@property
def rank(self):
return len(self)
@property
def sum(self):
return sum(self)
@property
def is_constant(self):
return all(is_integer(i) for i in self)
def distance(self, other):
return self - other
class LabeledVector(Vector):
def __new__(cls, items=None):
try:
labels, values = zip(*items)
except (ValueError, TypeError):
labels, values = (), ()
if not all(isinstance(i, Dimension) for i in labels):
raise ValueError("All labels must be of type Dimension, got [%s]"
% ','.join(i.__class__.__name__ for i in labels))
obj = super(LabeledVector, cls).__new__(cls, *values)
obj.labels = labels
return obj
@classmethod
def transpose(cls, *vectors):
if len(vectors) == 0:
return LabeledVector()
if not all(isinstance(v, LabeledVector) for v in vectors):
raise ValueError("All items must be of type LabeledVector, got [%s]"
% ','.join(i.__class__.__name__ for i in vectors))
T = OrderedDict()
for v in vectors:
for l, i in zip(v.labels, v):
T.setdefault(l, []).append(i)
return tuple((l, Vector(*i)) for l, i in T.items())
def __repr__(self):
return "(%s)" % ','.join('%s:%s' % (l, i) for l, i in zip(self.labels, self))
def __hash__(self):
return hash((tuple(self), self.labels))
def __eq__(self, other):
if isinstance(other, LabeledVector) and self.labels != other.labels:
raise TypeError("Cannot compare due to mismatching `labels`")
return super(LabeledVector, self).__eq__(other)
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if isinstance(other, LabeledVector) and self.labels != other.labels:
raise TypeError("Cannot compare due to mismatching `labels`")
return super(LabeledVector, self).__lt__(other)
def __gt__(self, other):
return other.__lt__(self)
def __ge__(self, other):
return self.__eq__(other) or self.__gt__(other)
def __le__(self, other):
return self.__eq__(other) or self.__lt__(other)
def __getitem__(self, index):
if isinstance(index, (slice, int)):
return super(LabeledVector, self).__getitem__(index)
elif isinstance(index, Dimension):
for d in index._defines:
if d in self.labels:
i = self.labels.index(d)
return super(LabeledVector, self).__getitem__(i)
return None
else:
raise TypeError("Indices must be integers, slices, or Dimensions, not %s"
% type(index))
def fromlabel(self, label, v=None):
return self[label] if label in self.labels else v
def items(self):
return zip(self.labels, self)
@memoized_meth
def distance(self, other):
if not isinstance(other, LabeledVector):
raise TypeError("Cannot compute distance from obj of type %s", type(other))
if self.labels != other.labels:
raise TypeError("Cannot compute distance due to mismatching `labels`")
return LabeledVector(list(zip(self.labels, self - other)))
def vmin(*vectors):
if not all(isinstance(i, Vector) for i in vectors):
raise TypeError("Expected an iterable of Vectors")
if len(vectors) == 0:
raise ValueError("min() arg is an empty sequence")
ret = vectors[0]
for i in vectors[1:]:
if i < ret or i <= ret:
ret = i
return ret
def vmax(*vectors):
if not all(isinstance(i, Vector) for i in vectors):
raise TypeError("Expected an iterable of Vectors")
if len(vectors) == 0:
raise ValueError("min() arg is an empty sequence")
ret = vectors[0]
for i in vectors[1:]:
if i > ret or i >= ret:
ret = i
return ret
| true
| true
|
790744f314f83ace6dfe1f19dfdd13e452a06bb7
| 105
|
py
|
Python
|
models_jittor/utils/__init__.py
|
liuruiyang98/Jittor-MLP
|
b86656b65cf5f18ba9eb760d1f7565ed95e7e96e
|
[
"MIT"
] | 49
|
2021-10-06T11:22:19.000Z
|
2022-03-25T03:01:10.000Z
|
models_jittor/utils/__init__.py
|
liuruiyang98/Jittor-MLP
|
b86656b65cf5f18ba9eb760d1f7565ed95e7e96e
|
[
"MIT"
] | 2
|
2021-11-08T08:06:35.000Z
|
2022-02-18T09:23:07.000Z
|
models_jittor/utils/__init__.py
|
liuruiyang98/Jittor-MLP
|
b86656b65cf5f18ba9eb760d1f7565ed95e7e96e
|
[
"MIT"
] | 7
|
2021-10-10T03:25:27.000Z
|
2022-03-08T10:44:15.000Z
|
from .tools import pair, check_sizes
from .dcn_v2 import deform_conv2d_jt
from .init import trunc_normal_
| 35
| 36
| 0.847619
|
from .tools import pair, check_sizes
from .dcn_v2 import deform_conv2d_jt
from .init import trunc_normal_
| true
| true
|
790744f5e3959abab7371411b9cc5ea16cad805a
| 643
|
py
|
Python
|
test.py
|
gemasphi/alpha-zero-torch
|
ccaf23266c0cc61f4c84294681adc522609d0470
|
[
"MIT"
] | 6
|
2019-11-14T19:16:57.000Z
|
2020-11-08T13:53:30.000Z
|
test.py
|
gemasphi/alpha-zero-torch
|
ccaf23266c0cc61f4c84294681adc522609d0470
|
[
"MIT"
] | 2
|
2020-02-14T20:10:09.000Z
|
2021-12-20T03:43:43.000Z
|
test.py
|
gemasphi/alpha-zero-torch
|
ccaf23266c0cc61f4c84294681adc522609d0470
|
[
"MIT"
] | 2
|
2020-09-02T11:39:01.000Z
|
2021-12-02T22:05:50.000Z
|
from src.NN import NetWrapper
from src.games.Tictactoe import Tictactoe
from src.Player import *
from src.MCTS import MCTS
import yaml
with open("config.yaml", 'r') as f:
config = yaml.safe_load(f)
game = Tictactoe(**config['GAME'])
nn = NetWrapper(game, **config['NN'])
nn.load_model("models/the_bestest_of_models.pt")
nn1 = NetWrapper(game, **config['NN'])
nn1.load_model()
mcts = MCTS(**config['MCTS'])
play_game(game, p1 = AlphaZeroPlayer(nn1, mcts), p2 = HumanPlayer(), print_b = True)
#player_vs_player(game, p1 = AlphaZeroPlayer(nn, mcts), p2 = AlphaZeroPlayer(nn1, mcts), n_games = 100, treshold = 0.5, print_b = False)
| 30.619048
| 139
| 0.710731
|
from src.NN import NetWrapper
from src.games.Tictactoe import Tictactoe
from src.Player import *
from src.MCTS import MCTS
import yaml
with open("config.yaml", 'r') as f:
config = yaml.safe_load(f)
game = Tictactoe(**config['GAME'])
nn = NetWrapper(game, **config['NN'])
nn.load_model("models/the_bestest_of_models.pt")
nn1 = NetWrapper(game, **config['NN'])
nn1.load_model()
mcts = MCTS(**config['MCTS'])
play_game(game, p1 = AlphaZeroPlayer(nn1, mcts), p2 = HumanPlayer(), print_b = True)
| true
| true
|
79074576a4397f66ed22d6acfecda5fe1de1ccf8
| 15,849
|
py
|
Python
|
tensorflow/python/kernel_tests/signal/spectral_ops_test.py
|
EricRemmerswaal/tensorflow
|
141ff27877579c81a213fa113bd1b474c1749aca
|
[
"Apache-2.0"
] | 190,993
|
2015-11-09T13:17:30.000Z
|
2022-03-31T23:05:27.000Z
|
tensorflow/python/kernel_tests/signal/spectral_ops_test.py
|
EricRemmerswaal/tensorflow
|
141ff27877579c81a213fa113bd1b474c1749aca
|
[
"Apache-2.0"
] | 48,461
|
2015-11-09T14:21:11.000Z
|
2022-03-31T23:17:33.000Z
|
tensorflow/python/kernel_tests/signal/spectral_ops_test.py
|
EricRemmerswaal/tensorflow
|
141ff27877579c81a213fa113bd1b474c1749aca
|
[
"Apache-2.0"
] | 104,981
|
2015-11-09T13:40:17.000Z
|
2022-03-31T19:51:54.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for spectral_ops."""
import itertools
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.signal import spectral_ops
from tensorflow.python.ops.signal import window_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class SpectralOpsTest(test.TestCase, parameterized.TestCase):
@staticmethod
def _np_hann_periodic_window(length):
if length == 1:
return np.ones(1)
odd = length % 2
if not odd:
length += 1
window = 0.5 - 0.5 * np.cos(2.0 * np.pi * np.arange(length) / (length - 1))
if not odd:
window = window[:-1]
return window
@staticmethod
def _np_frame(data, window_length, hop_length):
num_frames = 1 + int(np.floor((len(data) - window_length) // hop_length))
shape = (num_frames, window_length)
strides = (data.strides[0] * hop_length, data.strides[0])
return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)
@staticmethod
def _np_stft(data, fft_length, hop_length, window_length):
frames = SpectralOpsTest._np_frame(data, window_length, hop_length)
window = SpectralOpsTest._np_hann_periodic_window(window_length)
return np.fft.rfft(frames * window, fft_length)
@staticmethod
def _np_inverse_stft(stft, fft_length, hop_length, window_length):
frames = np.fft.irfft(stft, fft_length)
    # Pad or truncate frames' inner dimension to window_length.
frames = frames[..., :window_length]
frames = np.pad(frames, [[0, 0]] * (frames.ndim - 1) +
[[0, max(0, window_length - frames.shape[-1])]], "constant")
window = SpectralOpsTest._np_hann_periodic_window(window_length)
return SpectralOpsTest._np_overlap_add(frames * window, hop_length)
@staticmethod
def _np_overlap_add(stft, hop_length):
num_frames, window_length = np.shape(stft)
# Output length will be one complete window, plus another hop_length's
# worth of points for each additional window.
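    # For example, 3 frames of length 8 with hop_length 2 overlap-add into a
    # signal of 8 + 2 * 2 = 12 samples.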
output_length = window_length + (num_frames - 1) * hop_length
output = np.zeros(output_length)
for i in range(num_frames):
output[i * hop_length:i * hop_length + window_length] += stft[i,]
return output
def _compare(self, signal, frame_length, frame_step, fft_length, tol):
actual_stft = spectral_ops.stft(
signal, frame_length, frame_step, fft_length, pad_end=False)
signal_ph = array_ops.placeholder_with_default(signal, shape=signal.shape)
actual_stft_from_ph = spectral_ops.stft(
signal_ph, frame_length, frame_step, fft_length, pad_end=False)
actual_inverse_stft = spectral_ops.inverse_stft(
actual_stft, frame_length, frame_step, fft_length)
actual_stft, actual_stft_from_ph, actual_inverse_stft = self.evaluate(
[actual_stft, actual_stft_from_ph, actual_inverse_stft])
actual_stft_ph = array_ops.placeholder_with_default(
actual_stft, shape=actual_stft.shape)
actual_inverse_stft_from_ph = self.evaluate(
spectral_ops.inverse_stft(
actual_stft_ph, frame_length, frame_step, fft_length))
# Confirm that there is no difference in output when shape/rank is fully
# unknown or known.
self.assertAllClose(actual_stft, actual_stft_from_ph)
self.assertAllClose(actual_inverse_stft, actual_inverse_stft_from_ph)
expected_stft = SpectralOpsTest._np_stft(
signal, fft_length, frame_step, frame_length)
self.assertAllClose(expected_stft, actual_stft, rtol=tol, atol=tol)
expected_inverse_stft = SpectralOpsTest._np_inverse_stft(
expected_stft, fft_length, frame_step, frame_length)
self.assertAllClose(
expected_inverse_stft, actual_inverse_stft, rtol=tol, atol=tol)
def test_shapes(self):
signal = np.zeros((512,)).astype(np.float32)
# If fft_length is not provided, the smallest enclosing power of 2 of
# frame_length (8) is used.
stft = spectral_ops.stft(signal, frame_length=7, frame_step=8,
pad_end=True)
self.assertAllEqual([64, 5], stft.shape.as_list())
self.assertAllEqual([64, 5], self.evaluate(stft).shape)
stft = spectral_ops.stft(signal, frame_length=8, frame_step=8,
pad_end=True)
self.assertAllEqual([64, 5], stft.shape.as_list())
self.assertAllEqual([64, 5], self.evaluate(stft).shape)
stft = spectral_ops.stft(signal, frame_length=8, frame_step=8,
fft_length=16, pad_end=True)
self.assertAllEqual([64, 9], stft.shape.as_list())
self.assertAllEqual([64, 9], self.evaluate(stft).shape)
stft = spectral_ops.stft(signal, frame_length=16, frame_step=8,
fft_length=8, pad_end=True)
self.assertAllEqual([64, 5], stft.shape.as_list())
self.assertAllEqual([64, 5], self.evaluate(stft).shape)
stft = np.zeros((32, 9)).astype(np.complex64)
inverse_stft = spectral_ops.inverse_stft(stft, frame_length=8,
fft_length=16, frame_step=8)
expected_length = (stft.shape[0] - 1) * 8 + 8
self.assertAllEqual([256], inverse_stft.shape.as_list())
self.assertAllEqual([expected_length], self.evaluate(inverse_stft).shape)
@parameterized.parameters(
(512, 64, 32, 64, np.float32, 1e-4),
(512, 64, 32, 64, np.float64, 1e-8),
(512, 64, 64, 64, np.float32, 1e-4),
(512, 64, 64, 64, np.float64, 1e-8),
(512, 72, 64, 64, np.float32, 1e-4),
(512, 72, 64, 64, np.float64, 1e-8),
(512, 64, 25, 64, np.float32, 1e-4),
(512, 64, 25, 64, np.float64, 1e-8),
(512, 25, 15, 36, np.float32, 1e-4),
(512, 25, 15, 36, np.float64, 1e-8),
(123, 23, 5, 42, np.float32, 1e-4),
(123, 23, 5, 42, np.float64, 1e-8))
def test_stft_and_inverse_stft(self, signal_length, frame_length,
frame_step, fft_length, np_rtype, tol):
"""Test that spectral_ops.stft/inverse_stft match a NumPy implementation."""
signal = np.random.random(signal_length).astype(np_rtype)
self._compare(signal, frame_length, frame_step, fft_length, tol)
@parameterized.parameters(
# 87.5% overlap.
(4096, 256, 32, 256, np.float32, 1e-5, 1e-6),
(4096, 256, 32, 256, np.float64, 1e-8, 1e-8),
# 75% overlap.
(4096, 256, 64, 256, np.float32, 1e-5, 1e-6),
(4096, 256, 64, 256, np.float64, 1e-8, 1e-8),
# Odd frame hop.
(4096, 128, 25, 128, np.float32, 1e-3, 1e-6),
(4096, 128, 25, 128, np.float64, 5e-4, 1e-8),
# Odd frame length.
(4096, 127, 32, 128, np.float32, 1e-3, 1e-6),
(4096, 127, 32, 128, np.float64, 1e-3, 1e-8),
# 50% overlap.
(4096, 128, 64, 128, np.float32, 0.4, 1e-6),
(4096, 128, 64, 128, np.float64, 0.4, 1e-8))
def test_stft_round_trip(self, signal_length, frame_length, frame_step,
fft_length, np_rtype, threshold,
corrected_threshold):
# Generate a random white Gaussian signal.
signal = np.random.normal(size=signal_length).astype(np_rtype)
stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length,
pad_end=False)
inverse_stft = spectral_ops.inverse_stft(stft, frame_length, frame_step,
fft_length)
inverse_stft_corrected = spectral_ops.inverse_stft(
stft, frame_length, frame_step, fft_length,
window_fn=spectral_ops.inverse_stft_window_fn(frame_step))
inverse_stft, inverse_stft_corrected = self.evaluate(
[inverse_stft, inverse_stft_corrected])
# Truncate signal to the size of inverse stft.
signal = signal[:inverse_stft.shape[0]]
# Ignore the frame_length samples at either edge.
signal = signal[frame_length:-frame_length]
inverse_stft = inverse_stft[frame_length:-frame_length]
inverse_stft_corrected = inverse_stft_corrected[
frame_length:-frame_length]
# Check that the inverse and original signal are close up to a scale
# factor.
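    # (The uncorrected inverse applies a Hann window in both the forward and
    # inverse transforms, so the reconstruction carries the overlap-add gain of
    # the squared window; dividing each signal by its mean absolute value
    # removes that scale before comparing.)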
inverse_stft_scaled = inverse_stft / np.mean(np.abs(inverse_stft))
signal_scaled = signal / np.mean(np.abs(signal))
self.assertLess(np.std(inverse_stft_scaled - signal_scaled), threshold)
# Check that the inverse with correction and original signal are close.
self.assertLess(np.std(inverse_stft_corrected - signal),
corrected_threshold)
@parameterized.parameters(
(256, 32),
(256, 64),
(128, 25),
(127, 32),
(128, 64))
def test_inverse_stft_window_fn(self, frame_length, frame_step):
"""Test that inverse_stft_window_fn has unit gain at each window phase."""
hann_window = window_ops.hann_window(frame_length, dtype=dtypes.float32)
inverse_window_fn = spectral_ops.inverse_stft_window_fn(frame_step)
inverse_window = inverse_window_fn(frame_length, dtype=dtypes.float32)
hann_window, inverse_window = self.evaluate([hann_window, inverse_window])
# Expect unit gain at each phase of the window.
product_window = hann_window * inverse_window
for i in range(frame_step):
self.assertAllClose(1.0, np.sum(product_window[i::frame_step]))
@parameterized.parameters((256, 64), (128, 32))
def test_inverse_stft_window_fn_special_case(self, frame_length, frame_step):
"""Test inverse_stft_window_fn in special overlap = 3/4 case."""
# Cases in which frame_length is an integer multiple of 4 * frame_step are
# special because they allow exact reproduction of the waveform with a
# squared Hann window (Hann window in both forward and reverse transforms).
# In the case where frame_length = 4 * frame_step, that combination
# produces a constant gain of 1.5, and so the corrected window will be the
# Hann window / 1.5.
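    # (With frame_step = frame_length / 4, the four shifted copies of the
    # squared periodic Hann window sum to 4 * (0.25 + 0.125) = 1.5 at every
    # sample, since the cos and cos^2 cross terms cancel over the four
    # quarter-period offsets.)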
hann_window = window_ops.hann_window(frame_length, dtype=dtypes.float32)
inverse_window_fn = spectral_ops.inverse_stft_window_fn(frame_step)
inverse_window = inverse_window_fn(frame_length, dtype=dtypes.float32)
self.assertAllClose(hann_window, inverse_window * 1.5)
@staticmethod
def _compute_stft_gradient(signal, frame_length=32, frame_step=16,
fft_length=32):
"""Computes the gradient of the STFT with respect to `signal`."""
stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length)
magnitude_stft = math_ops.abs(stft)
loss = math_ops.reduce_sum(magnitude_stft)
return gradients_impl.gradients([loss], [signal])[0]
def test_gradients(self):
"""Test that spectral_ops.stft has a working gradient."""
# TODO(rjryan): Update gradient tests for Eager.
if context.executing_eagerly():
return
with self.session() as sess:
signal_length = 512
# An all-zero signal has all zero gradients with respect to the sum of the
# magnitude STFT.
empty_signal = array_ops.zeros([signal_length], dtype=dtypes.float32)
empty_signal_gradient = sess.run(
self._compute_stft_gradient(empty_signal))
self.assertTrue((empty_signal_gradient == 0.0).all())
# A sinusoid will have non-zero components of its gradient with respect to
# the sum of the magnitude STFT.
sinusoid = math_ops.sin(
2 * np.pi * math_ops.linspace(0.0, 1.0, signal_length))
sinusoid_gradient = self.evaluate(self._compute_stft_gradient(sinusoid))
self.assertFalse((sinusoid_gradient == 0.0).all())
@parameterized.parameters(
(64, 16, 8, 16, np.float32, 2e-3, 5e-4),
(64, 16, 8, 16, np.float64, 1e-8, 1e-8),
(64, 16, 16, 16, np.float32, 2e-3, 5e-4),
(64, 16, 16, 16, np.float64, 1e-8, 1e-8),
(64, 16, 7, 16, np.float32, 2e-3, 5e-4),
(64, 16, 7, 16, np.float64, 1e-8, 1e-8),
(64, 7, 4, 9, np.float32, 2e-3, 5e-4),
(64, 7, 4, 9, np.float64, 1e-8, 1e-8),
(29, 5, 1, 10, np.float32, 2e-3, 5e-4),
(29, 5, 1, 10, np.float64, 1e-8, 1e-8))
@test.disable_with_predicate(
pred=test.is_built_with_rocm,
skip_message="On ROCm, this fails with mismatches at some locations "
"(possibly due to peculiarities of rocFFT - investigate)")
def test_gradients_numerical(self, signal_length, frame_length, frame_step,
fft_length, np_rtype, forward_tol, backward_tol):
# TODO(rjryan): Investigate why STFT gradient error is so high.
signal = np.random.rand(signal_length).astype(np_rtype) * 2 - 1
def forward(signal):
return spectral_ops.stft(
signal, frame_length, frame_step, fft_length, pad_end=False)
((f_jacob_t,), (f_jacob_n,)) = gradient_checker_v2.compute_gradient(
forward, [signal])
self.assertAllClose(f_jacob_t, f_jacob_n,
rtol=forward_tol, atol=forward_tol)
def backward(stft):
return spectral_ops.inverse_stft(
stft, frame_length, frame_step, fft_length)
stft = forward(signal)
((b_jacob_t,), (b_jacob_n,)) = gradient_checker_v2.compute_gradient(
backward, [stft])
self.assertAllClose(b_jacob_t, b_jacob_n,
rtol=backward_tol, atol=backward_tol)
@parameterized.parameters(
itertools.product(
(4000,),
(256,),
(np.float32, np.float64),
("ortho", None),
("vorbis", "kaiser_bessel_derived", None),
(False, True)))
def test_mdct_round_trip(self, signal_length, frame_length, np_rtype,
norm, window_type, pad_end):
if np_rtype == np.float32:
tol = 1e-5
else:
if window_type == "kaiser_bessel_derived":
tol = 1e-6
else:
tol = 1e-8
# Generate a random white Gaussian signal.
signal = np.random.normal(size=signal_length).astype(np_rtype)
if window_type == "vorbis":
window_fn = window_ops.vorbis_window
elif window_type == "kaiser_bessel_derived":
window_fn = window_ops.kaiser_bessel_derived_window
elif window_type is None:
window_fn = None
mdct = spectral_ops.mdct(signal, frame_length, norm=norm,
window_fn=window_fn, pad_end=pad_end)
inverse_mdct = spectral_ops.inverse_mdct(mdct, norm=norm,
window_fn=window_fn)
inverse_mdct = self.evaluate(inverse_mdct)
# Truncate signal and inverse_mdct to their minimum length.
min_length = np.minimum(signal.shape[0], inverse_mdct.shape[0])
# Ignore the half_len samples at either edge.
half_len = frame_length // 2
signal = signal[half_len:min_length-half_len]
inverse_mdct = inverse_mdct[half_len:min_length-half_len]
# Check that the inverse and original signal are close.
self.assertAllClose(inverse_mdct, signal, atol=tol, rtol=tol)
if __name__ == "__main__":
test.main()
| 43.661157
| 80
| 0.678087
|
import itertools
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.signal import spectral_ops
from tensorflow.python.ops.signal import window_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class SpectralOpsTest(test.TestCase, parameterized.TestCase):
@staticmethod
def _np_hann_periodic_window(length):
if length == 1:
return np.ones(1)
odd = length % 2
if not odd:
length += 1
window = 0.5 - 0.5 * np.cos(2.0 * np.pi * np.arange(length) / (length - 1))
if not odd:
window = window[:-1]
return window
@staticmethod
def _np_frame(data, window_length, hop_length):
num_frames = 1 + int(np.floor((len(data) - window_length) // hop_length))
shape = (num_frames, window_length)
strides = (data.strides[0] * hop_length, data.strides[0])
return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)
@staticmethod
def _np_stft(data, fft_length, hop_length, window_length):
frames = SpectralOpsTest._np_frame(data, window_length, hop_length)
window = SpectralOpsTest._np_hann_periodic_window(window_length)
return np.fft.rfft(frames * window, fft_length)
@staticmethod
def _np_inverse_stft(stft, fft_length, hop_length, window_length):
frames = np.fft.irfft(stft, fft_length)
frames = frames[..., :window_length]
frames = np.pad(frames, [[0, 0]] * (frames.ndim - 1) +
[[0, max(0, window_length - frames.shape[-1])]], "constant")
window = SpectralOpsTest._np_hann_periodic_window(window_length)
return SpectralOpsTest._np_overlap_add(frames * window, hop_length)
@staticmethod
def _np_overlap_add(stft, hop_length):
num_frames, window_length = np.shape(stft)
# Output length will be one complete window, plus another hop_length's
output_length = window_length + (num_frames - 1) * hop_length
output = np.zeros(output_length)
for i in range(num_frames):
output[i * hop_length:i * hop_length + window_length] += stft[i,]
return output
def _compare(self, signal, frame_length, frame_step, fft_length, tol):
actual_stft = spectral_ops.stft(
signal, frame_length, frame_step, fft_length, pad_end=False)
signal_ph = array_ops.placeholder_with_default(signal, shape=signal.shape)
actual_stft_from_ph = spectral_ops.stft(
signal_ph, frame_length, frame_step, fft_length, pad_end=False)
actual_inverse_stft = spectral_ops.inverse_stft(
actual_stft, frame_length, frame_step, fft_length)
actual_stft, actual_stft_from_ph, actual_inverse_stft = self.evaluate(
[actual_stft, actual_stft_from_ph, actual_inverse_stft])
actual_stft_ph = array_ops.placeholder_with_default(
actual_stft, shape=actual_stft.shape)
actual_inverse_stft_from_ph = self.evaluate(
spectral_ops.inverse_stft(
actual_stft_ph, frame_length, frame_step, fft_length))
self.assertAllClose(actual_stft, actual_stft_from_ph)
self.assertAllClose(actual_inverse_stft, actual_inverse_stft_from_ph)
expected_stft = SpectralOpsTest._np_stft(
signal, fft_length, frame_step, frame_length)
self.assertAllClose(expected_stft, actual_stft, rtol=tol, atol=tol)
expected_inverse_stft = SpectralOpsTest._np_inverse_stft(
expected_stft, fft_length, frame_step, frame_length)
self.assertAllClose(
expected_inverse_stft, actual_inverse_stft, rtol=tol, atol=tol)
def test_shapes(self):
signal = np.zeros((512,)).astype(np.float32)
stft = spectral_ops.stft(signal, frame_length=7, frame_step=8,
pad_end=True)
self.assertAllEqual([64, 5], stft.shape.as_list())
self.assertAllEqual([64, 5], self.evaluate(stft).shape)
stft = spectral_ops.stft(signal, frame_length=8, frame_step=8,
pad_end=True)
self.assertAllEqual([64, 5], stft.shape.as_list())
self.assertAllEqual([64, 5], self.evaluate(stft).shape)
stft = spectral_ops.stft(signal, frame_length=8, frame_step=8,
fft_length=16, pad_end=True)
self.assertAllEqual([64, 9], stft.shape.as_list())
self.assertAllEqual([64, 9], self.evaluate(stft).shape)
stft = spectral_ops.stft(signal, frame_length=16, frame_step=8,
fft_length=8, pad_end=True)
self.assertAllEqual([64, 5], stft.shape.as_list())
self.assertAllEqual([64, 5], self.evaluate(stft).shape)
stft = np.zeros((32, 9)).astype(np.complex64)
inverse_stft = spectral_ops.inverse_stft(stft, frame_length=8,
fft_length=16, frame_step=8)
expected_length = (stft.shape[0] - 1) * 8 + 8
self.assertAllEqual([256], inverse_stft.shape.as_list())
self.assertAllEqual([expected_length], self.evaluate(inverse_stft).shape)
@parameterized.parameters(
(512, 64, 32, 64, np.float32, 1e-4),
(512, 64, 32, 64, np.float64, 1e-8),
(512, 64, 64, 64, np.float32, 1e-4),
(512, 64, 64, 64, np.float64, 1e-8),
(512, 72, 64, 64, np.float32, 1e-4),
(512, 72, 64, 64, np.float64, 1e-8),
(512, 64, 25, 64, np.float32, 1e-4),
(512, 64, 25, 64, np.float64, 1e-8),
(512, 25, 15, 36, np.float32, 1e-4),
(512, 25, 15, 36, np.float64, 1e-8),
(123, 23, 5, 42, np.float32, 1e-4),
(123, 23, 5, 42, np.float64, 1e-8))
def test_stft_and_inverse_stft(self, signal_length, frame_length,
frame_step, fft_length, np_rtype, tol):
signal = np.random.random(signal_length).astype(np_rtype)
self._compare(signal, frame_length, frame_step, fft_length, tol)
@parameterized.parameters(
(4096, 256, 32, 256, np.float32, 1e-5, 1e-6),
(4096, 256, 32, 256, np.float64, 1e-8, 1e-8),
(4096, 256, 64, 256, np.float32, 1e-5, 1e-6),
(4096, 256, 64, 256, np.float64, 1e-8, 1e-8),
(4096, 128, 25, 128, np.float32, 1e-3, 1e-6),
(4096, 128, 25, 128, np.float64, 5e-4, 1e-8),
(4096, 127, 32, 128, np.float32, 1e-3, 1e-6),
(4096, 127, 32, 128, np.float64, 1e-3, 1e-8),
(4096, 128, 64, 128, np.float32, 0.4, 1e-6),
(4096, 128, 64, 128, np.float64, 0.4, 1e-8))
def test_stft_round_trip(self, signal_length, frame_length, frame_step,
fft_length, np_rtype, threshold,
corrected_threshold):
signal = np.random.normal(size=signal_length).astype(np_rtype)
stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length,
pad_end=False)
inverse_stft = spectral_ops.inverse_stft(stft, frame_length, frame_step,
fft_length)
inverse_stft_corrected = spectral_ops.inverse_stft(
stft, frame_length, frame_step, fft_length,
window_fn=spectral_ops.inverse_stft_window_fn(frame_step))
inverse_stft, inverse_stft_corrected = self.evaluate(
[inverse_stft, inverse_stft_corrected])
signal = signal[:inverse_stft.shape[0]]
signal = signal[frame_length:-frame_length]
inverse_stft = inverse_stft[frame_length:-frame_length]
inverse_stft_corrected = inverse_stft_corrected[
frame_length:-frame_length]
inverse_stft_scaled = inverse_stft / np.mean(np.abs(inverse_stft))
signal_scaled = signal / np.mean(np.abs(signal))
self.assertLess(np.std(inverse_stft_scaled - signal_scaled), threshold)
self.assertLess(np.std(inverse_stft_corrected - signal),
corrected_threshold)
@parameterized.parameters(
(256, 32),
(256, 64),
(128, 25),
(127, 32),
(128, 64))
def test_inverse_stft_window_fn(self, frame_length, frame_step):
hann_window = window_ops.hann_window(frame_length, dtype=dtypes.float32)
inverse_window_fn = spectral_ops.inverse_stft_window_fn(frame_step)
inverse_window = inverse_window_fn(frame_length, dtype=dtypes.float32)
hann_window, inverse_window = self.evaluate([hann_window, inverse_window])
product_window = hann_window * inverse_window
for i in range(frame_step):
self.assertAllClose(1.0, np.sum(product_window[i::frame_step]))
@parameterized.parameters((256, 64), (128, 32))
def test_inverse_stft_window_fn_special_case(self, frame_length, frame_step):
hann_window = window_ops.hann_window(frame_length, dtype=dtypes.float32)
inverse_window_fn = spectral_ops.inverse_stft_window_fn(frame_step)
inverse_window = inverse_window_fn(frame_length, dtype=dtypes.float32)
self.assertAllClose(hann_window, inverse_window * 1.5)
@staticmethod
def _compute_stft_gradient(signal, frame_length=32, frame_step=16,
fft_length=32):
stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length)
magnitude_stft = math_ops.abs(stft)
loss = math_ops.reduce_sum(magnitude_stft)
return gradients_impl.gradients([loss], [signal])[0]
def test_gradients(self):
if context.executing_eagerly():
return
with self.session() as sess:
signal_length = 512
empty_signal = array_ops.zeros([signal_length], dtype=dtypes.float32)
empty_signal_gradient = sess.run(
self._compute_stft_gradient(empty_signal))
self.assertTrue((empty_signal_gradient == 0.0).all())
sinusoid = math_ops.sin(
2 * np.pi * math_ops.linspace(0.0, 1.0, signal_length))
sinusoid_gradient = self.evaluate(self._compute_stft_gradient(sinusoid))
self.assertFalse((sinusoid_gradient == 0.0).all())
@parameterized.parameters(
(64, 16, 8, 16, np.float32, 2e-3, 5e-4),
(64, 16, 8, 16, np.float64, 1e-8, 1e-8),
(64, 16, 16, 16, np.float32, 2e-3, 5e-4),
(64, 16, 16, 16, np.float64, 1e-8, 1e-8),
(64, 16, 7, 16, np.float32, 2e-3, 5e-4),
(64, 16, 7, 16, np.float64, 1e-8, 1e-8),
(64, 7, 4, 9, np.float32, 2e-3, 5e-4),
(64, 7, 4, 9, np.float64, 1e-8, 1e-8),
(29, 5, 1, 10, np.float32, 2e-3, 5e-4),
(29, 5, 1, 10, np.float64, 1e-8, 1e-8))
@test.disable_with_predicate(
pred=test.is_built_with_rocm,
skip_message="On ROCm, this fails with mismatches at some locations "
"(possibly due to peculiarities of rocFFT - investigate)")
def test_gradients_numerical(self, signal_length, frame_length, frame_step,
fft_length, np_rtype, forward_tol, backward_tol):
signal = np.random.rand(signal_length).astype(np_rtype) * 2 - 1
def forward(signal):
return spectral_ops.stft(
signal, frame_length, frame_step, fft_length, pad_end=False)
((f_jacob_t,), (f_jacob_n,)) = gradient_checker_v2.compute_gradient(
forward, [signal])
self.assertAllClose(f_jacob_t, f_jacob_n,
rtol=forward_tol, atol=forward_tol)
def backward(stft):
return spectral_ops.inverse_stft(
stft, frame_length, frame_step, fft_length)
stft = forward(signal)
((b_jacob_t,), (b_jacob_n,)) = gradient_checker_v2.compute_gradient(
backward, [stft])
self.assertAllClose(b_jacob_t, b_jacob_n,
rtol=backward_tol, atol=backward_tol)
@parameterized.parameters(
itertools.product(
(4000,),
(256,),
(np.float32, np.float64),
("ortho", None),
("vorbis", "kaiser_bessel_derived", None),
(False, True)))
def test_mdct_round_trip(self, signal_length, frame_length, np_rtype,
norm, window_type, pad_end):
if np_rtype == np.float32:
tol = 1e-5
else:
if window_type == "kaiser_bessel_derived":
tol = 1e-6
else:
tol = 1e-8
signal = np.random.normal(size=signal_length).astype(np_rtype)
if window_type == "vorbis":
window_fn = window_ops.vorbis_window
elif window_type == "kaiser_bessel_derived":
window_fn = window_ops.kaiser_bessel_derived_window
elif window_type is None:
window_fn = None
mdct = spectral_ops.mdct(signal, frame_length, norm=norm,
window_fn=window_fn, pad_end=pad_end)
inverse_mdct = spectral_ops.inverse_mdct(mdct, norm=norm,
window_fn=window_fn)
inverse_mdct = self.evaluate(inverse_mdct)
min_length = np.minimum(signal.shape[0], inverse_mdct.shape[0])
half_len = frame_length // 2
signal = signal[half_len:min_length-half_len]
inverse_mdct = inverse_mdct[half_len:min_length-half_len]
self.assertAllClose(inverse_mdct, signal, atol=tol, rtol=tol)
if __name__ == "__main__":
test.main()
| true
| true
|
7907463be0399381dbb251da2399a40b35f47313
| 986
|
py
|
Python
|
{{cookiecutter.app_name}}/{{cookiecutter.app_name}}/app.py
|
opatua/cookiecutter-flask-api
|
67e5a37ee2b8ca32823ac2f8051bab6653b3b44e
|
[
"MIT"
] | 2
|
2019-04-06T05:08:15.000Z
|
2019-04-06T19:23:44.000Z
|
{{cookiecutter.app_name}}/{{cookiecutter.app_name}}/app.py
|
opatua/cookiecutter-flask-api
|
67e5a37ee2b8ca32823ac2f8051bab6653b3b44e
|
[
"MIT"
] | null | null | null |
{{cookiecutter.app_name}}/{{cookiecutter.app_name}}/app.py
|
opatua/cookiecutter-flask-api
|
67e5a37ee2b8ca32823ac2f8051bab6653b3b44e
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask_restful import Api
from flask_cors import CORS
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from {{cookiecutter.app_name}}.config import app_config
from {{cookiecutter.app_name}}.models import db, bcrypt
from {{cookiecutter.app_name}}.resources import Login, Register
from {{cookiecutter.app_name}}.schemas import ma
def create_app(env_name):
"""
Create app
"""
    # app initialization
app = Flask(__name__)
CORS(app)
app.config.from_object(app_config[env_name])
# initializing bcrypt and db
bcrypt.init_app(app)
db.init_app(app)
ma.init_app(app)
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
# Route
api = Api(app)
# user endpoint
api.add_resource(Login, '/auth/login')
api.add_resource(Register, '/auth/register')
return app
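# Illustrative usage (assumption: app_config exposes an environment key such
# as 'development'):
#   app = create_app('development')
#   app.run()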
| 22.930233
| 63
| 0.704868
|
from flask import Flask
from flask_restful import Api
from flask_cors import CORS
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from {{cookiecutter.app_name}}.config import app_config
from {{cookiecutter.app_name}}.models import db, bcrypt
from {{cookiecutter.app_name}}.resources import Login, Register
from {{cookiecutter.app_name}}.schemas import ma
def create_app(env_name):
"""
Create app
"""
app = Flask(__name__)
CORS(app)
app.config.from_object(app_config[env_name])
bcrypt.init_app(app)
db.init_app(app)
ma.init_app(app)
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
api = Api(app)
api.add_resource(Login, '/auth/login')
api.add_resource(Register, '/auth/register')
return app
| false
| true
|
7907478bf8476f28d143ed5e90764ecaf0e5451f
| 22,182
|
py
|
Python
|
parlai/agents/seq2seq/seq2seq.py
|
lifelongeek/KBKAIST_Chatbot
|
4766e6ee61a10e3719b7608c5777430ddfd277f9
|
[
"BSD-3-Clause"
] | 2
|
2017-10-06T09:56:49.000Z
|
2017-10-06T09:57:03.000Z
|
parlai/agents/seq2seq/seq2seq.py
|
gmkim90/KBKAIST_Chatbot
|
4766e6ee61a10e3719b7608c5777430ddfd277f9
|
[
"BSD-3-Clause"
] | null | null | null |
parlai/agents/seq2seq/seq2seq.py
|
gmkim90/KBKAIST_Chatbot
|
4766e6ee61a10e3719b7608c5777430ddfd277f9
|
[
"BSD-3-Clause"
] | 2
|
2017-10-06T09:57:04.000Z
|
2018-11-08T13:45:47.000Z
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.agents import Agent
from parlai.core.dict import DictionaryAgent
from torch.autograd import Variable
from torch import optim
import torch.nn as nn
import torch
import os
import random
import pdb
class Seq2seqAgent(Agent):
"""Simple agent which uses an RNN to process incoming text observations.
The RNN generates a vector which is used to represent the input text,
conditioning on the context to generate an output token-by-token.
For more information, see Sequence to Sequence Learning with Neural Networks
`(Sutskever et al. 2014) <https://arxiv.org/abs/1409.3215>`_.
"""
@staticmethod
def add_cmdline_args(argparser):
"""Add command-line arguments specifically for this agent."""
DictionaryAgent.add_cmdline_args(argparser)
agent = argparser.add_argument_group('Seq2Seq Arguments')
agent.add_argument('-hs', '--hiddensize', type=int, default=128,
help='size of the hidden layers and embeddings')
agent.add_argument('-nl', '--numlayers', type=int, default=2,
help='number of hidden layers')
agent.add_argument('-lr', '--learningrate', type=float, default=0.001, help='learning rate')
agent.add_argument('-dr', '--dropout', type=float, default=0.1,
help='dropout rate')
# agent.add_argument('-att', '--attention', type='bool', default=False,
# help='whether to use attention over the context during decoding')
# agent.add_argument('-bi', '--bidirectional', type='bool', default=False,
# help='whether to encode the context with a bidirectional RNN')
agent.add_argument('--no-cuda', action='store_true', default=False,
help='disable GPUs even if available')
agent.add_argument('--gpu', type=int, default=-1,
help='which GPU device to use')
agent.add_argument('-rc', '--rank-candidates', type='bool', default=False,
help='rank candidates if available. this is done by computing the' +
' mean score per token for each candidate and selecting the ' +
'highest scoring one.')
def __init__(self, opt, shared=None):
# initialize defaults first
super().__init__(opt, shared)
if not shared:
# this is not a shared instance of this class, so do full
# initialization. if shared is set, only set up shared members.
# check for cuda
self.use_cuda = not opt.get('no_cuda') and torch.cuda.is_available()
if self.use_cuda:
print('[ Using CUDA ]')
torch.cuda.set_device(opt['gpu'])
if opt.get('model_file') and os.path.isfile(opt['model_file']):
# load model parameters if available
print('Loading existing model params from ' + opt['model_file'])
new_opt, self.states = self.load(opt['model_file'])
# override options with stored ones
opt = self.override_opt(new_opt)
self.dict = DictionaryAgent(opt)
self.id = 'Seq2Seq'
# we use START markers to start our output
self.START = self.dict.start_token
self.START_TENSOR = torch.LongTensor(self.dict.parse(self.START))
# we use END markers to end our output
self.END = self.dict.end_token
self.END_TENSOR = torch.LongTensor(self.dict.parse(self.END))
# get index of null token from dictionary (probably 0)
self.NULL_IDX = self.dict.txt2vec(self.dict.null_token)[0]
# logFile
#self.logFile = opt['logFile']
# store important params directly
hsz = opt['hiddensize']
self.hidden_size = hsz
self.num_layers = opt['numlayers']
self.learning_rate = opt['learningrate']
self.rank = opt['rank_candidates']
self.longest_label = 1
# set up tensors
self.zeros = torch.zeros(self.num_layers, 1, hsz)
self.xs = torch.LongTensor(1, 1)
self.ys = torch.LongTensor(1, 1)
self.cands = torch.LongTensor(1, 1, 1)
self.cand_scores = torch.FloatTensor(1)
self.cand_lengths = torch.LongTensor(1)
# set up modules
self.criterion = nn.NLLLoss()
# lookup table stores word embeddings
            self.lt = nn.Embedding(len(self.dict), hsz, padding_idx=self.NULL_IDX)  # FILL HERE (one plausible completion)
# encoder captures the input text
            self.encoder = nn.GRU(hsz, hsz, self.num_layers)  # FILL HERE (one plausible completion)
# decoder produces our output states
            self.decoder = nn.GRU(hsz, hsz, self.num_layers)  # FILL HERE (one plausible completion)
# linear layer helps us produce outputs from final decoder state
            self.h2o = nn.Linear(hsz, len(self.dict))  # FILL HERE (one plausible completion)
            # dropout on the linear layer helps us generalize
self.dropout = nn.Dropout(opt['dropout'])
# softmax maps output scores to probabilities
self.softmax = nn.LogSoftmax()
# set up optims for each module
lr = opt['learningrate']
"""
self.optims = {
'lt': optim.SGD(self.lt.parameters(), lr=lr),
'encoder': optim.SGD(self.encoder.parameters(), lr=lr),
'decoder': optim.SGD(self.decoder.parameters(), lr=lr),
'h2o': optim.SGD(self.h2o.parameters(), lr=lr),
}
"""
self.optims = {
'lt': optim.Adam(self.lt.parameters(), lr=lr),
'encoder': optim.Adam(self.encoder.parameters(), lr=lr),
'decoder': optim.Adam(self.decoder.parameters(), lr=lr),
'h2o': optim.Adam(self.h2o.parameters(), lr=lr),
}
if hasattr(self, 'states'):
# set loaded states if applicable
self.set_states(self.states)
if self.use_cuda:
self.cuda()
self.episode_done = True
def override_opt(self, new_opt):
"""Print out each added key and each overriden key.
Only override args specific to the model.
"""
model_args = {'hiddensize', 'numlayers'}
for k, v in new_opt.items():
if k not in model_args:
# skip non-model args
continue
if k not in self.opt:
print('Adding new option [ {k}: {v} ]'.format(k=k, v=v))
elif self.opt[k] != v:
print('Overriding option [ {k}: {old} => {v}]'.format(
k=k, old=self.opt[k], v=v))
self.opt[k] = v
return self.opt
def parse(self, text):
return self.dict.txt2vec(text)
def v2t(self, vec):
return self.dict.vec2txt(vec)
def cuda(self):
self.START_TENSOR = self.START_TENSOR.cuda(async=True)
self.END_TENSOR = self.END_TENSOR.cuda(async=True)
self.zeros = self.zeros.cuda(async=True)
self.xs = self.xs.cuda(async=True)
self.ys = self.ys.cuda(async=True)
self.cands = self.cands.cuda(async=True)
self.cand_scores = self.cand_scores.cuda(async=True)
self.cand_lengths = self.cand_lengths.cuda(async=True)
self.criterion.cuda()
self.lt.cuda()
self.encoder.cuda()
self.decoder.cuda()
self.h2o.cuda()
self.dropout.cuda()
self.softmax.cuda()
def hidden_to_idx(self, hidden, dropout=False):
"""Converts hidden state vectors into indices into the dictionary."""
if hidden.size(0) > 1:
raise RuntimeError('bad dimensions of tensor:', hidden)
hidden = hidden.squeeze(0)
        scores = self.h2o(hidden)  # FILL HERE (plausible: project to vocabulary scores)
        if dropout:
            scores = self.dropout(scores)  # FILL HERE (plausible)
        scores = self.softmax(scores)  # FILL HERE (plausible: log-probabilities)
_max_score, idx = scores.max(1)
return idx, scores
def zero_grad(self):
for optimizer in self.optims.values():
optimizer.zero_grad()
def update_params(self):
for optimizer in self.optims.values():
optimizer.step()
def reset(self):
self.observation = None
self.episode_done = True
def observe(self, observation):
# shallow copy observation (deep copy can be expensive)
observation = observation.copy()
if not self.episode_done:
# if the last example wasn't the end of an episode, then we need to
# recall what was said in that example
prev_dialogue = self.observation['text']
observation['text'] = prev_dialogue + '\n' + observation['text']
self.observation = observation
self.episode_done = observation['episode_done']
return observation
def predict(self, xs, ys=None, cands=None):
"""Produce a prediction from our model. Update the model using the
targets if available.
"""
batchsize = len(xs)
text_cand_inds = None
# first encode context
#xes = self.lt(xs).t()
xes = self.lt(xs).transpose(0,1) # Ken
if self.zeros.size(1) != batchsize:
self.zeros.resize_(self.num_layers, batchsize, self.hidden_size).fill_(0)
h0 = Variable(self.zeros)
_output, hn = self.encoder(xes, h0)
        # next we use START as an input to kick off our decoder
x = Variable(self.START_TENSOR)
xe = self.lt(x).unsqueeze(1)
xes = xe.expand(xe.size(0), batchsize, xe.size(2))
# list of output tokens for each example in the batch
output_lines = [[] for _ in range(batchsize)]
if ys is not None:
# update the model based on the labels
self.zero_grad()
loss = 0
# keep track of longest label we've ever seen
self.longest_label = max(self.longest_label, ys.size(1))
for i in range(ys.size(1)):
output, hn = self.decoder(xes, hn)
preds, scores = self.hidden_to_idx(output, dropout=True)
y = ys.select(1, i)
loss += self.criterion(scores, y)
# use the true token as the next input instead of predicted
# this produces a biased prediction but better training
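                # (i.e., teacher forcing: the decoder is conditioned on the
                # ground-truth history rather than on its own samples)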
xes = self.lt(y).unsqueeze(0)
for b in range(batchsize):
# convert the output scores to tokens
token = self.v2t([preds.data[b][0]])
#token = self.v2t([preds.data[b]]) # Ken
output_lines[b].append(token)
loss.backward()
#pdb.set_trace()
self.update_params()
if random.random() < 0.01:
# sometimes output a prediction for debugging
self.nWord = ys.data.nonzero().size()[0]
self.nll_per_word = loss.data[0]/self.nWord
print('prediction:', ' '.join(output_lines[0]),
'\nlabel:', self.dict.vec2txt(ys.data[0]))
else:
# just produce a prediction without training the model
done = [False for _ in range(batchsize)]
total_done = 0
max_len = 0
if cands:
# score each candidate separately
# cands are exs_with_cands x cands_per_ex x words_per_cand
# cview is total_cands x words_per_cand
cview = cands.view(-1, cands.size(2))
cands_xes = xe.expand(xe.size(0), cview.size(0), xe.size(2))
sz = hn.size()
cands_hn = (
hn.view(sz[0], sz[1], 1, sz[2])
.expand(sz[0], sz[1], cands.size(1), sz[2])
.contiguous()
.view(sz[0], -1, sz[2])
)
cand_scores = Variable(
self.cand_scores.resize_(cview.size(0)).fill_(0))
cand_lengths = Variable(
self.cand_lengths.resize_(cview.size(0)).fill_(0))
for i in range(cview.size(1)):
output, cands_hn = self.decoder(cands_xes, cands_hn)
preds, scores = self.hidden_to_idx(output, dropout=False)
cs = cview.select(1, i)
non_nulls = cs.ne(self.NULL_IDX)
cand_lengths += non_nulls.long()
score_per_cand = torch.gather(scores, 1, cs.unsqueeze(1))
cand_scores += score_per_cand.squeeze() * non_nulls.float()
cands_xes = self.lt(cs).unsqueeze(0)
# set empty scores to -1, so when divided by 0 they become -inf
cand_scores -= cand_lengths.eq(0).float()
# average the scores per token
cand_scores /= cand_lengths.float()
cand_scores = cand_scores.view(cands.size(0), cands.size(1))
srtd_scores, text_cand_inds = cand_scores.sort(1, True)
text_cand_inds = text_cand_inds.data
# now, generate a response from scratch
while(total_done < batchsize) and max_len < self.longest_label:
# keep producing tokens until we hit END or max length for each
# example in the batch
#pdb.set_trace()
output, hn = self.decoder(xes, hn)
preds, scores = self.hidden_to_idx(output, dropout=False)
#xes = self.lt(preds.t()) # original
"""
if (self.opt['mode'] == 'train'):
xes = torch.unsqueeze(self.lt(preds),0) # torch.unsqueeze makes error when prediction
elif(self.opt['mode'] == 'interactive'):
xes = self.lt(preds)
"""
xes = torch.unsqueeze(self.lt(preds),0) # KB-KAIST
max_len += 1
for b in range(batchsize):
if not done[b]:
# only add more tokens for examples that aren't done yet
#pdb.set_trace()
#token = self.v2t(preds.data[b])
token = self.v2t([preds.data[b]]) # KB-KAIST
if token == self.END:
# if we produced END, we're done
done[b] = True
total_done += 1
else:
output_lines[b].append(token)
if (random.random() < 0.1 and self.opt['mode'] == 'train'):
# sometimes output a prediction for debugging
print('prediction:', ' '.join(output_lines[0]))
return output_lines, text_cand_inds
def batchify(self, observations):
"""Convert a list of observations into input & target tensors."""
# valid examples
exs = [ex for ex in observations if 'text' in ex]
# the indices of the valid (non-empty) tensors
valid_inds = [i for i, ex in enumerate(observations) if 'text' in ex]
# set up the input tensors
batchsize = len(exs)
# tokenize the text
xs = None
if batchsize > 0:
parsed = [self.parse(ex['text']) for ex in exs]
min_x_len = min([len(x) for x in parsed])
max_x_len = max([len(x) for x in parsed])
parsed_x_len = min(min_x_len + 12, max_x_len, 48)
            # shrink xs to limit batch computation
parsed = [x[:parsed_x_len] for x in parsed]
xs = torch.LongTensor(batchsize, parsed_x_len).fill_(0)
# pack the data to the right side of the tensor for this model
for i, x in enumerate(parsed):
offset = parsed_x_len - len(x)
for j, idx in enumerate(x):
xs[i][j + offset] = idx
if self.use_cuda:
# copy to gpu
self.xs.resize_(xs.size())
self.xs.copy_(xs, async=True)
xs = Variable(self.xs)
else:
xs = Variable(xs)
# set up the target tensors
ys = None
if batchsize > 0 and any(['labels' in ex for ex in exs]):
# randomly select one of the labels to update on, if multiple
# append END to each label
labels = [random.choice(ex.get('labels', [''])) + ' ' + self.END for ex in exs]
parsed = [self.parse(y) for y in labels]
min_y_len = min(len(y) for y in parsed)
max_y_len = max(len(y) for y in parsed)
            # shrink ys to limit batch computation
parsed_y_len = min(min_y_len + 6, max_y_len)
parsed = [y[:parsed_y_len] for y in parsed]
ys = torch.LongTensor(batchsize, parsed_y_len).fill_(0)
for i, y in enumerate(parsed):
for j, idx in enumerate(y):
ys[i][j] = idx
if self.use_cuda:
# copy to gpu
self.ys.resize_(ys.size())
self.ys.copy_(ys, async=True)
ys = Variable(self.ys)
else:
ys = Variable(ys)
# set up candidates
cands = None
valid_cands = None
if ys is None and self.rank:
# only do ranking when no targets available and ranking flag set
parsed = []
valid_cands = []
for i in valid_inds:
if 'label_candidates' in observations[i]:
# each candidate tuple is a pair of the parsed version and
# the original full string
cs = list(observations[i]['label_candidates'])
parsed.append([self.parse(c) for c in cs])
valid_cands.append((i, cs))
if len(parsed) > 0:
# TODO: store lengths of cands separately, so don't have zero
# padding for varying number of cands per example
# found cands, pack them into tensor
max_c_len = max(max(len(c) for c in cs) for cs in parsed)
max_c_cnt = max(len(cs) for cs in parsed)
cands = torch.LongTensor(len(parsed), max_c_cnt, max_c_len).fill_(0)
for i, cs in enumerate(parsed):
for j, c in enumerate(cs):
for k, idx in enumerate(c):
cands[i][j][k] = idx
if self.use_cuda:
# copy to gpu
self.cands.resize_(cands.size())
self.cands.copy_(cands, async=True)
cands = Variable(self.cands)
else:
cands = Variable(cands)
return xs, ys, valid_inds, cands, valid_cands
def batch_act(self, observations):
batchsize = len(observations)
# initialize a table of replies with this agent's id
batch_reply = [{'id': self.getID()} for _ in range(batchsize)]
# convert the observations into batches of inputs and targets
# valid_inds tells us the indices of all valid examples
# e.g. for input [{}, {'text': 'hello'}, {}, {}], valid_inds is [1]
# since the other three elements had no 'text' field
xs, ys, valid_inds, cands, valid_cands = self.batchify(observations)
if xs is None:
# no valid examples, just return the empty responses we set up
return batch_reply
# produce predictions either way, but use the targets if available
predictions, text_cand_inds = self.predict(xs, ys, cands)
for i in range(len(predictions)):
# map the predictions back to non-empty examples in the batch
# we join with spaces since we produce tokens one at a time
curr = batch_reply[valid_inds[i]]
curr['text'] = ' '.join(c for c in predictions[i] if c != self.END
and c != self.dict.null_token)
if text_cand_inds is not None:
for i in range(len(valid_cands)):
order = text_cand_inds[i]
batch_idx, curr_cands = valid_cands[i]
curr = batch_reply[batch_idx]
curr['text_candidates'] = [curr_cands[idx] for idx in order
if idx < len(curr_cands)]
return batch_reply
def act(self):
# call batch_act with this batch of one
return self.batch_act([self.observation])[0]
def save(self, path=None):
path = self.opt.get('model_file', None) if path is None else path
if path and hasattr(self, 'lt'):
model = {}
model['lt'] = self.lt.state_dict()
model['encoder'] = self.encoder.state_dict()
model['decoder'] = self.decoder.state_dict()
model['h2o'] = self.h2o.state_dict()
model['longest_label'] = self.longest_label
model['opt'] = self.opt
with open(path, 'wb') as write:
torch.save(model, write)
def shutdown(self):
"""Save the state of the model when shutdown."""
path = self.opt.get('model_file', None)
if path is not None:
self.save(path + '.shutdown_state')
super().shutdown()
def load(self, path):
"""Return opt and model states."""
with open(path, 'rb') as read:
model = torch.load(read)
return model['opt'], model
def set_states(self, states):
"""Set the state dicts of the modules from saved states."""
#pdb.set_trace()
self.lt.load_state_dict(states['lt'])
self.encoder.load_state_dict(states['encoder'])
self.decoder.load_state_dict(states['decoder'])
self.h2o.load_state_dict(states['h2o'])
self.longest_label = states['longest_label']
| 42.251429
| 105
| 0.552069
|
from parlai.core.agents import Agent
from parlai.core.dict import DictionaryAgent
from torch.autograd import Variable
from torch import optim
import torch.nn as nn
import torch
import os
import random
import pdb
class Seq2seqAgent(Agent):
"""Simple agent which uses an RNN to process incoming text observations.
The RNN generates a vector which is used to represent the input text,
conditioning on the context to generate an output token-by-token.
For more information, see Sequence to Sequence Learning with Neural Networks
`(Sutskever et al. 2014) <https://arxiv.org/abs/1409.3215>`_.
"""
@staticmethod
def add_cmdline_args(argparser):
"""Add command-line arguments specifically for this agent."""
DictionaryAgent.add_cmdline_args(argparser)
agent = argparser.add_argument_group('Seq2Seq Arguments')
agent.add_argument('-hs', '--hiddensize', type=int, default=128,
help='size of the hidden layers and embeddings')
agent.add_argument('-nl', '--numlayers', type=int, default=2,
help='number of hidden layers')
agent.add_argument('-lr', '--learningrate', type=float, default=0.001, help='learning rate')
agent.add_argument('-dr', '--dropout', type=float, default=0.1,
help='dropout rate')
agent.add_argument('--no-cuda', action='store_true', default=False,
help='disable GPUs even if available')
agent.add_argument('--gpu', type=int, default=-1,
help='which GPU device to use')
agent.add_argument('-rc', '--rank-candidates', type='bool', default=False,
help='rank candidates if available. this is done by computing the' +
' mean score per token for each candidate and selecting the ' +
'highest scoring one.')
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
if not shared:
self.use_cuda = not opt.get('no_cuda') and torch.cuda.is_available()
if self.use_cuda:
print('[ Using CUDA ]')
torch.cuda.set_device(opt['gpu'])
if opt.get('model_file') and os.path.isfile(opt['model_file']):
print('Loading existing model params from ' + opt['model_file'])
new_opt, self.states = self.load(opt['model_file'])
opt = self.override_opt(new_opt)
self.dict = DictionaryAgent(opt)
self.id = 'Seq2Seq'
self.START = self.dict.start_token
self.START_TENSOR = torch.LongTensor(self.dict.parse(self.START))
self.END = self.dict.end_token
self.END_TENSOR = torch.LongTensor(self.dict.parse(self.END))
self.NULL_IDX = self.dict.txt2vec(self.dict.null_token)[0]
hsz = opt['hiddensize']
self.hidden_size = hsz
self.num_layers = opt['numlayers']
self.learning_rate = opt['learningrate']
self.rank = opt['rank_candidates']
self.longest_label = 1
self.zeros = torch.zeros(self.num_layers, 1, hsz)
self.xs = torch.LongTensor(1, 1)
self.ys = torch.LongTensor(1, 1)
self.cands = torch.LongTensor(1, 1, 1)
self.cand_scores = torch.FloatTensor(1)
self.cand_lengths = torch.LongTensor(1)
self.criterion = nn.NLLLoss()
            # plausible completions of the original "FILL HERE" placeholders
            self.lt = nn.Embedding(len(self.dict), hsz, padding_idx=self.NULL_IDX)
            self.encoder = nn.GRU(hsz, hsz, self.num_layers)
            self.decoder = nn.GRU(hsz, hsz, self.num_layers)
            self.h2o = nn.Linear(hsz, len(self.dict))
self.dropout = nn.Dropout(opt['dropout'])
self.softmax = nn.LogSoftmax()
lr = opt['learningrate']
"""
self.optims = {
'lt': optim.SGD(self.lt.parameters(), lr=lr),
'encoder': optim.SGD(self.encoder.parameters(), lr=lr),
'decoder': optim.SGD(self.decoder.parameters(), lr=lr),
'h2o': optim.SGD(self.h2o.parameters(), lr=lr),
}
"""
self.optims = {
'lt': optim.Adam(self.lt.parameters(), lr=lr),
'encoder': optim.Adam(self.encoder.parameters(), lr=lr),
'decoder': optim.Adam(self.decoder.parameters(), lr=lr),
'h2o': optim.Adam(self.h2o.parameters(), lr=lr),
}
if hasattr(self, 'states'):
self.set_states(self.states)
if self.use_cuda:
self.cuda()
self.episode_done = True
def override_opt(self, new_opt):
"""Print out each added key and each overriden key.
Only override args specific to the model.
"""
model_args = {'hiddensize', 'numlayers'}
for k, v in new_opt.items():
if k not in model_args:
continue
if k not in self.opt:
print('Adding new option [ {k}: {v} ]'.format(k=k, v=v))
elif self.opt[k] != v:
print('Overriding option [ {k}: {old} => {v}]'.format(
k=k, old=self.opt[k], v=v))
self.opt[k] = v
return self.opt
def parse(self, text):
return self.dict.txt2vec(text)
def v2t(self, vec):
return self.dict.vec2txt(vec)
def cuda(self):
self.START_TENSOR = self.START_TENSOR.cuda(async=True)
self.END_TENSOR = self.END_TENSOR.cuda(async=True)
self.zeros = self.zeros.cuda(async=True)
self.xs = self.xs.cuda(async=True)
self.ys = self.ys.cuda(async=True)
self.cands = self.cands.cuda(async=True)
self.cand_scores = self.cand_scores.cuda(async=True)
self.cand_lengths = self.cand_lengths.cuda(async=True)
self.criterion.cuda()
self.lt.cuda()
self.encoder.cuda()
self.decoder.cuda()
self.h2o.cuda()
self.dropout.cuda()
self.softmax.cuda()
def hidden_to_idx(self, hidden, dropout=False):
"""Converts hidden state vectors into indices into the dictionary."""
if hidden.size(0) > 1:
raise RuntimeError('bad dimensions of tensor:', hidden)
hidden = hidden.squeeze(0)
        scores = self.h2o(hidden)  # plausible completion of the stripped placeholder
        if dropout:
            scores = self.dropout(scores)
        scores = self.softmax(scores)
_max_score, idx = scores.max(1)
return idx, scores
def zero_grad(self):
for optimizer in self.optims.values():
optimizer.zero_grad()
def update_params(self):
for optimizer in self.optims.values():
optimizer.step()
def reset(self):
self.observation = None
self.episode_done = True
def observe(self, observation):
observation = observation.copy()
if not self.episode_done:
# recall what was said in that example
prev_dialogue = self.observation['text']
observation['text'] = prev_dialogue + '\n' + observation['text']
self.observation = observation
self.episode_done = observation['episode_done']
return observation
def predict(self, xs, ys=None, cands=None):
"""Produce a prediction from our model. Update the model using the
targets if available.
"""
batchsize = len(xs)
text_cand_inds = None
# first encode context
#xes = self.lt(xs).t()
xes = self.lt(xs).transpose(0,1) # Ken
if self.zeros.size(1) != batchsize:
self.zeros.resize_(self.num_layers, batchsize, self.hidden_size).fill_(0)
h0 = Variable(self.zeros)
_output, hn = self.encoder(xes, h0)
        # next we use START as an input to kick off our decoder
x = Variable(self.START_TENSOR)
xe = self.lt(x).unsqueeze(1)
xes = xe.expand(xe.size(0), batchsize, xe.size(2))
# list of output tokens for each example in the batch
output_lines = [[] for _ in range(batchsize)]
if ys is not None:
# update the model based on the labels
self.zero_grad()
loss = 0
# keep track of longest label we've ever seen
self.longest_label = max(self.longest_label, ys.size(1))
for i in range(ys.size(1)):
output, hn = self.decoder(xes, hn)
preds, scores = self.hidden_to_idx(output, dropout=True)
y = ys.select(1, i)
loss += self.criterion(scores, y)
xes = self.lt(y).unsqueeze(0)
for b in range(batchsize):
token = self.v2t([preds.data[b][0]])
output_lines[b].append(token)
loss.backward()
self.update_params()
if random.random() < 0.01:
self.nWord = ys.data.nonzero().size()[0]
self.nll_per_word = loss.data[0]/self.nWord
print('prediction:', ' '.join(output_lines[0]),
'\nlabel:', self.dict.vec2txt(ys.data[0]))
else:
done = [False for _ in range(batchsize)]
total_done = 0
max_len = 0
if cands:
cview = cands.view(-1, cands.size(2))
cands_xes = xe.expand(xe.size(0), cview.size(0), xe.size(2))
sz = hn.size()
cands_hn = (
hn.view(sz[0], sz[1], 1, sz[2])
.expand(sz[0], sz[1], cands.size(1), sz[2])
.contiguous()
.view(sz[0], -1, sz[2])
)
cand_scores = Variable(
self.cand_scores.resize_(cview.size(0)).fill_(0))
cand_lengths = Variable(
self.cand_lengths.resize_(cview.size(0)).fill_(0))
for i in range(cview.size(1)):
output, cands_hn = self.decoder(cands_xes, cands_hn)
preds, scores = self.hidden_to_idx(output, dropout=False)
cs = cview.select(1, i)
non_nulls = cs.ne(self.NULL_IDX)
cand_lengths += non_nulls.long()
score_per_cand = torch.gather(scores, 1, cs.unsqueeze(1))
cand_scores += score_per_cand.squeeze() * non_nulls.float()
cands_xes = self.lt(cs).unsqueeze(0)
cand_scores -= cand_lengths.eq(0).float()
cand_scores /= cand_lengths.float()
cand_scores = cand_scores.view(cands.size(0), cands.size(1))
srtd_scores, text_cand_inds = cand_scores.sort(1, True)
text_cand_inds = text_cand_inds.data
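            # greedy decoding: keep emitting tokens until every example in the
            # batch has produced END or we reach the longest label length seen so far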
            while total_done < batchsize and max_len < self.longest_label:
output, hn = self.decoder(xes, hn)
preds, scores = self.hidden_to_idx(output, dropout=False)
"""
if (self.opt['mode'] == 'train'):
xes = torch.unsqueeze(self.lt(preds),0) # torch.unsqueeze makes error when prediction
elif(self.opt['mode'] == 'interactive'):
xes = self.lt(preds)
"""
xes = torch.unsqueeze(self.lt(preds),0)
max_len += 1
for b in range(batchsize):
if not done[b]:
#pdb.set_trace()
#token = self.v2t(preds.data[b])
token = self.v2t([preds.data[b]]) # KB-KAIST
if token == self.END:
# if we produced END, we're done
done[b] = True
total_done += 1
else:
output_lines[b].append(token)
if (random.random() < 0.1 and self.opt['mode'] == 'train'):
print('prediction:', ' '.join(output_lines[0]))
return output_lines, text_cand_inds
def batchify(self, observations):
"""Convert a list of observations into input & target tensors."""
exs = [ex for ex in observations if 'text' in ex]
valid_inds = [i for i, ex in enumerate(observations) if 'text' in ex]
batchsize = len(exs)
xs = None
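        # tokenize the input text and pack it into a LongTensor, left-padding
        # shorter examples with zeros so every row has length parsed_x_len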
if batchsize > 0:
parsed = [self.parse(ex['text']) for ex in exs]
min_x_len = min([len(x) for x in parsed])
max_x_len = max([len(x) for x in parsed])
parsed_x_len = min(min_x_len + 12, max_x_len, 48)
parsed = [x[:parsed_x_len] for x in parsed]
xs = torch.LongTensor(batchsize, parsed_x_len).fill_(0)
for i, x in enumerate(parsed):
offset = parsed_x_len - len(x)
for j, idx in enumerate(x):
xs[i][j + offset] = idx
if self.use_cuda:
self.xs.resize_(xs.size())
self.xs.copy_(xs, async=True)
xs = Variable(self.xs)
else:
xs = Variable(xs)
ys = None
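        # build the target tensor from the labels (when available), appending END
        # to each label and truncating/padding to a common length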
if batchsize > 0 and any(['labels' in ex for ex in exs]):
labels = [random.choice(ex.get('labels', [''])) + ' ' + self.END for ex in exs]
parsed = [self.parse(y) for y in labels]
min_y_len = min(len(y) for y in parsed)
max_y_len = max(len(y) for y in parsed)
parsed_y_len = min(min_y_len + 6, max_y_len)
parsed = [y[:parsed_y_len] for y in parsed]
ys = torch.LongTensor(batchsize, parsed_y_len).fill_(0)
for i, y in enumerate(parsed):
for j, idx in enumerate(y):
ys[i][j] = idx
if self.use_cuda:
self.ys.resize_(ys.size())
self.ys.copy_(ys, async=True)
ys = Variable(self.ys)
else:
ys = Variable(ys)
cands = None
valid_cands = None
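        # set up candidate tensors only when ranking: no labels were given and
        # candidate ranking is enabled for this agent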
if ys is None and self.rank:
parsed = []
valid_cands = []
for i in valid_inds:
if 'label_candidates' in observations[i]:
cs = list(observations[i]['label_candidates'])
parsed.append([self.parse(c) for c in cs])
valid_cands.append((i, cs))
if len(parsed) > 0:
# padding for varying number of cands per example
# found cands, pack them into tensor
max_c_len = max(max(len(c) for c in cs) for cs in parsed)
max_c_cnt = max(len(cs) for cs in parsed)
cands = torch.LongTensor(len(parsed), max_c_cnt, max_c_len).fill_(0)
for i, cs in enumerate(parsed):
for j, c in enumerate(cs):
for k, idx in enumerate(c):
cands[i][j][k] = idx
if self.use_cuda:
# copy to gpu
self.cands.resize_(cands.size())
self.cands.copy_(cands, async=True)
cands = Variable(self.cands)
else:
cands = Variable(cands)
return xs, ys, valid_inds, cands, valid_cands
def batch_act(self, observations):
batchsize = len(observations)
# initialize a table of replies with this agent's id
batch_reply = [{'id': self.getID()} for _ in range(batchsize)]
xs, ys, valid_inds, cands, valid_cands = self.batchify(observations)
if xs is None:
return batch_reply
predictions, text_cand_inds = self.predict(xs, ys, cands)
for i in range(len(predictions)):
curr = batch_reply[valid_inds[i]]
curr['text'] = ' '.join(c for c in predictions[i] if c != self.END
and c != self.dict.null_token)
if text_cand_inds is not None:
for i in range(len(valid_cands)):
order = text_cand_inds[i]
batch_idx, curr_cands = valid_cands[i]
curr = batch_reply[batch_idx]
curr['text_candidates'] = [curr_cands[idx] for idx in order
if idx < len(curr_cands)]
return batch_reply
def act(self):
return self.batch_act([self.observation])[0]
def save(self, path=None):
path = self.opt.get('model_file', None) if path is None else path
if path and hasattr(self, 'lt'):
model = {}
model['lt'] = self.lt.state_dict()
model['encoder'] = self.encoder.state_dict()
model['decoder'] = self.decoder.state_dict()
model['h2o'] = self.h2o.state_dict()
model['longest_label'] = self.longest_label
model['opt'] = self.opt
with open(path, 'wb') as write:
torch.save(model, write)
def shutdown(self):
"""Save the state of the model when shutdown."""
path = self.opt.get('model_file', None)
if path is not None:
self.save(path + '.shutdown_state')
super().shutdown()
def load(self, path):
"""Return opt and model states."""
with open(path, 'rb') as read:
model = torch.load(read)
return model['opt'], model
def set_states(self, states):
"""Set the state dicts of the modules from saved states."""
self.lt.load_state_dict(states['lt'])
self.encoder.load_state_dict(states['encoder'])
self.decoder.load_state_dict(states['decoder'])
self.h2o.load_state_dict(states['h2o'])
self.longest_label = states['longest_label']
| false
| true
|
790747fa2be676727fad64cf03f3280c33d9e9f9
| 364
|
py
|
Python
|
examples/get_channel_spline_coefficients.py
|
lento234/pyJHTDB
|
9d525b790037456839ce82a88a086faabf034c67
|
[
"Apache-2.0"
] | 55
|
2015-04-10T14:49:01.000Z
|
2022-03-28T17:07:20.000Z
|
examples/get_channel_spline_coefficients.py
|
joelguerrero/pyJHTDB
|
4050fb49010eb6b27776e5c2c0fe0cab765eefb1
|
[
"Apache-2.0"
] | 26
|
2015-03-18T16:44:37.000Z
|
2022-02-23T09:08:15.000Z
|
examples/get_channel_spline_coefficients.py
|
joelguerrero/pyJHTDB
|
4050fb49010eb6b27776e5c2c0fe0cab765eefb1
|
[
"Apache-2.0"
] | 36
|
2015-03-19T19:10:14.000Z
|
2022-03-30T09:28:58.000Z
|
import pyJHTDB
# M1Q4
ii = pyJHTDB.interpolator.spline_interpolator(pyJHTDB.dbinfo.channel5200)
ii.write_coefficients()
# M2Q8
ii = pyJHTDB.interpolator.spline_interpolator(pyJHTDB.dbinfo.channel5200, m = 2, n = 3)
ii.write_coefficients()
# M2Q14
ii = pyJHTDB.interpolator.spline_interpolator(pyJHTDB.dbinfo.channel5200, m = 2, n = 6)
ii.write_coefficients()
| 22.75
| 87
| 0.785714
|
import pyJHTDB
ii = pyJHTDB.interpolator.spline_interpolator(pyJHTDB.dbinfo.channel5200)
ii.write_coefficients()
ii = pyJHTDB.interpolator.spline_interpolator(pyJHTDB.dbinfo.channel5200, m = 2, n = 3)
ii.write_coefficients()
ii = pyJHTDB.interpolator.spline_interpolator(pyJHTDB.dbinfo.channel5200, m = 2, n = 6)
ii.write_coefficients()
| true
| true
|
7907484ec8138862371c5292a11543726c194ffc
| 538
|
py
|
Python
|
Mesmer/manage.py
|
pythongiant/Mesmer
|
dcaafa49721d00893f9ca5eba1bfff94f6c7b96c
|
[
"Apache-2.0"
] | 1
|
2019-04-12T12:25:06.000Z
|
2019-04-12T12:25:06.000Z
|
Mesmer/manage.py
|
pythongiant/Mesmer
|
dcaafa49721d00893f9ca5eba1bfff94f6c7b96c
|
[
"Apache-2.0"
] | 15
|
2018-11-27T13:56:16.000Z
|
2018-11-30T17:35:41.000Z
|
Mesmer/manage.py
|
pythongiant/Mesmer
|
dcaafa49721d00893f9ca5eba1bfff94f6c7b96c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Mesmer.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 33.625
| 73
| 0.685874
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Mesmer.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| true
| true
|
7907486c7c98dc36d5cc5fd939e4c95bfe2eb5b6
| 36,111
|
py
|
Python
|
pandas/tests/frame/test_reshape.py
|
jess010/pandas
|
9872d6757e5117dce070981141cee562f675694e
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/tests/frame/test_reshape.py
|
jess010/pandas
|
9872d6757e5117dce070981141cee562f675694e
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/tests/frame/test_reshape.py
|
jess010/pandas
|
9872d6757e5117dce070981141cee562f675694e
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2
|
2019-03-08T19:59:05.000Z
|
2020-09-27T03:18:37.000Z
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from warnings import catch_warnings
from datetime import datetime
import itertools
import pytest
from numpy.random import randn
from numpy import nan
import numpy as np
from pandas.compat import u
from pandas import (DataFrame, Index, Series, MultiIndex, date_range,
Timedelta, Period)
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameReshape(TestData):
def test_pivot(self):
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data)
pivoted = frame.pivot(
index='index', columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
tm.assert_frame_equal(pivoted, expected)
# name tracking
assert pivoted.index.name == 'index'
assert pivoted.columns.name == 'columns'
# don't specify values
pivoted = frame.pivot(index='index', columns='columns')
assert pivoted.index.name == 'index'
assert pivoted.columns.names == (None, 'columns')
with catch_warnings(record=True):
# pivot multiple columns
wp = tm.makePanel()
lp = wp.to_frame()
df = lp.reset_index()
tm.assert_frame_equal(df.pivot('major', 'minor'), lp.unstack())
def test_pivot_duplicates(self):
data = DataFrame({'a': ['bar', 'bar', 'foo', 'foo', 'foo'],
'b': ['one', 'two', 'one', 'one', 'two'],
'c': [1., 2., 3., 3., 4.]})
with tm.assert_raises_regex(ValueError, 'duplicate entries'):
data.pivot('a', 'b', 'c')
def test_pivot_empty(self):
df = DataFrame({}, columns=['a', 'b', 'c'])
result = df.pivot('a', 'b', 'c')
expected = DataFrame({})
tm.assert_frame_equal(result, expected, check_names=False)
def test_pivot_integer_bug(self):
df = DataFrame(data=[("A", "1", "A1"), ("B", "2", "B2")])
result = df.pivot(index=1, columns=0, values=2)
repr(result)
tm.assert_index_equal(result.columns, Index(['A', 'B'], name=0))
def test_pivot_index_none(self):
# gh-3962
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data).set_index('index')
result = frame.pivot(columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
assert_frame_equal(result, expected)
# omit values
result = frame.pivot(columns='columns')
expected.columns = pd.MultiIndex.from_tuples([('values', 'One'),
('values', 'Two')],
names=[None, 'columns'])
expected.index.name = 'index'
tm.assert_frame_equal(result, expected, check_names=False)
assert result.index.name == 'index'
assert result.columns.names == (None, 'columns')
expected.columns = expected.columns.droplevel(0)
result = frame.pivot(columns='columns', values='values')
expected.columns.name = 'columns'
tm.assert_frame_equal(result, expected)
def test_stack_unstack(self):
df = self.frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({'foo': stacked, 'bar': stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
assert_frame_equal(unstacked, df)
assert_frame_equal(unstacked_df['bar'], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
assert_frame_equal(unstacked_cols.T, df)
assert_frame_equal(unstacked_cols_df['bar'].T, df)
def test_stack_mixed_level(self):
# GH 18310
levels = [range(3), [3, 'a', 'b'], [1, 2]]
# flat columns:
df = DataFrame(1, index=levels[0], columns=levels[1])
result = df.stack()
expected = Series(1, index=MultiIndex.from_product(levels[:2]))
assert_series_equal(result, expected)
# MultiIndex columns:
df = DataFrame(1, index=levels[0],
columns=MultiIndex.from_product(levels[1:]))
result = df.stack(1)
expected = DataFrame(1, index=MultiIndex.from_product([levels[0],
levels[2]]),
columns=levels[1])
assert_frame_equal(result, expected)
# as above, but used labels in level are actually of homogeneous type
result = df[['a', 'b']].stack(1)
expected = expected[['a', 'b']]
assert_frame_equal(result, expected)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack(fill_value=-1)
expected = DataFrame({'a': [1, -1, 5], 'b': [2, 4, -1]},
index=['x', 'y', 'z'], dtype=np.int16)
assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame({'a': [1, 0.5, 5], 'b': [2, 4, 0.5]},
index=['x', 'y', 'z'], dtype=np.float)
assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame({'x': ['a', 'a', 'b'],
'y': ['j', 'k', 'j'],
'z': [0, 1, 2],
'w': [0, 1, 2]}).set_index(['x', 'y', 'z'])
unstacked = df.unstack(['x', 'y'], fill_value=0)
key = ('w', 'b', 'j')
expected = unstacked[key]
result = pd.Series([0, 0, 2], index=unstacked.index, name=key)
assert_series_equal(result, expected)
stacked = unstacked.stack(['x', 'y'])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
assert_frame_equal(result, df)
# From a series
s = df['w']
result = s.unstack(['x', 'y'], fill_value=0)
expected = unstacked['w']
assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list('AB'), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
# From a mixed type dataframe
df['A'] = df['A'].astype(np.int16)
df['B'] = df['B'].astype(np.float64)
result = df.unstack(fill_value=-1)
expected['A'] = expected['A'].astype(np.int16)
expected['B'] = expected['B'].astype(np.float64)
assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.float)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = pd.date_range('2012-01-01', periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [dv[0], pd.NaT, dv[3]],
'b': [dv[1], dv[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame({'a': [dv[0], dv[0], dv[3]],
'b': [dv[1], dv[2], dv[0]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [td[0], pd.NaT, td[3]],
'b': [td[1], td[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame({'a': [td[0], td[1], td[3]],
'b': [td[1], td[2], td[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [Period('2012-01'), Period('2012-02'), Period('2012-03'),
Period('2012-04')]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [periods[0], None, periods[3]],
'b': [periods[1], periods[2], None]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame({'a': [periods[0], periods[1], periods[3]],
'b': [periods[1], periods[2], periods[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = pd.Series(['a', 'b', 'c', 'a'], dtype='category')
data.index = pd.MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame({'a': pd.Categorical(list('axa'),
categories=list('abc')),
'b': pd.Categorical(list('bcx'),
categories=list('abc'))},
index=list('xyz'))
assert_frame_equal(result, expected)
# Fill with non-category results in NaN entries similar to above
result = data.unstack(fill_value='d')
assert_frame_equal(result, expected)
# Fill with category value replaces missing values as expected
result = data.unstack(fill_value='c')
expected = DataFrame({'a': pd.Categorical(list('aca'),
categories=list('abc')),
'b': pd.Categorical(list('bcc'),
categories=list('abc'))},
index=list('xyz'))
assert_frame_equal(result, expected)
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = pd.DataFrame(dict(state=['IL', 'MI', 'NC'],
index=['a', 'b', 'c'],
some_categories=pd.Series(['a', 'b', 'c']
).astype('category'),
A=np.random.rand(3),
B=1,
C='foo',
D=pd.Timestamp('20010102'),
E=pd.Series([1.0, 50.0, 100.0]
).astype('float32'),
F=pd.Series([3.0, 4.0, 5.0]).astype('float64'),
G=False,
H=pd.Series([1, 200, 923442], dtype='int8')))
def unstack_and_compare(df, column_name):
unstacked1 = df.unstack([column_name])
unstacked2 = df.unstack(column_name)
assert_frame_equal(unstacked1, unstacked2)
df1 = df.set_index(['state', 'index'])
unstack_and_compare(df1, 'index')
df1 = df.set_index(['state', 'some_categories'])
unstack_and_compare(df1, 'some_categories')
df1 = df.set_index(['F', 'C'])
unstack_and_compare(df1, 'F')
df1 = df.set_index(['G', 'B', 'state'])
unstack_and_compare(df1, 'B')
df1 = df.set_index(['E', 'A'])
unstack_and_compare(df1, 'E')
df1 = df.set_index(['state', 'index'])
s = df1['A']
unstack_and_compare(s, 'index')
def test_stack_ints(self):
columns = MultiIndex.from_tuples(list(itertools.product(range(3),
repeat=3)))
df = DataFrame(np.random.randn(30, 27), columns=columns)
assert_frame_equal(df.stack(level=[1, 2]),
df.stack(level=1).stack(level=1))
assert_frame_equal(df.stack(level=[-2, -1]),
df.stack(level=1).stack(level=1))
df_named = df.copy()
df_named.columns.set_names(range(3), inplace=True)
assert_frame_equal(df_named.stack(level=[1, 2]),
df_named.stack(level=1).stack(level=1))
def test_stack_mixed_levels(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(randn(4, 4), columns=columns)
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
# GH #8584: Need to check that stacking works when a number
# is passed that is both a level name and in the range of
# the level numbers
df2 = df.copy()
df2.columns.names = ['exp', 'animal', 1]
assert_frame_equal(df2.stack(level=['animal', 1]),
animal_hair_stacked, check_names=False)
assert_frame_equal(df2.stack(level=['exp', 1]),
exp_hair_stacked, check_names=False)
# When mixed types are passed and the ints are not level
# names, raise
pytest.raises(ValueError, df2.stack, level=['animal', 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
df3 = df.copy()
df3.columns.names = ['exp', 'animal', 0]
assert_frame_equal(df3.stack(level=['animal', 0]),
animal_hair_stacked, check_names=False)
def test_stack_int_level_names(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(randn(4, 4), columns=columns)
exp_animal_stacked = df.stack(level=['exp', 'animal'])
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
df2 = df.copy()
df2.columns.names = [0, 1, 2]
assert_frame_equal(df2.stack(level=[1, 2]), animal_hair_stacked,
check_names=False)
assert_frame_equal(df2.stack(level=[0, 1]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df2.stack(level=[0, 2]), exp_hair_stacked,
check_names=False)
# Out-of-order int column names
df3 = df.copy()
df3.columns.names = [2, 0, 1]
assert_frame_equal(df3.stack(level=[0, 1]), animal_hair_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 0]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 1]), exp_hair_stacked,
check_names=False)
def test_unstack_bool(self):
df = DataFrame([False, False],
index=MultiIndex.from_arrays([['a', 'b'], ['c', 'l']]),
columns=['col'])
rs = df.unstack()
xp = DataFrame(np.array([[False, np.nan], [np.nan, False]],
dtype=object),
index=['a', 'b'],
columns=MultiIndex.from_arrays([['col', 'col'],
['c', 'l']]))
assert_frame_equal(rs, xp)
def test_unstack_level_binding(self):
# GH9856
mi = pd.MultiIndex(
levels=[[u('foo'), u('bar')], [u('one'), u('two')],
[u('a'), u('b')]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
names=[u('first'), u('second'), u('third')])
s = pd.Series(0, index=mi)
result = s.unstack([1, 2]).stack(0)
expected_mi = pd.MultiIndex(
levels=[['foo', 'bar'], ['one', 'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=['first', 'second'])
expected = pd.DataFrame(np.array([[np.nan, 0],
[0, np.nan],
[np.nan, 0],
[0, np.nan]],
dtype=np.float64),
index=expected_mi,
columns=pd.Index(['a', 'b'], name='third'))
assert_frame_equal(result, expected)
def test_unstack_to_series(self):
# check reversibility
data = self.frame.unstack()
assert isinstance(data, Series)
undo = data.unstack().T
assert_frame_equal(undo, self.frame)
# check NA handling
data = DataFrame({'x': [1, 2, np.NaN], 'y': [3.0, 4, np.NaN]})
data.index = Index(['a', 'b', 'c'])
result = data.unstack()
midx = MultiIndex(levels=[['x', 'y'], ['a', 'b', 'c']],
labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)
assert_series_equal(result, expected)
# check composability of unstack
old_data = data.copy()
for _ in range(4):
data = data.unstack()
assert_frame_equal(old_data, data)
def test_unstack_dtypes(self):
# GH 2929
rows = [[1, 1, 3, 4],
[1, 2, 3, 4],
[2, 1, 3, 4],
[2, 2, 3, 4]]
df = DataFrame(rows, columns=list('ABCD'))
result = df.get_dtype_counts()
expected = Series({'int64': 4})
assert_series_equal(result, expected)
# single dtype
df2 = df.set_index(['A', 'B'])
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'int64': 4})
assert_series_equal(result, expected)
# mixed
df2 = df.set_index(['A', 'B'])
df2['C'] = 3.
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'int64': 2, 'float64': 2})
assert_series_equal(result, expected)
df2['D'] = 'foo'
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'float64': 2, 'object': 2})
assert_series_equal(result, expected)
# GH7405
for c, d in (np.zeros(5), np.zeros(5)), \
(np.arange(5, dtype='f8'), np.arange(5, 10, dtype='f8')):
df = DataFrame({'A': ['a'] * 5, 'C': c, 'D': d,
'B': pd.date_range('2012-01-01', periods=5)})
right = df.iloc[:3].copy(deep=True)
df = df.set_index(['A', 'B'])
df['D'] = df['D'].astype('int64')
left = df.iloc[:3].unstack(0)
right = right.set_index(['A', 'B']).unstack(0)
right[('D', 'a')] = right[('D', 'a')].astype('int64')
assert left.shape == (3, 2)
tm.assert_frame_equal(left, right)
def test_unstack_unused_levels(self):
# GH 17845: unused labels in index make unstack() cast int to float
idx = pd.MultiIndex.from_product([['a'], ['A', 'B', 'C', 'D']])[:-1]
df = pd.DataFrame([[1, 0]] * 3, index=idx)
result = df.unstack()
exp_col = pd.MultiIndex.from_product([[0, 1], ['A', 'B', 'C']])
expected = pd.DataFrame([[1, 1, 1, 0, 0, 0]], index=['a'],
columns=exp_col)
tm.assert_frame_equal(result, expected)
assert((result.columns.levels[1] == idx.levels[1]).all())
# Unused items on both levels
levels = [[0, 1, 7], [0, 1, 2, 3]]
labels = [[0, 0, 1, 1], [0, 2, 0, 2]]
idx = pd.MultiIndex(levels, labels)
block = np.arange(4).reshape(2, 2)
df = pd.DataFrame(np.concatenate([block, block + 4]), index=idx)
result = df.unstack()
expected = pd.DataFrame(np.concatenate([block * 2, block * 2 + 1],
axis=1),
columns=idx)
tm.assert_frame_equal(result, expected)
assert((result.columns.levels[1] == idx.levels[1]).all())
# With mixed dtype and NaN
levels = [['a', 2, 'c'], [1, 3, 5, 7]]
labels = [[0, -1, 1, 1], [0, 2, -1, 2]]
idx = pd.MultiIndex(levels, labels)
data = np.arange(8)
df = pd.DataFrame(data.reshape(4, 2), index=idx)
cases = ((0, [13, 16, 6, 9, 2, 5, 8, 11],
[np.nan, 'a', 2], [np.nan, 5, 1]),
(1, [8, 11, 1, 4, 12, 15, 13, 16],
[np.nan, 5, 1], [np.nan, 'a', 2]))
for level, idces, col_level, idx_level in cases:
result = df.unstack(level=level)
exp_data = np.zeros(18) * np.nan
exp_data[idces] = data
cols = pd.MultiIndex.from_product([[0, 1], col_level])
expected = pd.DataFrame(exp_data.reshape(3, 6),
index=idx_level, columns=cols)
# Broken (GH 18455):
# tm.assert_frame_equal(result, expected)
diff = result - expected
assert(diff.sum().sum() == 0)
assert((diff + 1).sum().sum() == 8)
assert((result.columns.levels[1] == idx.levels[level]).all())
@pytest.mark.parametrize("cols", [['A', 'C'], slice(None)])
def test_unstack_unused_level(self, cols):
# GH 18562 : unused labels on the unstacked level
df = pd.DataFrame([[2010, 'a', 'I'],
[2011, 'b', 'II']],
columns=['A', 'B', 'C'])
ind = df.set_index(['A', 'B', 'C'], drop=False)
selection = ind.loc[(slice(None), slice(None), 'I'), cols]
result = selection.unstack()
expected = ind.iloc[[0]][cols]
expected.columns = MultiIndex.from_product([expected.columns, ['I']],
names=[None, 'C'])
expected.index = expected.index.droplevel('C')
tm.assert_frame_equal(result, expected)
def test_unstack_nan_index(self): # GH7466
cast = lambda val: '{0:1}'.format('' if val != val else val)
nan = np.nan
def verify(df):
mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]
rows, cols = df.notna().values.nonzero()
for i, j in zip(rows, cols):
left = sorted(df.iloc[i, j].split('.'))
right = mk_list(df.index[i]) + mk_list(df.columns[j])
right = sorted(list(map(cast, right)))
assert left == right
df = DataFrame({'jim': ['a', 'b', nan, 'd'],
'joe': ['w', 'x', 'y', 'z'],
'jolie': ['a.w', 'b.x', ' .y', 'd.z']})
left = df.set_index(['jim', 'joe']).unstack()['jolie']
right = df.set_index(['joe', 'jim']).unstack()['jolie'].T
assert_frame_equal(left, right)
for idx in itertools.permutations(df.columns[:2]):
mi = df.set_index(list(idx))
for lev in range(2):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == len(df)
verify(udf['jolie'])
df = DataFrame({'1st': ['d'] * 3 + [nan] * 5 + ['a'] * 2 +
['c'] * 3 + ['e'] * 2 + ['b'] * 5,
'2nd': ['y'] * 2 + ['w'] * 3 + [nan] * 3 +
['z'] * 4 + [nan] * 3 + ['x'] * 3 + [nan] * 2,
'3rd': [67, 39, 53, 72, 57, 80, 31, 18, 11, 30, 59,
50, 62, 59, 76, 52, 14, 53, 60, 51]})
df['4th'], df['5th'] = \
df.apply(lambda r: '.'.join(map(cast, r)), axis=1), \
df.apply(lambda r: '.'.join(map(cast, r.iloc[::-1])), axis=1)
for idx in itertools.permutations(['1st', '2nd', '3rd']):
mi = df.set_index(list(idx))
for lev in range(3):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == 2 * len(df)
for col in ['4th', '5th']:
verify(udf[col])
# GH7403
df = pd.DataFrame(
{'A': list('aaaabbbb'), 'B': range(8), 'C': range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack(0)
vals = [[3, 0, 1, 2, nan, nan, nan, nan],
[nan, nan, nan, nan, 4, 5, 6, 7]]
vals = list(map(list, zip(*vals)))
idx = Index([nan, 0, 1, 2, 4, 5, 6, 7], name='B')
cols = MultiIndex(levels=[['C'], ['a', 'b']],
labels=[[0, 0], [0, 1]],
names=[None, 'A'])
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
df = DataFrame({'A': list('aaaabbbb'), 'B': list(range(4)) * 2,
'C': range(8)})
df.iloc[2, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack(0)
vals = [[2, nan], [0, 4], [1, 5], [nan, 6], [3, 7]]
cols = MultiIndex(levels=[['C'], ['a', 'b']],
labels=[[0, 0], [0, 1]],
names=[None, 'A'])
idx = Index([nan, 0, 1, 2, 3], name='B')
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
df = pd.DataFrame({'A': list('aaaabbbb'), 'B': list(range(4)) * 2,
'C': range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack(0)
vals = [[3, nan], [0, 4], [1, 5], [2, 6], [nan, 7]]
cols = MultiIndex(levels=[['C'], ['a', 'b']],
labels=[[0, 0], [0, 1]],
names=[None, 'A'])
idx = Index([nan, 0, 1, 2, 3], name='B')
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
# GH7401
df = pd.DataFrame({'A': list('aaaaabbbbb'), 'C': np.arange(10),
'B': (date_range('2012-01-01', periods=5)
.tolist() * 2)})
df.iloc[3, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack()
vals = np.array([[3, 0, 1, 2, nan, 4], [nan, 5, 6, 7, 8, 9]])
idx = Index(['a', 'b'], name='A')
cols = MultiIndex(levels=[['C'], date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
# GH4862
vals = [['Hg', nan, nan, 680585148],
['U', 0.0, nan, 680585148],
['Pb', 7.07e-06, nan, 680585148],
['Sn', 2.3614e-05, 0.0133, 680607017],
['Ag', 0.0, 0.0133, 680607017],
['Hg', -0.00015, 0.0133, 680607017]]
df = DataFrame(vals, columns=['agent', 'change', 'dosage', 's_id'],
index=[17263, 17264, 17265, 17266, 17267, 17268])
left = df.copy().set_index(['s_id', 'dosage', 'agent']).unstack()
vals = [[nan, nan, 7.07e-06, nan, 0.0],
[0.0, -0.00015, nan, 2.3614e-05, nan]]
idx = MultiIndex(levels=[[680585148, 680607017], [0.0133]],
labels=[[0, 1], [-1, 0]],
names=['s_id', 'dosage'])
cols = MultiIndex(levels=[['change'], ['Ag', 'Hg', 'Pb', 'Sn', 'U']],
labels=[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]],
names=[None, 'agent'])
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
left = df.loc[17264:].copy().set_index(['s_id', 'dosage', 'agent'])
assert_frame_equal(left.unstack(), right)
# GH9497 - multiple unstack with nulls
df = DataFrame({'1st': [1, 2, 1, 2, 1, 2],
'2nd': pd.date_range('2014-02-01', periods=6,
freq='D'),
'jim': 100 + np.arange(6),
'joe': (np.random.randn(6) * 10).round(2)})
df['3rd'] = df['2nd'] - pd.Timestamp('2014-02-02')
df.loc[1, '2nd'] = df.loc[3, '2nd'] = nan
df.loc[1, '3rd'] = df.loc[4, '3rd'] = nan
left = df.set_index(['1st', '2nd', '3rd']).unstack(['2nd', '3rd'])
assert left.notna().values.sum() == 2 * len(df)
for col in ['jim', 'joe']:
for _, r in df.iterrows():
key = r['1st'], (col, r['2nd'], r['3rd'])
assert r[col] == left.loc[key]
def test_stack_datetime_column_multiIndex(self):
# GH 8039
t = datetime(2014, 1, 1)
df = DataFrame(
[1, 2, 3, 4], columns=MultiIndex.from_tuples([(t, 'A', 'B')]))
result = df.stack()
eidx = MultiIndex.from_product([(0, 1, 2, 3), ('B',)])
ecols = MultiIndex.from_tuples([(t, 'A')])
expected = DataFrame([1, 2, 3, 4], index=eidx, columns=ecols)
assert_frame_equal(result, expected)
def test_stack_partial_multiIndex(self):
# GH 8844
def _test_stack_with_multiindex(multiindex):
df = DataFrame(np.arange(3 * len(multiindex))
.reshape(3, len(multiindex)),
columns=multiindex)
for level in (-1, 0, 1, [0, 1], [1, 0]):
result = df.stack(level=level, dropna=False)
if isinstance(level, int):
# Stacking a single level should not make any all-NaN rows,
# so df.stack(level=level, dropna=False) should be the same
# as df.stack(level=level, dropna=True).
expected = df.stack(level=level, dropna=True)
if isinstance(expected, Series):
assert_series_equal(result, expected)
else:
assert_frame_equal(result, expected)
df.columns = MultiIndex.from_tuples(df.columns.get_values(),
names=df.columns.names)
expected = df.stack(level=level, dropna=False)
if isinstance(expected, Series):
assert_series_equal(result, expected)
else:
assert_frame_equal(result, expected)
full_multiindex = MultiIndex.from_tuples([('B', 'x'), ('B', 'z'),
('A', 'y'),
('C', 'x'), ('C', 'u')],
names=['Upper', 'Lower'])
for multiindex_columns in ([0, 1, 2, 3, 4],
[0, 1, 2, 3], [0, 1, 2, 4],
[0, 1, 2], [1, 2, 3], [2, 3, 4],
[0, 1], [0, 2], [0, 3],
[0], [2], [4]):
_test_stack_with_multiindex(full_multiindex[multiindex_columns])
if len(multiindex_columns) > 1:
multiindex_columns.reverse()
_test_stack_with_multiindex(
full_multiindex[multiindex_columns])
df = DataFrame(np.arange(6).reshape(2, 3),
columns=full_multiindex[[0, 1, 3]])
result = df.stack(dropna=False)
expected = DataFrame([[0, 2], [1, nan], [3, 5], [4, nan]],
index=MultiIndex(
levels=[[0, 1], ['u', 'x', 'y', 'z']],
labels=[[0, 0, 1, 1],
[1, 3, 1, 3]],
names=[None, 'Lower']),
columns=Index(['B', 'C'], name='Upper'),
dtype=df.dtypes[0])
assert_frame_equal(result, expected)
def test_stack_preserve_categorical_dtype(self):
# GH13854
for ordered in [False, True]:
for labels in [list("yxz"), list("yxy")]:
cidx = pd.CategoricalIndex(labels, categories=list("xyz"),
ordered=ordered)
df = DataFrame([[10, 11, 12]], columns=cidx)
result = df.stack()
                # `MultiIndex.from_product` preserves categorical dtype -
                # it's tested elsewhere.
midx = pd.MultiIndex.from_product([df.index, cidx])
expected = Series([10, 11, 12], index=midx)
tm.assert_series_equal(result, expected)
def test_unstack_fill_frame_object():
# GH12815 Test unstacking with object.
data = pd.Series(['a', 'b', 'c', 'a'], dtype='object')
data.index = pd.MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
# By default missing values will be NaN
result = data.unstack()
expected = pd.DataFrame(
{'a': ['a', np.nan, 'a'], 'b': ['b', 'c', np.nan]},
index=list('xyz')
)
assert_frame_equal(result, expected)
# Fill with any value replaces missing values as expected
result = data.unstack(fill_value='d')
expected = pd.DataFrame(
{'a': ['a', 'd', 'a'], 'b': ['b', 'c', 'd']},
index=list('xyz')
)
assert_frame_equal(result, expected)
| 40.52862
| 79
| 0.480131
|
from __future__ import print_function
from warnings import catch_warnings
from datetime import datetime
import itertools
import pytest
from numpy.random import randn
from numpy import nan
import numpy as np
from pandas.compat import u
from pandas import (DataFrame, Index, Series, MultiIndex, date_range,
Timedelta, Period)
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameReshape(TestData):
def test_pivot(self):
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data)
pivoted = frame.pivot(
index='index', columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
tm.assert_frame_equal(pivoted, expected)
assert pivoted.index.name == 'index'
assert pivoted.columns.name == 'columns'
pivoted = frame.pivot(index='index', columns='columns')
assert pivoted.index.name == 'index'
assert pivoted.columns.names == (None, 'columns')
with catch_warnings(record=True):
# pivot multiple columns
wp = tm.makePanel()
lp = wp.to_frame()
df = lp.reset_index()
tm.assert_frame_equal(df.pivot('major', 'minor'), lp.unstack())
def test_pivot_duplicates(self):
data = DataFrame({'a': ['bar', 'bar', 'foo', 'foo', 'foo'],
'b': ['one', 'two', 'one', 'one', 'two'],
'c': [1., 2., 3., 3., 4.]})
with tm.assert_raises_regex(ValueError, 'duplicate entries'):
data.pivot('a', 'b', 'c')
def test_pivot_empty(self):
df = DataFrame({}, columns=['a', 'b', 'c'])
result = df.pivot('a', 'b', 'c')
expected = DataFrame({})
tm.assert_frame_equal(result, expected, check_names=False)
def test_pivot_integer_bug(self):
df = DataFrame(data=[("A", "1", "A1"), ("B", "2", "B2")])
result = df.pivot(index=1, columns=0, values=2)
repr(result)
tm.assert_index_equal(result.columns, Index(['A', 'B'], name=0))
def test_pivot_index_none(self):
# gh-3962
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data).set_index('index')
result = frame.pivot(columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
assert_frame_equal(result, expected)
# omit values
result = frame.pivot(columns='columns')
expected.columns = pd.MultiIndex.from_tuples([('values', 'One'),
('values', 'Two')],
names=[None, 'columns'])
expected.index.name = 'index'
tm.assert_frame_equal(result, expected, check_names=False)
assert result.index.name == 'index'
assert result.columns.names == (None, 'columns')
expected.columns = expected.columns.droplevel(0)
result = frame.pivot(columns='columns', values='values')
expected.columns.name = 'columns'
tm.assert_frame_equal(result, expected)
def test_stack_unstack(self):
df = self.frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({'foo': stacked, 'bar': stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
assert_frame_equal(unstacked, df)
assert_frame_equal(unstacked_df['bar'], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
assert_frame_equal(unstacked_cols.T, df)
assert_frame_equal(unstacked_cols_df['bar'].T, df)
def test_stack_mixed_level(self):
# GH 18310
levels = [range(3), [3, 'a', 'b'], [1, 2]]
# flat columns:
df = DataFrame(1, index=levels[0], columns=levels[1])
result = df.stack()
expected = Series(1, index=MultiIndex.from_product(levels[:2]))
assert_series_equal(result, expected)
# MultiIndex columns:
df = DataFrame(1, index=levels[0],
columns=MultiIndex.from_product(levels[1:]))
result = df.stack(1)
expected = DataFrame(1, index=MultiIndex.from_product([levels[0],
levels[2]]),
columns=levels[1])
assert_frame_equal(result, expected)
# as above, but used labels in level are actually of homogeneous type
result = df[['a', 'b']].stack(1)
expected = expected[['a', 'b']]
assert_frame_equal(result, expected)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack(fill_value=-1)
expected = DataFrame({'a': [1, -1, 5], 'b': [2, 4, -1]},
index=['x', 'y', 'z'], dtype=np.int16)
assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame({'a': [1, 0.5, 5], 'b': [2, 4, 0.5]},
index=['x', 'y', 'z'], dtype=np.float)
assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame({'x': ['a', 'a', 'b'],
'y': ['j', 'k', 'j'],
'z': [0, 1, 2],
'w': [0, 1, 2]}).set_index(['x', 'y', 'z'])
unstacked = df.unstack(['x', 'y'], fill_value=0)
key = ('w', 'b', 'j')
expected = unstacked[key]
result = pd.Series([0, 0, 2], index=unstacked.index, name=key)
assert_series_equal(result, expected)
stacked = unstacked.stack(['x', 'y'])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
assert_frame_equal(result, df)
# From a series
s = df['w']
result = s.unstack(['x', 'y'], fill_value=0)
expected = unstacked['w']
assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list('AB'), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
# From a mixed type dataframe
df['A'] = df['A'].astype(np.int16)
df['B'] = df['B'].astype(np.float64)
result = df.unstack(fill_value=-1)
expected['A'] = expected['A'].astype(np.int16)
expected['B'] = expected['B'].astype(np.float64)
assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.float)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = pd.date_range('2012-01-01', periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [dv[0], pd.NaT, dv[3]],
'b': [dv[1], dv[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame({'a': [dv[0], dv[0], dv[3]],
'b': [dv[1], dv[2], dv[0]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [td[0], pd.NaT, td[3]],
'b': [td[1], td[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame({'a': [td[0], td[1], td[3]],
'b': [td[1], td[2], td[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [Period('2012-01'), Period('2012-02'), Period('2012-03'),
Period('2012-04')]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [periods[0], None, periods[3]],
'b': [periods[1], periods[2], None]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame({'a': [periods[0], periods[1], periods[3]],
'b': [periods[1], periods[2], periods[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = pd.Series(['a', 'b', 'c', 'a'], dtype='category')
data.index = pd.MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame({'a': pd.Categorical(list('axa'),
categories=list('abc')),
'b': pd.Categorical(list('bcx'),
categories=list('abc'))},
index=list('xyz'))
assert_frame_equal(result, expected)
# Fill with non-category results in NaN entries similar to above
result = data.unstack(fill_value='d')
assert_frame_equal(result, expected)
# Fill with category value replaces missing values as expected
result = data.unstack(fill_value='c')
expected = DataFrame({'a': pd.Categorical(list('aca'),
categories=list('abc')),
'b': pd.Categorical(list('bcc'),
categories=list('abc'))},
index=list('xyz'))
assert_frame_equal(result, expected)
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = pd.DataFrame(dict(state=['IL', 'MI', 'NC'],
index=['a', 'b', 'c'],
some_categories=pd.Series(['a', 'b', 'c']
).astype('category'),
A=np.random.rand(3),
B=1,
C='foo',
D=pd.Timestamp('20010102'),
E=pd.Series([1.0, 50.0, 100.0]
).astype('float32'),
F=pd.Series([3.0, 4.0, 5.0]).astype('float64'),
G=False,
H=pd.Series([1, 200, 923442], dtype='int8')))
def unstack_and_compare(df, column_name):
unstacked1 = df.unstack([column_name])
unstacked2 = df.unstack(column_name)
assert_frame_equal(unstacked1, unstacked2)
df1 = df.set_index(['state', 'index'])
unstack_and_compare(df1, 'index')
df1 = df.set_index(['state', 'some_categories'])
unstack_and_compare(df1, 'some_categories')
df1 = df.set_index(['F', 'C'])
unstack_and_compare(df1, 'F')
df1 = df.set_index(['G', 'B', 'state'])
unstack_and_compare(df1, 'B')
df1 = df.set_index(['E', 'A'])
unstack_and_compare(df1, 'E')
df1 = df.set_index(['state', 'index'])
s = df1['A']
unstack_and_compare(s, 'index')
def test_stack_ints(self):
columns = MultiIndex.from_tuples(list(itertools.product(range(3),
repeat=3)))
df = DataFrame(np.random.randn(30, 27), columns=columns)
assert_frame_equal(df.stack(level=[1, 2]),
df.stack(level=1).stack(level=1))
assert_frame_equal(df.stack(level=[-2, -1]),
df.stack(level=1).stack(level=1))
df_named = df.copy()
df_named.columns.set_names(range(3), inplace=True)
assert_frame_equal(df_named.stack(level=[1, 2]),
df_named.stack(level=1).stack(level=1))
def test_stack_mixed_levels(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(randn(4, 4), columns=columns)
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
# GH #8584: Need to check that stacking works when a number
# is passed that is both a level name and in the range of
# the level numbers
df2 = df.copy()
df2.columns.names = ['exp', 'animal', 1]
assert_frame_equal(df2.stack(level=['animal', 1]),
animal_hair_stacked, check_names=False)
assert_frame_equal(df2.stack(level=['exp', 1]),
exp_hair_stacked, check_names=False)
# When mixed types are passed and the ints are not level
# names, raise
pytest.raises(ValueError, df2.stack, level=['animal', 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
df3 = df.copy()
df3.columns.names = ['exp', 'animal', 0]
assert_frame_equal(df3.stack(level=['animal', 0]),
animal_hair_stacked, check_names=False)
def test_stack_int_level_names(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(randn(4, 4), columns=columns)
exp_animal_stacked = df.stack(level=['exp', 'animal'])
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
df2 = df.copy()
df2.columns.names = [0, 1, 2]
assert_frame_equal(df2.stack(level=[1, 2]), animal_hair_stacked,
check_names=False)
assert_frame_equal(df2.stack(level=[0, 1]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df2.stack(level=[0, 2]), exp_hair_stacked,
check_names=False)
# Out-of-order int column names
df3 = df.copy()
df3.columns.names = [2, 0, 1]
assert_frame_equal(df3.stack(level=[0, 1]), animal_hair_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 0]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 1]), exp_hair_stacked,
check_names=False)
def test_unstack_bool(self):
df = DataFrame([False, False],
index=MultiIndex.from_arrays([['a', 'b'], ['c', 'l']]),
columns=['col'])
rs = df.unstack()
xp = DataFrame(np.array([[False, np.nan], [np.nan, False]],
dtype=object),
index=['a', 'b'],
columns=MultiIndex.from_arrays([['col', 'col'],
['c', 'l']]))
assert_frame_equal(rs, xp)
def test_unstack_level_binding(self):
# GH9856
mi = pd.MultiIndex(
levels=[[u('foo'), u('bar')], [u('one'), u('two')],
[u('a'), u('b')]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
names=[u('first'), u('second'), u('third')])
s = pd.Series(0, index=mi)
result = s.unstack([1, 2]).stack(0)
expected_mi = pd.MultiIndex(
levels=[['foo', 'bar'], ['one', 'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=['first', 'second'])
expected = pd.DataFrame(np.array([[np.nan, 0],
[0, np.nan],
[np.nan, 0],
[0, np.nan]],
dtype=np.float64),
index=expected_mi,
columns=pd.Index(['a', 'b'], name='third'))
assert_frame_equal(result, expected)
def test_unstack_to_series(self):
# check reversibility
data = self.frame.unstack()
assert isinstance(data, Series)
undo = data.unstack().T
assert_frame_equal(undo, self.frame)
# check NA handling
data = DataFrame({'x': [1, 2, np.NaN], 'y': [3.0, 4, np.NaN]})
data.index = Index(['a', 'b', 'c'])
result = data.unstack()
midx = MultiIndex(levels=[['x', 'y'], ['a', 'b', 'c']],
labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)
assert_series_equal(result, expected)
# check composability of unstack
old_data = data.copy()
for _ in range(4):
data = data.unstack()
assert_frame_equal(old_data, data)
def test_unstack_dtypes(self):
# GH 2929
rows = [[1, 1, 3, 4],
[1, 2, 3, 4],
[2, 1, 3, 4],
[2, 2, 3, 4]]
df = DataFrame(rows, columns=list('ABCD'))
result = df.get_dtype_counts()
expected = Series({'int64': 4})
assert_series_equal(result, expected)
# single dtype
df2 = df.set_index(['A', 'B'])
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'int64': 4})
assert_series_equal(result, expected)
# mixed
df2 = df.set_index(['A', 'B'])
df2['C'] = 3.
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'int64': 2, 'float64': 2})
assert_series_equal(result, expected)
df2['D'] = 'foo'
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'float64': 2, 'object': 2})
assert_series_equal(result, expected)
# GH7405
for c, d in (np.zeros(5), np.zeros(5)), \
(np.arange(5, dtype='f8'), np.arange(5, 10, dtype='f8')):
df = DataFrame({'A': ['a'] * 5, 'C': c, 'D': d,
'B': pd.date_range('2012-01-01', periods=5)})
right = df.iloc[:3].copy(deep=True)
df = df.set_index(['A', 'B'])
df['D'] = df['D'].astype('int64')
left = df.iloc[:3].unstack(0)
right = right.set_index(['A', 'B']).unstack(0)
right[('D', 'a')] = right[('D', 'a')].astype('int64')
assert left.shape == (3, 2)
tm.assert_frame_equal(left, right)
def test_unstack_unused_levels(self):
# GH 17845: unused labels in index make unstack() cast int to float
idx = pd.MultiIndex.from_product([['a'], ['A', 'B', 'C', 'D']])[:-1]
df = pd.DataFrame([[1, 0]] * 3, index=idx)
result = df.unstack()
exp_col = pd.MultiIndex.from_product([[0, 1], ['A', 'B', 'C']])
expected = pd.DataFrame([[1, 1, 1, 0, 0, 0]], index=['a'],
columns=exp_col)
tm.assert_frame_equal(result, expected)
assert((result.columns.levels[1] == idx.levels[1]).all())
# Unused items on both levels
levels = [[0, 1, 7], [0, 1, 2, 3]]
labels = [[0, 0, 1, 1], [0, 2, 0, 2]]
idx = pd.MultiIndex(levels, labels)
block = np.arange(4).reshape(2, 2)
df = pd.DataFrame(np.concatenate([block, block + 4]), index=idx)
result = df.unstack()
expected = pd.DataFrame(np.concatenate([block * 2, block * 2 + 1],
axis=1),
columns=idx)
tm.assert_frame_equal(result, expected)
assert((result.columns.levels[1] == idx.levels[1]).all())
# With mixed dtype and NaN
levels = [['a', 2, 'c'], [1, 3, 5, 7]]
labels = [[0, -1, 1, 1], [0, 2, -1, 2]]
idx = pd.MultiIndex(levels, labels)
data = np.arange(8)
df = pd.DataFrame(data.reshape(4, 2), index=idx)
cases = ((0, [13, 16, 6, 9, 2, 5, 8, 11],
[np.nan, 'a', 2], [np.nan, 5, 1]),
(1, [8, 11, 1, 4, 12, 15, 13, 16],
[np.nan, 5, 1], [np.nan, 'a', 2]))
for level, idces, col_level, idx_level in cases:
result = df.unstack(level=level)
exp_data = np.zeros(18) * np.nan
exp_data[idces] = data
cols = pd.MultiIndex.from_product([[0, 1], col_level])
expected = pd.DataFrame(exp_data.reshape(3, 6),
index=idx_level, columns=cols)
# Broken (GH 18455):
# tm.assert_frame_equal(result, expected)
diff = result - expected
assert(diff.sum().sum() == 0)
assert((diff + 1).sum().sum() == 8)
assert((result.columns.levels[1] == idx.levels[level]).all())
@pytest.mark.parametrize("cols", [['A', 'C'], slice(None)])
def test_unstack_unused_level(self, cols):
# GH 18562 : unused labels on the unstacked level
df = pd.DataFrame([[2010, 'a', 'I'],
[2011, 'b', 'II']],
columns=['A', 'B', 'C'])
ind = df.set_index(['A', 'B', 'C'], drop=False)
selection = ind.loc[(slice(None), slice(None), 'I'), cols]
result = selection.unstack()
expected = ind.iloc[[0]][cols]
expected.columns = MultiIndex.from_product([expected.columns, ['I']],
names=[None, 'C'])
expected.index = expected.index.droplevel('C')
tm.assert_frame_equal(result, expected)
def test_unstack_nan_index(self): # GH7466
cast = lambda val: '{0:1}'.format('' if val != val else val)
nan = np.nan
def verify(df):
mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]
rows, cols = df.notna().values.nonzero()
for i, j in zip(rows, cols):
left = sorted(df.iloc[i, j].split('.'))
right = mk_list(df.index[i]) + mk_list(df.columns[j])
right = sorted(list(map(cast, right)))
assert left == right
df = DataFrame({'jim': ['a', 'b', nan, 'd'],
'joe': ['w', 'x', 'y', 'z'],
'jolie': ['a.w', 'b.x', ' .y', 'd.z']})
left = df.set_index(['jim', 'joe']).unstack()['jolie']
right = df.set_index(['joe', 'jim']).unstack()['jolie'].T
assert_frame_equal(left, right)
for idx in itertools.permutations(df.columns[:2]):
mi = df.set_index(list(idx))
for lev in range(2):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == len(df)
verify(udf['jolie'])
df = DataFrame({'1st': ['d'] * 3 + [nan] * 5 + ['a'] * 2 +
['c'] * 3 + ['e'] * 2 + ['b'] * 5,
'2nd': ['y'] * 2 + ['w'] * 3 + [nan] * 3 +
['z'] * 4 + [nan] * 3 + ['x'] * 3 + [nan] * 2,
'3rd': [67, 39, 53, 72, 57, 80, 31, 18, 11, 30, 59,
50, 62, 59, 76, 52, 14, 53, 60, 51]})
df['4th'], df['5th'] = \
df.apply(lambda r: '.'.join(map(cast, r)), axis=1), \
df.apply(lambda r: '.'.join(map(cast, r.iloc[::-1])), axis=1)
for idx in itertools.permutations(['1st', '2nd', '3rd']):
mi = df.set_index(list(idx))
for lev in range(3):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == 2 * len(df)
for col in ['4th', '5th']:
verify(udf[col])
# GH7403
df = pd.DataFrame(
{'A': list('aaaabbbb'), 'B': range(8), 'C': range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack(0)
vals = [[3, 0, 1, 2, nan, nan, nan, nan],
[nan, nan, nan, nan, 4, 5, 6, 7]]
vals = list(map(list, zip(*vals)))
idx = Index([nan, 0, 1, 2, 4, 5, 6, 7], name='B')
cols = MultiIndex(levels=[['C'], ['a', 'b']],
labels=[[0, 0], [0, 1]],
names=[None, 'A'])
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
df = DataFrame({'A': list('aaaabbbb'), 'B': list(range(4)) * 2,
'C': range(8)})
df.iloc[2, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack(0)
vals = [[2, nan], [0, 4], [1, 5], [nan, 6], [3, 7]]
cols = MultiIndex(levels=[['C'], ['a', 'b']],
labels=[[0, 0], [0, 1]],
names=[None, 'A'])
idx = Index([nan, 0, 1, 2, 3], name='B')
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
df = pd.DataFrame({'A': list('aaaabbbb'), 'B': list(range(4)) * 2,
'C': range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack(0)
vals = [[3, nan], [0, 4], [1, 5], [2, 6], [nan, 7]]
cols = MultiIndex(levels=[['C'], ['a', 'b']],
labels=[[0, 0], [0, 1]],
names=[None, 'A'])
idx = Index([nan, 0, 1, 2, 3], name='B')
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
# GH7401
df = pd.DataFrame({'A': list('aaaaabbbbb'), 'C': np.arange(10),
'B': (date_range('2012-01-01', periods=5)
.tolist() * 2)})
df.iloc[3, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack()
vals = np.array([[3, 0, 1, 2, nan, 4], [nan, 5, 6, 7, 8, 9]])
idx = Index(['a', 'b'], name='A')
cols = MultiIndex(levels=[['C'], date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
# GH4862
vals = [['Hg', nan, nan, 680585148],
['U', 0.0, nan, 680585148],
['Pb', 7.07e-06, nan, 680585148],
['Sn', 2.3614e-05, 0.0133, 680607017],
['Ag', 0.0, 0.0133, 680607017],
['Hg', -0.00015, 0.0133, 680607017]]
df = DataFrame(vals, columns=['agent', 'change', 'dosage', 's_id'],
index=[17263, 17264, 17265, 17266, 17267, 17268])
left = df.copy().set_index(['s_id', 'dosage', 'agent']).unstack()
vals = [[nan, nan, 7.07e-06, nan, 0.0],
[0.0, -0.00015, nan, 2.3614e-05, nan]]
idx = MultiIndex(levels=[[680585148, 680607017], [0.0133]],
labels=[[0, 1], [-1, 0]],
names=['s_id', 'dosage'])
cols = MultiIndex(levels=[['change'], ['Ag', 'Hg', 'Pb', 'Sn', 'U']],
labels=[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]],
names=[None, 'agent'])
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
left = df.loc[17264:].copy().set_index(['s_id', 'dosage', 'agent'])
assert_frame_equal(left.unstack(), right)
# GH9497 - multiple unstack with nulls
df = DataFrame({'1st': [1, 2, 1, 2, 1, 2],
'2nd': pd.date_range('2014-02-01', periods=6,
freq='D'),
'jim': 100 + np.arange(6),
'joe': (np.random.randn(6) * 10).round(2)})
df['3rd'] = df['2nd'] - pd.Timestamp('2014-02-02')
df.loc[1, '2nd'] = df.loc[3, '2nd'] = nan
df.loc[1, '3rd'] = df.loc[4, '3rd'] = nan
left = df.set_index(['1st', '2nd', '3rd']).unstack(['2nd', '3rd'])
assert left.notna().values.sum() == 2 * len(df)
for col in ['jim', 'joe']:
for _, r in df.iterrows():
key = r['1st'], (col, r['2nd'], r['3rd'])
assert r[col] == left.loc[key]
def test_stack_datetime_column_multiIndex(self):
# GH 8039
t = datetime(2014, 1, 1)
df = DataFrame(
[1, 2, 3, 4], columns=MultiIndex.from_tuples([(t, 'A', 'B')]))
result = df.stack()
eidx = MultiIndex.from_product([(0, 1, 2, 3), ('B',)])
ecols = MultiIndex.from_tuples([(t, 'A')])
expected = DataFrame([1, 2, 3, 4], index=eidx, columns=ecols)
assert_frame_equal(result, expected)
def test_stack_partial_multiIndex(self):
# GH 8844
def _test_stack_with_multiindex(multiindex):
df = DataFrame(np.arange(3 * len(multiindex))
.reshape(3, len(multiindex)),
columns=multiindex)
for level in (-1, 0, 1, [0, 1], [1, 0]):
result = df.stack(level=level, dropna=False)
if isinstance(level, int):
# Stacking a single level should not make any all-NaN rows,
# so df.stack(level=level, dropna=False) should be the same
# as df.stack(level=level, dropna=True).
expected = df.stack(level=level, dropna=True)
if isinstance(expected, Series):
assert_series_equal(result, expected)
else:
assert_frame_equal(result, expected)
df.columns = MultiIndex.from_tuples(df.columns.get_values(),
names=df.columns.names)
expected = df.stack(level=level, dropna=False)
if isinstance(expected, Series):
assert_series_equal(result, expected)
else:
assert_frame_equal(result, expected)
full_multiindex = MultiIndex.from_tuples([('B', 'x'), ('B', 'z'),
('A', 'y'),
('C', 'x'), ('C', 'u')],
names=['Upper', 'Lower'])
for multiindex_columns in ([0, 1, 2, 3, 4],
[0, 1, 2, 3], [0, 1, 2, 4],
[0, 1, 2], [1, 2, 3], [2, 3, 4],
[0, 1], [0, 2], [0, 3],
[0], [2], [4]):
_test_stack_with_multiindex(full_multiindex[multiindex_columns])
if len(multiindex_columns) > 1:
multiindex_columns.reverse()
_test_stack_with_multiindex(
full_multiindex[multiindex_columns])
df = DataFrame(np.arange(6).reshape(2, 3),
columns=full_multiindex[[0, 1, 3]])
result = df.stack(dropna=False)
expected = DataFrame([[0, 2], [1, nan], [3, 5], [4, nan]],
index=MultiIndex(
levels=[[0, 1], ['u', 'x', 'y', 'z']],
labels=[[0, 0, 1, 1],
[1, 3, 1, 3]],
names=[None, 'Lower']),
columns=Index(['B', 'C'], name='Upper'),
dtype=df.dtypes[0])
assert_frame_equal(result, expected)
def test_stack_preserve_categorical_dtype(self):
# GH13854
for ordered in [False, True]:
for labels in [list("yxz"), list("yxy")]:
cidx = pd.CategoricalIndex(labels, categories=list("xyz"),
ordered=ordered)
df = DataFrame([[10, 11, 12]], columns=cidx)
result = df.stack()
                # `MultiIndex.from_product` preserves categorical dtype -
# it's tested elsewhere.
midx = pd.MultiIndex.from_product([df.index, cidx])
expected = Series([10, 11, 12], index=midx)
tm.assert_series_equal(result, expected)
def test_unstack_fill_frame_object():
data = pd.Series(['a', 'b', 'c', 'a'], dtype='object')
data.index = pd.MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = pd.DataFrame(
{'a': ['a', np.nan, 'a'], 'b': ['b', 'c', np.nan]},
index=list('xyz')
)
assert_frame_equal(result, expected)
result = data.unstack(fill_value='d')
expected = pd.DataFrame(
{'a': ['a', 'd', 'a'], 'b': ['b', 'c', 'd']},
index=list('xyz')
)
assert_frame_equal(result, expected)
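# A minimal, self-contained sketch of the unstack/fill_value behaviour
# exercised by test_unstack_fill_frame_object above; illustrative only,
# assuming nothing beyond an installed pandas.
if __name__ == '__main__':
    import pandas as _pd
    _data = _pd.Series(['a', 'b', 'c', 'a'], dtype='object')
    _data.index = _pd.MultiIndex.from_tuples(
        [('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
    print(_data.unstack())                # missing cells become NaN
    print(_data.unstack(fill_value='d'))  # missing cells are filled with 'd'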
| true
| true
|
7907491c44db7e39d0ead454367e272b582c47bd
| 4,545
|
py
|
Python
|
WebKit/Admin/ServletCache.py
|
Cito/w4py
|
bba08f5974d49f5da7e88abe3eeda1037d0824a3
|
[
"MIT"
] | 18
|
2016-08-01T20:15:59.000Z
|
2019-12-24T16:00:03.000Z
|
WebKit/Admin/ServletCache.py
|
WebwareForPython/w4py
|
bba08f5974d49f5da7e88abe3eeda1037d0824a3
|
[
"MIT"
] | 6
|
2016-09-13T05:48:45.000Z
|
2020-01-09T18:29:12.000Z
|
WebKit/Admin/ServletCache.py
|
WebwareForPython/w4py
|
bba08f5974d49f5da7e88abe3eeda1037d0824a3
|
[
"MIT"
] | 6
|
2016-09-16T14:32:29.000Z
|
2020-01-03T18:52:16.000Z
|
import os
import time
from WebKit.URLParser import ServletFactoryManager
from WebUtils.Funcs import htmlEncode
from AdminSecurity import AdminSecurity
class ServletCache(AdminSecurity):
"""Display servlet cache.
This servlet displays, in a readable form, the internal data
structure of the cache of all servlet factories.
This can be useful for debugging WebKit problems and the
information is interesting in general.
"""
def title(self):
return 'Servlet Cache'
def writeContent(self):
wr = self.writeln
factories = [factory for factory in ServletFactoryManager._factories
if factory._classCache]
if not factories:
wr('<h4>No caching servlet factories found.</h4>')
wr('<p>Caching can be activated by setting'
' <code>CacheServletClasses = True</code>.</p>')
return
if len(factories) > 1:
factories.sort()
wr('<h3>Servlet Factories:</h3>')
wr('<table>')
for factory in factories:
wr('<tr><td><a href="#%s">%s</a></td></tr>'
% ((factory.name(),)*2))
wr('</table>')
req = self.request()
wr('<form action="ServletCache" method="post">')
for factory in factories:
name = factory.name()
wr('<a id="%s"></a><h4>%s</h4>' % ((name,)*2))
if req.hasField('flush_' + name):
factory.flushCache()
wr('<p style="color:green">'
'The servlet cache has been flushed. '
'<input type="submit" name="reload" value="Reload"></p>')
continue
wr(htCache(factory))
wr('</form>')
def htCache(factory):
"""Output the cache of a servlet factory."""
html = []
wr = html.append
cache = factory._classCache
keys = sorted(cache)
wr('<p>Uniqueness: %s</p>' % factory.uniqueness())
wr('<p>Extensions: %s</p>' % ', '.join(map(repr, factory.extensions())))
wr('<p>Unique paths in the servlet cache: <strong>%d</strong>'
' <input type="submit" name="flush_%s" value="Flush"></p>'
% (len(keys), factory.name()))
wr('<p>Click any link to jump to the details for that path.</p>')
wr('<h5>Filenames:</h5>')
wr('<table class="NiceTable">')
wr('<tr><th>File</th><th>Directory</th></tr>')
paths = []
for key in keys:
head, tail = os.path.split(key)
path = dict(dir=head, base=tail, full=key)
paths.append(path)
paths.sort(key=lambda p: (p['base'].lower(), p['dir'].lower()))
# At this point, paths is a list where each element is a dictionary
# with directory name, base name, full path name sorted first by
# base name and second by dir name.
for path in paths:
wr('<tr><td><a href="#id%s">%s</a></td><td>%s</td></tr>'
% (id(path['full']), path['base'], path['dir']))
wr('</table>')
wr('<h5>Full paths:</h5>')
wr('<table class="NiceTable">')
wr('<tr><th>Servlet path</th></tr>')
for key in keys:
wr('<tr><td><a href="#%s">%s</a></td></tr>' % (id(key), key))
wr('</table>')
wr('<h5>Details:</h5>')
wr('<table class="NiceTable">')
for path in paths:
wr('<tr class="NoTable"><td colspan="2">'
'<a id="id%s"></a><strong>%s</strong> - %s</td></tr>'
% (id(path['full']), path['base'], path['dir']))
record = cache[path['full']].copy()
record['path'] = path['full']
if path['full'] in factory._threadsafeServletCache:
record['instances'] = 'one servlet instance (threadsafe)'
else:
record['instances'] = ('free reusable servlets: %d'
% len(factory._servletPool))
wr(htRecord(record))
wr('</table>')
return '\n'.join(html)
def htRecord(record):
html = []
wr = html.append
for key in sorted(record):
htKey = htmlEncode(key)
# determine the HTML for the value
value = record[key]
htValue = None
# check for special cases where we want a custom display
if hasattr(value, '__name__'):
htValue = value.__name__
if key == 'mtime':
htValue = '%s (%s)' % (time.asctime(time.localtime(value)), value)
# the general case:
if not htValue:
htValue = htmlEncode(str(value))
wr('<tr><th>%s</th><td>%s</td></tr>' % (htKey, htValue))
return '\n'.join(html)
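# Illustrative sketch of htRecord() above, which renders a single cache
# record as HTML table rows.  The record contents below are made up, and
# Webware must be importable so the htmlEncode import at the top resolves.
if __name__ == '__main__':
    _sample = {'path': '/Examples/Welcome.py',
               'mtime': 1234567890.0,
               'instances': 'free reusable servlets: 2'}
    print(htRecord(_sample))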
| 37.254098
| 78
| 0.551155
|
import os
import time
from WebKit.URLParser import ServletFactoryManager
from WebUtils.Funcs import htmlEncode
from AdminSecurity import AdminSecurity
class ServletCache(AdminSecurity):
def title(self):
return 'Servlet Cache'
def writeContent(self):
wr = self.writeln
factories = [factory for factory in ServletFactoryManager._factories
if factory._classCache]
if not factories:
wr('<h4>No caching servlet factories found.</h4>')
wr('<p>Caching can be activated by setting'
' <code>CacheServletClasses = True</code>.</p>')
return
if len(factories) > 1:
factories.sort()
wr('<h3>Servlet Factories:</h3>')
wr('<table>')
for factory in factories:
wr('<tr><td><a href="#%s">%s</a></td></tr>'
% ((factory.name(),)*2))
wr('</table>')
req = self.request()
wr('<form action="ServletCache" method="post">')
for factory in factories:
name = factory.name()
wr('<a id="%s"></a><h4>%s</h4>' % ((name,)*2))
if req.hasField('flush_' + name):
factory.flushCache()
wr('<p style="color:green">'
'The servlet cache has been flushed. '
'<input type="submit" name="reload" value="Reload"></p>')
continue
wr(htCache(factory))
wr('</form>')
def htCache(factory):
html = []
wr = html.append
cache = factory._classCache
keys = sorted(cache)
wr('<p>Uniqueness: %s</p>' % factory.uniqueness())
wr('<p>Extensions: %s</p>' % ', '.join(map(repr, factory.extensions())))
wr('<p>Unique paths in the servlet cache: <strong>%d</strong>'
' <input type="submit" name="flush_%s" value="Flush"></p>'
% (len(keys), factory.name()))
wr('<p>Click any link to jump to the details for that path.</p>')
wr('<h5>Filenames:</h5>')
wr('<table class="NiceTable">')
wr('<tr><th>File</th><th>Directory</th></tr>')
paths = []
for key in keys:
head, tail = os.path.split(key)
path = dict(dir=head, base=tail, full=key)
paths.append(path)
paths.sort(key=lambda p: (p['base'].lower(), p['dir'].lower()))
for path in paths:
wr('<tr><td><a href="#id%s">%s</a></td><td>%s</td></tr>'
% (id(path['full']), path['base'], path['dir']))
wr('</table>')
wr('<h5>Full paths:</h5>')
wr('<table class="NiceTable">')
wr('<tr><th>Servlet path</th></tr>')
for key in keys:
wr('<tr><td><a href="#%s">%s</a></td></tr>' % (id(key), key))
wr('</table>')
wr('<h5>Details:</h5>')
wr('<table class="NiceTable">')
for path in paths:
wr('<tr class="NoTable"><td colspan="2">'
'<a id="id%s"></a><strong>%s</strong> - %s</td></tr>'
% (id(path['full']), path['base'], path['dir']))
record = cache[path['full']].copy()
record['path'] = path['full']
if path['full'] in factory._threadsafeServletCache:
record['instances'] = 'one servlet instance (threadsafe)'
else:
record['instances'] = ('free reusable servlets: %d'
% len(factory._servletPool))
wr(htRecord(record))
wr('</table>')
return '\n'.join(html)
def htRecord(record):
html = []
wr = html.append
for key in sorted(record):
htKey = htmlEncode(key)
value = record[key]
htValue = None
if hasattr(value, '__name__'):
htValue = value.__name__
if key == 'mtime':
htValue = '%s (%s)' % (time.asctime(time.localtime(value)), value)
if not htValue:
htValue = htmlEncode(str(value))
wr('<tr><th>%s</th><td>%s</td></tr>' % (htKey, htValue))
return '\n'.join(html)
| true
| true
|
79074935d87c58a8e4e5cc0f35bd1e715da2edc1
| 1,857
|
py
|
Python
|
docs/conf.py
|
keathmilligan/flask-quickstart
|
9a5047135ab4d43c5046a892d4f677a7251f5c62
|
[
"MIT"
] | 16
|
2016-11-23T15:36:41.000Z
|
2021-11-04T07:01:50.000Z
|
docs/conf.py
|
keathmilligan/flask-jwt-refresh
|
9a5047135ab4d43c5046a892d4f677a7251f5c62
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
keathmilligan/flask-jwt-refresh
|
9a5047135ab4d43c5046a892d4f677a7251f5c62
|
[
"MIT"
] | 6
|
2016-12-08T23:23:38.000Z
|
2021-04-20T16:35:57.000Z
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'sample'
copyright = '2020, Sample Author'
author = 'Sample Author'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
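# The empty ``extensions`` list above is where Sphinx extensions are enabled.
# A typical addition (hypothetical for this project) that turns on API
# documentation generation and "view source" links would look like:
#
# extensions = [
#     'sphinx.ext.autodoc',
#     'sphinx.ext.viewcode',
# ]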
| 35.711538
| 79
| 0.661282
|
project = 'sample'
copyright = '2020, Sample Author'
author = 'Sample Author'
extensions = [
]
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
html_theme = 'alabaster'
html_static_path = ['_static']
| true
| true
|
790749a95107925172b4974d5e2ee596c1881b1d
| 2,854
|
py
|
Python
|
airflow/operators/generic_transfer.py
|
findpace/incubator-airflow
|
f136c5b8f0054e9890b266adfe6624358cd610a2
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
airflow/operators/generic_transfer.py
|
findpace/incubator-airflow
|
f136c5b8f0054e9890b266adfe6624358cd610a2
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
airflow/operators/generic_transfer.py
|
findpace/incubator-airflow
|
f136c5b8f0054e9890b266adfe6624358cd610a2
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.hooks.base_hook import BaseHook
class GenericTransfer(BaseOperator):
"""
    Moves data from one connection to another, assuming that they both
    provide the required methods in their respective hooks. The source hook
    needs to expose a `get_records` method, and the destination an
    `insert_rows` method.
    This is meant to be used on small-ish datasets that fit in memory.
:param sql: SQL query to execute against the source database
:type sql: str
:param destination_table: target table
:type destination_table: str
:param source_conn_id: source connection
:type source_conn_id: str
    :param destination_conn_id: destination connection
:type destination_conn_id: str
:param preoperator: sql statement or list of statements to be
executed prior to loading the data
:type preoperator: str or list of str
"""
template_fields = ('sql', 'destination_table', 'preoperator')
template_ext = ('.sql', '.hql',)
ui_color = '#b0f07c'
@apply_defaults
def __init__(
self,
sql,
destination_table,
source_conn_id,
destination_conn_id,
preoperator=None,
*args, **kwargs):
super(GenericTransfer, self).__init__(*args, **kwargs)
self.sql = sql
self.destination_table = destination_table
self.source_conn_id = source_conn_id
self.destination_conn_id = destination_conn_id
self.preoperator = preoperator
def execute(self, context):
source_hook = BaseHook.get_hook(self.source_conn_id)
self.logger.info("Extracting data from %s", self.source_conn_id)
self.logger.info("Executing: \n %s", self.sql)
results = source_hook.get_records(self.sql)
destination_hook = BaseHook.get_hook(self.destination_conn_id)
if self.preoperator:
self.logger.info("Running preoperator")
self.logger.info(self.preoperator)
destination_hook.run(self.preoperator)
self.logger.info("Inserting rows into %s", self.destination_conn_id)
destination_hook.insert_rows(table=self.destination_table, rows=results)
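# The class docstring above describes copying rows from a source connection
# into a destination table.  Below is one minimal, illustrative way to wire
# the operator into a DAG; the dag id, connection ids, table name and SQL are
# hypothetical, and an Airflow 1.x installation is assumed.
if __name__ == '__main__':
    from datetime import datetime
    from airflow import DAG
    with DAG('generic_transfer_example',
             start_date=datetime(2017, 1, 1),
             schedule_interval=None) as dag:
        copy_rows = GenericTransfer(
            task_id='copy_rows',
            sql='SELECT * FROM source_table',             # run on the source
            destination_table='destination_table',
            source_conn_id='source_db',                   # hypothetical ids
            destination_conn_id='destination_db',
            preoperator='DELETE FROM destination_table')  # optional pre-load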
| 37.552632
| 80
| 0.700771
|
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.hooks.base_hook import BaseHook
class GenericTransfer(BaseOperator):
template_fields = ('sql', 'destination_table', 'preoperator')
template_ext = ('.sql', '.hql',)
ui_color = '#b0f07c'
@apply_defaults
def __init__(
self,
sql,
destination_table,
source_conn_id,
destination_conn_id,
preoperator=None,
*args, **kwargs):
super(GenericTransfer, self).__init__(*args, **kwargs)
self.sql = sql
self.destination_table = destination_table
self.source_conn_id = source_conn_id
self.destination_conn_id = destination_conn_id
self.preoperator = preoperator
def execute(self, context):
source_hook = BaseHook.get_hook(self.source_conn_id)
self.logger.info("Extracting data from %s", self.source_conn_id)
self.logger.info("Executing: \n %s", self.sql)
results = source_hook.get_records(self.sql)
destination_hook = BaseHook.get_hook(self.destination_conn_id)
if self.preoperator:
self.logger.info("Running preoperator")
self.logger.info(self.preoperator)
destination_hook.run(self.preoperator)
self.logger.info("Inserting rows into %s", self.destination_conn_id)
destination_hook.insert_rows(table=self.destination_table, rows=results)
| true
| true
|
790749b1fa46b50dddce602d374e171d84cf4597
| 35,929
|
py
|
Python
|
tensorflow/python/kernel_tests/variables_test.py
|
jray319/tensorflow
|
5cdf8f26c806e893e0773ad34e2b59008cc6f8ec
|
[
"Apache-2.0"
] | 4
|
2020-06-28T08:25:36.000Z
|
2021-08-12T12:41:34.000Z
|
tensorflow/python/kernel_tests/variables_test.py
|
jray319/tensorflow
|
5cdf8f26c806e893e0773ad34e2b59008cc6f8ec
|
[
"Apache-2.0"
] | 2
|
2021-08-25T16:12:24.000Z
|
2022-02-10T02:04:13.000Z
|
tensorflow/python/kernel_tests/variables_test.py
|
jray319/tensorflow
|
5cdf8f26c806e893e0773ad34e2b59008cc6f8ec
|
[
"Apache-2.0"
] | 4
|
2019-11-28T12:18:07.000Z
|
2021-08-01T16:12:17.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import operator
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import compat
class VariablesTestCase(test.TestCase, parameterized.TestCase):
@test_util.run_deprecated_v1
def testDistributeStrategy(self):
v = variables.VariableV1(0.0)
self.assertIsNone(v._distribute_strategy)
@test_util.run_v1_only("b/120545219")
def testInitialization(self):
with self.cached_session():
var0 = variables.VariableV1(0.0)
self.assertEqual("Variable:0", var0.name)
self.assertEqual("Variable", var0._shared_name)
self.assertEqual([], var0.get_shape())
self.assertEqual([], var0.get_shape())
self.assertEqual([], var0.shape)
var1 = variables.VariableV1(1.1)
self.assertEqual("Variable_1:0", var1.name)
self.assertEqual("Variable_1", var1._shared_name)
self.assertEqual([], var1.get_shape())
self.assertEqual([], var1.get_shape())
self.assertEqual([], var1.shape)
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(var0)
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(var1)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(0.0, self.evaluate(var0))
self.assertAllClose(1.1, self.evaluate(var1))
@test_util.run_v1_only("b/120545219")
def testInitializationOrder(self):
with self.cached_session():
rnd = variables.Variable(random_ops.random_uniform([3, 6]), name="rnd")
self.assertEqual("rnd:0", rnd.name)
self.assertEqual([3, 6], rnd.get_shape())
self.assertEqual([3, 6], rnd.get_shape())
self.assertEqual([3, 6], rnd.shape)
dep = variables.Variable(rnd.initialized_value(), name="dep")
self.assertEqual("dep:0", dep.name)
self.assertEqual([3, 6], dep.get_shape())
self.assertEqual([3, 6], dep.get_shape())
self.assertEqual([3, 6], dep.shape)
# Currently have to set the shape manually for Add.
added_val = rnd.initialized_value() + dep.initialized_value() + 2.0
added_val.set_shape(rnd.get_shape())
depdep = variables.Variable(added_val, name="depdep")
self.assertEqual("depdep:0", depdep.name)
self.assertEqual([3, 6], depdep.get_shape())
self.assertEqual([3, 6], depdep.get_shape())
self.assertEqual([3, 6], depdep.shape)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(self.evaluate(rnd), self.evaluate(dep))
self.assertAllClose(
self.evaluate(rnd) + self.evaluate(dep) + 2.0, self.evaluate(depdep))
@test_util.run_deprecated_v1
def testCyclicInitializer(self):
with self.cached_session():
cyclic = control_flow_ops.while_loop(
cond=lambda i: i < 10,
body=lambda i: i + 1,
loop_vars=(constant_op.constant(0),))
initial_value = variables._try_guard_against_uninitialized_dependencies(
"test", cyclic)
self.assertIs(initial_value, cyclic)
def testIterable(self):
with self.assertRaisesRegex(TypeError, "not iterable"):
for _ in variables.Variable(0.0):
pass
with self.assertRaisesRegex(TypeError, "not iterable"):
for _ in variables.Variable([0.0, 1.0]):
pass
@test_util.run_deprecated_v1
def testAssignments(self):
with self.cached_session():
var = variables.Variable(0.0)
plus_one = var.assign_add(1.0)
minus_one = var.assign_sub(2.0)
four = var.assign(4.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(0.0, self.evaluate(var))
self.assertAllClose(1.0, self.evaluate(plus_one))
self.assertAllClose(1.0, self.evaluate(var))
self.assertAllClose(-1.0, self.evaluate(minus_one))
self.assertAllClose(-1.0, self.evaluate(var))
self.assertAllClose(4.0, self.evaluate(four))
self.assertAllClose(4.0, self.evaluate(var))
@test_util.run_deprecated_v1
def testResourceAssignments(self):
with self.session(use_gpu=True):
var = resource_variable_ops.ResourceVariable(0.0)
plus_one = var.assign_add(1.0)
minus_one = var.assign_sub(2.0)
four = var.assign(4.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(0.0, self.evaluate(var))
self.evaluate(plus_one)
self.assertAllClose(1.0, self.evaluate(var))
self.evaluate(minus_one)
self.assertAllClose(-1.0, self.evaluate(var))
self.evaluate(four)
self.assertAllClose(4.0, self.evaluate(var))
def testAssignDifferentShapesEagerNotAllowed(self):
with context.eager_mode():
var = variables.Variable(np.zeros(shape=[1, 1]))
with self.assertRaisesRegex(ValueError, "Shapes.*and.*are incompatible"):
var.assign(np.zeros(shape=[2, 2]))
@test_util.disable_tfrt("Graph is not supported yet. b/156187905")
@test_util.run_in_graph_and_eager_modes
def testAssignDifferentShapesAllowed(self):
var = variables.Variable(np.zeros(shape=[1, 1]),
shape=tensor_shape.TensorShape(None))
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(np.zeros(shape=[1, 1]), var.read_value())
self.evaluate(var.assign(np.zeros(shape=[2, 2])))
self.assertAllEqual(np.zeros(shape=[2, 2]), var.read_value())
@test_util.disable_tfrt("GetHostSize() is not expected to be called with "
"string type. b/156761465")
def testZeroSizeStringAssign(self):
with self.cached_session() as sess:
array = variables.VariableV1(
initial_value=array_ops.zeros((0,), dtype=dtypes.string),
name="foo",
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
self.evaluate(variables.local_variables_initializer())
old_value = array.value()
copy_op = array.assign(old_value)
self.assertEqual([], list(self.evaluate(copy_op)))
def _countUpToTest(self, dtype):
with self.cached_session():
zero = constant_op.constant(0, dtype=dtype)
var = variables.Variable(zero)
count_up_to = var.count_up_to(3)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(0, self.evaluate(var))
self.assertEqual(0, self.evaluate(count_up_to))
self.assertEqual(1, self.evaluate(var))
self.assertEqual(1, self.evaluate(count_up_to))
self.assertEqual(2, self.evaluate(var))
self.assertEqual(2, self.evaluate(count_up_to))
self.assertEqual(3, self.evaluate(var))
with self.assertRaisesOpError("Reached limit of 3"):
self.evaluate(count_up_to)
self.assertEqual(3, self.evaluate(var))
with self.assertRaisesOpError("Reached limit of 3"):
self.evaluate(count_up_to)
self.assertEqual(3, self.evaluate(var))
@test_util.run_deprecated_v1
def testCountUpToInt32(self):
self._countUpToTest(dtypes.int32)
@test_util.run_deprecated_v1
def testCountUpToInt64(self):
self._countUpToTest(dtypes.int64)
@test_util.run_v1_only("b/120545219")
def testControlDepsNone(self):
with self.cached_session():
c = constant_op.constant(1.0)
with ops.control_dependencies([c]):
        # d gets the control dep.
d = constant_op.constant(2.0)
# variables do not.
var_x = variables.VariableV1(2.0)
self.assertEqual([c.op], d.op.control_inputs)
self.assertEqual([], var_x.initializer.control_inputs)
self.assertEqual([], var_x.value().op.control_inputs)
self.assertEqual([], var_x._ref().op.control_inputs) # pylint: disable=protected-access
@test_util.run_v1_only("b/120545219")
def testControlFlow(self):
with self.cached_session() as sess:
v0 = variables.Variable(0, name="v0")
var_dict = {}
      # Create a variable in each of the cond clauses.
def var_in_then_clause():
v1 = variables.Variable(1, name="v1")
var_dict["v1"] = v1
return v1 + v0
def var_in_else_clause():
v2 = variables.Variable(2, name="v2")
var_dict["v2"] = v2
return v2 + v0
add = control_flow_ops.cond(
math_ops.less(v0, 10), var_in_then_clause, var_in_else_clause)
v1 = var_dict["v1"]
v2 = var_dict["v2"]
# We should be able to initialize and run v1 and v2 without initializing
# v0, even if the variable was created with a control dep on v0.
self.evaluate(v1.initializer)
self.assertEqual([1], self.evaluate(v1))
self.evaluate(v2.initializer)
self.assertEqual([2], self.evaluate(v2))
# v0 should still be uninitialized.
with self.assertRaisesRegex(errors_impl.OpError, "uninitialized"):
self.evaluate(v0)
# We should not be able to run 'add' yet.
with self.assertRaisesRegex(errors_impl.OpError, "uninitialized"):
self.evaluate(add)
# If we initialize v0 we should be able to run 'add'.
self.evaluate(v0.initializer)
self.evaluate(add)
@test_util.run_v1_only("b/120545219")
def testControlFlowInitialization(self):
"""Expects an error if an initializer is in a control-flow scope."""
def cond(i, _):
return i < 10
def body(i, _):
zero = array_ops.zeros([], dtype=dtypes.int32)
v = variables.Variable(initial_value=zero)
return (i + 1, v.read_value())
with self.assertRaisesRegex(ValueError, "inside a control-flow"):
control_flow_ops.while_loop(cond, body, [0, 0])
@test_util.run_deprecated_v1
def testUseVariableAsTensor(self):
with self.cached_session():
var_x = variables.Variable(2.0)
var_y = variables.Variable(3.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(2.0, self.evaluate(var_x))
self.assertAllClose(3.0, self.evaluate(var_y))
self.assertAllClose(5.0, self.evaluate(math_ops.add(var_x, var_y)))
@test_util.run_deprecated_v1
def testZeroSizeVarSameAsConst(self):
with self.cached_session():
zero_size_var = variables.Variable(array_ops.zeros([0, 2]))
zero_size_const = array_ops.ones([2, 0])
variable_mul = math_ops.matmul(zero_size_const, zero_size_var)
const_mul = math_ops.matmul(
zero_size_const, zero_size_const, transpose_b=True)
self.evaluate(variables.global_variables_initializer())
variable_output = self.evaluate(variable_mul)
self.assertAllClose(self.evaluate(const_mul), variable_output)
self.assertAllClose([[0., 0.], [0., 0.]], variable_output)
@test_util.run_deprecated_v1
def testCachingDevice(self):
with self.cached_session():
var = variables.Variable(2.0)
self.assertEqual(var.device, var.initialized_value().device)
var_cached = variables.Variable(2.0, caching_device="/job:foo")
self.assertFalse(var_cached.device.startswith("/job:foo"))
self.assertTrue(var_cached.value().device.startswith("/job:foo"))
@test_util.run_deprecated_v1
def testCollections(self):
with self.cached_session():
var_x = variables.VariableV1(2.0)
var_y = variables.VariableV1(2.0, trainable=False)
var_z = variables.VariableV1(2.0, trainable=True)
var_t = variables.VariableV1(
2.0,
trainable=True,
collections=[
ops.GraphKeys.TRAINABLE_VARIABLES, ops.GraphKeys.GLOBAL_VARIABLES
])
self.assertEqual([var_x, var_y, var_z, var_t],
variables.global_variables())
self.assertEqual([var_x, var_z, var_t], variables.trainable_variables())
@test_util.run_deprecated_v1
def testCollectionsWithScope(self):
with self.cached_session():
with ops.name_scope("scope_1"):
var_x = variables.VariableV1(2.0)
with ops.name_scope("scope_2"):
var_y = variables.VariableV1(2.0)
self.assertEqual([var_x, var_y], variables.global_variables())
self.assertEqual([var_x], variables.global_variables("scope_1"))
self.assertEqual([var_y], variables.global_variables("scope_2"))
self.assertEqual([var_x, var_y], variables.trainable_variables())
self.assertEqual([var_x], variables.trainable_variables("scope_1"))
self.assertEqual([var_y], variables.trainable_variables("scope_2"))
def testOperatorWrapping(self):
for attr in functools.WRAPPER_ASSIGNMENTS:
self.assertEqual(
getattr(variables.Variable.__add__, attr),
getattr(ops.Tensor.__add__, attr))
@test_util.run_deprecated_v1
def testOperators(self):
with self.cached_session():
var_f = variables.Variable([2.0])
add = var_f + 0.0
radd = 1.0 + var_f
sub = var_f - 1.0
rsub = 1.0 - var_f
mul = var_f * 10.0
rmul = 10.0 * var_f
div = var_f / 10.0
rdiv = 10.0 / var_f
lt = var_f < 3.0
rlt = 3.0 < var_f
le = var_f <= 2.0
rle = 2.0 <= var_f
gt = var_f > 3.0
rgt = 3.0 > var_f
ge = var_f >= 2.0
rge = 2.0 >= var_f
neg = -var_f
abs_v = abs(var_f)
var_i = variables.Variable([20])
mod = var_i % 7
rmod = 103 % var_i
var_b = variables.Variable([True, False])
and_v = operator.and_(var_b, [True, True])
or_v = operator.or_(var_b, [False, True])
xor_v = operator.xor(var_b, [False, False])
invert_v = ~var_b
rnd = np.random.rand(4, 4).astype("f")
var_t = variables.Variable(rnd)
slice_v = var_t[2, 0:0]
var_m = variables.Variable([[2.0, 3.0]])
matmul = var_m.__matmul__([[10.0], [20.0]])
rmatmul = var_m.__rmatmul__([[10.0], [20.0]])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([2.0], self.evaluate(add))
self.assertAllClose([3.0], self.evaluate(radd))
self.assertAllClose([1.0], self.evaluate(sub))
self.assertAllClose([-1.0], self.evaluate(rsub))
self.assertAllClose([20.0], self.evaluate(mul))
self.assertAllClose([20.0], self.evaluate(rmul))
self.assertAllClose([0.2], self.evaluate(div))
self.assertAllClose([5.0], self.evaluate(rdiv))
self.assertAllClose([-2.0], self.evaluate(neg))
self.assertAllClose([2.0], self.evaluate(abs_v))
self.assertAllClose([True], self.evaluate(lt))
self.assertAllClose([False], self.evaluate(rlt))
self.assertAllClose([True], self.evaluate(le))
self.assertAllClose([True], self.evaluate(rle))
self.assertAllClose([False], self.evaluate(gt))
self.assertAllClose([True], self.evaluate(rgt))
self.assertAllClose([True], self.evaluate(ge))
self.assertAllClose([True], self.evaluate(rge))
self.assertAllClose([6], self.evaluate(mod))
self.assertAllClose([3], self.evaluate(rmod))
self.assertAllClose([True, False], self.evaluate(and_v))
self.assertAllClose([True, True], self.evaluate(or_v))
self.assertAllClose([True, False], self.evaluate(xor_v))
self.assertAllClose([False, True], self.evaluate(invert_v))
self.assertAllClose(rnd[2, 0:0], self.evaluate(slice_v))
self.assertAllClose([[80.0]], self.evaluate(matmul))
self.assertAllClose([[20.0, 30.0], [40.0, 60.0]], self.evaluate(rmatmul))
@test_util.run_deprecated_v1
def testSession(self):
with self.cached_session() as sess:
var = variables.Variable([1, 12])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([1, 12], self.evaluate(var))
@test_util.run_v1_only("b/120545219")
def testColocation(self):
with ops.device("/job:ps"):
var = variables.VariableV1(0, name="v")
with ops.device("/job:worker/task:7"):
assign_op = var.assign(1)
self.assertDeviceEqual("/job:ps", assign_op.device)
self.assertEqual([b"loc:@v"], assign_op.op.colocation_groups())
@test_util.run_v1_only("b/120545219")
def testInitializerFunction(self):
value = [[-42], [133.7]]
shape = [2, 1]
with self.cached_session():
initializer = lambda: constant_op.constant(value)
v1 = variables.Variable(initializer, dtype=dtypes.float32)
self.assertEqual(shape, v1.get_shape())
self.assertEqual(shape, v1.shape)
self.assertAllClose(value, self.evaluate(v1.initial_value))
with self.assertRaises(errors_impl.FailedPreconditionError):
self.evaluate(v1)
v2 = variables.Variable(
math_ops.negative(v1.initialized_value()), dtype=dtypes.float32)
self.assertEqual(v1.get_shape(), v2.get_shape())
self.assertEqual(v1.shape, v2.shape)
self.assertAllClose(np.negative(value), self.evaluate(v2.initial_value))
with self.assertRaises(errors_impl.FailedPreconditionError):
self.evaluate(v2)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(np.negative(value), self.evaluate(v2))
def testConstraintArg(self):
constraint = lambda x: x
v = variables.Variable(
lambda: constant_op.constant(1.),
constraint=constraint)
self.assertEqual(v.constraint, constraint)
constraint = 0
with self.assertRaises(ValueError):
v = variables.Variable(
lambda: constant_op.constant(1.),
constraint=constraint)
@test_util.run_v1_only("b/120545219")
def testNoRefDataRace(self):
with self.cached_session():
a = variables.Variable([1, 2, 3], dtype=dtypes.float32)
b = variables.Variable(a.initialized_value() + 2)
c = variables.Variable(b.initialized_value() + 2)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate(a), [1, 2, 3])
self.assertAllEqual(self.evaluate(b), [3, 4, 5])
self.assertAllEqual(self.evaluate(c), [5, 6, 7])
@test_util.run_deprecated_v1
def testInitializerFunctionDevicePlacement(self):
with self.cached_session():
initializer = lambda: constant_op.constant(42.0)
with ops.device("/cpu:100"):
v1 = variables.Variable(initializer, dtype=dtypes.float32, name="v1")
expected_device = "/device:CPU:100"
expected_group_v1 = [b"loc:@v1"]
self.assertEqual(expected_device, v1.op.device)
self.assertEqual(expected_group_v1, v1.op.colocation_groups())
for i in v1.initializer.inputs:
self.assertEqual(expected_group_v1, i.op.colocation_groups())
v2 = variables.Variable(initializer, dtype=dtypes.float32, name="v2")
expected_group_v2 = [b"loc:@v2"]
self.assertEqual(expected_group_v2, v2.op.colocation_groups())
for i in v2.initializer.inputs:
self.assertEqual(expected_group_v2, i.op.colocation_groups())
@test_util.run_v1_only("b/120545219")
def testVariableDefInitializedInstances(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v_def = variables.Variable(
initial_value=constant_op.constant(3.0)).to_proto()
with ops.Graph().as_default(), self.cached_session() as sess:
# v describes a VariableDef-based variable without an initial value.
v = variables.Variable(variable_def=v_def)
self.assertEqual(3.0, self.evaluate(v.initialized_value()))
# initialized_value should not rerun the initializer_op if the variable
# has already been initialized elsewhere.
self.evaluate(v.assign(1.0))
self.assertEqual(1.0, self.evaluate(v.initialized_value()))
v_def.ClearField("initial_value_name")
with ops.Graph().as_default(), self.cached_session() as sess:
# Restoring a legacy VariableDef proto that does not have
# initial_value_name set should still work.
v = variables.Variable(variable_def=v_def)
# We should also be able to re-export the variable to a new meta graph.
self.assertProtoEquals(v_def, v.to_proto())
# But attempts to use initialized_value will result in errors.
with self.assertRaises(ValueError):
self.evaluate(v.initialized_value())
def testTrainableInProto(self):
with ops.Graph().as_default():
non_trainable_variable = variables.Variable(
trainable=False,
initial_value=constant_op.constant(10.0))
self.assertEqual(
False,
variables.Variable(variable_def=non_trainable_variable.to_proto())
.trainable)
trainable_variable = variables.Variable(
trainable=True,
initial_value=constant_op.constant(10.0))
self.assertEqual(
True,
variables.Variable(variable_def=trainable_variable.to_proto())
.trainable)
def testSynchronizationAndAggregationSaved(self):
with ops.Graph().as_default():
original_variable = variables.Variable(
initial_value=constant_op.constant(10.0),
synchronization=variables.VariableSynchronization.NONE,
aggregation=variables.VariableAggregationV2.ONLY_FIRST_REPLICA)
self.assertEqual(variables.VariableSynchronization.NONE,
original_variable.synchronization)
self.assertEqual(variables.VariableAggregation.ONLY_FIRST_REPLICA,
original_variable.aggregation)
laundered = variables.Variable(
variable_def=original_variable.to_proto())
self.assertEqual(
variables.VariableSynchronization.NONE,
laundered.synchronization)
self.assertEqual(variables.VariableAggregationV2.ONLY_FIRST_REPLICA,
laundered.aggregation)
@test_util.run_deprecated_v1
def testLoad(self):
with self.cached_session():
var = variables.Variable(np.zeros((5, 5), np.float32))
self.evaluate(variables.global_variables_initializer())
var.load(np.ones((5, 5), np.float32))
self.assertAllClose(np.ones((5, 5), np.float32), self.evaluate(var))
@test_util.run_v1_only("b/120545219")
def testRepr(self):
var = variables.VariableV1(np.zeros((5, 5), np.float32), name="noop")
self.assertEqual(
"<tf.Variable 'noop:0' shape=(5, 5) dtype=float32_ref>",
repr(var))
def testVariableNamesPreserveNameScopesWithDefun(self):
@function.defun
def create_variable():
with ops.name_scope("foo"):
v = variables.Variable(0.0, name="bar")
self.assertEqual(v.name, "foo/bar:0")
with ops.get_default_graph().as_default():
create_variable()
@parameterized.parameters(variables.VariableV1, variables.Variable)
def testTrainableVariable(self, cls):
v1 = cls(1.0)
self.assertEqual(True, v1.trainable)
v2 = cls(1.0, synchronization=variables.VariableSynchronization.ON_READ)
self.assertEqual(False, v2.trainable)
v3 = cls(1.0, synchronization=variables.VariableSynchronization.ON_READ,
trainable=True)
self.assertEqual(True, v3.trainable)
v4 = cls(1.0, synchronization=variables.VariableSynchronization.ON_READ,
trainable=False)
self.assertEqual(False, v4.trainable)
class IsInitializedTest(test.TestCase):
def testNoVars(self):
with ops.Graph().as_default(), self.cached_session() as sess:
uninited = variables.report_uninitialized_variables()
self.assertEqual(0, self.evaluate(uninited).size)
def testAssertVariablesInitialized(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.Variable([1, 2], name="v")
w = variables.Variable([3, 4], name="w")
_ = v, w
uninited = variables.report_uninitialized_variables()
self.assertAllEqual(np.array([b"v", b"w"]), self.evaluate(uninited))
self.evaluate(variables.global_variables_initializer())
self.assertEqual(0, self.evaluate(uninited).size)
@test_util.run_v1_only("b/120545219")
def testVariableList(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1([1, 2], name="v")
w = variables.VariableV1([3, 4], name="w")
uninited = variables.report_uninitialized_variables()
self.assertAllEqual(np.array([b"v", b"w"]), self.evaluate(uninited))
self.evaluate(w.initializer)
self.assertAllEqual(np.array([b"v"]), self.evaluate(uninited))
v.initializer.run()
self.assertEqual(0, self.evaluate(uninited).size)
def testZeroSizeVarInitialized(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.Variable(array_ops.zeros([0, 2]), name="v")
uninited = variables.report_uninitialized_variables()
v.initializer.run() # not strictly necessary
self.assertEqual(0, self.evaluate(uninited).size)
def testTrainingWithZeroSizeVar(self):
with ops.Graph().as_default(), self.cached_session() as sess:
a = variables.Variable(array_ops.zeros([0, 2]))
b = variables.Variable(array_ops.ones([2, 2]))
objective = math_ops.reduce_sum(b + math_ops.matmul(
a, a, transpose_a=True))
self.evaluate(variables.global_variables_initializer())
do_opt = gradient_descent.GradientDescentOptimizer(0.1).minimize(
objective)
self.evaluate([do_opt])
self.assertAllClose([[0.9, 0.9], [0.9, 0.9]], self.evaluate(b))
@test_util.run_v1_only("b/120545219")
class ObsoleteIsInitializedTest(test.TestCase):
def testNoVars(self):
with ops.Graph().as_default():
self.assertEqual(None, variables.assert_variables_initialized())
def testVariables(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1([1, 2])
w = variables.VariableV1([3, 4])
_ = v, w
inited = variables.assert_variables_initialized()
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(inited)
self.evaluate(variables.global_variables_initializer())
self.evaluate(inited)
def testVariableList(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1([1, 2])
w = variables.VariableV1([3, 4])
inited = variables.assert_variables_initialized([v])
with self.assertRaisesOpError("Attempting to use uninitialized value"):
inited.op.run()
self.evaluate(w.initializer)
with self.assertRaisesOpError("Attempting to use uninitialized value"):
inited.op.run()
v.initializer.run()
inited.op.run()
class PartitionedVariableTest(test.TestCase):
def testPartitionedVariable(self):
with ops.Graph().as_default():
v0 = variables.Variable([0])
v1 = variables.Variable([1])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
v1._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [1], [1]))
partitions = [2]
# Pass variable_list as [v1, v0] to ensure they are properly
# re-sorted to [v0, v1] based on their slice info offsets.
partitioned_variable = variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v1, v0],
partitions=partitions)
concatenated = ops.convert_to_tensor(partitioned_variable)
num_partitions = len(partitioned_variable)
iterated_partitions = list(partitioned_variable)
self.assertEqual(2, num_partitions)
self.assertEqual([v0, v1], iterated_partitions)
self.assertEqual([2], partitioned_variable.get_shape())
self.assertEqual([2], partitioned_variable.shape)
self.assertEqual([2], concatenated.get_shape())
self.assertEqual([2], concatenated.shape)
def testPartitionedVariableFailures(self):
with ops.Graph().as_default():
with self.assertRaisesRegex(ValueError, "empty"):
variables.PartitionedVariable(
name="fail",
shape=2,
dtype=dtypes.int32,
variable_list=[],
partitions=[])
with self.assertRaisesRegex(ValueError, "must have a save_slice_info"):
v0 = variables.Variable([0])
partitions = [1]
variables.PartitionedVariable(
name="two_vars",
shape=[1],
dtype=v0.dtype,
variable_list=[v0],
partitions=partitions)
with self.assertRaisesRegex(ValueError, "full shapes must match"):
v0 = variables.Variable([0])
v1 = variables.Variable([1])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
v1._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [1], [1]))
partitions = [2]
variables.PartitionedVariable(
name="two_vars",
shape=[3],
dtype=v0.dtype,
variable_list=[v1, v0],
partitions=partitions)
with self.assertRaisesRegex(ValueError, "must be positive"):
v0 = variables.Variable([0])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
partitions = [0]
variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v0],
partitions=partitions)
def testPartitionedVariableAssignments(self):
with ops.Graph().as_default(), self.cached_session():
v0 = variables.Variable(initial_value=[0.0])
v1 = variables.Variable(initial_value=[1.0])
v2 = variables.Variable(initial_value=[20.0])
v3 = variables.Variable(initial_value=[30.0])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
v1._set_save_slice_info(
variables.Variable.SaveSliceInfo(v1.name, [2], [1], [1]))
v2._set_save_slice_info(
variables.Variable.SaveSliceInfo(v2.name, [2], [0], [1]))
v3._set_save_slice_info(
variables.Variable.SaveSliceInfo(v3.name, [2], [1], [1]))
partitions = [2]
# Pass variable_list as [v1, v0] to ensure they are properly
# re-sorted to [v0, v1] based on their slice info offsets.
pv_0 = variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v0, v1],
partitions=partitions)
pv_1 = variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v2, v3],
partitions=partitions)
deltas_a = constant_op.constant([1.0, 2.0])
deltas_b = constant_op.constant([3.0, 4.0])
ones = array_ops.ones([2])
plus_delta = pv_0.assign_add(deltas_a)
minus_delta = pv_0.assign_sub(deltas_b)
assign_ones = pv_0.assign(ones)
c_0 = constant_op.constant([2.0])
c_1 = constant_op.constant([3.0])
assign_list = pv_1.assign([c_0, c_1])
assign_part_value = pv_1.assign_add(assign_ones)
assign_part_var = pv_1.assign_sub(pv_0)
self.evaluate(variables.global_variables_initializer())
self.assertEqual([1.0], self.evaluate(plus_delta[0]))
self.assertEqual([1.0], self.evaluate(v0))
self.assertEqual([3.0], self.evaluate(plus_delta[1]))
self.assertEqual([3.0], self.evaluate(v1))
self.assertEqual([-2.0], self.evaluate(minus_delta[0]))
self.assertEqual([-2.0], self.evaluate(v0))
self.assertEqual([-1.0], self.evaluate(minus_delta[1]))
self.assertEqual([-1.0], self.evaluate(v1))
self.assertEqual([1.0], self.evaluate(assign_ones[0]))
self.assertEqual([1.0], self.evaluate(v0))
self.assertEqual([1.0], self.evaluate(assign_ones[1]))
self.assertEqual([1.0], self.evaluate(v1))
self.assertEqual([2.0], self.evaluate(assign_list[0]))
self.assertEqual([2.0], self.evaluate(v2))
self.assertEqual([3.0], self.evaluate(assign_list[1]))
self.assertEqual([3.0], self.evaluate(v3))
self.assertEqual([3.0], self.evaluate(assign_part_value[0]))
self.assertEqual([3.0], self.evaluate(v2))
self.assertEqual([4.0], self.evaluate(assign_part_value[1]))
self.assertEqual([4.0], self.evaluate(v3))
self.assertEqual([2.0], self.evaluate(assign_part_var[0]))
self.assertEqual([2.0], self.evaluate(v2))
self.assertEqual([3.0], self.evaluate(assign_part_var[1]))
self.assertEqual([3.0], self.evaluate(v3))
class VariableContainerTest(test.TestCase):
def testContainer(self):
with ops.Graph().as_default():
v0 = variables.Variable([0])
with ops.container("l1"):
v1 = variables.Variable([1])
with ops.container("l2"):
v2 = variables.Variable([2])
special_v = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="VariableInL3",
container="l3",
shared_name="")
v3 = variables.Variable([3])
v4 = variables.Variable([4])
self.assertEqual(compat.as_bytes(""), v0.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l1"), v1.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l2"), v2.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l3"), special_v.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l1"), v3.op.get_attr("container"))
self.assertEqual(compat.as_bytes(""), v4.op.get_attr("container"))
class AggregationModesTest(test.TestCase):
def testV1V2Equal(self):
v1 = variables.VariableAggregation
v2 = variables.VariableAggregationV2
self.assertEqual(v1.NONE, v2.NONE)
self.assertEqual(v1.SUM, v2.SUM)
self.assertEqual(v1.MEAN, v2.MEAN)
self.assertEqual(v1.ONLY_FIRST_REPLICA, v2.ONLY_FIRST_REPLICA)
self.assertEqual(v1.ONLY_FIRST_TOWER, v2.ONLY_FIRST_REPLICA)
self.assertEqual(v2.NONE, v1.NONE)
self.assertEqual(v2.SUM, v1.SUM)
self.assertEqual(v2.MEAN, v1.MEAN)
self.assertEqual(v2.ONLY_FIRST_REPLICA, v1.ONLY_FIRST_REPLICA)
self.assertEqual(v2.ONLY_FIRST_REPLICA, v1.ONLY_FIRST_TOWER)
self.assertEqual(hash(v1.NONE), hash(v2.NONE))
self.assertEqual(hash(v1.SUM), hash(v2.SUM))
self.assertEqual(hash(v1.MEAN), hash(v2.MEAN))
self.assertEqual(hash(v1.ONLY_FIRST_REPLICA), hash(v2.ONLY_FIRST_REPLICA))
self.assertEqual(hash(v1.ONLY_FIRST_TOWER), hash(v2.ONLY_FIRST_REPLICA))
if __name__ == "__main__":
test.main()
| 38.926327
| 94
| 0.676863
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import operator
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import compat
class VariablesTestCase(test.TestCase, parameterized.TestCase):
@test_util.run_deprecated_v1
def testDistributeStrategy(self):
v = variables.VariableV1(0.0)
self.assertIsNone(v._distribute_strategy)
@test_util.run_v1_only("b/120545219")
def testInitialization(self):
with self.cached_session():
var0 = variables.VariableV1(0.0)
self.assertEqual("Variable:0", var0.name)
self.assertEqual("Variable", var0._shared_name)
self.assertEqual([], var0.get_shape())
self.assertEqual([], var0.get_shape())
self.assertEqual([], var0.shape)
var1 = variables.VariableV1(1.1)
self.assertEqual("Variable_1:0", var1.name)
self.assertEqual("Variable_1", var1._shared_name)
self.assertEqual([], var1.get_shape())
self.assertEqual([], var1.get_shape())
self.assertEqual([], var1.shape)
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(var0)
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(var1)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(0.0, self.evaluate(var0))
self.assertAllClose(1.1, self.evaluate(var1))
@test_util.run_v1_only("b/120545219")
def testInitializationOrder(self):
with self.cached_session():
rnd = variables.Variable(random_ops.random_uniform([3, 6]), name="rnd")
self.assertEqual("rnd:0", rnd.name)
self.assertEqual([3, 6], rnd.get_shape())
self.assertEqual([3, 6], rnd.get_shape())
self.assertEqual([3, 6], rnd.shape)
dep = variables.Variable(rnd.initialized_value(), name="dep")
self.assertEqual("dep:0", dep.name)
self.assertEqual([3, 6], dep.get_shape())
self.assertEqual([3, 6], dep.get_shape())
self.assertEqual([3, 6], dep.shape)
added_val = rnd.initialized_value() + dep.initialized_value() + 2.0
added_val.set_shape(rnd.get_shape())
depdep = variables.Variable(added_val, name="depdep")
self.assertEqual("depdep:0", depdep.name)
self.assertEqual([3, 6], depdep.get_shape())
self.assertEqual([3, 6], depdep.get_shape())
self.assertEqual([3, 6], depdep.shape)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(self.evaluate(rnd), self.evaluate(dep))
self.assertAllClose(
self.evaluate(rnd) + self.evaluate(dep) + 2.0, self.evaluate(depdep))
@test_util.run_deprecated_v1
def testCyclicInitializer(self):
with self.cached_session():
cyclic = control_flow_ops.while_loop(
cond=lambda i: i < 10,
body=lambda i: i + 1,
loop_vars=(constant_op.constant(0),))
initial_value = variables._try_guard_against_uninitialized_dependencies(
"test", cyclic)
self.assertIs(initial_value, cyclic)
def testIterable(self):
with self.assertRaisesRegex(TypeError, "not iterable"):
for _ in variables.Variable(0.0):
pass
with self.assertRaisesRegex(TypeError, "not iterable"):
for _ in variables.Variable([0.0, 1.0]):
pass
@test_util.run_deprecated_v1
def testAssignments(self):
with self.cached_session():
var = variables.Variable(0.0)
plus_one = var.assign_add(1.0)
minus_one = var.assign_sub(2.0)
four = var.assign(4.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(0.0, self.evaluate(var))
self.assertAllClose(1.0, self.evaluate(plus_one))
self.assertAllClose(1.0, self.evaluate(var))
self.assertAllClose(-1.0, self.evaluate(minus_one))
self.assertAllClose(-1.0, self.evaluate(var))
self.assertAllClose(4.0, self.evaluate(four))
self.assertAllClose(4.0, self.evaluate(var))
@test_util.run_deprecated_v1
def testResourceAssignments(self):
with self.session(use_gpu=True):
var = resource_variable_ops.ResourceVariable(0.0)
plus_one = var.assign_add(1.0)
minus_one = var.assign_sub(2.0)
four = var.assign(4.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(0.0, self.evaluate(var))
self.evaluate(plus_one)
self.assertAllClose(1.0, self.evaluate(var))
self.evaluate(minus_one)
self.assertAllClose(-1.0, self.evaluate(var))
self.evaluate(four)
self.assertAllClose(4.0, self.evaluate(var))
def testAssignDifferentShapesEagerNotAllowed(self):
with context.eager_mode():
var = variables.Variable(np.zeros(shape=[1, 1]))
with self.assertRaisesRegex(ValueError, "Shapes.*and.*are incompatible"):
var.assign(np.zeros(shape=[2, 2]))
@test_util.disable_tfrt("Graph is not supported yet. b/156187905")
@test_util.run_in_graph_and_eager_modes
def testAssignDifferentShapesAllowed(self):
var = variables.Variable(np.zeros(shape=[1, 1]),
shape=tensor_shape.TensorShape(None))
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(np.zeros(shape=[1, 1]), var.read_value())
self.evaluate(var.assign(np.zeros(shape=[2, 2])))
self.assertAllEqual(np.zeros(shape=[2, 2]), var.read_value())
@test_util.disable_tfrt("GetHostSize() is not expected to be called with "
"string type. b/156761465")
def testZeroSizeStringAssign(self):
with self.cached_session() as sess:
array = variables.VariableV1(
initial_value=array_ops.zeros((0,), dtype=dtypes.string),
name="foo",
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
self.evaluate(variables.local_variables_initializer())
old_value = array.value()
copy_op = array.assign(old_value)
self.assertEqual([], list(self.evaluate(copy_op)))
def _countUpToTest(self, dtype):
with self.cached_session():
zero = constant_op.constant(0, dtype=dtype)
var = variables.Variable(zero)
count_up_to = var.count_up_to(3)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(0, self.evaluate(var))
self.assertEqual(0, self.evaluate(count_up_to))
self.assertEqual(1, self.evaluate(var))
self.assertEqual(1, self.evaluate(count_up_to))
self.assertEqual(2, self.evaluate(var))
self.assertEqual(2, self.evaluate(count_up_to))
self.assertEqual(3, self.evaluate(var))
with self.assertRaisesOpError("Reached limit of 3"):
self.evaluate(count_up_to)
self.assertEqual(3, self.evaluate(var))
with self.assertRaisesOpError("Reached limit of 3"):
self.evaluate(count_up_to)
self.assertEqual(3, self.evaluate(var))
@test_util.run_deprecated_v1
def testCountUpToInt32(self):
self._countUpToTest(dtypes.int32)
@test_util.run_deprecated_v1
def testCountUpToInt64(self):
self._countUpToTest(dtypes.int64)
@test_util.run_v1_only("b/120545219")
def testControlDepsNone(self):
with self.cached_session():
c = constant_op.constant(1.0)
with ops.control_dependencies([c]):
d = constant_op.constant(2.0)
var_x = variables.VariableV1(2.0)
self.assertEqual([c.op], d.op.control_inputs)
self.assertEqual([], var_x.initializer.control_inputs)
self.assertEqual([], var_x.value().op.control_inputs)
self.assertEqual([], var_x._ref().op.control_inputs)
@test_util.run_v1_only("b/120545219")
def testControlFlow(self):
with self.cached_session() as sess:
v0 = variables.Variable(0, name="v0")
var_dict = {}
def var_in_then_clause():
v1 = variables.Variable(1, name="v1")
var_dict["v1"] = v1
return v1 + v0
def var_in_else_clause():
v2 = variables.Variable(2, name="v2")
var_dict["v2"] = v2
return v2 + v0
add = control_flow_ops.cond(
math_ops.less(v0, 10), var_in_then_clause, var_in_else_clause)
v1 = var_dict["v1"]
v2 = var_dict["v2"]
self.evaluate(v1.initializer)
self.assertEqual([1], self.evaluate(v1))
self.evaluate(v2.initializer)
self.assertEqual([2], self.evaluate(v2))
with self.assertRaisesRegex(errors_impl.OpError, "uninitialized"):
self.evaluate(v0)
with self.assertRaisesRegex(errors_impl.OpError, "uninitialized"):
self.evaluate(add)
self.evaluate(v0.initializer)
self.evaluate(add)
@test_util.run_v1_only("b/120545219")
def testControlFlowInitialization(self):
def cond(i, _):
return i < 10
def body(i, _):
zero = array_ops.zeros([], dtype=dtypes.int32)
v = variables.Variable(initial_value=zero)
return (i + 1, v.read_value())
with self.assertRaisesRegex(ValueError, "inside a control-flow"):
control_flow_ops.while_loop(cond, body, [0, 0])
@test_util.run_deprecated_v1
def testUseVariableAsTensor(self):
with self.cached_session():
var_x = variables.Variable(2.0)
var_y = variables.Variable(3.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(2.0, self.evaluate(var_x))
self.assertAllClose(3.0, self.evaluate(var_y))
self.assertAllClose(5.0, self.evaluate(math_ops.add(var_x, var_y)))
@test_util.run_deprecated_v1
def testZeroSizeVarSameAsConst(self):
with self.cached_session():
zero_size_var = variables.Variable(array_ops.zeros([0, 2]))
zero_size_const = array_ops.ones([2, 0])
variable_mul = math_ops.matmul(zero_size_const, zero_size_var)
const_mul = math_ops.matmul(
zero_size_const, zero_size_const, transpose_b=True)
self.evaluate(variables.global_variables_initializer())
variable_output = self.evaluate(variable_mul)
self.assertAllClose(self.evaluate(const_mul), variable_output)
self.assertAllClose([[0., 0.], [0., 0.]], variable_output)
@test_util.run_deprecated_v1
def testCachingDevice(self):
with self.cached_session():
var = variables.Variable(2.0)
self.assertEqual(var.device, var.initialized_value().device)
var_cached = variables.Variable(2.0, caching_device="/job:foo")
self.assertFalse(var_cached.device.startswith("/job:foo"))
self.assertTrue(var_cached.value().device.startswith("/job:foo"))
@test_util.run_deprecated_v1
def testCollections(self):
with self.cached_session():
var_x = variables.VariableV1(2.0)
var_y = variables.VariableV1(2.0, trainable=False)
var_z = variables.VariableV1(2.0, trainable=True)
var_t = variables.VariableV1(
2.0,
trainable=True,
collections=[
ops.GraphKeys.TRAINABLE_VARIABLES, ops.GraphKeys.GLOBAL_VARIABLES
])
self.assertEqual([var_x, var_y, var_z, var_t],
variables.global_variables())
self.assertEqual([var_x, var_z, var_t], variables.trainable_variables())
@test_util.run_deprecated_v1
def testCollectionsWithScope(self):
with self.cached_session():
with ops.name_scope("scope_1"):
var_x = variables.VariableV1(2.0)
with ops.name_scope("scope_2"):
var_y = variables.VariableV1(2.0)
self.assertEqual([var_x, var_y], variables.global_variables())
self.assertEqual([var_x], variables.global_variables("scope_1"))
self.assertEqual([var_y], variables.global_variables("scope_2"))
self.assertEqual([var_x, var_y], variables.trainable_variables())
self.assertEqual([var_x], variables.trainable_variables("scope_1"))
self.assertEqual([var_y], variables.trainable_variables("scope_2"))
def testOperatorWrapping(self):
for attr in functools.WRAPPER_ASSIGNMENTS:
self.assertEqual(
getattr(variables.Variable.__add__, attr),
getattr(ops.Tensor.__add__, attr))
@test_util.run_deprecated_v1
def testOperators(self):
with self.cached_session():
var_f = variables.Variable([2.0])
add = var_f + 0.0
radd = 1.0 + var_f
sub = var_f - 1.0
rsub = 1.0 - var_f
mul = var_f * 10.0
rmul = 10.0 * var_f
div = var_f / 10.0
rdiv = 10.0 / var_f
lt = var_f < 3.0
rlt = 3.0 < var_f
le = var_f <= 2.0
rle = 2.0 <= var_f
gt = var_f > 3.0
rgt = 3.0 > var_f
ge = var_f >= 2.0
rge = 2.0 >= var_f
neg = -var_f
abs_v = abs(var_f)
var_i = variables.Variable([20])
mod = var_i % 7
rmod = 103 % var_i
var_b = variables.Variable([True, False])
and_v = operator.and_(var_b, [True, True])
or_v = operator.or_(var_b, [False, True])
xor_v = operator.xor(var_b, [False, False])
invert_v = ~var_b
rnd = np.random.rand(4, 4).astype("f")
var_t = variables.Variable(rnd)
slice_v = var_t[2, 0:0]
var_m = variables.Variable([[2.0, 3.0]])
matmul = var_m.__matmul__([[10.0], [20.0]])
rmatmul = var_m.__rmatmul__([[10.0], [20.0]])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([2.0], self.evaluate(add))
self.assertAllClose([3.0], self.evaluate(radd))
self.assertAllClose([1.0], self.evaluate(sub))
self.assertAllClose([-1.0], self.evaluate(rsub))
self.assertAllClose([20.0], self.evaluate(mul))
self.assertAllClose([20.0], self.evaluate(rmul))
self.assertAllClose([0.2], self.evaluate(div))
self.assertAllClose([5.0], self.evaluate(rdiv))
self.assertAllClose([-2.0], self.evaluate(neg))
self.assertAllClose([2.0], self.evaluate(abs_v))
self.assertAllClose([True], self.evaluate(lt))
self.assertAllClose([False], self.evaluate(rlt))
self.assertAllClose([True], self.evaluate(le))
self.assertAllClose([True], self.evaluate(rle))
self.assertAllClose([False], self.evaluate(gt))
self.assertAllClose([True], self.evaluate(rgt))
self.assertAllClose([True], self.evaluate(ge))
self.assertAllClose([True], self.evaluate(rge))
self.assertAllClose([6], self.evaluate(mod))
self.assertAllClose([3], self.evaluate(rmod))
self.assertAllClose([True, False], self.evaluate(and_v))
self.assertAllClose([True, True], self.evaluate(or_v))
self.assertAllClose([True, False], self.evaluate(xor_v))
self.assertAllClose([False, True], self.evaluate(invert_v))
self.assertAllClose(rnd[2, 0:0], self.evaluate(slice_v))
self.assertAllClose([[80.0]], self.evaluate(matmul))
self.assertAllClose([[20.0, 30.0], [40.0, 60.0]], self.evaluate(rmatmul))
@test_util.run_deprecated_v1
def testSession(self):
with self.cached_session() as sess:
var = variables.Variable([1, 12])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([1, 12], self.evaluate(var))
@test_util.run_v1_only("b/120545219")
def testColocation(self):
with ops.device("/job:ps"):
var = variables.VariableV1(0, name="v")
with ops.device("/job:worker/task:7"):
assign_op = var.assign(1)
self.assertDeviceEqual("/job:ps", assign_op.device)
self.assertEqual([b"loc:@v"], assign_op.op.colocation_groups())
@test_util.run_v1_only("b/120545219")
def testInitializerFunction(self):
value = [[-42], [133.7]]
shape = [2, 1]
with self.cached_session():
initializer = lambda: constant_op.constant(value)
v1 = variables.Variable(initializer, dtype=dtypes.float32)
self.assertEqual(shape, v1.get_shape())
self.assertEqual(shape, v1.shape)
self.assertAllClose(value, self.evaluate(v1.initial_value))
with self.assertRaises(errors_impl.FailedPreconditionError):
self.evaluate(v1)
v2 = variables.Variable(
math_ops.negative(v1.initialized_value()), dtype=dtypes.float32)
self.assertEqual(v1.get_shape(), v2.get_shape())
self.assertEqual(v1.shape, v2.shape)
self.assertAllClose(np.negative(value), self.evaluate(v2.initial_value))
with self.assertRaises(errors_impl.FailedPreconditionError):
self.evaluate(v2)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(np.negative(value), self.evaluate(v2))
def testConstraintArg(self):
constraint = lambda x: x
v = variables.Variable(
lambda: constant_op.constant(1.),
constraint=constraint)
self.assertEqual(v.constraint, constraint)
constraint = 0
with self.assertRaises(ValueError):
v = variables.Variable(
lambda: constant_op.constant(1.),
constraint=constraint)
@test_util.run_v1_only("b/120545219")
def testNoRefDataRace(self):
with self.cached_session():
a = variables.Variable([1, 2, 3], dtype=dtypes.float32)
b = variables.Variable(a.initialized_value() + 2)
c = variables.Variable(b.initialized_value() + 2)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate(a), [1, 2, 3])
self.assertAllEqual(self.evaluate(b), [3, 4, 5])
self.assertAllEqual(self.evaluate(c), [5, 6, 7])
@test_util.run_deprecated_v1
def testInitializerFunctionDevicePlacement(self):
with self.cached_session():
initializer = lambda: constant_op.constant(42.0)
with ops.device("/cpu:100"):
v1 = variables.Variable(initializer, dtype=dtypes.float32, name="v1")
expected_device = "/device:CPU:100"
expected_group_v1 = [b"loc:@v1"]
self.assertEqual(expected_device, v1.op.device)
self.assertEqual(expected_group_v1, v1.op.colocation_groups())
for i in v1.initializer.inputs:
self.assertEqual(expected_group_v1, i.op.colocation_groups())
v2 = variables.Variable(initializer, dtype=dtypes.float32, name="v2")
expected_group_v2 = [b"loc:@v2"]
self.assertEqual(expected_group_v2, v2.op.colocation_groups())
for i in v2.initializer.inputs:
self.assertEqual(expected_group_v2, i.op.colocation_groups())
@test_util.run_v1_only("b/120545219")
def testVariableDefInitializedInstances(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v_def = variables.Variable(
initial_value=constant_op.constant(3.0)).to_proto()
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.Variable(variable_def=v_def)
self.assertEqual(3.0, self.evaluate(v.initialized_value()))
self.evaluate(v.assign(1.0))
self.assertEqual(1.0, self.evaluate(v.initialized_value()))
v_def.ClearField("initial_value_name")
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.Variable(variable_def=v_def)
self.assertProtoEquals(v_def, v.to_proto())
with self.assertRaises(ValueError):
self.evaluate(v.initialized_value())
def testTrainableInProto(self):
with ops.Graph().as_default():
non_trainable_variable = variables.Variable(
trainable=False,
initial_value=constant_op.constant(10.0))
self.assertEqual(
False,
variables.Variable(variable_def=non_trainable_variable.to_proto())
.trainable)
trainable_variable = variables.Variable(
trainable=True,
initial_value=constant_op.constant(10.0))
self.assertEqual(
True,
variables.Variable(variable_def=trainable_variable.to_proto())
.trainable)
def testSynchronizationAndAggregationSaved(self):
with ops.Graph().as_default():
original_variable = variables.Variable(
initial_value=constant_op.constant(10.0),
synchronization=variables.VariableSynchronization.NONE,
aggregation=variables.VariableAggregationV2.ONLY_FIRST_REPLICA)
self.assertEqual(variables.VariableSynchronization.NONE,
original_variable.synchronization)
self.assertEqual(variables.VariableAggregation.ONLY_FIRST_REPLICA,
original_variable.aggregation)
laundered = variables.Variable(
variable_def=original_variable.to_proto())
self.assertEqual(
variables.VariableSynchronization.NONE,
laundered.synchronization)
self.assertEqual(variables.VariableAggregationV2.ONLY_FIRST_REPLICA,
laundered.aggregation)
@test_util.run_deprecated_v1
def testLoad(self):
with self.cached_session():
var = variables.Variable(np.zeros((5, 5), np.float32))
self.evaluate(variables.global_variables_initializer())
var.load(np.ones((5, 5), np.float32))
self.assertAllClose(np.ones((5, 5), np.float32), self.evaluate(var))
@test_util.run_v1_only("b/120545219")
def testRepr(self):
var = variables.VariableV1(np.zeros((5, 5), np.float32), name="noop")
self.assertEqual(
"<tf.Variable 'noop:0' shape=(5, 5) dtype=float32_ref>",
repr(var))
def testVariableNamesPreserveNameScopesWithDefun(self):
@function.defun
def create_variable():
with ops.name_scope("foo"):
v = variables.Variable(0.0, name="bar")
self.assertEqual(v.name, "foo/bar:0")
with ops.get_default_graph().as_default():
create_variable()
@parameterized.parameters(variables.VariableV1, variables.Variable)
def testTrainableVariable(self, cls):
v1 = cls(1.0)
self.assertEqual(True, v1.trainable)
v2 = cls(1.0, synchronization=variables.VariableSynchronization.ON_READ)
self.assertEqual(False, v2.trainable)
v3 = cls(1.0, synchronization=variables.VariableSynchronization.ON_READ,
trainable=True)
self.assertEqual(True, v3.trainable)
v4 = cls(1.0, synchronization=variables.VariableSynchronization.ON_READ,
trainable=False)
self.assertEqual(False, v4.trainable)
class IsInitializedTest(test.TestCase):
def testNoVars(self):
with ops.Graph().as_default(), self.cached_session() as sess:
uninited = variables.report_uninitialized_variables()
self.assertEqual(0, self.evaluate(uninited).size)
def testAssertVariablesInitialized(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.Variable([1, 2], name="v")
w = variables.Variable([3, 4], name="w")
_ = v, w
uninited = variables.report_uninitialized_variables()
self.assertAllEqual(np.array([b"v", b"w"]), self.evaluate(uninited))
self.evaluate(variables.global_variables_initializer())
self.assertEqual(0, self.evaluate(uninited).size)
@test_util.run_v1_only("b/120545219")
def testVariableList(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1([1, 2], name="v")
w = variables.VariableV1([3, 4], name="w")
uninited = variables.report_uninitialized_variables()
self.assertAllEqual(np.array([b"v", b"w"]), self.evaluate(uninited))
self.evaluate(w.initializer)
self.assertAllEqual(np.array([b"v"]), self.evaluate(uninited))
v.initializer.run()
self.assertEqual(0, self.evaluate(uninited).size)
def testZeroSizeVarInitialized(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.Variable(array_ops.zeros([0, 2]), name="v")
uninited = variables.report_uninitialized_variables()
v.initializer.run()
self.assertEqual(0, self.evaluate(uninited).size)
def testTrainingWithZeroSizeVar(self):
with ops.Graph().as_default(), self.cached_session() as sess:
a = variables.Variable(array_ops.zeros([0, 2]))
b = variables.Variable(array_ops.ones([2, 2]))
objective = math_ops.reduce_sum(b + math_ops.matmul(
a, a, transpose_a=True))
self.evaluate(variables.global_variables_initializer())
do_opt = gradient_descent.GradientDescentOptimizer(0.1).minimize(
objective)
self.evaluate([do_opt])
self.assertAllClose([[0.9, 0.9], [0.9, 0.9]], self.evaluate(b))
@test_util.run_v1_only("b/120545219")
class ObsoleteIsInitializedTest(test.TestCase):
def testNoVars(self):
with ops.Graph().as_default():
self.assertEqual(None, variables.assert_variables_initialized())
def testVariables(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1([1, 2])
w = variables.VariableV1([3, 4])
_ = v, w
inited = variables.assert_variables_initialized()
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(inited)
self.evaluate(variables.global_variables_initializer())
self.evaluate(inited)
def testVariableList(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1([1, 2])
w = variables.VariableV1([3, 4])
inited = variables.assert_variables_initialized([v])
with self.assertRaisesOpError("Attempting to use uninitialized value"):
inited.op.run()
self.evaluate(w.initializer)
with self.assertRaisesOpError("Attempting to use uninitialized value"):
inited.op.run()
v.initializer.run()
inited.op.run()
class PartitionedVariableTest(test.TestCase):
def testPartitionedVariable(self):
with ops.Graph().as_default():
v0 = variables.Variable([0])
v1 = variables.Variable([1])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
v1._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [1], [1]))
partitions = [2]
partitioned_variable = variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v1, v0],
partitions=partitions)
concatenated = ops.convert_to_tensor(partitioned_variable)
num_partitions = len(partitioned_variable)
iterated_partitions = list(partitioned_variable)
self.assertEqual(2, num_partitions)
self.assertEqual([v0, v1], iterated_partitions)
self.assertEqual([2], partitioned_variable.get_shape())
self.assertEqual([2], partitioned_variable.shape)
self.assertEqual([2], concatenated.get_shape())
self.assertEqual([2], concatenated.shape)
def testPartitionedVariableFailures(self):
with ops.Graph().as_default():
with self.assertRaisesRegex(ValueError, "empty"):
variables.PartitionedVariable(
name="fail",
shape=2,
dtype=dtypes.int32,
variable_list=[],
partitions=[])
with self.assertRaisesRegex(ValueError, "must have a save_slice_info"):
v0 = variables.Variable([0])
partitions = [1]
variables.PartitionedVariable(
name="two_vars",
shape=[1],
dtype=v0.dtype,
variable_list=[v0],
partitions=partitions)
with self.assertRaisesRegex(ValueError, "full shapes must match"):
v0 = variables.Variable([0])
v1 = variables.Variable([1])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
v1._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [1], [1]))
partitions = [2]
variables.PartitionedVariable(
name="two_vars",
shape=[3],
dtype=v0.dtype,
variable_list=[v1, v0],
partitions=partitions)
with self.assertRaisesRegex(ValueError, "must be positive"):
v0 = variables.Variable([0])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
partitions = [0]
variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v0],
partitions=partitions)
def testPartitionedVariableAssignments(self):
with ops.Graph().as_default(), self.cached_session():
v0 = variables.Variable(initial_value=[0.0])
v1 = variables.Variable(initial_value=[1.0])
v2 = variables.Variable(initial_value=[20.0])
v3 = variables.Variable(initial_value=[30.0])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
v1._set_save_slice_info(
variables.Variable.SaveSliceInfo(v1.name, [2], [1], [1]))
v2._set_save_slice_info(
variables.Variable.SaveSliceInfo(v2.name, [2], [0], [1]))
v3._set_save_slice_info(
variables.Variable.SaveSliceInfo(v3.name, [2], [1], [1]))
partitions = [2]
pv_0 = variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v0, v1],
partitions=partitions)
pv_1 = variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v2, v3],
partitions=partitions)
deltas_a = constant_op.constant([1.0, 2.0])
deltas_b = constant_op.constant([3.0, 4.0])
ones = array_ops.ones([2])
plus_delta = pv_0.assign_add(deltas_a)
minus_delta = pv_0.assign_sub(deltas_b)
assign_ones = pv_0.assign(ones)
c_0 = constant_op.constant([2.0])
c_1 = constant_op.constant([3.0])
assign_list = pv_1.assign([c_0, c_1])
assign_part_value = pv_1.assign_add(assign_ones)
assign_part_var = pv_1.assign_sub(pv_0)
self.evaluate(variables.global_variables_initializer())
self.assertEqual([1.0], self.evaluate(plus_delta[0]))
self.assertEqual([1.0], self.evaluate(v0))
self.assertEqual([3.0], self.evaluate(plus_delta[1]))
self.assertEqual([3.0], self.evaluate(v1))
self.assertEqual([-2.0], self.evaluate(minus_delta[0]))
self.assertEqual([-2.0], self.evaluate(v0))
self.assertEqual([-1.0], self.evaluate(minus_delta[1]))
self.assertEqual([-1.0], self.evaluate(v1))
self.assertEqual([1.0], self.evaluate(assign_ones[0]))
self.assertEqual([1.0], self.evaluate(v0))
self.assertEqual([1.0], self.evaluate(assign_ones[1]))
self.assertEqual([1.0], self.evaluate(v1))
self.assertEqual([2.0], self.evaluate(assign_list[0]))
self.assertEqual([2.0], self.evaluate(v2))
self.assertEqual([3.0], self.evaluate(assign_list[1]))
self.assertEqual([3.0], self.evaluate(v3))
self.assertEqual([3.0], self.evaluate(assign_part_value[0]))
self.assertEqual([3.0], self.evaluate(v2))
self.assertEqual([4.0], self.evaluate(assign_part_value[1]))
self.assertEqual([4.0], self.evaluate(v3))
self.assertEqual([2.0], self.evaluate(assign_part_var[0]))
self.assertEqual([2.0], self.evaluate(v2))
self.assertEqual([3.0], self.evaluate(assign_part_var[1]))
self.assertEqual([3.0], self.evaluate(v3))
class VariableContainerTest(test.TestCase):
def testContainer(self):
with ops.Graph().as_default():
v0 = variables.Variable([0])
with ops.container("l1"):
v1 = variables.Variable([1])
with ops.container("l2"):
v2 = variables.Variable([2])
special_v = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="VariableInL3",
container="l3",
shared_name="")
v3 = variables.Variable([3])
v4 = variables.Variable([4])
self.assertEqual(compat.as_bytes(""), v0.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l1"), v1.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l2"), v2.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l3"), special_v.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l1"), v3.op.get_attr("container"))
self.assertEqual(compat.as_bytes(""), v4.op.get_attr("container"))
class AggregationModesTest(test.TestCase):
def testV1V2Equal(self):
v1 = variables.VariableAggregation
v2 = variables.VariableAggregationV2
self.assertEqual(v1.NONE, v2.NONE)
self.assertEqual(v1.SUM, v2.SUM)
self.assertEqual(v1.MEAN, v2.MEAN)
self.assertEqual(v1.ONLY_FIRST_REPLICA, v2.ONLY_FIRST_REPLICA)
self.assertEqual(v1.ONLY_FIRST_TOWER, v2.ONLY_FIRST_REPLICA)
self.assertEqual(v2.NONE, v1.NONE)
self.assertEqual(v2.SUM, v1.SUM)
self.assertEqual(v2.MEAN, v1.MEAN)
self.assertEqual(v2.ONLY_FIRST_REPLICA, v1.ONLY_FIRST_REPLICA)
self.assertEqual(v2.ONLY_FIRST_REPLICA, v1.ONLY_FIRST_TOWER)
self.assertEqual(hash(v1.NONE), hash(v2.NONE))
self.assertEqual(hash(v1.SUM), hash(v2.SUM))
self.assertEqual(hash(v1.MEAN), hash(v2.MEAN))
self.assertEqual(hash(v1.ONLY_FIRST_REPLICA), hash(v2.ONLY_FIRST_REPLICA))
self.assertEqual(hash(v1.ONLY_FIRST_TOWER), hash(v2.ONLY_FIRST_REPLICA))
if __name__ == "__main__":
test.main()
| true
| true
|
79074b77a2532e7f874c5a246dbe006f99ef49ae
| 3,727
|
py
|
Python
|
tests/serverobjects/test_ban.py
|
matthew-robertson/banned-word-tracker
|
32defe7936114258325ef8ba2f740648d43d4abf
|
[
"MIT"
] | 11
|
2019-03-10T18:31:59.000Z
|
2021-02-13T12:42:44.000Z
|
tests/serverobjects/test_ban.py
|
matthew-robertson/banned-word-tracker
|
32defe7936114258325ef8ba2f740648d43d4abf
|
[
"MIT"
] | 51
|
2019-02-21T21:21:59.000Z
|
2022-03-09T01:29:55.000Z
|
tests/serverobjects/test_ban.py
|
matthew-robertson/vore-tracker
|
c35807612397ae7bc540cb0a1af6bf3ec1f98593
|
[
"MIT"
] | 5
|
2018-07-12T06:36:29.000Z
|
2019-01-09T04:11:19.000Z
|
import unittest
import datetime
from unittest.mock import Mock, patch
from types import MethodType
from serverobjects.ban import BanInstance
class TestCheckIfMessageInfringes(unittest.TestCase):
def test_check_if_message_infringes__exact_match(self):
test_ban = BanInstance(
{
'rowid': 1,
'banned_word': 'test',
'calledout_at': '2019-11-11 11:11:11',
'infracted_at': '2019-11-11 11:11:11',
'server_id': 1234,
'record': {
'record_seconds': 2400,
'infraction_count': 0
}
},
datetime.datetime.now(),
0, None)
self.assertTrue(test_ban.check_if_message_infringes('test'))
def test_check_if_message_infringes__embedded_match(self):
test_ban = BanInstance(
{
'rowid': 1,
'banned_word': 'test',
'calledout_at': '2019-11-11 11:11:11',
'infracted_at': '2019-11-11 11:11:11',
'server_id': 1234,
'record': {
'record_seconds': 2400,
'infraction_count': 0
}
},
datetime.datetime.now(),
0, None)
self.assertTrue(test_ban.check_if_message_infringes('this is a test message.'))
def test_check_if_message_infringes__no_match(self):
test_ban = BanInstance(
{
'rowid': 1,
'banned_word': 'test',
'calledout_at': '2019-11-11 11:11:11',
'infracted_at': '2019-11-11 11:11:11',
'server_id': 1234,
'record': {
'record_seconds': 2400,
'infraction_count': 0
}
},
datetime.datetime.now(),
0, None)
self.assertFalse(test_ban.check_if_message_infringes('this message does not infringe.'))
def test_check_if_message_infringes__word_embedded_in_other(self):
test_ban = BanInstance(
{
'rowid': 1,
'banned_word': 'vore',
'calledout_at': '2019-11-11 11:11:11',
'infracted_at': '2019-11-11 11:11:11',
'server_id': 1234,
'record': {
'record_seconds': 2400,
'infraction_count': 0
}
},
datetime.datetime.now(),
0, None)
self.assertFalse(test_ban.check_if_message_infringes('omnivore'))
def test_check_if_message_infringes__at_mention_test(self):
test_ban = BanInstance(
{
'rowid': 1,
'banned_word': '<@12345>',
'calledout_at': '2019-11-11 11:11:11',
'infracted_at': '2019-11-11 11:11:11',
'server_id': 1234,
'record': {
'record_seconds': 2400,
'infraction_count': 0
}
},
datetime.datetime.now(),
0, None)
self.assertTrue(test_ban.check_if_message_infringes(' <@12345> '))
self.assertTrue(test_ban.check_if_message_infringes('<@12345>'))
def test_check_if_message_infringes__similar_word_unicode(self):
test_ban = BanInstance(
{
'rowid': 1,
'banned_word': 'vore',
'calledout_at': '2019-11-11 11:11:11',
'infracted_at': '2019-11-11 11:11:11',
'server_id': 1234,
'record': {
'record_seconds': 2400,
'infraction_count': 0
}
},
datetime.datetime.now(),
0, None)
self.assertTrue(test_ban.check_if_message_infringes('vÒrË'))
self.assertTrue(test_ban.check_if_message_infringes('vᴑRè'))
def test_check_if_message_infringes__similar_word_formatting(self):
test_ban = BanInstance(
{
'rowid': 1,
'banned_word': 'vore',
'calledout_at': '2019-11-11 11:11:11',
'infracted_at': '2019-11-11 11:11:11',
'server_id': 1234,
'record': {
'record_seconds': 2400,
'infraction_count': 0
}
},
datetime.datetime.now(),
0, None)
self.assertTrue(test_ban.check_if_message_infringes('-v-o-r-e-'))
self.assertTrue(test_ban.check_if_message_infringes('**v**o**r**e**'))
self.assertTrue(test_ban.check_if_message_infringes('|||v||||o||||r||e|||'))
| 28.450382
| 90
| 0.635632
|
import unittest
import datetime
from unittest.mock import Mock, patch
from types import MethodType
from serverobjects.ban import BanInstance
class TestCheckIfMessageInfringes(unittest.TestCase):
def test_check_if_message_infringes__exact_match(self):
test_ban = BanInstance(
{
'rowid': 1,
'banned_word': 'test',
'calledout_at': '2019-11-11 11:11:11',
'infracted_at': '2019-11-11 11:11:11',
'server_id': 1234,
'record': {
'record_seconds': 2400,
'infraction_count': 0
}
},
datetime.datetime.now(),
0, None)
self.assertTrue(test_ban.check_if_message_infringes('test'))
def test_check_if_message_infringes__embedded_match(self):
test_ban = BanInstance(
{
'rowid': 1,
'banned_word': 'test',
'calledout_at': '2019-11-11 11:11:11',
'infracted_at': '2019-11-11 11:11:11',
'server_id': 1234,
'record': {
'record_seconds': 2400,
'infraction_count': 0
}
},
datetime.datetime.now(),
0, None)
self.assertTrue(test_ban.check_if_message_infringes('this is a test message.'))
def test_check_if_message_infringes__no_match(self):
test_ban = BanInstance(
{
'rowid': 1,
'banned_word': 'test',
'calledout_at': '2019-11-11 11:11:11',
'infracted_at': '2019-11-11 11:11:11',
'server_id': 1234,
'record': {
'record_seconds': 2400,
'infraction_count': 0
}
},
datetime.datetime.now(),
0, None)
self.assertFalse(test_ban.check_if_message_infringes('this message does not infringe.'))
def test_check_if_message_infringes__word_embedded_in_other(self):
test_ban = BanInstance(
{
'rowid': 1,
'banned_word': 'vore',
'calledout_at': '2019-11-11 11:11:11',
'infracted_at': '2019-11-11 11:11:11',
'server_id': 1234,
'record': {
'record_seconds': 2400,
'infraction_count': 0
}
},
datetime.datetime.now(),
0, None)
self.assertFalse(test_ban.check_if_message_infringes('omnivore'))
def test_check_if_message_infringes__at_mention_test(self):
test_ban = BanInstance(
{
'rowid': 1,
'banned_word': '<@12345>',
'calledout_at': '2019-11-11 11:11:11',
'infracted_at': '2019-11-11 11:11:11',
'server_id': 1234,
'record': {
'record_seconds': 2400,
'infraction_count': 0
}
},
datetime.datetime.now(),
0, None)
self.assertTrue(test_ban.check_if_message_infringes(' <@12345> '))
self.assertTrue(test_ban.check_if_message_infringes('<@12345>'))
def test_check_if_message_infringes__similar_word_unicode(self):
test_ban = BanInstance(
{
'rowid': 1,
'banned_word': 'vore',
'calledout_at': '2019-11-11 11:11:11',
'infracted_at': '2019-11-11 11:11:11',
'server_id': 1234,
'record': {
'record_seconds': 2400,
'infraction_count': 0
}
},
datetime.datetime.now(),
0, None)
self.assertTrue(test_ban.check_if_message_infringes('vÒrË'))
self.assertTrue(test_ban.check_if_message_infringes('vᴑRè'))
def test_check_if_message_infringes__similar_word_formatting(self):
test_ban = BanInstance(
{
'rowid': 1,
'banned_word': 'vore',
'calledout_at': '2019-11-11 11:11:11',
'infracted_at': '2019-11-11 11:11:11',
'server_id': 1234,
'record': {
'record_seconds': 2400,
'infraction_count': 0
}
},
datetime.datetime.now(),
0, None)
self.assertTrue(test_ban.check_if_message_infringes('-v-o-r-e-'))
self.assertTrue(test_ban.check_if_message_infringes('**v**o**r**e**'))
self.assertTrue(test_ban.check_if_message_infringes('|||v||||o||||r||e|||'))
| true
| true
|
79074c329932ce8ea6558ed22dc07bea64855278
| 2,240
|
py
|
Python
|
Python/Machine Learning/Indian AI Production/Feature Engineering/05-Categorical Missing value imputation.py
|
omkarsutar1255/Python-Data
|
169d0c54b23d9dd5a7f1aea41ab385121c3b3c63
|
[
"CC-BY-3.0"
] | null | null | null |
Python/Machine Learning/Indian AI Production/Feature Engineering/05-Categorical Missing value imputation.py
|
omkarsutar1255/Python-Data
|
169d0c54b23d9dd5a7f1aea41ab385121c3b3c63
|
[
"CC-BY-3.0"
] | null | null | null |
Python/Machine Learning/Indian AI Production/Feature Engineering/05-Categorical Missing value imputation.py
|
omkarsutar1255/Python-Data
|
169d0c54b23d9dd5a7f1aea41ab385121c3b3c63
|
[
"CC-BY-3.0"
] | null | null | null |
# todo : Data Cleaning
# todo : Categorical Missing value imputation Part-5
# todo : Importing library
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# todo : import data
df = pd.read_csv(r"G:\DataSet\House Price Prediction\train.csv")
# todo : analyse the data
df.head()
# todo : selecting categorical columns
cat_vars = df.select_dtypes(include='object')
# todo : analysis of categorical data
cat_vars.head()
cat_vars.isnull().sum()
miss_val_per = cat_vars.isnull().mean() * 100
print(miss_val_per)
# todo : drop columns that have too many missing values
drop_vars = ['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature']
cat_vars.drop(columns=drop_vars, axis=1, inplace=True)
print(cat_vars.shape)
# todo : get the names of columns that still have a small share of missing values
isnull_per = cat_vars.isnull().mean() * 100
miss_vars = isnull_per[isnull_per > 0].keys()
print(miss_vars)
cat_vars['MasVnrType'].fillna('Missing')
# todo : show the mode value of that column
cat_vars['MasVnrType'].mode()
# todo : show how many rows contain each value
cat_vars['MasVnrType'].value_counts()
# todo : fill the missing values of the column with its mode value
cat_vars['MasVnrType'].fillna(cat_vars['MasVnrType'].mode()[0])
cat_vars['MasVnrType'].fillna(cat_vars['MasVnrType'].mode()[0]).value_counts()
cat_vars_copy = cat_vars.copy()
# todo : filling mode values in each categorical column
for var in miss_vars:
cat_vars_copy[var].fillna(cat_vars[var].mode()[0], inplace=True)
print(var, "=", cat_vars[var].mode()[0])
# todo : check how many null values are left in the data
cat_vars_copy.isnull().sum().sum()
# todo : visualize the original vs. imputed distributions to check the effect of the mode imputation
plt.figure(figsize=(16, 9))
for i, var in enumerate(miss_vars):
plt.subplot(4, 3, i + 1)
plt.hist(cat_vars_copy[var], label="Impute")
plt.hist(cat_vars[var].dropna(), label="Original")
plt.legend()
# todo : updating main dataset
df.update(cat_vars_copy)
# todo : drop the columns with too many missing values from the main dataset
df.drop(columns=drop_vars, inplace=True)
# todo : check which categorical columns still have null values
df.select_dtypes(include='object').isnull().sum()
| 29.473684
| 89
| 0.738393
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv(r"G:\DataSet\House Price Prediction\train.csv")
df.head()
cat_vars = df.select_dtypes(include='object')
cat_vars.head()
cat_vars.isnull().sum()
miss_val_per = cat_vars.isnull().mean() * 100
print(miss_val_per)
drop_vars = ['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature']
cat_vars.drop(columns=drop_vars, axis=1, inplace=True)
print(cat_vars.shape)
isnull_per = cat_vars.isnull().mean() * 100
miss_vars = isnull_per[isnull_per > 0].keys()
print(miss_vars)
cat_vars['MasVnrType'].fillna('Missing')
cat_vars['MasVnrType'].mode()
cat_vars['MasVnrType'].value_counts()
cat_vars['MasVnrType'].fillna(cat_vars['MasVnrType'].mode()[0])
cat_vars['MasVnrType'].fillna(cat_vars['MasVnrType'].mode()[0]).value_counts()
cat_vars_copy = cat_vars.copy()
for var in miss_vars:
cat_vars_copy[var].fillna(cat_vars[var].mode()[0], inplace=True)
print(var, "=", cat_vars[var].mode()[0])
cat_vars_copy.isnull().sum().sum()
plt.figure(figsize=(16, 9))
for i, var in enumerate(miss_vars):
plt.subplot(4, 3, i + 1)
plt.hist(cat_vars_copy[var], label="Impute")
plt.hist(cat_vars[var].dropna(), label="Original")
plt.legend()
df.update(cat_vars_copy)
df.drop(columns=drop_vars, inplace=True)
df.select_dtypes(include='object').isnull().sum()
| true
| true
|
79074cff7dbe0708598dc85ac55e7666a74992a7
| 1,043
|
py
|
Python
|
xlsxwriter/test/styles/test_write_fonts.py
|
sontek/XlsxWriter
|
7f17a52f95be9ecfb9c7f213fc0a02e0f625c6ec
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2015-05-19T22:17:15.000Z
|
2015-05-19T22:17:15.000Z
|
xlsxwriter/test/styles/test_write_fonts.py
|
sontek/XlsxWriter
|
7f17a52f95be9ecfb9c7f213fc0a02e0f625c6ec
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
xlsxwriter/test/styles/test_write_fonts.py
|
sontek/XlsxWriter
|
7f17a52f95be9ecfb9c7f213fc0a02e0f625c6ec
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2014, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ...styles import Styles
from ...format import Format
class TestWriteFonts(unittest.TestCase):
"""
Test the Styles _write_fonts() method.
"""
def setUp(self):
self.fh = StringIO()
self.styles = Styles()
self.styles._set_filehandle(self.fh)
def test_write_fonts(self):
"""Test the _write_fonts() method"""
xf_format = Format()
xf_format.has_font = 1
self.styles._set_style_properties([[xf_format], None, 1, 0, 0, 0, [], []])
self.styles._write_fonts()
exp = """<fonts count="1"><font><sz val="11"/><color theme="1"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font></fonts>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
if __name__ == '__main__':
unittest.main()
| 24.255814
| 149
| 0.569511
| true
| true
|
|
79074d348fba648bf6dcdf173c50845b268e2caa
| 1,795
|
py
|
Python
|
streamlit_terran_timeline/examples/youtube.py
|
cenkbircanoglu/streamlit-terran-timeline
|
7c0f3b1c77ac0f79b159a50723b9fa72eb37ff53
|
[
"MIT"
] | 56
|
2020-08-10T13:58:36.000Z
|
2021-05-26T07:24:19.000Z
|
streamlit_terran_timeline/examples/youtube.py
|
cenkbircanoglu/streamlit-terran-timeline
|
7c0f3b1c77ac0f79b159a50723b9fa72eb37ff53
|
[
"MIT"
] | 1
|
2022-02-28T01:42:06.000Z
|
2022-02-28T01:42:06.000Z
|
streamlit_terran_timeline/examples/youtube.py
|
cenkbircanoglu/streamlit-terran-timeline
|
7c0f3b1c77ac0f79b159a50723b9fa72eb37ff53
|
[
"MIT"
] | 8
|
2020-08-11T15:39:30.000Z
|
2021-03-24T22:40:50.000Z
|
import streamlit as st
import warnings
try:
from streamlit_terran_timeline import terran_timeline, generate_timeline
except ImportError:
warnings.warn(
"Failed to load terran_timeline from streamlit_terran_timeline. "
"Please run 'pip install streamlit_terran_timeline' or "
"'pip install .' if working locally"
)
exit(1)
st.header("Face-recognition interactive-timeline generator")
st.write(
"In this demo we show you how easy it is to create an interactive"
"timeline chart of faces detected on videos. Thanksfully, there's an open "
"source project called Terran that makes all this process super super easy!"
)
st.write("More descriptions here")
st.subheader("Loading your video")
st.write(
"You can select videos from **multiple sources**: "
"YouTube and almost any video streaming platform, or any local file"
)
#
# Ask the user to input a video link or path and show the video below
#
video_path = st.text_input(
"Link or path to video", "https://www.youtube.com/watch?v=v2VgA_MCNDg"
)
#
# Show the actual faces timeline chart
#
st.subheader("Faces timeline chart")
st.write("")
@st.cache(persist=True, ttl=86_400, suppress_st_warning=True, show_spinner=False)
def _generate_timeline(video_path):
timeline = generate_timeline(
video_src=video_path,
appearence_threshold=5,
batch_size=32,
duration=None,
framerate=8,
output_directory="timelines",
ref_directory=None,
similarity_threshold=0.75,
start_time=0,
thumbnail_rate=1,
)
return timeline
with st.spinner("Generating timeline"):
timeline = _generate_timeline(video_path)
start_time = terran_timeline(timeline)
st.video(video_path, start_time=int(start_time))
| 26.014493
| 81
| 0.71532
|
import streamlit as st
import warnings
try:
from streamlit_terran_timeline import terran_timeline, generate_timeline
except ImportError:
warnings.warn(
"Failed to load terran_timeline from streamlit_terran_timeline. "
"Please run 'pip install streamlit_terran_timeline' or "
"'pip install .' if working locally"
)
exit(1)
st.header("Face-recognition interactive-timeline generator")
st.write(
"In this demo we show you how easy it is to create an interactive"
"timeline chart of faces detected on videos. Thanksfully, there's an open "
"source project called Terran that makes all this process super super easy!"
)
st.write("More descriptions here")
st.subheader("Loading your video")
st.write(
"You can select videos from **multiple sources**: "
"YouTube and almost any video streaming platform, or any local file"
)
#
# Ask the user to input a video link or path and show the video below
#
video_path = st.text_input(
"Link or path to video", "https://www.youtube.com/watch?v=v2VgA_MCNDg"
)
#
# Show the actual faces timeline chart
#
st.subheader("Faces timeline chart")
st.write("")
@st.cache(persist=True, ttl=86_400, suppress_st_warning=True, show_spinner=False)
def _generate_timeline(video_path):
timeline = generate_timeline(
video_src=video_path,
appearence_threshold=5,
batch_size=32,
duration=None,
framerate=8,
output_directory="timelines",
ref_directory=None,
similarity_threshold=0.75,
start_time=0,
thumbnail_rate=1,
)
return timeline
with st.spinner("Generating timeline"):
timeline = _generate_timeline(video_path)
start_time = terran_timeline(timeline)
st.video(video_path, start_time=int(start_time))
| true
| true
|
79074d511a346e6151277a58a9bf4b1843df65f7
| 111
|
py
|
Python
|
backend/app/crud/__init__.py
|
Infam852/IoT-project
|
673d8a96676e046331550b9c16c0610de5733f73
|
[
"MIT"
] | null | null | null |
backend/app/crud/__init__.py
|
Infam852/IoT-project
|
673d8a96676e046331550b9c16c0610de5733f73
|
[
"MIT"
] | null | null | null |
backend/app/crud/__init__.py
|
Infam852/IoT-project
|
673d8a96676e046331550b9c16c0610de5733f73
|
[
"MIT"
] | 1
|
2021-12-18T19:33:01.000Z
|
2021-12-18T19:33:01.000Z
|
from app.crud.crud_crosswalk import *
from app.crud.crud_statistics import *
from app.crud.crud_users import *
| 27.75
| 38
| 0.810811
|
from app.crud.crud_crosswalk import *
from app.crud.crud_statistics import *
from app.crud.crud_users import *
| true
| true
|
79074e63ba61112d1796829fb1e088a02aaab291
| 11,829
|
py
|
Python
|
tools.py
|
billhhh/model-quantization-1
|
e816c3ffd36426810e31de04dfdec1894a600c2d
|
[
"BSD-2-Clause"
] | null | null | null |
tools.py
|
billhhh/model-quantization-1
|
e816c3ffd36426810e31de04dfdec1894a600c2d
|
[
"BSD-2-Clause"
] | null | null | null |
tools.py
|
billhhh/model-quantization-1
|
e816c3ffd36426810e31de04dfdec1894a600c2d
|
[
"BSD-2-Clause"
] | null | null | null |
import os, sys, glob, argparse
import logging
import types
from collections import OrderedDict
import torch
import torch.nn.functional as F
import utils
import models
import main as entry
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
def export_onnx(args):
model_name = args.model
if model_name in models.model_zoo:
model, args = models.get_model(args)
else:
print("model(%s) not support, available models: %r" % (model_name, models.model_zoo))
return
if utils.check_file(args.old):
print("load pretrained from %s" % args.old)
if torch.cuda.is_available():
checkpoint = torch.load(args.old)
else: # force cpu mode
checkpoint = torch.load(args.old, map_location='cpu')
print("load pretrained ==> last epoch: %d" % checkpoint.get('epoch', 0))
print("load pretrained ==> last best_acc: %f" % checkpoint.get('best_acc', 0))
print("load pretrained ==> last learning_rate: %f" % checkpoint.get('learning_rate', 0))
try:
utils.load_state_dict(model, checkpoint.get('state_dict', None))
except RuntimeError:
print("Loading pretrained model failed")
else:
print("no pretrained file exists({}), init model with default initlizer".
format(args.old))
onnx_model = torch.nn.Sequential(OrderedDict([
('network', model),
('softmax', torch.nn.Softmax()),
]))
onnx_path = "onnx/" + model_name
if not os.path.exists(onnx_path):
os.makedirs(onnx_path)
onnx_save = onnx_path + "/" + model_name + '.onnx'
input_names = ["input"]
dummy_input = torch.zeros((1, 3, args.input_size, args.input_size))
output_names = ['prob']
torch.onnx.export(
onnx_model,
dummy_input,
onnx_save,
verbose=True,
input_names=input_names,
output_names=output_names,
opset_version=7,
keep_initializers_as_inputs=True
)
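    # As an optional, hypothetical sanity check (not part of the original script): if the
    # third-party `onnx` and `onnxruntime` packages are installed, the exported graph can be
    # validated and compared against the PyTorch output roughly along these lines:
    #   import onnx, onnxruntime, numpy as np
    #   onnx.checker.check_model(onnx.load(onnx_save))
    #   session = onnxruntime.InferenceSession(onnx_save)
    #   ort_prob = session.run(None, {"input": dummy_input.numpy()})[0]
    #   np.testing.assert_allclose(ort_prob, onnx_model(dummy_input).detach().numpy(), rtol=1e-3)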
def inference(args):
from models.quant import custom_conv
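    # Note: the init() below monkey-patches custom_conv.__init__ so every quantized
    # convolution is built with force_fp=True (presumably falling back to a plain
    # full-precision nn.Conv2d), which keeps the shape-tracing pass in this function
    # independent of any quantization configuration.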
def init(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=False,
args=None, force_fp=False, feature_stride=1):
super(custom_conv, self).__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
self.args = args
self.force_fp = True
custom_conv.__init__ = init
model_name = args.model
if model_name in models.model_zoo:
model, args = models.get_model(args)
else:
print("model(%s) not support, available models: %r" % (model_name, models.model_zoo))
return
def forward(self, x):
print(x.shape, self.weight.shape, self.kernel_size, self.stride, self.padding, self.dilation, self.groups)
output = F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
return output
for m in model.modules():
if isinstance(m, torch.nn.Conv2d):
m.forward = types.MethodType(forward, m)
input = torch.rand(1, 3, args.input_size, args.input_size)
model.forward(input)
def get_parameter():
parser = entry.get_parser()
parser.add_argument('--old', type=str, default='')
parser.add_argument('--new', type=str, default='')
parser.add_argument('--mapping_from', '--mf', type=str, default='')
parser.add_argument('--mapping_to', '--mt', type=str, default='')
parser.add_argument('--verbose_list', default='ratio,sep', type=str)
args = parser.parse_args()
if isinstance(args.verbose_list, str):
args.verbose_list = [x.strip() for x in args.verbose_list.split(',')]
if isinstance(args.keyword, str):
args.keyword = [x.strip() for x in args.keyword.split(',')]
return args
def main():
args = get_parameter()
args.weights_dir = os.path.join(args.weights_dir, args.model)
utils.check_folder(args.weights_dir)
if os.path.exists(args.log_dir):
utils.setup_logging(os.path.join(args.log_dir, 'tools.txt'), resume=True)
config = dict()
for i in args.keyword:
config[i] = True
if 'export_onnx' in config.keys():
export_onnx(args)
if 'inference' in config.keys():
inference(args)
if 'verbose' in config.keys():
if torch.cuda.is_available():
checkpoint = torch.load(args.old)
else: # force cpu mode
checkpoint = torch.load(args.old, map_location='cpu')
if 'state_dict' in checkpoint:
checkpoint = checkpoint['state_dict']
if 'model' in checkpoint:
checkpoint = checkpoint['model']
for name, value in checkpoint.items():
if ('quant_activation' in name or 'quant_weight' in name) and name.split('.')[-1] in args.verbose_list:
print(name, value.shape, value.requires_grad)
print(value.data)
elif "all" in args.verbose_list:
if 'num_batches_tracked' not in name:
if isinstance(value, torch.Tensor):
print(name, value.shape, value.requires_grad)
elif isinstance(value, int) or isinstance(value, float) or isinstance(value, str):
print(name, value, type(value))
else:
print(name, type(value))
if 'load' in config.keys() or 'save' in config.keys():
model_name = args.model
if model_name in models.model_zoo:
model, args = models.get_model(args)
else:
print("model(%s) not support, available models: %r" % (model_name, models.model_zoo))
return
if utils.check_file(args.old):
raw = 'raw' in config.keys()
if torch.cuda.is_available():
checkpoint = torch.load(args.old)
else: # force cpu mode
checkpoint = torch.load(args.old, map_location='cpu')
try:
utils.load_state_dict(model, checkpoint.get('state_dict', None) if not raw else checkpoint, verbose=False)
except RuntimeError:
print("Loading pretrained model failed")
print("Loading pretrained model OK")
if 'save' in config.keys() and args.new != '':
torch.save(model.state_dict(), args.new)
print("Save pretrained model into %s" % args.new)
else:
print("file not exist %s" % args.old)
if 'update' in config.keys():
mapping_from = []
mapping_to = []
if os.path.isfile(args.mapping_from):
with open(args.mapping_from) as f:
mapping_from = f.readlines()
f.close()
if os.path.isfile(args.mapping_to):
with open(args.mapping_to) as f:
mapping_to = f.readlines()
f.close()
mapping_from = [ i.strip().strip('\n').strip('"').strip("'") for i in mapping_from]
mapping_from = [ i for i in mapping_from if len(i) > 0 and i[0] != '#']
mapping_to = [ i.strip().strip('\n').strip('"').strip("'") for i in mapping_to]
mapping_to = [ i for i in mapping_to if len(i) > 0 and i[0] != '#']
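        # Each non-comment line of --mapping_from is paired with the corresponding line of
        # --mapping_to. As a hypothetical illustration of the brace expansion handled below,
        # a source line "layer1.{conv1,conv2}" paired with a target line "stage1.{conv1,conv2}"
        # expands to the two entries layer1.conv1 -> stage1.conv1 and layer1.conv2 -> stage1.conv2.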
if len(mapping_to) != len(mapping_from) or len(mapping_to) == 0 or len(mapping_from) == 0:
mapping = None
logging.info('no valid mapping')
else:
mapping = dict()
for i, k in enumerate(mapping_from):
if '{' in k and '}' in k and '{' in mapping_to[i] and '}' in mapping_to[i]:
item = k.split('{')
for v in item[1].strip('}').split(","):
v = v.strip()
mapping[item[0] + v] = mapping_to[i].split('{')[0] + v
else:
mapping[k] = mapping_to[i]
raw = 'raw' in config.keys()
if not os.path.isfile(args.old):
args.old = args.pretrained
utils.import_state_dict(args.old, args.new, mapping, raw, raw_prefix=args.case)
if 'det-load' in config.keys():
from third_party.checkpoint import DetectionCheckpointer
model_name = args.model
if model_name in models.model_zoo:
model, args = models.get_model(args)
else:
print("model(%s) not support, available models: %r" % (model_name, models.model_zoo))
return
split = os.path.split(args.old)
checkpointer = DetectionCheckpointer(model, split[0], save_to_disk=True)
checkpointer.resume_or_load(args.old, resume=True)
checkpointer.save(split[1])
if 'swap' in config.keys():
mapping_from = []
if os.path.isfile(args.mapping_from):
with open(args.mapping_from) as f:
mapping_from = f.readlines()
f.close()
mapping_from = [ i.strip().strip('\n').strip('"').strip("'") for i in mapping_from]
mapping_from = [ i for i in mapping_from if len(i) > 0 and i[0] != '#']
lists = args.verbose_list
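        # Each entry of --verbose_list is expected to look like "<interval>/<i0>-<i1>-...":
        # the mapping file is treated as consecutive groups of <interval> lines and, within
        # every group, position j is taken from position i_j of the same group. For example
        # (hypothetical), "2/1-0" swaps every adjacent pair of lines.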
for i in lists:
item = i.split('/')
interval = (int)(item[0])
index = item[1].split('-')
index = [(int)(x) for x in index]
if len(mapping_from) % interval == 0 and len(index) <= interval:
mapping_to = mapping_from.copy()
for j, k in enumerate(index):
k = k % interval
mapping_to[j::interval] = mapping_from[k::interval]
mapping_to= [ i + '\n' for i in mapping_to]
with open(args.mapping_from + "-swap", 'w') as f:
f.writelines(mapping_to)
f.close()
if 'sort' in config.keys():
mapping_from = []
if os.path.isfile(args.mapping_from):
with open(args.mapping_from) as f:
mapping_from = f.readlines()
f.close()
mapping_from.sort()
with open(args.mapping_from + "-sort", 'w') as f:
f.writelines(mapping_from)
f.close()
if 'verify-data' in config.keys() or 'verify-image' in config.keys():
if 'verify-image' in config.keys():
lists = args.verbose_list
else:
with open(os.path.join(args.root, 'train.txt')) as f:
lists = f.readlines()
f.close()
from PIL import Image
from threading import Thread
print("going to check %d files" % len(lists))
def check(lists, start, end, index):
for i, item in enumerate(lists[start:end]):
try:
items = item.split()
if len(items) >= 1:
path = items[0].strip().strip('\n')
else:
print("skip line %s" % i)
continue
path = os.path.join(args.root, os.path.join("train", path))
imgs = Image.open(path)
imgs.resize((256,256))
if index == 0:
print(i, end ="\r", file=sys.stderr)
except (RuntimeError, IOError):
print("\nError when read image %s" % path)
print("\nFinish checking", index)
#lists = lists[45000:]
num = min(len(lists), 20)
for i in range(num):
start = len(lists) // num * i
end = min(start + len(lists) // num, len(lists))
th = Thread(target=check, args=(lists, start, end, i))
th.start()
if __name__ == '__main__':
main()
| 40.234694
| 158
| 0.563108
|
import os, sys, glob, argparse
import logging
import types
from collections import OrderedDict
import torch
import torch.nn.functional as F
import utils
import models
import main as entry
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
def export_onnx(args):
model_name = args.model
if model_name in models.model_zoo:
model, args = models.get_model(args)
else:
print("model(%s) not support, available models: %r" % (model_name, models.model_zoo))
return
if utils.check_file(args.old):
print("load pretrained from %s" % args.old)
if torch.cuda.is_available():
checkpoint = torch.load(args.old)
else:
checkpoint = torch.load(args.old, map_location='cpu')
print("load pretrained ==> last epoch: %d" % checkpoint.get('epoch', 0))
print("load pretrained ==> last best_acc: %f" % checkpoint.get('best_acc', 0))
print("load pretrained ==> last learning_rate: %f" % checkpoint.get('learning_rate', 0))
try:
utils.load_state_dict(model, checkpoint.get('state_dict', None))
except RuntimeError:
print("Loading pretrained model failed")
else:
print("no pretrained file exists({}), init model with default initlizer".
format(args.old))
onnx_model = torch.nn.Sequential(OrderedDict([
('network', model),
('softmax', torch.nn.Softmax()),
]))
onnx_path = "onnx/" + model_name
if not os.path.exists(onnx_path):
os.makedirs(onnx_path)
onnx_save = onnx_path + "/" + model_name + '.onnx'
input_names = ["input"]
dummy_input = torch.zeros((1, 3, args.input_size, args.input_size))
output_names = ['prob']
torch.onnx.export(
onnx_model,
dummy_input,
onnx_save,
verbose=True,
input_names=input_names,
output_names=output_names,
opset_version=7,
keep_initializers_as_inputs=True
)
def inference(args):
from models.quant import custom_conv
def init(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=False,
args=None, force_fp=False, feature_stride=1):
super(custom_conv, self).__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
self.args = args
self.force_fp = True
custom_conv.__init__ = init
model_name = args.model
if model_name in models.model_zoo:
model, args = models.get_model(args)
else:
print("model(%s) not support, available models: %r" % (model_name, models.model_zoo))
return
def forward(self, x):
print(x.shape, self.weight.shape, self.kernel_size, self.stride, self.padding, self.dilation, self.groups)
output = F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
return output
for m in model.modules():
if isinstance(m, torch.nn.Conv2d):
m.forward = types.MethodType(forward, m)
input = torch.rand(1, 3, args.input_size, args.input_size)
model.forward(input)
def get_parameter():
parser = entry.get_parser()
parser.add_argument('--old', type=str, default='')
parser.add_argument('--new', type=str, default='')
parser.add_argument('--mapping_from', '--mf', type=str, default='')
parser.add_argument('--mapping_to', '--mt', type=str, default='')
parser.add_argument('--verbose_list', default='ratio,sep', type=str)
args = parser.parse_args()
if isinstance(args.verbose_list, str):
args.verbose_list = [x.strip() for x in args.verbose_list.split(',')]
if isinstance(args.keyword, str):
args.keyword = [x.strip() for x in args.keyword.split(',')]
return args
def main():
args = get_parameter()
args.weights_dir = os.path.join(args.weights_dir, args.model)
utils.check_folder(args.weights_dir)
if os.path.exists(args.log_dir):
utils.setup_logging(os.path.join(args.log_dir, 'tools.txt'), resume=True)
config = dict()
for i in args.keyword:
config[i] = True
if 'export_onnx' in config.keys():
export_onnx(args)
if 'inference' in config.keys():
inference(args)
if 'verbose' in config.keys():
if torch.cuda.is_available():
checkpoint = torch.load(args.old)
else:
checkpoint = torch.load(args.old, map_location='cpu')
if 'state_dict' in checkpoint:
checkpoint = checkpoint['state_dict']
if 'model' in checkpoint:
checkpoint = checkpoint['model']
for name, value in checkpoint.items():
if ('quant_activation' in name or 'quant_weight' in name) and name.split('.')[-1] in args.verbose_list:
print(name, value.shape, value.requires_grad)
print(value.data)
elif "all" in args.verbose_list:
if 'num_batches_tracked' not in name:
if isinstance(value, torch.Tensor):
print(name, value.shape, value.requires_grad)
elif isinstance(value, int) or isinstance(value, float) or isinstance(value, str):
print(name, value, type(value))
else:
print(name, type(value))
if 'load' in config.keys() or 'save' in config.keys():
model_name = args.model
if model_name in models.model_zoo:
model, args = models.get_model(args)
else:
print("model(%s) not support, available models: %r" % (model_name, models.model_zoo))
return
if utils.check_file(args.old):
raw = 'raw' in config.keys()
if torch.cuda.is_available():
checkpoint = torch.load(args.old)
else:
checkpoint = torch.load(args.old, map_location='cpu')
try:
utils.load_state_dict(model, checkpoint.get('state_dict', None) if not raw else checkpoint, verbose=False)
except RuntimeError:
print("Loading pretrained model failed")
print("Loading pretrained model OK")
if 'save' in config.keys() and args.new != '':
torch.save(model.state_dict(), args.new)
print("Save pretrained model into %s" % args.new)
else:
print("file not exist %s" % args.old)
if 'update' in config.keys():
mapping_from = []
mapping_to = []
if os.path.isfile(args.mapping_from):
with open(args.mapping_from) as f:
mapping_from = f.readlines()
f.close()
if os.path.isfile(args.mapping_to):
with open(args.mapping_to) as f:
mapping_to = f.readlines()
f.close()
mapping_from = [ i.strip().strip('\n').strip('"').strip("'") for i in mapping_from]
mapping_from = [ i for i in mapping_from if len(i) > 0 and i[0] != '#']
mapping_to = [ i.strip().strip('\n').strip('"').strip("'") for i in mapping_to]
mapping_to = [ i for i in mapping_to if len(i) > 0 and i[0] != '#']
if len(mapping_to) != len(mapping_from) or len(mapping_to) == 0 or len(mapping_from) == 0:
mapping = None
logging.info('no valid mapping')
else:
mapping = dict()
for i, k in enumerate(mapping_from):
if '{' in k and '}' in k and '{' in mapping_to[i] and '}' in mapping_to[i]:
item = k.split('{')
for v in item[1].strip('}').split(","):
v = v.strip()
mapping[item[0] + v] = mapping_to[i].split('{')[0] + v
else:
mapping[k] = mapping_to[i]
raw = 'raw' in config.keys()
if not os.path.isfile(args.old):
args.old = args.pretrained
utils.import_state_dict(args.old, args.new, mapping, raw, raw_prefix=args.case)
if 'det-load' in config.keys():
from third_party.checkpoint import DetectionCheckpointer
model_name = args.model
if model_name in models.model_zoo:
model, args = models.get_model(args)
else:
print("model(%s) not support, available models: %r" % (model_name, models.model_zoo))
return
split = os.path.split(args.old)
checkpointer = DetectionCheckpointer(model, split[0], save_to_disk=True)
checkpointer.resume_or_load(args.old, resume=True)
checkpointer.save(split[1])
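    # The 'swap' keyword below rewrites the mapping_from file in groups of <interval> lines, reordering
    # each group by the dash-separated indices in --verbose_list entries of the form "interval/i-j-...".
    # For example, "4/1-0" swaps the first two lines of every group of four and writes the result to
    # "<mapping_from>-swap".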
if 'swap' in config.keys():
mapping_from = []
if os.path.isfile(args.mapping_from):
with open(args.mapping_from) as f:
mapping_from = f.readlines()
f.close()
mapping_from = [ i.strip().strip('\n').strip('"').strip("'") for i in mapping_from]
mapping_from = [ i for i in mapping_from if len(i) > 0 and i[0] != '#']
lists = args.verbose_list
for i in lists:
item = i.split('/')
interval = (int)(item[0])
index = item[1].split('-')
index = [(int)(x) for x in index]
if len(mapping_from) % interval == 0 and len(index) <= interval:
mapping_to = mapping_from.copy()
for j, k in enumerate(index):
k = k % interval
mapping_to[j::interval] = mapping_from[k::interval]
mapping_to= [ i + '\n' for i in mapping_to]
with open(args.mapping_from + "-swap", 'w') as f:
f.writelines(mapping_to)
f.close()
if 'sort' in config.keys():
mapping_from = []
if os.path.isfile(args.mapping_from):
with open(args.mapping_from) as f:
mapping_from = f.readlines()
f.close()
mapping_from.sort()
with open(args.mapping_from + "-sort", 'w') as f:
f.writelines(mapping_from)
f.close()
if 'verify-data' in config.keys() or 'verify-image' in config.keys():
if 'verify-image' in config.keys():
lists = args.verbose_list
else:
with open(os.path.join(args.root, 'train.txt')) as f:
lists = f.readlines()
f.close()
from PIL import Image
from threading import Thread
print("going to check %d files" % len(lists))
def check(lists, start, end, index):
for i, item in enumerate(lists[start:end]):
try:
items = item.split()
if len(items) >= 1:
path = items[0].strip().strip('\n')
else:
print("skip line %s" % i)
continue
path = os.path.join(args.root, os.path.join("train", path))
imgs = Image.open(path)
imgs.resize((256,256))
if index == 0:
print(i, end ="\r", file=sys.stderr)
except (RuntimeError, IOError):
print("\nError when read image %s" % path)
print("\nFinish checking", index)
#lists = lists[45000:]
num = min(len(lists), 20)
for i in range(num):
start = len(lists) // num * i
end = min(start + len(lists) // num, len(lists))
th = Thread(target=check, args=(lists, start, end, i))
th.start()
if __name__ == '__main__':
main()
is_comment_constant_removed: true
is_sharp_comment_removed: true

hexsha: 79074e66dfc8cc37abd88356f198faae39e4528c
size: 9,244
ext: py
lang: Python
max_stars_repo_path: venv/lib/python3.6/site-packages/ansible_collections/mellanox/onyx/plugins/modules/onyx_qos.py
max_stars_repo_name: usegalaxy-no/usegalaxy
max_stars_repo_head_hexsha: 75dad095769fe918eb39677f2c887e681a747f3a
max_stars_repo_licenses: ["MIT"]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2020-01-22T13:11:23.000Z
max_stars_repo_stars_event_max_datetime: 2020-01-22T13:11:23.000Z
max_issues_repo_path: venv/lib/python3.6/site-packages/ansible_collections/mellanox/onyx/plugins/modules/onyx_qos.py
max_issues_repo_name: usegalaxy-no/usegalaxy
max_issues_repo_head_hexsha: 75dad095769fe918eb39677f2c887e681a747f3a
max_issues_repo_licenses: ["MIT"]
max_issues_count: 12
max_issues_repo_issues_event_min_datetime: 2020-02-21T07:24:52.000Z
max_issues_repo_issues_event_max_datetime: 2020-04-14T09:54:32.000Z
max_forks_repo_path: venv/lib/python3.6/site-packages/ansible_collections/mellanox/onyx/plugins/modules/onyx_qos.py
max_forks_repo_name: usegalaxy-no/usegalaxy
max_forks_repo_head_hexsha: 75dad095769fe918eb39677f2c887e681a747f3a
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
#!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: onyx_qos
author: "Anas Badaha (@anasb)"
short_description: Configures QoS
description:
- This module provides declarative management of Onyx QoS configuration
on Mellanox ONYX network devices.
notes:
- Tested on ONYX 3.6.8130
options:
interfaces:
description:
      - List of interface names.
required: true
trust:
description:
- trust type.
choices: ['L2', 'L3', 'both']
default: L2
rewrite_pcp:
description:
- rewrite with type pcp.
choices: ['enabled', 'disabled']
default: disabled
rewrite_dscp:
description:
- rewrite with type dscp.
choices: ['enabled', 'disabled']
default: disabled
'''
EXAMPLES = """
- name: Configure QoS
  onyx_qos:
interfaces:
- Mpo7
- Mpo7
trust: L3
rewrite_pcp: disabled
rewrite_dscp: enabled
- name: Configure QoS
  onyx_qos:
interfaces:
- Eth1/1
- Eth1/2
trust: both
rewrite_pcp: disabled
rewrite_dscp: enabled
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device.
returned: always
type: list
sample:
- interface ethernet 1/16 qos trust L3
- interface mlag-port-channel 7 qos trust L3
- interface port-channel 1 qos trust L3
- interface mlag-port-channel 7 qos trust L2
- interface mlag-port-channel 7 qos rewrite dscp
- interface ethernet 1/16 qos rewrite pcp
- interface ethernet 1/1 no qos rewrite pcp
"""
import re
from ansible.module_utils.six import iteritems
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.mellanox.onyx.plugins.module_utils.network.onyx.onyx import show_cmd
from ansible_collections.mellanox.onyx.plugins.module_utils.network.onyx.onyx import BaseOnyxModule
class OnyxQosModule(BaseOnyxModule):
TRUST_CMD = "interface {0} {1} qos trust {2}"
NO_REWRITE_PCP_CMD = "interface {0} {1} no qos rewrite pcp"
NO_REWRITE_DSCP_CMD = "interface {0} {1} no qos rewrite dscp"
REWRITE_PCP_CMD = "interface {0} {1} qos rewrite pcp"
REWRITE_DSCP_CMD = "interface {0} {1} qos rewrite dscp"
REWRITE_PCP = "pcp"
REWRITE_DSCP = "dscp"
IF_ETH_REGEX = re.compile(r"^Eth(\d+\/\d+|Eth\d+\/\d+\d+)$")
IF_PO_REGEX = re.compile(r"^Po(\d+)$")
MLAG_NAME_REGEX = re.compile(r"^Mpo(\d+)$")
IF_TYPE_ETH = "ethernet"
PORT_CHANNEL = "port-channel"
MLAG_PORT_CHANNEL = "mlag-port-channel"
IF_TYPE_MAP = {
IF_TYPE_ETH: IF_ETH_REGEX,
PORT_CHANNEL: IF_PO_REGEX,
MLAG_PORT_CHANNEL: MLAG_NAME_REGEX
}
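    # Interface names are parsed into (type, id) pairs that feed the command templates above, e.g.
    # "Eth1/16" -> ("ethernet", "1/16"), "Po1" -> ("port-channel", "1"), "Mpo7" -> ("mlag-port-channel", "7").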
def init_module(self):
""" initialize module
"""
element_spec = dict(
interfaces=dict(type='list', required=True),
trust=dict(choices=['L2', 'L3', 'both'], default='L2'),
rewrite_pcp=dict(choices=['enabled', 'disabled'], default='disabled'),
rewrite_dscp=dict(choices=['enabled', 'disabled'], default='disabled')
)
argument_spec = dict()
argument_spec.update(element_spec)
self._module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True)
def get_required_config(self):
module_params = self._module.params
self._required_config = dict(module_params)
self.validate_param_values(self._required_config)
def _get_interface_type(self, if_name):
if_type = None
if_id = None
for interface_type, interface_regex in iteritems(self.IF_TYPE_MAP):
match = interface_regex.match(if_name)
if match:
if_type = interface_type
if_id = match.group(1)
break
return if_type, if_id
def _set_interface_qos_config(self, interface_qos_config, interface, if_type, if_id):
interface_qos_config = interface_qos_config[0].get(interface)
trust = interface_qos_config[0].get("Trust mode")
rewrite_dscp = interface_qos_config[0].get("DSCP rewrite")
rewrite_pcp = interface_qos_config[0].get("PCP,DEI rewrite")
self._current_config[interface] = dict(trust=trust, rewrite_dscp=rewrite_dscp,
rewrite_pcp=rewrite_pcp, if_type=if_type, if_id=if_id)
def _show_interface_qos(self, if_type, interface):
cmd = "show qos interface {0} {1}".format(if_type, interface)
return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)
def load_current_config(self):
self._current_config = dict()
for interface in self._required_config.get("interfaces"):
if_type, if_id = self._get_interface_type(interface)
if not if_id:
self._module.fail_json(
msg='unsupported interface: {0}'.format(interface))
interface_qos_config = self._show_interface_qos(if_type, if_id)
if interface_qos_config is not None:
self._set_interface_qos_config(interface_qos_config, interface, if_type, if_id)
else:
self._module.fail_json(
msg='Interface {0} does not exist on switch'.format(interface))
def generate_commands(self):
trust = self._required_config.get("trust")
rewrite_pcp = self._required_config.get("rewrite_pcp")
rewrite_dscp = self._required_config.get("rewrite_dscp")
for interface in self._required_config.get("interfaces"):
ignored1, ignored2, current_trust, if_type, if_id = self._get_current_rewrite_config(interface)
self._add_interface_trust_cmds(if_type, if_id, interface, trust, current_trust)
self._add_interface_rewrite_cmds(if_type, if_id, interface,
rewrite_pcp, rewrite_dscp)
def _get_current_rewrite_config(self, interface):
current_interface_qos_config = self._current_config.get(interface)
current_rewrite_pcp = current_interface_qos_config.get('rewrite_pcp')
current_rewrite_dscp = current_interface_qos_config.get('rewrite_dscp')
if_type = current_interface_qos_config.get("if_type")
if_id = current_interface_qos_config.get("if_id")
current_trust = current_interface_qos_config.get('trust')
return current_rewrite_pcp, current_rewrite_dscp, current_trust, if_type, if_id
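    # When the requested trust mode differs from the current one, the conflicting rewrite is disabled
    # first (emitting a "no qos rewrite ..." command and updating the cached state) and only then is the
    # trust command itself appended.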
def _add_interface_trust_cmds(self, if_type, if_id, interface, trust, current_trust):
current_rewrite_pcp, current_rewrite_dscp, ignored1, ignored2, ignored3 = self._get_current_rewrite_config(
interface)
if trust == "L3" and trust != current_trust:
self._add_no_rewrite_cmd(if_type, if_id, interface, self.REWRITE_DSCP, current_rewrite_dscp)
self._commands.append(self.TRUST_CMD.format(if_type, if_id, trust))
elif trust == "L2" and trust != current_trust:
self._add_no_rewrite_cmd(if_type, if_id, interface, self.REWRITE_PCP, current_rewrite_pcp)
self._commands.append(self.TRUST_CMD.format(if_type, if_id, trust))
elif trust == "both" and trust != current_trust:
self._add_no_rewrite_cmd(if_type, if_id, interface, self.REWRITE_DSCP, current_rewrite_dscp)
self._add_no_rewrite_cmd(if_type, if_id, interface, self.REWRITE_PCP, current_rewrite_pcp)
self._commands.append(self.TRUST_CMD.format(if_type, if_id, trust))
def _add_interface_rewrite_cmds(self, if_type, if_id, interface, rewrite_pcp, rewrite_dscp):
current_rewrite_pcp, current_rewrite_dscp, ignored1, ignored2, ignored3 = self._get_current_rewrite_config(
interface)
if rewrite_pcp == "enabled" and rewrite_pcp != current_rewrite_pcp:
self._commands.append(self.REWRITE_PCP_CMD.format(if_type, if_id))
elif rewrite_pcp == "disabled" and rewrite_pcp != current_rewrite_pcp:
self._commands.append(self.NO_REWRITE_PCP_CMD.format(if_type, if_id))
if rewrite_dscp == "enabled" and rewrite_dscp != current_rewrite_dscp:
self._commands.append(self.REWRITE_DSCP_CMD.format(if_type, if_id))
elif rewrite_dscp == "disabled" and rewrite_dscp != current_rewrite_dscp:
self._commands.append(self.NO_REWRITE_DSCP_CMD.format(if_type, if_id))
def _add_no_rewrite_cmd(self, if_type, if_id, interface, rewrite_type, current_rewrite):
if rewrite_type == self.REWRITE_PCP and current_rewrite == "enabled":
self._commands.append(self.NO_REWRITE_PCP_CMD.format(if_type, if_id))
self._current_config[interface]["rewrite_pcp"] = "disabled"
elif rewrite_type == self.REWRITE_DSCP and current_rewrite == "enabled":
self._commands.append(self.NO_REWRITE_DSCP_CMD.format(if_type, if_id))
self._current_config[interface]["rewrite_dscp"] = "disabled"
def main():
""" main entry point for module execution
"""
OnyxQosModule.main()
if __name__ == '__main__':
main()
avg_line_length: 39.844828
max_line_length: 115
alphanum_fraction: 0.682172
content_no_comment:
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: onyx_qos
author: "Anas Badaha (@anasb)"
short_description: Configures QoS
description:
- This module provides declarative management of Onyx QoS configuration
on Mellanox ONYX network devices.
notes:
- Tested on ONYX 3.6.8130
options:
interfaces:
description:
- list of interfaces name.
required: true
trust:
description:
- trust type.
choices: ['L2', 'L3', 'both']
default: L2
rewrite_pcp:
description:
- rewrite with type pcp.
choices: ['enabled', 'disabled']
default: disabled
rewrite_dscp:
description:
- rewrite with type dscp.
choices: ['enabled', 'disabled']
default: disabled
'''
EXAMPLES = """
- name: Configure QoS
onyx_QoS:
interfaces:
- Mpo7
- Mpo7
trust: L3
rewrite_pcp: disabled
rewrite_dscp: enabled
- name: Configure QoS
onyx_QoS:
interfaces:
- Eth1/1
- Eth1/2
trust: both
rewrite_pcp: disabled
rewrite_dscp: enabled
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device.
returned: always
type: list
sample:
- interface ethernet 1/16 qos trust L3
- interface mlag-port-channel 7 qos trust L3
- interface port-channel 1 qos trust L3
- interface mlag-port-channel 7 qos trust L2
- interface mlag-port-channel 7 qos rewrite dscp
- interface ethernet 1/16 qos rewrite pcp
- interface ethernet 1/1 no qos rewrite pcp
"""
import re
from ansible.module_utils.six import iteritems
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.mellanox.onyx.plugins.module_utils.network.onyx.onyx import show_cmd
from ansible_collections.mellanox.onyx.plugins.module_utils.network.onyx.onyx import BaseOnyxModule
class OnyxQosModule(BaseOnyxModule):
TRUST_CMD = "interface {0} {1} qos trust {2}"
NO_REWRITE_PCP_CMD = "interface {0} {1} no qos rewrite pcp"
NO_REWRITE_DSCP_CMD = "interface {0} {1} no qos rewrite dscp"
REWRITE_PCP_CMD = "interface {0} {1} qos rewrite pcp"
REWRITE_DSCP_CMD = "interface {0} {1} qos rewrite dscp"
REWRITE_PCP = "pcp"
REWRITE_DSCP = "dscp"
IF_ETH_REGEX = re.compile(r"^Eth(\d+\/\d+|Eth\d+\/\d+\d+)$")
IF_PO_REGEX = re.compile(r"^Po(\d+)$")
MLAG_NAME_REGEX = re.compile(r"^Mpo(\d+)$")
IF_TYPE_ETH = "ethernet"
PORT_CHANNEL = "port-channel"
MLAG_PORT_CHANNEL = "mlag-port-channel"
IF_TYPE_MAP = {
IF_TYPE_ETH: IF_ETH_REGEX,
PORT_CHANNEL: IF_PO_REGEX,
MLAG_PORT_CHANNEL: MLAG_NAME_REGEX
}
def init_module(self):
element_spec = dict(
interfaces=dict(type='list', required=True),
trust=dict(choices=['L2', 'L3', 'both'], default='L2'),
rewrite_pcp=dict(choices=['enabled', 'disabled'], default='disabled'),
rewrite_dscp=dict(choices=['enabled', 'disabled'], default='disabled')
)
argument_spec = dict()
argument_spec.update(element_spec)
self._module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True)
def get_required_config(self):
module_params = self._module.params
self._required_config = dict(module_params)
self.validate_param_values(self._required_config)
def _get_interface_type(self, if_name):
if_type = None
if_id = None
for interface_type, interface_regex in iteritems(self.IF_TYPE_MAP):
match = interface_regex.match(if_name)
if match:
if_type = interface_type
if_id = match.group(1)
break
return if_type, if_id
def _set_interface_qos_config(self, interface_qos_config, interface, if_type, if_id):
interface_qos_config = interface_qos_config[0].get(interface)
trust = interface_qos_config[0].get("Trust mode")
rewrite_dscp = interface_qos_config[0].get("DSCP rewrite")
rewrite_pcp = interface_qos_config[0].get("PCP,DEI rewrite")
self._current_config[interface] = dict(trust=trust, rewrite_dscp=rewrite_dscp,
rewrite_pcp=rewrite_pcp, if_type=if_type, if_id=if_id)
def _show_interface_qos(self, if_type, interface):
cmd = "show qos interface {0} {1}".format(if_type, interface)
return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)
def load_current_config(self):
self._current_config = dict()
for interface in self._required_config.get("interfaces"):
if_type, if_id = self._get_interface_type(interface)
if not if_id:
self._module.fail_json(
msg='unsupported interface: {0}'.format(interface))
interface_qos_config = self._show_interface_qos(if_type, if_id)
if interface_qos_config is not None:
self._set_interface_qos_config(interface_qos_config, interface, if_type, if_id)
else:
self._module.fail_json(
msg='Interface {0} does not exist on switch'.format(interface))
def generate_commands(self):
trust = self._required_config.get("trust")
rewrite_pcp = self._required_config.get("rewrite_pcp")
rewrite_dscp = self._required_config.get("rewrite_dscp")
for interface in self._required_config.get("interfaces"):
ignored1, ignored2, current_trust, if_type, if_id = self._get_current_rewrite_config(interface)
self._add_interface_trust_cmds(if_type, if_id, interface, trust, current_trust)
self._add_interface_rewrite_cmds(if_type, if_id, interface,
rewrite_pcp, rewrite_dscp)
def _get_current_rewrite_config(self, interface):
current_interface_qos_config = self._current_config.get(interface)
current_rewrite_pcp = current_interface_qos_config.get('rewrite_pcp')
current_rewrite_dscp = current_interface_qos_config.get('rewrite_dscp')
if_type = current_interface_qos_config.get("if_type")
if_id = current_interface_qos_config.get("if_id")
current_trust = current_interface_qos_config.get('trust')
return current_rewrite_pcp, current_rewrite_dscp, current_trust, if_type, if_id
def _add_interface_trust_cmds(self, if_type, if_id, interface, trust, current_trust):
current_rewrite_pcp, current_rewrite_dscp, ignored1, ignored2, ignored3 = self._get_current_rewrite_config(
interface)
if trust == "L3" and trust != current_trust:
self._add_no_rewrite_cmd(if_type, if_id, interface, self.REWRITE_DSCP, current_rewrite_dscp)
self._commands.append(self.TRUST_CMD.format(if_type, if_id, trust))
elif trust == "L2" and trust != current_trust:
self._add_no_rewrite_cmd(if_type, if_id, interface, self.REWRITE_PCP, current_rewrite_pcp)
self._commands.append(self.TRUST_CMD.format(if_type, if_id, trust))
elif trust == "both" and trust != current_trust:
self._add_no_rewrite_cmd(if_type, if_id, interface, self.REWRITE_DSCP, current_rewrite_dscp)
self._add_no_rewrite_cmd(if_type, if_id, interface, self.REWRITE_PCP, current_rewrite_pcp)
self._commands.append(self.TRUST_CMD.format(if_type, if_id, trust))
def _add_interface_rewrite_cmds(self, if_type, if_id, interface, rewrite_pcp, rewrite_dscp):
current_rewrite_pcp, current_rewrite_dscp, ignored1, ignored2, ignored3 = self._get_current_rewrite_config(
interface)
if rewrite_pcp == "enabled" and rewrite_pcp != current_rewrite_pcp:
self._commands.append(self.REWRITE_PCP_CMD.format(if_type, if_id))
elif rewrite_pcp == "disabled" and rewrite_pcp != current_rewrite_pcp:
self._commands.append(self.NO_REWRITE_PCP_CMD.format(if_type, if_id))
if rewrite_dscp == "enabled" and rewrite_dscp != current_rewrite_dscp:
self._commands.append(self.REWRITE_DSCP_CMD.format(if_type, if_id))
elif rewrite_dscp == "disabled" and rewrite_dscp != current_rewrite_dscp:
self._commands.append(self.NO_REWRITE_DSCP_CMD.format(if_type, if_id))
def _add_no_rewrite_cmd(self, if_type, if_id, interface, rewrite_type, current_rewrite):
if rewrite_type == self.REWRITE_PCP and current_rewrite == "enabled":
self._commands.append(self.NO_REWRITE_PCP_CMD.format(if_type, if_id))
self._current_config[interface]["rewrite_pcp"] = "disabled"
elif rewrite_type == self.REWRITE_DSCP and current_rewrite == "enabled":
self._commands.append(self.NO_REWRITE_DSCP_CMD.format(if_type, if_id))
self._current_config[interface]["rewrite_dscp"] = "disabled"
def main():
OnyxQosModule.main()
if __name__ == '__main__':
main()
is_comment_constant_removed: true
is_sharp_comment_removed: true

hexsha: 79074ed7087ce6b5d4aa4e4b91b5ef94dda18097
size: 6,022
ext: py
lang: Python
max_stars_repo_path: examples/hpgmg/hpgmgconf.py
max_stars_repo_name: kyushick/cdruntime
max_stars_repo_head_hexsha: de08c79aad373c9715922294c67a7482c62ba9f2
max_stars_repo_licenses: ["Unlicense"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: examples/hpgmg/hpgmgconf.py
max_issues_repo_name: kyushick/cdruntime
max_issues_repo_head_hexsha: de08c79aad373c9715922294c67a7482c62ba9f2
max_issues_repo_licenses: ["Unlicense"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: examples/hpgmg/hpgmgconf.py
max_forks_repo_name: kyushick/cdruntime
max_forks_repo_head_hexsha: de08c79aad373c9715922294c67a7482c62ba9f2
max_forks_repo_licenses: ["Unlicense"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
import sys,os
try:
import argparse
except ImportError:
print("""ERROR: Could not import argparse
Either use python2.7 or later (perhaps in a strange location such as
/bgsys/tools/python2.7.5-gnu-20130730/bin/hostpython) or install from
PyPI (https://pypi.python.org/pypi/argparse/).""")
sys.exit(1)
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == os.errno.EEXIST:
pass
else: raise
def main():
parser = argparse.ArgumentParser(description='Configure High-performance Geometric Multigrid (HPGMG)')
parser.add_argument('--arch', help='Name of this configuration', default=None)
parser.add_argument('--petsc-dir', help='PETSC_DIR', default=os.environ.get('PETSC_DIR',''))
parser.add_argument('--petsc-arch', help='PETSC_ARCH', default=os.environ.get('PETSC_ARCH',''))
parser.add_argument('--with-hpm', help='libHPM profiling library on Blue Gene ("1" or "/path/to/libmpihpm.a /path/to/libbgpm.a")')
cf = parser.add_argument_group('Compilers and flags')
cf.add_argument('--CC', help='Path to C compiler', default=os.environ.get('CC',''))
cf.add_argument('--CFLAGS', help='Flags for C compiler', default=os.environ.get('CFLAGS',''))
cf.add_argument('--CPPFLAGS', help='Flags for C preprocessor', default=os.environ.get('CPPFLAGS',''))
cf.add_argument('--LDFLAGS', help='Flags to pass to linker', default=os.environ.get('LDFLAGS',''))
cf.add_argument('--LDLIBS', help='Libraries to pass to linker', default=os.environ.get('LDLIBS',''))
fe = parser.add_argument_group('Finite Element options')
fe.add_argument('--fe', action='store_true', dest='fe', help='Build the Finite-Element solver')
fv = parser.add_argument_group('Finite Volume options')
fv.add_argument('--no-fv', action='store_false', dest='fv', help='Do not build the Finite-Volume solver')
fv.add_argument('--no-fv-mpi', action='store_false', dest='fv_mpi', help='Use MPI')
fv.add_argument('--fv-cycle', help='Multigrid cycle type', choices=['V','F','U'], default='F')
fv.add_argument('--no-fv-subcomm', action='store_false', dest='fv_subcomm', help='Build a subcommunicator for each level in the MG v-cycle to minimize the scope of MPI_AllReduce()')
fv.add_argument('--fv-coarse-solver', help='Use BiCGStab as a bottom (coarse grid) solver', choices=['bicgstab','cabicgstab','cg','cacg'], default='bicgstab')
fv.add_argument('--fv-smoother', help='Multigrid smoother', choices=['cheby','gsrb','jacobi','l1jacobi'], default='gsrb')
args = parser.parse_args()
if args.arch is None:
args.arch = args.petsc_arch
if not args.arch:
args.arch = 'build'
mkdir_p(args.arch)
configure(args)
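# Illustrative usage, assuming a thin wrapper script that calls main() (the generated
# reconfigure-<arch>.py written by configure() plays the same role for reconfiguration):
#   ./configure --arch=mybuild --CC=mpicc --CFLAGS='-O3'
#   make -j3 -C mybuild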
def configure(args):
open(os.path.join(args.arch,'Makefile'), 'w').write(makefile(args))
reconfname = os.path.join(args.arch,'reconfigure-%s.py' % args.arch)
open(reconfname, 'w').write('\n'.join([
'#!'+sys.executable,
'import os,sys',
'from argparse import Namespace',
"sys.path.insert(0, os.path.abspath('.'))",
'import hpgmgconf',
'hpgmgconf.configure(%r)' % args,
]))
os.chmod(reconfname,0o755)
print('Configuration complete in: %s' % os.path.realpath(args.arch))
print('To build: make -j3 -C %s' % args.arch)
def makefile(args):
if args.CC:
CC = args.CC
else:
if args.petsc_dir:
CC = '$(PCC)'
else:
CC = 'mpicc'
m = ['HPGMG_ARCH = %s' % args.arch,
'HPGMG_CC = %s' % CC,
'HPGMG_CFLAGS = %s' % (args.CFLAGS if args.CFLAGS else ('$(PCC_FLAGS) ' if args.petsc_dir else '')),
'HPGMG_CPPFLAGS = %s' % (('$(CCPPFLAGS) ' if args.petsc_dir else '') + args.CPPFLAGS),
'HPGMG_LDFLAGS = %s' % args.LDFLAGS,
'HPGMG_LDLIBS = %s' % args.LDLIBS,
'PETSC_DIR = %s' % args.petsc_dir,
'PETSC_ARCH = %s' % args.petsc_arch,
'PYTHON = %s' % sys.executable,
'SRCDIR = %s' % os.path.abspath(os.path.dirname(__name__)),]
if args.with_hpm:
m.append('CONFIG_HPM = y')
hpm_lib = args.with_hpm
try:
hpm_lib = int(hpm_lib)
except:
pass
if not isinstance(hpm_lib,str): # ALCF location
hpm_lib = '/soft/perftools/hpctw/lib/libmpihpm.a /bgsys/drivers/ppcfloor/bgpm/lib/libbgpm.a'
for p in hpm_lib.split():
assert os.path.exists(p), "HPM path '%s' not found" % p
m.append('HPGMG_LDLIBS += ' + hpm_lib)
m.append('HPGMG_CPPFLAGS += -DUSE_HPM=1')
if args.fv:
m.append('CONFIG_FV = y')
if args.fe and args.petsc_dir:
m.append('CONFIG_FE = y')
m.append('CONFIG_FV_CPPFLAGS = ' + hpgmg_fv_cflags(args))
if args.petsc_dir:
found = False
for variables_path in [os.path.join('lib', 'petsc', 'conf', 'variables'),
os.path.join('lib', 'petsc-conf', 'variables'),
os.path.join('conf', 'variables')]:
if os.path.exists(os.path.join(args.petsc_dir,variables_path)):
m.append('include $(PETSC_DIR)/' + variables_path)
found = True
if not found:
raise RuntimeError('Could not find PETSc variables file in PETSC_DIR=%s' % (args.petsc_dir,))
m.append('include $(SRCDIR)/base.mk\n')
return '\n'.join(m)
def hpgmg_fv_cflags(args):
defines = []
if args.fv_mpi:
defines.append('USE_MPI')
defines.append('USE_%s' % args.fv_coarse_solver.upper())
if args.fv_subcomm:
defines.append('USE_SUBCOMM')
defines.append('USE_%sCYCLES' % args.fv_cycle.upper())
defines.append('USE_%s' % args.fv_smoother.upper())
#defines.append('STENCIL_FUSE_DINV') # generally only good on compute-intensive architectures with good compilers
#defines.append('STENCIL_FUSE_BC')
return ' '.join('-D%s=1'%d for d in defines)
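# With the argument defaults above, the generated flag string is:
#   -DUSE_MPI=1 -DUSE_BICGSTAB=1 -DUSE_SUBCOMM=1 -DUSE_FCYCLES=1 -DUSE_GSRB=1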
avg_line_length: 48.176
max_line_length: 185
alphanum_fraction: 0.622551
content_no_comment:
import sys,os
try:
import argparse
except ImportError:
print("""ERROR: Could not import argparse
Either use python2.7 or later (perhaps in a strange location such as
/bgsys/tools/python2.7.5-gnu-20130730/bin/hostpython) or install from
PyPI (https://pypi.python.org/pypi/argparse/).""")
sys.exit(1)
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == os.errno.EEXIST:
pass
else: raise
def main():
parser = argparse.ArgumentParser(description='Configure High-performance Geometric Multigrid (HPGMG)')
parser.add_argument('--arch', help='Name of this configuration', default=None)
parser.add_argument('--petsc-dir', help='PETSC_DIR', default=os.environ.get('PETSC_DIR',''))
parser.add_argument('--petsc-arch', help='PETSC_ARCH', default=os.environ.get('PETSC_ARCH',''))
parser.add_argument('--with-hpm', help='libHPM profiling library on Blue Gene ("1" or "/path/to/libmpihpm.a /path/to/libbgpm.a")')
cf = parser.add_argument_group('Compilers and flags')
cf.add_argument('--CC', help='Path to C compiler', default=os.environ.get('CC',''))
cf.add_argument('--CFLAGS', help='Flags for C compiler', default=os.environ.get('CFLAGS',''))
cf.add_argument('--CPPFLAGS', help='Flags for C preprocessor', default=os.environ.get('CPPFLAGS',''))
cf.add_argument('--LDFLAGS', help='Flags to pass to linker', default=os.environ.get('LDFLAGS',''))
cf.add_argument('--LDLIBS', help='Libraries to pass to linker', default=os.environ.get('LDLIBS',''))
fe = parser.add_argument_group('Finite Element options')
fe.add_argument('--fe', action='store_true', dest='fe', help='Build the Finite-Element solver')
fv = parser.add_argument_group('Finite Volume options')
fv.add_argument('--no-fv', action='store_false', dest='fv', help='Do not build the Finite-Volume solver')
fv.add_argument('--no-fv-mpi', action='store_false', dest='fv_mpi', help='Use MPI')
fv.add_argument('--fv-cycle', help='Multigrid cycle type', choices=['V','F','U'], default='F')
fv.add_argument('--no-fv-subcomm', action='store_false', dest='fv_subcomm', help='Build a subcommunicator for each level in the MG v-cycle to minimize the scope of MPI_AllReduce()')
fv.add_argument('--fv-coarse-solver', help='Use BiCGStab as a bottom (coarse grid) solver', choices=['bicgstab','cabicgstab','cg','cacg'], default='bicgstab')
fv.add_argument('--fv-smoother', help='Multigrid smoother', choices=['cheby','gsrb','jacobi','l1jacobi'], default='gsrb')
args = parser.parse_args()
if args.arch is None:
args.arch = args.petsc_arch
if not args.arch:
args.arch = 'build'
mkdir_p(args.arch)
configure(args)
def configure(args):
open(os.path.join(args.arch,'Makefile'), 'w').write(makefile(args))
reconfname = os.path.join(args.arch,'reconfigure-%s.py' % args.arch)
open(reconfname, 'w').write('\n'.join([
'#!'+sys.executable,
'import os,sys',
'from argparse import Namespace',
"sys.path.insert(0, os.path.abspath('.'))",
'import hpgmgconf',
'hpgmgconf.configure(%r)' % args,
]))
os.chmod(reconfname,0o755)
print('Configuration complete in: %s' % os.path.realpath(args.arch))
print('To build: make -j3 -C %s' % args.arch)
def makefile(args):
if args.CC:
CC = args.CC
else:
if args.petsc_dir:
CC = '$(PCC)'
else:
CC = 'mpicc'
m = ['HPGMG_ARCH = %s' % args.arch,
'HPGMG_CC = %s' % CC,
'HPGMG_CFLAGS = %s' % (args.CFLAGS if args.CFLAGS else ('$(PCC_FLAGS) ' if args.petsc_dir else '')),
'HPGMG_CPPFLAGS = %s' % (('$(CCPPFLAGS) ' if args.petsc_dir else '') + args.CPPFLAGS),
'HPGMG_LDFLAGS = %s' % args.LDFLAGS,
'HPGMG_LDLIBS = %s' % args.LDLIBS,
'PETSC_DIR = %s' % args.petsc_dir,
'PETSC_ARCH = %s' % args.petsc_arch,
'PYTHON = %s' % sys.executable,
'SRCDIR = %s' % os.path.abspath(os.path.dirname(__name__)),]
if args.with_hpm:
m.append('CONFIG_HPM = y')
hpm_lib = args.with_hpm
try:
hpm_lib = int(hpm_lib)
except:
pass
if not isinstance(hpm_lib,str):
hpm_lib = '/soft/perftools/hpctw/lib/libmpihpm.a /bgsys/drivers/ppcfloor/bgpm/lib/libbgpm.a'
for p in hpm_lib.split():
assert os.path.exists(p), "HPM path '%s' not found" % p
m.append('HPGMG_LDLIBS += ' + hpm_lib)
m.append('HPGMG_CPPFLAGS += -DUSE_HPM=1')
if args.fv:
m.append('CONFIG_FV = y')
if args.fe and args.petsc_dir:
m.append('CONFIG_FE = y')
m.append('CONFIG_FV_CPPFLAGS = ' + hpgmg_fv_cflags(args))
if args.petsc_dir:
found = False
for variables_path in [os.path.join('lib', 'petsc', 'conf', 'variables'),
os.path.join('lib', 'petsc-conf', 'variables'),
os.path.join('conf', 'variables')]:
if os.path.exists(os.path.join(args.petsc_dir,variables_path)):
m.append('include $(PETSC_DIR)/' + variables_path)
found = True
if not found:
raise RuntimeError('Could not find PETSc variables file in PETSC_DIR=%s' % (args.petsc_dir,))
m.append('include $(SRCDIR)/base.mk\n')
return '\n'.join(m)
def hpgmg_fv_cflags(args):
defines = []
if args.fv_mpi:
defines.append('USE_MPI')
defines.append('USE_%s' % args.fv_coarse_solver.upper())
if args.fv_subcomm:
defines.append('USE_SUBCOMM')
defines.append('USE_%sCYCLES' % args.fv_cycle.upper())
defines.append('USE_%s' % args.fv_smoother.upper())
is_comment_constant_removed: true
is_sharp_comment_removed: true

hexsha: 79074efba33bf2529139acec90f826375b3a98e3
size: 8,449
ext: py
lang: Python
max_stars_repo_path: kernel_image_puller.py
max_stars_repo_name: dummys/kernel-image-puller
max_stars_repo_head_hexsha: c0154d5428215e120123530dbae1a01cb89631e6
max_stars_repo_licenses: ["BSD-3-Clause"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: kernel_image_puller.py
max_issues_repo_name: dummys/kernel-image-puller
max_issues_repo_head_hexsha: c0154d5428215e120123530dbae1a01cb89631e6
max_issues_repo_licenses: ["BSD-3-Clause"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: kernel_image_puller.py
max_forks_repo_name: dummys/kernel-image-puller
max_forks_repo_head_hexsha: c0154d5428215e120123530dbae1a01cb89631e6
max_forks_repo_licenses: ["BSD-3-Clause"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
import logging
import os
import queue
import requests
import time
from threading import Thread
cri_sock = os.getenv("KIP_CRI_SOCK", "unix:///var/run/containerd/containerd.sock")
cri_client = os.getenv("KIP_CRI_CLI", False)
gateway_host = os.getenv("KIP_GATEWAY_HOST", "http://localhost:8888")
num_pullers = int(os.getenv("KIP_NUM_PULLERS", "2"))
num_retries = int(os.getenv("KIP_NUM_RETRIES", "3"))
interval = int(os.getenv("KIP_INTERVAL", "300"))
log_level = os.getenv("KIP_LOG_LEVEL", "INFO")
POLICY_IF_NOT_PRESENT = "IfNotPresent"
POLICY_ALYWAYS = "Always"
policies = (POLICY_IF_NOT_PRESENT, POLICY_ALYWAYS)
policy = os.getenv("KIP_PULL_POLICY", POLICY_IF_NOT_PRESENT)
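# All runtime configuration is taken from the KIP_* environment variables read above.
# Hypothetical invocation (gateway URL and policy are example values only):
#   KIP_GATEWAY_HOST=http://my-gateway:8888 KIP_PULL_POLICY=Always python kernel_image_puller.py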
if cri_client and str(cri_client).lower() in ('yes', 'true', '1'):
from docker.errors import NotFound
from cri_api.channel import Channel
from cri_api.images import Images
from cri_api.exceptions import ImageServiceException as APIError
class DockerMocker:
def __init__(self, cli):
self.cli=cli
def get(self, img_name):
ret=self.cli.get_image(img_name)
if ret is None:
raise NotFound
else:
return ret
def pull(self, img_name):
try:
self.cli.pull_image(img_name)
except APIError as err:
if "failed to resolve image" in str(err):
raise NotFound(err)
else:
raise APIError(err)
class CriClient:
def __init__(self, cri_sock):
self.channel=Channel(cri_sock)
self.cli=Images(self.channel)
self.images=DockerMocker(self.cli)
docker_client = CriClient(cri_sock)
else:
from docker.client import DockerClient
from docker.errors import APIError
from docker.errors import NotFound
docker_client = DockerClient.from_env()
logging.basicConfig(format='[%(levelname)1.1s %(asctime)s %(name)s.%(threadName)s] %(message)s')
def get_kernelspecs():
"""Fetches the set of kernelspecs from the gateway, returning a dict of configured kernel specs"""
end_point = '{}/api/kernelspecs'.format(gateway_host)
logger.info("Fetching kernelspecs from '{}' ...".format(end_point))
resp = requests.get(end_point)
if not resp.ok:
raise requests.exceptions.HTTPError('Gateway server response: {}'.format(resp.status_code))
return resp.json()
def fetch_image_names():
"""Fetches the image names by hitting the /api/kernelspecs endpoint of the Gateway.
For process-proxy kernelspecs, the image names are contained in the config stanza - which
resides in the process-proxy stanza located in the metadata.
"""
kspecs = None
try:
kspecs_response = get_kernelspecs()
kspecs = kspecs_response.get('kernelspecs')
except Exception as ex:
logger.error("Got exception attempting to retrieve kernelspecs - retrying. Exception was: {}".format(ex))
finally:
if kspecs is None:
return False
# Locate the configured images within the kernelspecs and add to set for duplicate management
images = set()
for key in kspecs.keys():
metadata = kspecs.get(key).get('spec').get('metadata')
if metadata is not None:
process_proxy = metadata.get('process_proxy')
if process_proxy is not None:
config = process_proxy.get('config')
if config is not None:
image_name = config.get('image_name')
if image_name is not None:
images.add(image_name)
executor_image_name = config.get('executor_image_name')
if executor_image_name is not None:
images.add(executor_image_name)
# Add the image names to the name queue
for image_name in images:
name_queue.put_nowait(image_name)
return True
def pull_image(image_name):
"""Pulls the image.
If the policy is `IfNotPresent` the set of pulled image names is
checked and, if present, the method returns. Otherwise, the pull attempt is made
and the set of pulled images is updated, when successful.
Since NotFound exceptions are tolerated, we trap for only that exception and let
the caller handle others.
"""
if policy == POLICY_IF_NOT_PRESENT:
if image_name in pulled_images:
# Image has been pulled, but make sure it still exists. If it doesn't exist
# let this drop through to actual pull
logger.info("Image '{}' already pulled and policy is '{}'. Checking existence.".
format(image_name, policy))
try:
t1 = time.time()
docker_client.images.get(image_name)
t2 = time.time()
logger.debug("Checked existence of image '{}' in {:.3f} secs.".format(image_name, t2 - t1))
return
except NotFound:
pulled_images.remove(image_name)
logger.warning("Previously pulled image '{}' was not found - attempting pull...".format(image_name))
logger.debug("Pulling image '{}'...".format(image_name))
try:
t1 = time.time()
docker_client.images.pull(image_name)
t2 = time.time()
pulled_images.add(image_name)
logger.info("Pulled image '{}' in {:.3f} secs.".format(image_name, t2 - t1))
except NotFound:
logger.warning("Image '{}' was not found!".format(image_name))
def puller():
"""Thread-based puller.
Gets image name from the queue and attempts to pull the image. Any issues, except
for NotFound, are retried up to num_retries times. Once the image has been pulled, it's not found or the
retries have been exceeded, the queue task is marked as done.
"""
while True:
image_name = name_queue.get()
if image_name is None:
break
i = 0
while i < num_retries:
try:
pull_image(image_name)
break
except APIError as ex:
i += 1
if i < num_retries:
logger.warning("Attempt {} to pull image '{}' encountered exception - retrying. Exception was: {}".
format(i, image_name, ex))
else:
logger.error("Attempt {} to pull image '{}' failed with exception: {}".
format(i, image_name, ex))
name_queue.task_done()
if __name__ == "__main__":
logger = logging.getLogger('kernel_image_puller')
logger.setLevel(log_level)
# Determine pull policy.
pulled_images = set()
if policy not in policies:
logger.warning("Invalid pull policy detected in KIP_PULL_POLICY: '{}'. Using policy '{}'.".
format(policy, POLICY_IF_NOT_PRESENT))
policy = POLICY_IF_NOT_PRESENT
logger.info("Starting Kernel Image Puller with the following parameters:")
logger.info("KIP_GATEWAY_HOST: {}".format(gateway_host))
logger.info("KIP_CRI_CLI: {}".format(cri_client))
logger.info("KIP_CRI_SOCK: {}".format(cri_sock))
logger.info("KIP_INTERVAL: {} secs".format(interval))
logger.info("KIP_NUM_PULLERS: {}".format(num_pullers))
logger.info("KIP_NUM_RETRIES: {}".format(num_retries))
logger.info("KIP_PULL_POLICY: {}".format(policy))
logger.info("KIP_LOG_LEVEL: {}\n".format(log_level))
# Create an empty queue and start the puller threads. The number of puller threads is configurable.
name_queue = queue.Queue()
threads = []
for i in range(num_pullers):
t = Thread(target=puller, name="t{}".format(i + 1))
t.start()
threads.append(t)
# Fetch the image names, then wait for name queue to drain. Once drained, or if there were issues
# fetching the image names, wait the interval number of seconds and perform the operation again.
wait_interval = 5 # Start with 5 seconds to ensure EG service gets started...
time.sleep(wait_interval)
while True:
fetched = fetch_image_names()
if fetched:
wait_interval = interval # Once we have fetched kernelspecs, update wait_interval
name_queue.join()
logger.info("Images pulled. Sleeping {} seconds...\n".format(wait_interval))
else:
logger.info("Sleeping {} seconds to fetch image names...\n".format(wait_interval))
time.sleep(wait_interval)
avg_line_length: 37.057018
max_line_length: 120
alphanum_fraction: 0.640549
content_no_comment:
import logging
import os
import queue
import requests
import time
from threading import Thread
cri_sock = os.getenv("KIP_CRI_SOCK", "unix:///var/run/containerd/containerd.sock")
cri_client = os.getenv("KIP_CRI_CLI", False)
gateway_host = os.getenv("KIP_GATEWAY_HOST", "http://localhost:8888")
num_pullers = int(os.getenv("KIP_NUM_PULLERS", "2"))
num_retries = int(os.getenv("KIP_NUM_RETRIES", "3"))
interval = int(os.getenv("KIP_INTERVAL", "300"))
log_level = os.getenv("KIP_LOG_LEVEL", "INFO")
POLICY_IF_NOT_PRESENT = "IfNotPresent"
POLICY_ALYWAYS = "Always"
policies = (POLICY_IF_NOT_PRESENT, POLICY_ALYWAYS)
policy = os.getenv("KIP_PULL_POLICY", POLICY_IF_NOT_PRESENT)
if cri_client or cri_client in ('Yes', 'yes', 'True', 'true'):
from docker.errors import NotFound
from cri_api.channel import Channel
from cri_api.images import Images
from cri_api.exceptions import ImageServiceException as APIError
class DockerMocker:
def __init__(self, cli):
self.cli=cli
def get(self, img_name):
ret=self.cli.get_image(img_name)
if ret is None:
raise NotFound
else:
return ret
def pull(self, img_name):
try:
self.cli.pull_image(img_name)
except APIError as err:
if "failed to resolve image" in str(err):
raise NotFound(err)
else:
raise APIError(err)
class CriClient:
def __init__(self, cri_sock):
self.channel=Channel(cri_sock)
self.cli=Images(self.channel)
self.images=DockerMocker(self.cli)
docker_client = CriClient(cri_sock)
else:
from docker.client import DockerClient
from docker.errors import APIError
from docker.errors import NotFound
docker_client = DockerClient.from_env()
logging.basicConfig(format='[%(levelname)1.1s %(asctime)s %(name)s.%(threadName)s] %(message)s')
def get_kernelspecs():
end_point = '{}/api/kernelspecs'.format(gateway_host)
logger.info("Fetching kernelspecs from '{}' ...".format(end_point))
resp = requests.get(end_point)
if not resp.ok:
raise requests.exceptions.HTTPError('Gateway server response: {}'.format(resp.status_code))
return resp.json()
def fetch_image_names():
kspecs = None
try:
kspecs_response = get_kernelspecs()
kspecs = kspecs_response.get('kernelspecs')
except Exception as ex:
logger.error("Got exception attempting to retrieve kernelspecs - retrying. Exception was: {}".format(ex))
finally:
if kspecs is None:
return False
images = set()
for key in kspecs.keys():
metadata = kspecs.get(key).get('spec').get('metadata')
if metadata is not None:
process_proxy = metadata.get('process_proxy')
if process_proxy is not None:
config = process_proxy.get('config')
if config is not None:
image_name = config.get('image_name')
if image_name is not None:
images.add(image_name)
executor_image_name = config.get('executor_image_name')
if executor_image_name is not None:
images.add(executor_image_name)
for image_name in images:
name_queue.put_nowait(image_name)
return True
def pull_image(image_name):
if policy == POLICY_IF_NOT_PRESENT:
if image_name in pulled_images:
# let this drop through to actual pull
logger.info("Image '{}' already pulled and policy is '{}'. Checking existence.".
format(image_name, policy))
try:
t1 = time.time()
docker_client.images.get(image_name)
t2 = time.time()
logger.debug("Checked existence of image '{}' in {:.3f} secs.".format(image_name, t2 - t1))
return
except NotFound:
pulled_images.remove(image_name)
logger.warning("Previously pulled image '{}' was not found - attempting pull...".format(image_name))
logger.debug("Pulling image '{}'...".format(image_name))
try:
t1 = time.time()
docker_client.images.pull(image_name)
t2 = time.time()
pulled_images.add(image_name)
logger.info("Pulled image '{}' in {:.3f} secs.".format(image_name, t2 - t1))
except NotFound:
logger.warning("Image '{}' was not found!".format(image_name))
def puller():
while True:
image_name = name_queue.get()
if image_name is None:
break
i = 0
while i < num_retries:
try:
pull_image(image_name)
break
except APIError as ex:
i += 1
if i < num_retries:
logger.warning("Attempt {} to pull image '{}' encountered exception - retrying. Exception was: {}".
format(i, image_name, ex))
else:
logger.error("Attempt {} to pull image '{}' failed with exception: {}".
format(i, image_name, ex))
name_queue.task_done()
if __name__ == "__main__":
logger = logging.getLogger('kernel_image_puller')
logger.setLevel(log_level)
# Determine pull policy.
pulled_images = set()
if policy not in policies:
logger.warning("Invalid pull policy detected in KIP_PULL_POLICY: '{}'. Using policy '{}'.".
format(policy, POLICY_IF_NOT_PRESENT))
policy = POLICY_IF_NOT_PRESENT
logger.info("Starting Kernel Image Puller with the following parameters:")
logger.info("KIP_GATEWAY_HOST: {}".format(gateway_host))
logger.info("KIP_CRI_CLI: {}".format(cri_client))
logger.info("KIP_CRI_SOCK: {}".format(cri_sock))
logger.info("KIP_INTERVAL: {} secs".format(interval))
logger.info("KIP_NUM_PULLERS: {}".format(num_pullers))
logger.info("KIP_NUM_RETRIES: {}".format(num_retries))
logger.info("KIP_PULL_POLICY: {}".format(policy))
logger.info("KIP_LOG_LEVEL: {}\n".format(log_level))
# Create an empty queue and start the puller threads. The number of puller threads is configurable.
name_queue = queue.Queue()
threads = []
for i in range(num_pullers):
t = Thread(target=puller, name="t{}".format(i + 1))
t.start()
threads.append(t)
# Fetch the image names, then wait for name queue to drain. Once drained, or if there were issues
# fetching the image names, wait the interval number of seconds and perform the operation again.
wait_interval = 5 # Start with 5 seconds to ensure EG service gets started...
time.sleep(wait_interval)
while True:
fetched = fetch_image_names()
if fetched:
wait_interval = interval # Once we have fetched kernelspecs, update wait_interval
name_queue.join()
logger.info("Images pulled. Sleeping {} seconds...\n".format(wait_interval))
else:
logger.info("Sleeping {} seconds to fetch image names...\n".format(wait_interval))
time.sleep(wait_interval)
is_comment_constant_removed: true
is_sharp_comment_removed: true

hexsha: 7907502c72435035b8773b2e9d3de98a759ac87a
size: 17,141
ext: py
lang: Python
max_stars_repo_path: evolution/diversity.py
max_stars_repo_name: narendasan/neural-mmo
max_stars_repo_head_hexsha: 36a588db0021cccd7275cebef2cbdc5ee8eb40d5
max_stars_repo_licenses: ["MIT"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: evolution/diversity.py
max_issues_repo_name: narendasan/neural-mmo
max_issues_repo_head_hexsha: 36a588db0021cccd7275cebef2cbdc5ee8eb40d5
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: evolution/diversity.py
max_forks_repo_name: narendasan/neural-mmo
max_forks_repo_head_hexsha: 36a588db0021cccd7275cebef2cbdc5ee8eb40d5
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
from pdb import set_trace as TT
import numpy as np
import scipy
from scipy.spatial import ConvexHull
import skimage
from skimage.morphology import disk
import skbio
global trg_image
trg_image = None
def diversity_calc(config):
div_calc_name = config.FITNESS_METRIC
return get_div_calc(div_calc_name)
def get_div_calc(div_calc_name):
if div_calc_name == 'L2':
calc_diversity = calc_diversity_l2
elif div_calc_name == 'InvL2':
calc_diversity = calc_homogeneity_l2
elif div_calc_name == 'Differential':
calc_diversity = calc_differential_entropy
elif div_calc_name == 'Discrete':
calc_diversity = calc_discrete_entropy_2
elif div_calc_name == 'Hull':
calc_diversity = calc_convex_hull
elif div_calc_name == 'Sum':
calc_diversity = sum_experience
elif div_calc_name == 'Lifespans': # or config.FITNESS_METRIC == 'ALP':
calc_diversity = sum_lifespans
elif div_calc_name == 'Lifetimes':
calc_diversity = calc_mean_lifetime
elif div_calc_name == 'Actions':
calc_diversity = calc_mean_actions_matched
elif div_calc_name == 'MapTest':
calc_diversity = calc_local_map_entropy
elif div_calc_name == 'MapTestText':
calc_diversity = ham_text
get_trg_image()
elif div_calc_name == 'y_deltas':
calc_diversity = calc_y_deltas
    elif div_calc_name in ('Scores', 'ALP'):
        calc_diversity = calc_scores
    else:
        # config is not in scope here; report the unsupported name that was passed in
        raise Exception('Unsupported fitness function: {}'.format(div_calc_name))
return calc_diversity
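# Usage sketch (assumes a config object exposing FITNESS_METRIC, as used above):
#   calc = diversity_calc(config)              # e.g. config.FITNESS_METRIC == 'Differential'
#   score = calc(agent_stats, verbose=False)   # agent_stats dict with 'skills' and 'lifespans' entries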
def get_trg_image():
from PIL import Image, ImageDraw, ImageFont
font_size = 15
try:
font = ImageFont.truetype("arial.ttf", font_size)
except OSError:
try:
font = ImageFont.truetype("LiberationMono-Regular.ttf", font_size)
except OSError:
font = ImageFont.truetype("SFNSMono.ttf", 32)
global trg_image
trg_image = Image.new(mode = "RGB", size=(50, 50))
draw = ImageDraw.Draw(trg_image)
draw.text((1,1), "Evo", font=font, fill=(255,0,0))
draw.text((1,15), "NMMO", font=font, fill=(255,0,0))
draw.text((1,32), "¯\_(ツ)_/¯", font=font, fill=(255,0,0))
trg_image.save("trg_img.png")
trg_image = (np.array(trg_image)[:, :, 0] / 255 * 8).astype(np.uint8)
def ham_text(individual, config):
if trg_image is None:
get_trg_image()
map_arr = individual.chromosome.map_arr[10:-10, 10:-10]
return -(trg_image != map_arr).sum()
def calc_map_entropies(individual, config, verbose=False):
glob_ent = calc_global_map_entropy(individual, config)
loc_ent = calc_local_map_entropy(individual, config)
if verbose:
print('global entropy: {}\nlocal entropy: {}'.format(glob_ent, loc_ent))
return [glob_ent[0], loc_ent]
def calc_global_map_entropy(individual, config):
# FIXME: hack to ignore lava borders
b = config.TERRAIN_BORDER
map_arr = individual.chromosome.map_arr[b:-b, b:-b]
ent = scipy.stats.entropy(np.bincount(map_arr.reshape(-1), minlength=individual.n_tiles))
ent = ent * 100 / np.log(individual.n_tiles)
return [ent]
def calc_local_map_entropy(individual, config):
# FIXME: hack to ignore lava borders
b = config.TERRAIN_BORDER
map_arr = individual.chromosome.map_arr[b:-b, b:-b]
local_ent = skimage.filters.rank.entropy(map_arr, disk(3))
local_ent = local_ent.mean() * 100 / np.log2(individual.n_tiles)
return local_ent.item()
def get_pop_stats(agent_stats, pop=None):
# Get list of all populations for which we need stats
pops = agent_stats[0].keys() if pop is None else [pop]
# Get 1D array of agent stats
stats = [stats_i[p] for p in pops for stats_i in agent_stats]
if len(stats[0].shape) == 2:
# then rows correspond to agents so we stack them vertically (concatenate along axis 1)
return np.vstack(stats)
elif len(stats[0].shape) == 1:
# then each agent has a scalar value so we concatenate along axis 0
return np.hstack(stats)
raise Exception("Oy! Dafuk type o' agent data is this?")
def contract_by_lifespan(agent_stats, lifespans):
'''Pull agents close to their mean according to how short-lived they were. For punishing abundance of premature death
when rewarding diversity.'''
weights = sigmoid_lifespan(lifespans)
n_agents = lifespans.shape[0]
mean_agent = agent_stats.mean(axis=0)
mean_agents = np.repeat(mean_agent.reshape(1, mean_agent.shape[0]), n_agents, axis=0)
agent_deltas = mean_agents - agent_stats
agent_skills = agent_stats + (weights * agent_deltas.T).T
return agent_skills
def expand_by_lifespan(agent_stats, lifespans):
'''Push agents further from their mean according to how short-lived they were. For punishing abundance of premature
death when rewarding homogeneity.'''
weights = sigmoid_lifespan(lifespans)
n_agents = lifespans.shape[0]
mean_agent = agent_stats.mean(axis=0)
mean_agents = np.repeat(mean_agent.reshape(1, mean_agent.shape[0]), n_agents, axis=0)
agent_deltas = mean_agents - agent_stats
# Displace agents by at most 100 units (otherwise we will not punish agents at all if they are already perfectly
# homogenous, for example.
agent_deltas = agent_deltas / np.linalg.norm(agent_deltas) * 100
agent_skills = agent_stats - (weights * agent_deltas.T).T
return agent_skills
def calc_scores(agent_stats, skill_headers=None, verbose=False):
scores = np.hstack(agent_stats['scores'])
if verbose:
print('scores: {}'.format(scores))
return np.mean(scores)
def calc_mean_actions_matched(agent_stats, skill_headers=None, verbose=False):
actions_matched = np.hstack(agent_stats['actions_matched'])
if verbose:
print(actions_matched)
# print(agent_stats['lifespans'])
return np.mean(actions_matched)
def calc_y_deltas(agent_stats, skill_headers=None, verbose=False):
y_deltas = np.hstack(agent_stats['y_deltas'])
if verbose:
print('y_deltas: {}'.format(y_deltas))
return np.mean(y_deltas)
def calc_mean_lifetime(agent_stats, skill_headers=None, verbose=False, pop=None):
lifetimes = get_pop_stats(agent_stats['lifespans'], pop)
if len(lifetimes) != 0:
lifetimes = np.hstack(lifetimes)
    else:
        lifetimes = np.array([0])
    mean_lifetime = lifetimes.mean()
return mean_lifetime
def sum_lifespans(agent_stats, skill_headers=None, n_policies=1, verbose=False, pop=None):
lifespans = get_pop_stats(agent_stats['lifespans'], pop=pop)
score = lifespans.mean()
if verbose:
print('Mean lifespan, pop {}: {}'.format(pop, score))
return score
def sum_experience(agent_stats, skill_headers=None, verbose=False, pop=None):
'''Simply take the sum of XP over skills and agents.'''
# No need to weight by lifespan, since high lifespan is a prerequisite for high XP.
agent_skills = get_pop_stats(agent_stats['skills'], pop)
lifespans = get_pop_stats(agent_stats['lifespans'], pop)
a_skills = np.vstack(agent_skills)
a_lifespans = np.hstack(lifespans)
n_agents, n_skills = a_skills.shape
mean_xp = a_skills.sum() / (n_agents * n_skills)
if verbose:
print('skills')
print(a_skills.T)
print('lifespans')
print(a_lifespans)
print('mean xp:', mean_xp)
print()
return mean_xp
def sigmoid_lifespan(x):
# This basically assumes max lifespan is at least 100. Larger max lifespans won't really be a problem since this
# function converges to 1.
res = 1 / (1 + np.exp(0.1*(-x+50)))
return res
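# e.g. sigmoid_lifespan(0) ≈ 0.0067, sigmoid_lifespan(50) = 0.5, sigmoid_lifespan(100) ≈ 0.993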
def calc_differential_entropy(agent_stats, skill_headers=None, verbose=False, infos={}, pop=None, punish_youth=True):
agent_skills = get_pop_stats(agent_stats['skills'], pop)
lifespans = get_pop_stats(agent_stats['lifespans'], pop)
a_skills = agent_skills
a_lifespans = lifespans
assert a_skills.shape[0] == a_lifespans.shape[0]
if verbose:
print(skill_headers)
print(a_skills.transpose())
print(len(agent_skills), 'populations')
print('lifespans')
print(a_lifespans)
if punish_youth:
# Below is an alternative way of weighting by lifespan
# weights = sigmoid_lifespan(a_lifespans)
# mean = np.average(a_skills, axis=0, weights=weights)
# cov = np.cov(a_skills,rowvar=0, aweights=weights)
# Instead, we'll just contract as usual
a_skills = contract_by_lifespan(a_skills, a_lifespans)
mean = np.average(a_skills, axis=0)
cov = np.cov(a_skills,rowvar=0)
gaussian = scipy.stats.multivariate_normal(mean=mean, cov=cov, allow_singular=True)
infos['gaussian'] = gaussian
score = gaussian.entropy()
if verbose:
print('score:', score)
return score
def calc_convex_hull(agent_stats, skill_headers=None, verbose=False, infos={}, pop=None, punish_youth=True):
'''Calculate the diversity of a population of agents in skill-space by computing the volume inside the convex hull of
the agents when treated as points in this space.'''
agent_skills = get_pop_stats(agent_stats['skills'], pop)
lifespans = get_pop_stats(agent_stats['lifespans'], pop)
agent_skills = np.vstack(agent_skills)
n_skills = agent_skills.shape[1]
lifespans = np.hstack(lifespans)
if verbose:
print('skills:')
print(agent_skills.transpose())
print('lifespans:')
print(lifespans)
print(len(agent_stats['lifespans']), 'populations')
if punish_youth:
agent_skills = contract_by_lifespan(agent_skills, lifespans)
if n_skills == 1:
# Max distance, i.e. a 1D hull
score = agent_skills.max() - agent_skills.mean()
else:
try:
hull = ConvexHull(agent_skills, qhull_options='QJ')
infos['hull'] = hull
score = hull.volume
score = score ** (1 / n_skills)
except Exception as e:
print(e)
score = 0
if verbose:
print('score:', score)
return score
def calc_discrete_entropy_2(agent_stats, skill_headers=None, verbose=False, pop=None, punish_youth=True):
agent_skills = get_pop_stats(agent_stats['skills'], pop)
lifespans = get_pop_stats(agent_stats['lifespans'], pop)
agent_skills_0 = agent_skills= np.vstack(agent_skills)
lifespans = np.hstack(lifespans)
n_agents = lifespans.shape[0]
if n_agents == 1:
return -np.float('inf')
n_skills = agent_skills.shape[1]
if verbose:
print('skills')
print(agent_skills_0.transpose())
print('lifespans')
print(lifespans)
agent_skills = np.where(agent_skills == 0, 0.0000001, agent_skills)
if punish_youth:
# Below is a v funky way of punishing by lifespan
# weights = sigmoid_lifespan(lifespans)
# # contract population toward mean according to lifespan
# # mean experience level for each agent
# mean_skill = agent_skills.mean(axis=1)
# # mean skill vector of an agent
# mean_agent = agent_skills.mean(axis=0)
# assert mean_skill.shape[0] == n_agents
# assert mean_agent.shape[0] == n_skills
# mean_skills = np.repeat(mean_skill.reshape(mean_skill.shape[0], 1), n_skills, axis=1)
# mean_agents = np.repeat(mean_agent.reshape(1, mean_agent.shape[0]), n_agents, axis=0)
# agent_deltas = agent_skills - mean_agents
# skill_deltas = agent_skills - mean_skills
# a_skills_skills = mean_agents + (weights * agent_deltas.transpose()).transpose()
# a_skills_agents = mean_skills + (weights * skill_deltas.transpose()).transpose()
# div_agents = skbio.diversity.alpha_diversity('shannon', a_skills_agents).mean()
# div_skills = skbio.diversity.alpha_diversity('shannon', a_skills_skills.transpose()).mean()
# We'll just do the usual
a_skills = contract_by_lifespan(agent_skills, lifespans)
div_agents = skbio.diversity.alpha_diversity('shannon', a_skills).mean()
div_skills = skbio.diversity.alpha_diversity('shannon', a_skills.transpose()).mean()
# div_lifespans = skbio.diversity.alpha_diversity('shannon', lifespans)
score = -(div_agents * div_skills)#/ div_lifespans#/ len(agent_skills)**2
score = score#* 100 #/ (n_agents * n_skills)
if verbose:
print('Score:', score)
return score
def calc_discrete_entropy(agent_stats, skill_headers=None, pop=None):
agent_skills = get_pop_stats(agent_stats['skills'], pop)
lifespans = get_pop_stats(agent_stats['lifespans'], pop)
agent_skills_0 = np.vstack(agent_skills)
agent_lifespans = np.hstack(lifespans)
weights = sigmoid_lifespan(agent_lifespans)
agent_skills = agent_skills_0.transpose() * weights
agent_skills = agent_skills.transpose()
BASE_VAL = 0.0001
# split between skill and agent entropy
n_skills = len(agent_skills[0])
n_pop = len(agent_skills)
agent_sums = [sum(skills) for skills in agent_skills]
i = 0
# ensure that we will not be dividing by zero when computing probabilities
for a in agent_sums:
if a == 0:
agent_sums[i] = BASE_VAL * n_skills
i += 1
skill_sums = [0 for i in range(n_skills)]
for i in range(n_skills):
for a_skills in agent_skills:
skill_sums[i] += a_skills[i]
if skill_sums[i] == 0:
skill_sums[i] = BASE_VAL * n_pop
skill_ents = []
for i in range(n_skills):
skill_ent = 0
for j in range(n_pop):
a_skill = agent_skills[j][i]
if a_skill == 0:
a_skill = BASE_VAL
p = a_skill / skill_sums[i]
if p == 0:
skill_ent += 0
else:
skill_ent += p * np.log(p)
skill_ent = skill_ent / (n_pop)
skill_ents.append(skill_ent)
agent_ents = []
for j in range(n_pop):
agent_ent = 0
for i in range(n_skills):
a_skill = agent_skills[j][i]
if a_skill == 0:
a_skill = BASE_VAL
p = a_skill / agent_sums[j]
if p == 0:
agent_ent += 0
else:
agent_ent += p * np.log(p)
agent_ent = agent_ent / (n_skills)
agent_ents.append(agent_ent)
agent_score = np.mean(agent_ents)
skill_score = np.mean(skill_ents)
# score = (alpha * skill_score + (1 - alpha) * agent_score)
score = -(skill_score * agent_score)
score = score * 100#/ n_pop**2
print('agent skills:\n{}\n{}'.format(skill_headers, np.array(agent_skills_0.transpose())))
print('lifespans:\n{}'.format(lifespans))
# print('skill_ents:\n{}\nskill_mean:\n{}\nagent_ents:\n{}\nagent_mean:{}\nscore:\n{}\n'.format(
# np.array(skill_ents), skill_score, np.array(agent_ents), agent_score, score))
print('score:\n{}'.format(score))
return score
def calc_homogeneity_l2(agent_stats, skill_headers=None, verbose=False, pop=None, punish_youth=True):
'''Use L2 distance to punish agents for having high mean pairwise distance. Optimal state is all agents at the same
point in skill-space, with maximal lifespans.'''
if 'skills' not in agent_stats:
raise Exception('We should be including dead agents in this calculation, so we should get at least some skill '
'stats back here')
agent_skills = get_pop_stats(agent_stats['skills'], pop)
lifespans = get_pop_stats(agent_stats['lifespans'], pop)
assert len(agent_skills) == len(lifespans)
if punish_youth:
agent_skills = expand_by_lifespan(agent_skills, lifespans)
n_agents = agent_skills.shape[0]
a = agent_skills
b = a.reshape(n_agents, 1, a.shape[1])
# https://stackoverflow.com/questions/43367001/how-to-calculate-euclidean-distance-between-pair-of-rows-of-a-numpy-array
distances = np.sqrt(np.einsum('ijk, ijk->ij', a - b, a - b))
score = np.sum(distances) / n_agents ** 2
if verbose:
# print(skill_headers)
print('agent skills:\n{}'.format(a.transpose()))
print('lifespans:\n{}'.format(lifespans))
print('score:\n{}\n'.format(
score))
return -score
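# Editor's note -- illustrative sketch, not part of the original module. The einsum
# expression used above (and in calc_diversity_l2 below) builds the full matrix of
# pairwise Euclidean distances; this helper only documents that equivalence against a
# naive double loop. It assumes numpy is imported as np, as it is elsewhere in this file.
def _demo_pairwise_l2(n_agents=4, n_skills=3):
    a = np.random.rand(n_agents, n_skills)
    b = a.reshape(n_agents, 1, n_skills)
    # Vectorized pairwise distances, identical to the trick used above.
    fast = np.sqrt(np.einsum('ijk, ijk->ij', a - b, a - b))
    # Reference implementation: explicit loops over agent pairs.
    slow = np.array([[np.linalg.norm(a[i] - a[j]) for j in range(n_agents)]
                     for i in range(n_agents)])
    assert np.allclose(fast, slow)
    # The diversity score above is the mean of this matrix: distances.sum() / n_agents**2.
    return fast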
def calc_diversity_l2(agent_stats, skill_headers=None, verbose=False, pop=None, punish_youth=False):
if 'skills' not in agent_stats:
return 0
agent_skills = get_pop_stats(agent_stats['skills'], pop)
lifespans = get_pop_stats(agent_stats['lifespans'], pop)
assert len(agent_skills) == len(lifespans)
if punish_youth:
agent_skills = contract_by_lifespan(agent_skills, lifespans)
n_agents = agent_skills.shape[0]
a = agent_skills
b = a.reshape(n_agents, 1, a.shape[1])
# https://stackoverflow.com/questions/43367001/how-to-calculate-euclidean-distance-between-pair-of-rows-of-a-numpy-array
distances = np.sqrt(np.einsum('ijk, ijk->ij', a-b, a-b))
score = np.sum(distances) / n_agents ** 2
if verbose:
# print(skill_headers)
print('agent skills:\n{}'.format(a.transpose()))
print('lifespans:\n{}'.format(lifespans))
print('score:\n{}\n'.format(
score))
return score
DIV_CALCS = [(calc_diversity_l2, 'mean pairwise L2'), (calc_differential_entropy, 'differential entropy'), (calc_discrete_entropy_2, 'discrete entropy'), (calc_convex_hull, 'convex hull volume'), (sum_lifespans, 'lifespans')]
| 37.589912
| 225
| 0.687066
|
from pdb import set_trace as TT
import numpy as np
import scipy
from scipy.spatial import ConvexHull
import skimage
from skimage.morphology import disk
import skbio
global trg_image
trg_image = None
def diversity_calc(config):
div_calc_name = config.FITNESS_METRIC
return get_div_calc(div_calc_name)
def get_div_calc(div_calc_name):
if div_calc_name == 'L2':
calc_diversity = calc_diversity_l2
elif div_calc_name == 'InvL2':
calc_diversity = calc_homogeneity_l2
elif div_calc_name == 'Differential':
calc_diversity = calc_differential_entropy
elif div_calc_name == 'Discrete':
calc_diversity = calc_discrete_entropy_2
elif div_calc_name == 'Hull':
calc_diversity = calc_convex_hull
elif div_calc_name == 'Sum':
calc_diversity = sum_experience
elif div_calc_name == 'Lifespans':
calc_diversity = sum_lifespans
elif div_calc_name == 'Lifetimes':
calc_diversity = calc_mean_lifetime
elif div_calc_name == 'Actions':
calc_diversity = calc_mean_actions_matched
elif div_calc_name == 'MapTest':
calc_diversity = calc_local_map_entropy
elif div_calc_name == 'MapTestText':
calc_diversity = ham_text
get_trg_image()
elif div_calc_name == 'y_deltas':
calc_diversity = calc_y_deltas
    elif div_calc_name in ('Scores', 'ALP'):
calc_diversity = calc_scores
else:
        raise Exception('Unsupported fitness function: {}'.format(div_calc_name))
return calc_diversity
def get_trg_image():
from PIL import Image, ImageDraw, ImageFont
font_size = 15
try:
font = ImageFont.truetype("arial.ttf", font_size)
except OSError:
try:
font = ImageFont.truetype("LiberationMono-Regular.ttf", font_size)
except OSError:
font = ImageFont.truetype("SFNSMono.ttf", 32)
global trg_image
trg_image = Image.new(mode = "RGB", size=(50, 50))
draw = ImageDraw.Draw(trg_image)
draw.text((1,1), "Evo", font=font, fill=(255,0,0))
draw.text((1,15), "NMMO", font=font, fill=(255,0,0))
    draw.text((1,32), r"¯\_(ツ)_/¯", font=font, fill=(255,0,0))
trg_image.save("trg_img.png")
trg_image = (np.array(trg_image)[:, :, 0] / 255 * 8).astype(np.uint8)
def ham_text(individual, config):
if trg_image is None:
get_trg_image()
map_arr = individual.chromosome.map_arr[10:-10, 10:-10]
return -(trg_image != map_arr).sum()
def calc_map_entropies(individual, config, verbose=False):
glob_ent = calc_global_map_entropy(individual, config)
loc_ent = calc_local_map_entropy(individual, config)
if verbose:
print('global entropy: {}\nlocal entropy: {}'.format(glob_ent, loc_ent))
return [glob_ent[0], loc_ent]
def calc_global_map_entropy(individual, config):
b = config.TERRAIN_BORDER
map_arr = individual.chromosome.map_arr[b:-b, b:-b]
ent = scipy.stats.entropy(np.bincount(map_arr.reshape(-1), minlength=individual.n_tiles))
ent = ent * 100 / np.log(individual.n_tiles)
return [ent]
def calc_local_map_entropy(individual, config):
b = config.TERRAIN_BORDER
map_arr = individual.chromosome.map_arr[b:-b, b:-b]
local_ent = skimage.filters.rank.entropy(map_arr, disk(3))
local_ent = local_ent.mean() * 100 / np.log2(individual.n_tiles)
return local_ent.item()
def get_pop_stats(agent_stats, pop=None):
pops = agent_stats[0].keys() if pop is None else [pop]
stats = [stats_i[p] for p in pops for stats_i in agent_stats]
if len(stats[0].shape) == 2:
return np.vstack(stats)
elif len(stats[0].shape) == 1:
return np.hstack(stats)
raise Exception("Oy! Dafuk type o' agent data is this?")
def contract_by_lifespan(agent_stats, lifespans):
weights = sigmoid_lifespan(lifespans)
n_agents = lifespans.shape[0]
mean_agent = agent_stats.mean(axis=0)
mean_agents = np.repeat(mean_agent.reshape(1, mean_agent.shape[0]), n_agents, axis=0)
agent_deltas = mean_agents - agent_stats
agent_skills = agent_stats + (weights * agent_deltas.T).T
return agent_skills
def expand_by_lifespan(agent_stats, lifespans):
weights = sigmoid_lifespan(lifespans)
n_agents = lifespans.shape[0]
mean_agent = agent_stats.mean(axis=0)
mean_agents = np.repeat(mean_agent.reshape(1, mean_agent.shape[0]), n_agents, axis=0)
agent_deltas = mean_agents - agent_stats
    # Displace agents by at most 100 units (otherwise we would not punish agents at all
    # if they were already perfectly homogeneous, for example).
agent_deltas = agent_deltas / np.linalg.norm(agent_deltas) * 100
agent_skills = agent_stats - (weights * agent_deltas.T).T
return agent_skills
def calc_scores(agent_stats, skill_headers=None, verbose=False):
scores = np.hstack(agent_stats['scores'])
if verbose:
print('scores: {}'.format(scores))
return np.mean(scores)
def calc_mean_actions_matched(agent_stats, skill_headers=None, verbose=False):
actions_matched = np.hstack(agent_stats['actions_matched'])
if verbose:
print(actions_matched)
# print(agent_stats['lifespans'])
return np.mean(actions_matched)
def calc_y_deltas(agent_stats, skill_headers=None, verbose=False):
y_deltas = np.hstack(agent_stats['y_deltas'])
if verbose:
print('y_deltas: {}'.format(y_deltas))
return np.mean(y_deltas)
def calc_mean_lifetime(agent_stats, skill_headers=None, verbose=False, pop=None):
lifetimes = get_pop_stats(agent_stats['lifespans'], pop)
if len(lifetimes) != 0:
lifetimes = np.hstack(lifetimes)
else:
lifetimes = [0]
mean_lifetime = lifetimes.mean()
return mean_lifetime
def sum_lifespans(agent_stats, skill_headers=None, n_policies=1, verbose=False, pop=None):
lifespans = get_pop_stats(agent_stats['lifespans'], pop=pop)
score = lifespans.mean()
if verbose:
print('Mean lifespan, pop {}: {}'.format(pop, score))
return score
def sum_experience(agent_stats, skill_headers=None, verbose=False, pop=None):
# No need to weight by lifespan, since high lifespan is a prerequisite for high XP.
agent_skills = get_pop_stats(agent_stats['skills'], pop)
lifespans = get_pop_stats(agent_stats['lifespans'], pop)
a_skills = np.vstack(agent_skills)
a_lifespans = np.hstack(lifespans)
n_agents, n_skills = a_skills.shape
mean_xp = a_skills.sum() / (n_agents * n_skills)
if verbose:
print('skills')
print(a_skills.T)
print('lifespans')
print(a_lifespans)
print('mean xp:', mean_xp)
print()
return mean_xp
def sigmoid_lifespan(x):
    # This basically assumes max lifespan is at least 100. Larger max lifespans won't really be a problem, since the weight saturates near 1 by a lifespan of roughly 100 anyway.
res = 1 / (1 + np.exp(0.1*(-x+50)))
return res
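# Editor's note -- illustrative sketch, not part of the original module: the weights
# sigmoid_lifespan assigns at a few sample lifespans, to make the comment above concrete.
# Values are approximate.
def _demo_sigmoid_lifespan():
    sample = np.array([0, 25, 50, 75, 100])
    # Roughly 0.01, 0.08, 0.50, 0.92, 0.99: near 0 for short lives, saturating
    # toward 1 well before a lifespan of 100.
    return dict(zip(sample.tolist(), sigmoid_lifespan(sample).round(2).tolist()))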
def calc_differential_entropy(agent_stats, skill_headers=None, verbose=False, infos={}, pop=None, punish_youth=True):
agent_skills = get_pop_stats(agent_stats['skills'], pop)
lifespans = get_pop_stats(agent_stats['lifespans'], pop)
a_skills = agent_skills
a_lifespans = lifespans
assert a_skills.shape[0] == a_lifespans.shape[0]
if verbose:
print(skill_headers)
print(a_skills.transpose())
print(len(agent_skills), 'populations')
print('lifespans')
print(a_lifespans)
if punish_youth:
a_skills = contract_by_lifespan(a_skills, a_lifespans)
mean = np.average(a_skills, axis=0)
cov = np.cov(a_skills,rowvar=0)
gaussian = scipy.stats.multivariate_normal(mean=mean, cov=cov, allow_singular=True)
infos['gaussian'] = gaussian
score = gaussian.entropy()
if verbose:
print('score:', score)
return score
def calc_convex_hull(agent_stats, skill_headers=None, verbose=False, infos={}, pop=None, punish_youth=True):
agent_skills = get_pop_stats(agent_stats['skills'], pop)
lifespans = get_pop_stats(agent_stats['lifespans'], pop)
agent_skills = np.vstack(agent_skills)
n_skills = agent_skills.shape[1]
lifespans = np.hstack(lifespans)
if verbose:
print('skills:')
print(agent_skills.transpose())
print('lifespans:')
print(lifespans)
print(len(agent_stats['lifespans']), 'populations')
if punish_youth:
agent_skills = contract_by_lifespan(agent_skills, lifespans)
if n_skills == 1:
# Max distance, i.e. a 1D hull
score = agent_skills.max() - agent_skills.mean()
else:
try:
hull = ConvexHull(agent_skills, qhull_options='QJ')
infos['hull'] = hull
score = hull.volume
score = score ** (1 / n_skills)
except Exception as e:
print(e)
score = 0
if verbose:
print('score:', score)
return score
def calc_discrete_entropy_2(agent_stats, skill_headers=None, verbose=False, pop=None, punish_youth=True):
agent_skills = get_pop_stats(agent_stats['skills'], pop)
lifespans = get_pop_stats(agent_stats['lifespans'], pop)
    agent_skills_0 = agent_skills = np.vstack(agent_skills)
lifespans = np.hstack(lifespans)
n_agents = lifespans.shape[0]
if n_agents == 1:
        return -float('inf')
n_skills = agent_skills.shape[1]
if verbose:
print('skills')
print(agent_skills_0.transpose())
print('lifespans')
print(lifespans)
agent_skills = np.where(agent_skills == 0, 0.0000001, agent_skills)
if punish_youth:
# Below is a v funky way of punishing by lifespan
# weights = sigmoid_lifespan(lifespans)
# # contract population toward mean according to lifespan
# # mean experience level for each agent
# mean_skill = agent_skills.mean(axis=1)
# # mean skill vector of an agent
# mean_agent = agent_skills.mean(axis=0)
# assert mean_skill.shape[0] == n_agents
# assert mean_agent.shape[0] == n_skills
# mean_skills = np.repeat(mean_skill.reshape(mean_skill.shape[0], 1), n_skills, axis=1)
# mean_agents = np.repeat(mean_agent.reshape(1, mean_agent.shape[0]), n_agents, axis=0)
# agent_deltas = agent_skills - mean_agents
# skill_deltas = agent_skills - mean_skills
# a_skills_skills = mean_agents + (weights * agent_deltas.transpose()).transpose()
# a_skills_agents = mean_skills + (weights * skill_deltas.transpose()).transpose()
# div_agents = skbio.diversity.alpha_diversity('shannon', a_skills_agents).mean()
# div_skills = skbio.diversity.alpha_diversity('shannon', a_skills_skills.transpose()).mean()
# We'll just do the usual
a_skills = contract_by_lifespan(agent_skills, lifespans)
div_agents = skbio.diversity.alpha_diversity('shannon', a_skills).mean()
div_skills = skbio.diversity.alpha_diversity('shannon', a_skills.transpose()).mean()
    score = -(div_agents * div_skills)
    if verbose:
        print('Score:', score)
return score
def calc_discrete_entropy(agent_stats, skill_headers=None, pop=None):
agent_skills = get_pop_stats(agent_stats['skills'], pop)
lifespans = get_pop_stats(agent_stats['lifespans'], pop)
agent_skills_0 = np.vstack(agent_skills)
agent_lifespans = np.hstack(lifespans)
weights = sigmoid_lifespan(agent_lifespans)
agent_skills = agent_skills_0.transpose() * weights
agent_skills = agent_skills.transpose()
BASE_VAL = 0.0001
n_skills = len(agent_skills[0])
n_pop = len(agent_skills)
agent_sums = [sum(skills) for skills in agent_skills]
i = 0
for a in agent_sums:
if a == 0:
agent_sums[i] = BASE_VAL * n_skills
i += 1
skill_sums = [0 for i in range(n_skills)]
for i in range(n_skills):
for a_skills in agent_skills:
skill_sums[i] += a_skills[i]
if skill_sums[i] == 0:
skill_sums[i] = BASE_VAL * n_pop
skill_ents = []
for i in range(n_skills):
skill_ent = 0
for j in range(n_pop):
a_skill = agent_skills[j][i]
if a_skill == 0:
a_skill = BASE_VAL
p = a_skill / skill_sums[i]
if p == 0:
skill_ent += 0
else:
skill_ent += p * np.log(p)
skill_ent = skill_ent / (n_pop)
skill_ents.append(skill_ent)
agent_ents = []
for j in range(n_pop):
agent_ent = 0
for i in range(n_skills):
a_skill = agent_skills[j][i]
if a_skill == 0:
a_skill = BASE_VAL
p = a_skill / agent_sums[j]
if p == 0:
agent_ent += 0
else:
agent_ent += p * np.log(p)
agent_ent = agent_ent / (n_skills)
agent_ents.append(agent_ent)
agent_score = np.mean(agent_ents)
skill_score = np.mean(skill_ents)
score = -(skill_score * agent_score)
score = score * 100
print('agent skills:\n{}\n{}'.format(skill_headers, np.array(agent_skills_0.transpose())))
print('lifespans:\n{}'.format(lifespans))
print('score:\n{}'.format(score))
return score
def calc_homogeneity_l2(agent_stats, skill_headers=None, verbose=False, pop=None, punish_youth=True):
if 'skills' not in agent_stats:
raise Exception('We should be including dead agents in this calculation, so we should get at least some skill '
'stats back here')
agent_skills = get_pop_stats(agent_stats['skills'], pop)
lifespans = get_pop_stats(agent_stats['lifespans'], pop)
assert len(agent_skills) == len(lifespans)
if punish_youth:
agent_skills = expand_by_lifespan(agent_skills, lifespans)
n_agents = agent_skills.shape[0]
a = agent_skills
b = a.reshape(n_agents, 1, a.shape[1])
distances = np.sqrt(np.einsum('ijk, ijk->ij', a - b, a - b))
score = np.sum(distances) / n_agents ** 2
if verbose:
print('agent skills:\n{}'.format(a.transpose()))
print('lifespans:\n{}'.format(lifespans))
print('score:\n{}\n'.format(
score))
return -score
def calc_diversity_l2(agent_stats, skill_headers=None, verbose=False, pop=None, punish_youth=False):
if 'skills' not in agent_stats:
return 0
agent_skills = get_pop_stats(agent_stats['skills'], pop)
lifespans = get_pop_stats(agent_stats['lifespans'], pop)
assert len(agent_skills) == len(lifespans)
if punish_youth:
agent_skills = contract_by_lifespan(agent_skills, lifespans)
n_agents = agent_skills.shape[0]
a = agent_skills
b = a.reshape(n_agents, 1, a.shape[1])
distances = np.sqrt(np.einsum('ijk, ijk->ij', a-b, a-b))
score = np.sum(distances) / n_agents ** 2
if verbose:
print('agent skills:\n{}'.format(a.transpose()))
print('lifespans:\n{}'.format(lifespans))
print('score:\n{}\n'.format(
score))
return score
DIV_CALCS = [(calc_diversity_l2, 'mean pairwise L2'), (calc_differential_entropy, 'differential entropy'), (calc_discrete_entropy_2, 'discrete entropy'), (calc_convex_hull, 'convex hull volume'), (sum_lifespans, 'lifespans')]
| true
| true
|
7907503e02306ed1a233f84ffd3ba20e4f641627
| 5,052
|
py
|
Python
|
dash_sql_client_ui.py
|
xiangjerryhe/sql-ui-dash
|
857cab34332599550076fa01c385be1258e41fb2
|
[
"MIT"
] | 1
|
2022-01-31T07:43:08.000Z
|
2022-01-31T07:43:08.000Z
|
dash_sql_client_ui.py
|
xiangjerryhe/sql-ui-dash
|
857cab34332599550076fa01c385be1258e41fb2
|
[
"MIT"
] | null | null | null |
dash_sql_client_ui.py
|
xiangjerryhe/sql-ui-dash
|
857cab34332599550076fa01c385be1258e41fb2
|
[
"MIT"
] | null | null | null |
# coding: utf-8
__author__ = "Jerry He"
import dash_bootstrap_components as dbc
from dash import dcc, no_update
from dash_extensions.enrich import Dash, Output, Input, State, html
import flask
from flask import jsonify
from flask_cors import CORS
from dash import dash_table
import dash_ace
server = flask.Flask(__name__)
CORS(server)
from dash_extensions.enrich import DashProxy,ServersideOutput, TriggerTransform, MultiplexerTransform, ServersideOutputTransform, NoOutputTransform
app = DashProxy(__name__,
server=server,
transforms=[
ServersideOutputTransform(), # enable use of ServersideOutput objects
],
external_stylesheets=[dbc.themes.BOOTSTRAP]
)
server = app.server
import pandas as pd
def row_tf(row):
keep = ['title', 'userid']
newrow = {k:row[k] for k in keep}
newrow['name'] = newrow['title'].split("-")[0].strip()
return newrow
def df_transform(df):
return pd.DataFrame([row_tf(row) for _,row in df.iterrows()])
app.layout = html.Div(
[
dcc.Store(id="querystr"),
dcc.Store(id="store"),
dcc.Store(id="all-df"),
dcc.Interval(interval=1800, id="query_sto"),
dbc.Card([
dbc.CardImg(src="assets/brick_header.jpg"),
dbc.CardBody([
dbc.Tabs(
[
dbc.Tab([
html.Hr(),
dash_ace.DashAceEditor(
id='query-input',
value=r"SELECT * FROM my_music_collection WHERE artist like '%Jr%' LIMIT 8",
theme='github',
mode='sql',
tabSize=2,
height="35px",
enableBasicAutocompletion=True,
enableLiveAutocompletion=True,
autocompleter='/autocompleter?prefix=',
placeholder='SQL code ...'
),
dbc.Button("Query", color="secondary", className="me-1",
id='query-button'),
html.Hr(),
html.Div(id="query-output")
],label="SQL", tab_id="tab-1"),
dbc.Tab(label="History", tab_id="tab-2"),
],
id="tabs",
active_tab="tab-1",
),
html.Div(id="tab-content"),
])
])
]
)
import json
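# Editor's note: the clientside callback below polls the Ace editor on every tick of the
# dcc.Interval above (every 1800 ms) and, only when the trimmed query text has changed,
# stores it in the "querystr" dcc.Store together with an ISO timestamp.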
app.clientside_callback("""
function(n_intervals, data) {
var existing_data;
if(data) {
existing_data = JSON.parse(data)
}
var editor = ace.edit("query-input")
if(!existing_data || existing_data['querystr'] != editor.getValue().trim()) {
return JSON.stringify({
'querystr':editor.getValue().trim(),
'time':(new Date()).toISOString()
})
}
}
""".strip(),
Output("querystr", "data"), Input("query_sto",'n_intervals'), State("querystr", "data"))
from sqlalchemy import create_engine
engine = create_engine('postgresql://localhost:5432/jerry') # change this to your SQL endpoint/auth
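# Editor's note (assumption, not in the original app): any SQLAlchemy URL works here; for a
# quick local test without PostgreSQL you could point it at SQLite instead, e.g.
# engine = create_engine("sqlite:///example.db").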
import logging
import dateutil.parser
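# Editor's note: the callback below re-runs the stored query on each button click. The
# "%" -> "%%" replacement escapes literal percent signs (as in the default "LIKE '%Jr%'"
# query), which the DBAPI's pyformat parameter style would otherwise treat as placeholder
# markers; memoize=True asks dash-extensions to cache the serverside result.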
@app.callback(ServersideOutput("store", "data"), Input('query-button', 'n_clicks'),State("querystr", "data"), memoize=True)
def query(n_clicks, query_data):
if query_data is None:
return no_update
qdata = json.loads(query_data)
try:
dat = pd.read_sql(qdata["querystr"].replace("%", "%%"), con=engine)
return dat
    except Exception:
logging.exception("SQL query failed\n")
from datetime import datetime
@app.callback(Output("query-output", "children"), ServersideOutput("all-df", "data"), Input("store", "data"), State("all-df", "data"))
def render_query_res_table(data, all_df):
df = df_transform(data)
df = df[sorted(df.columns.tolist())]
if all_df is None:
all_df = [{'df':df, 'time':datetime.now()}]
else:
all_df.append({'df':df, 'time':datetime.now()})
return [dash_table.DataTable(
id='table',
columns=[{"name": i, "id": i} for i in df.columns],
data=df.to_dict('records'),
style_header={
'backgroundColor': 'grey',
'fontWeight': 'bold'
},
)],all_df
@app.callback(Output("tab-content", "children"), [Input("tabs", "active_tab"), State("all-df", "data")])
def switch_tab(at, all_df):
if at == "tab-1":
return []
elif at == "tab-2":
return dbc.Accordion(
[
dbc.AccordionItem([
dash_table.DataTable(
id='table',
columns=[{"name": i, "id": i} for i in query_hist['df'].columns],
data=query_hist['df'].to_dict('records'),
style_header={
'backgroundColor': 'grey',
'fontWeight': 'bold'
},
)
], title = query_hist['time'].strftime("%H:%M:%S")) for query_hist in all_df
])
return html.P("This shouldn't ever be displayed...")
@server.route('/autocompleter', methods=['GET'])
def autocompleter():
return jsonify([{"name": "Completed", "value": "Completed", "score": 100, "meta": "test"}])
app.run_server(host="127.0.0.1", debug=True, port=8080)
| 31.575
| 147
| 0.582344
|
__author__ = "Jerry He"
import dash_bootstrap_components as dbc
from dash import dcc, no_update
from dash_extensions.enrich import Dash, Output, Input, State, html
import flask
from flask import jsonify
from flask_cors import CORS
from dash import dash_table
import dash_ace
server = flask.Flask(__name__)
CORS(server)
from dash_extensions.enrich import DashProxy,ServersideOutput, TriggerTransform, MultiplexerTransform, ServersideOutputTransform, NoOutputTransform
app = DashProxy(__name__,
server=server,
transforms=[
ServersideOutputTransform(),
],
external_stylesheets=[dbc.themes.BOOTSTRAP]
)
server = app.server
import pandas as pd
def row_tf(row):
keep = ['title', 'userid']
newrow = {k:row[k] for k in keep}
newrow['name'] = newrow['title'].split("-")[0].strip()
return newrow
def df_transform(df):
return pd.DataFrame([row_tf(row) for _,row in df.iterrows()])
app.layout = html.Div(
[
dcc.Store(id="querystr"),
dcc.Store(id="store"),
dcc.Store(id="all-df"),
dcc.Interval(interval=1800, id="query_sto"),
dbc.Card([
dbc.CardImg(src="assets/brick_header.jpg"),
dbc.CardBody([
dbc.Tabs(
[
dbc.Tab([
html.Hr(),
dash_ace.DashAceEditor(
id='query-input',
value=r"SELECT * FROM my_music_collection WHERE artist like '%Jr%' LIMIT 8",
theme='github',
mode='sql',
tabSize=2,
height="35px",
enableBasicAutocompletion=True,
enableLiveAutocompletion=True,
autocompleter='/autocompleter?prefix=',
placeholder='SQL code ...'
),
dbc.Button("Query", color="secondary", className="me-1",
id='query-button'),
html.Hr(),
html.Div(id="query-output")
],label="SQL", tab_id="tab-1"),
dbc.Tab(label="History", tab_id="tab-2"),
],
id="tabs",
active_tab="tab-1",
),
html.Div(id="tab-content"),
])
])
]
)
import json
app.clientside_callback("""
function(n_intervals, data) {
var existing_data;
if(data) {
existing_data = JSON.parse(data)
}
var editor = ace.edit("query-input")
if(!existing_data || existing_data['querystr'] != editor.getValue().trim()) {
return JSON.stringify({
'querystr':editor.getValue().trim(),
'time':(new Date()).toISOString()
})
}
}
""".strip(),
Output("querystr", "data"), Input("query_sto",'n_intervals'), State("querystr", "data"))
from sqlalchemy import create_engine
engine = create_engine('postgresql://localhost:5432/jerry')
import logging
import dateutil.parser
@app.callback(ServersideOutput("store", "data"), Input('query-button', 'n_clicks'),State("querystr", "data"), memoize=True)
def query(n_clicks, query_data):
if query_data is None:
return no_update
qdata = json.loads(query_data)
try:
dat = pd.read_sql(qdata["querystr"].replace("%", "%%"), con=engine)
return dat
    except Exception:
logging.exception("SQL query failed\n")
from datetime import datetime
@app.callback(Output("query-output", "children"), ServersideOutput("all-df", "data"), Input("store", "data"), State("all-df", "data"))
def render_query_res_table(data, all_df):
df = df_transform(data)
df = df[sorted(df.columns.tolist())]
if all_df is None:
all_df = [{'df':df, 'time':datetime.now()}]
else:
all_df.append({'df':df, 'time':datetime.now()})
return [dash_table.DataTable(
id='table',
columns=[{"name": i, "id": i} for i in df.columns],
data=df.to_dict('records'),
style_header={
'backgroundColor': 'grey',
'fontWeight': 'bold'
},
)],all_df
@app.callback(Output("tab-content", "children"), [Input("tabs", "active_tab"), State("all-df", "data")])
def switch_tab(at, all_df):
if at == "tab-1":
return []
elif at == "tab-2":
return dbc.Accordion(
[
dbc.AccordionItem([
dash_table.DataTable(
id='table',
columns=[{"name": i, "id": i} for i in query_hist['df'].columns],
data=query_hist['df'].to_dict('records'),
style_header={
'backgroundColor': 'grey',
'fontWeight': 'bold'
},
)
], title = query_hist['time'].strftime("%H:%M:%S")) for query_hist in all_df
])
return html.P("This shouldn't ever be displayed...")
@server.route('/autocompleter', methods=['GET'])
def autocompleter():
return jsonify([{"name": "Completed", "value": "Completed", "score": 100, "meta": "test"}])
app.run_server(host="127.0.0.1", debug=True, port=8080)
| true
| true
|
7907508e4670aeed6a99961267cd8c80b279bb7c
| 737
|
py
|
Python
|
king_libs/sort_val.py
|
jacktamin/king-tools
|
87724ba8afaec6d5153b31377e63c0f7238b82c3
|
[
"MIT"
] | null | null | null |
king_libs/sort_val.py
|
jacktamin/king-tools
|
87724ba8afaec6d5153b31377e63c0f7238b82c3
|
[
"MIT"
] | null | null | null |
king_libs/sort_val.py
|
jacktamin/king-tools
|
87724ba8afaec6d5153b31377e63c0f7238b82c3
|
[
"MIT"
] | null | null | null |
class Sort_dic:
def __init__(self):
pass
@staticmethod
def sort_values(dic,rev=False,sort_by= 'values'):
if sort_by == 'values':
sv = sorted(dic.values(),reverse=rev)
new_dic = {}
for num in sv :
for k,v in dic.items():
if num == v:
new_dic[k] = v
return new_dic
elif sort_by == 'keys':
sk = sorted(dic.keys(),reverse=rev)
new_dic = {}
for num in sk :
for k,v in dic.items():
if k==num:
new_dic[k] = v
return new_dic
| 26.321429
| 54
| 0.385346
|
class Sort_dic:
def __init__(self):
pass
@staticmethod
def sort_values(dic,rev=False,sort_by= 'values'):
if sort_by == 'values':
sv = sorted(dic.values(),reverse=rev)
new_dic = {}
for num in sv :
for k,v in dic.items():
if num == v:
new_dic[k] = v
return new_dic
elif sort_by == 'keys':
sk = sorted(dic.keys(),reverse=rev)
new_dic = {}
for num in sk :
for k,v in dic.items():
if k==num:
new_dic[k] = v
return new_dic
| true
| true
|
790750935a9164202629c5a890fd9cc46bd02342
| 4,116
|
py
|
Python
|
tensorflow_model_optimization/__init__.py
|
ptesan777/model-optimization
|
3fec5a74209e5a6a2b6ac603632b4a00ab523b36
|
[
"Apache-2.0"
] | 848
|
2019-12-03T00:16:17.000Z
|
2022-03-31T22:53:17.000Z
|
tools/Vitis-AI-Quantizer/vai_q_tensorflow2.x/tensorflow_model_optimization/__init__.py
|
wangyifan778/Vitis-AI
|
f61061eef7550d98bf02a171604c9a9f283a7c47
|
[
"Apache-2.0"
] | 656
|
2019-12-03T00:48:46.000Z
|
2022-03-31T18:41:54.000Z
|
tools/Vitis-AI-Quantizer/vai_q_tensorflow2.x/tensorflow_model_optimization/__init__.py
|
wangyifan778/Vitis-AI
|
f61061eef7550d98bf02a171604c9a9f283a7c47
|
[
"Apache-2.0"
] | 506
|
2019-12-03T00:46:26.000Z
|
2022-03-30T10:34:56.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Init module for TensorFlow Model Optimization Python API.
```
import tensorflow_model_optimization as tfmot
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# We need to put some imports inside a function call below, and the function
# call needs to come before the *actual* imports that populate the
# tensorflow_model_optimization namespace. Hence, we disable this lint check
# throughout the file.
#
# pylint: disable=g-import-not-at-top
# Ensure TensorFlow is importable and its version is sufficiently recent. This
# needs to happen before anything else, since the imports below will try to
# import tensorflow, too.
def _ensure_tf_install(): # pylint: disable=g-statement-before-imports
"""Attempt to import tensorflow, and ensure its version is sufficient.
Raises:
ImportError: if either tensorflow is not importable or its version is
inadequate.
"""
try:
import tensorflow as tf
except ImportError:
# Print more informative error message, then reraise.
print(
'\n\nFailed to import TensorFlow. Please note that TensorFlow is not '
'installed by default when you install TensorFlow Model Optimization. This '
'is so that users can decide whether to install the GPU-enabled '
'TensorFlow package. To use TensorFlow Model Optimization, please install '
'the most recent version of TensorFlow, by following instructions at '
'https://tensorflow.org/install.\n\n')
raise
import distutils.version
#
# Update this whenever we need to depend on a newer TensorFlow release.
#
required_tensorflow_version = '1.14.0'
if (distutils.version.LooseVersion(tf.version.VERSION) <
distutils.version.LooseVersion(required_tensorflow_version)):
raise ImportError(
'This version of TensorFlow Model Optimization requires TensorFlow '
'version >= {required}; Detected an installation of version {present}. '
'Please upgrade TensorFlow to proceed.'.format(
required=required_tensorflow_version, present=tf.__version__))
_ensure_tf_install()
import inspect as _inspect
import os as _os
import sys as _sys
# To ensure users only access the expected public API, the API structure is
# created in the `api` directory. Import all api modules.
# pylint: disable=wildcard-import
from tensorflow_model_optimization.python.core.api import *
# pylint: enable=wildcard-import
# Use sparsity module to fetch the path for the `api` directory.
# This handles all techniques, not just sparsity.
_API_MODULE = sparsity # pylint: disable=undefined-variable
# Returns $(install_dir)/tensorflow_model_optimization/api
_sparsity_api_dir = _os.path.dirname(
_os.path.dirname(_inspect.getfile(_API_MODULE)))
# Add the `api` directory to `__path__` so that `from * import module` works.
_current_module = _sys.modules[__name__]
if not hasattr(_current_module, '__path__'):
__path__ = [_sparsity_api_dir]
elif _os.path.dirname(_inspect.getfile(_API_MODULE)) not in __path__:
__path__.append(_sparsity_api_dir)
# Delete python module so that users only access the code using the API path
# rather than using the code directory structure.
# This will disallow usage such as `tfmot.python.core.sparsity.keras`.
# pylint: disable=undefined-variable
try:
del python
except NameError:
pass
# pylint: enable=undefined-variable
| 36.424779
| 84
| 0.744412
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def _ensure_tf_install():
try:
import tensorflow as tf
except ImportError:
print(
'\n\nFailed to import TensorFlow. Please note that TensorFlow is not '
'installed by default when you install TensorFlow Model Optimization. This '
'is so that users can decide whether to install the GPU-enabled '
'TensorFlow package. To use TensorFlow Model Optimization, please install '
'the most recent version of TensorFlow, by following instructions at '
'https://tensorflow.org/install.\n\n')
raise
import distutils.version
required_tensorflow_version = '1.14.0'
if (distutils.version.LooseVersion(tf.version.VERSION) <
distutils.version.LooseVersion(required_tensorflow_version)):
raise ImportError(
'This version of TensorFlow Model Optimization requires TensorFlow '
'version >= {required}; Detected an installation of version {present}. '
'Please upgrade TensorFlow to proceed.'.format(
required=required_tensorflow_version, present=tf.__version__))
_ensure_tf_install()
import inspect as _inspect
import os as _os
import sys as _sys
from tensorflow_model_optimization.python.core.api import *
_API_MODULE = sparsity
_sparsity_api_dir = _os.path.dirname(
_os.path.dirname(_inspect.getfile(_API_MODULE)))
_current_module = _sys.modules[__name__]
if not hasattr(_current_module, '__path__'):
__path__ = [_sparsity_api_dir]
elif _os.path.dirname(_inspect.getfile(_API_MODULE)) not in __path__:
__path__.append(_sparsity_api_dir)
try:
del python
except NameError:
pass
| true
| true
|
790750a59b605357e76747719703dbade1951824
| 35,842
|
py
|
Python
|
sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_models.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 8
|
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_models.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | null | null | null |
sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_models.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | null | null | null |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# pylint: disable=too-few-public-methods, too-many-instance-attributes
# pylint: disable=super-init-not-called, too-many-lines
from enum import Enum
from azure.storage.blob import LeaseProperties as BlobLeaseProperties
from azure.storage.blob import AccountSasPermissions as BlobAccountSasPermissions
from azure.storage.blob import ResourceTypes as BlobResourceTypes
from azure.storage.blob import UserDelegationKey as BlobUserDelegationKey
from azure.storage.blob import ContentSettings as BlobContentSettings
from azure.storage.blob import AccessPolicy as BlobAccessPolicy
from azure.storage.blob import DelimitedTextDialect as BlobDelimitedTextDialect
from azure.storage.blob import DelimitedJsonDialect as BlobDelimitedJSON
from azure.storage.blob import ArrowDialect as BlobArrowDialect
from azure.storage.blob._models import ContainerPropertiesPaged
from ._shared.models import DictMixin
class FileSystemProperties(object):
"""File System properties class.
:ivar ~datetime.datetime last_modified:
A datetime object representing the last time the file system was modified.
:ivar str etag:
The ETag contains a value that you can use to perform operations
conditionally.
:ivar ~azure.storage.filedatalake.LeaseProperties lease:
Stores all the lease information for the file system.
:ivar str public_access: Specifies whether data in the file system may be accessed
publicly and the level of access.
:ivar bool has_immutability_policy:
Represents whether the file system has an immutability policy.
:ivar bool has_legal_hold:
Represents whether the file system has a legal hold.
:ivar dict metadata: A dict with name-value pairs to associate with the
file system as metadata.
Returned ``FileSystemProperties`` instances expose these values through a
dictionary interface, for example: ``file_system_props["last_modified"]``.
Additionally, the file system name is available as ``file_system_props["name"]``.
"""
def __init__(self):
self.name = None
self.last_modified = None
self.etag = None
self.lease = None
self.public_access = None
self.has_immutability_policy = None
self.has_legal_hold = None
self.metadata = None
@classmethod
def _from_generated(cls, generated):
props = cls()
props.name = generated.name
props.last_modified = generated.properties.last_modified
props.etag = generated.properties.etag
props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access
props.public_access = PublicAccess._from_generated( # pylint: disable=protected-access
generated.properties.public_access)
props.has_immutability_policy = generated.properties.has_immutability_policy
props.has_legal_hold = generated.properties.has_legal_hold
props.metadata = generated.metadata
return props
@classmethod
def _convert_from_container_props(cls, container_properties):
container_properties.__class__ = cls
container_properties.public_access = PublicAccess._from_generated( # pylint: disable=protected-access
container_properties.public_access)
container_properties.lease.__class__ = LeaseProperties
return container_properties
class FileSystemPropertiesPaged(ContainerPropertiesPaged):
"""An Iterable of File System properties.
:ivar str service_endpoint: The service URL.
:ivar str prefix: A file system name prefix being used to filter the list.
:ivar str marker: The continuation token of the current page of results.
:ivar int results_per_page: The maximum number of results retrieved per API call.
:ivar str continuation_token: The continuation token to retrieve the next page of results.
:ivar str location_mode: The location mode being used to list results. The available
options include "primary" and "secondary".
:ivar current_page: The current page of listed results.
:vartype current_page: list(~azure.storage.filedatalake.FileSystemProperties)
:param callable command: Function to retrieve the next page of items.
:param str prefix: Filters the results to return only file systems whose names
begin with the specified prefix.
:param int results_per_page: The maximum number of file system names to retrieve per
call.
:param str continuation_token: An opaque continuation token.
"""
def __init__(self, *args, **kwargs):
super(FileSystemPropertiesPaged, self).__init__(
*args,
**kwargs
)
@staticmethod
def _build_item(item):
return FileSystemProperties._from_generated(item) # pylint: disable=protected-access
class DirectoryProperties(DictMixin):
"""
:ivar str name: name of the directory
:ivar str etag: The ETag contains a value that you can use to perform operations
conditionally.
:ivar bool deleted: if the current directory marked as deleted
:ivar dict metadata: Name-value pairs associated with the directory as metadata.
:ivar ~azure.storage.filedatalake.LeaseProperties lease:
Stores all the lease information for the directory.
:ivar ~datetime.datetime last_modified:
A datetime object representing the last time the directory was modified.
:ivar ~datetime.datetime creation_time:
Indicates when the directory was created, in UTC.
:ivar int remaining_retention_days: The number of days that the directory will be retained
before being permanently deleted by the service.
    :ivar ~azure.storage.filedatalake.ContentSettings content_settings: The content settings of the directory.
"""
def __init__(self, **kwargs):
self.name = kwargs.get('name')
self.etag = kwargs.get('ETag')
self.deleted = None
self.metadata = kwargs.get('metadata')
self.lease = LeaseProperties(**kwargs)
self.last_modified = kwargs.get('Last-Modified')
self.creation_time = kwargs.get('x-ms-creation-time')
self.deleted_time = None
self.remaining_retention_days = None
class FileProperties(DictMixin):
"""
:ivar str name: name of the file
:ivar str etag: The ETag contains a value that you can use to perform operations
conditionally.
:ivar bool deleted: if the current file marked as deleted
:ivar dict metadata: Name-value pairs associated with the file as metadata.
:ivar ~azure.storage.filedatalake.LeaseProperties lease:
Stores all the lease information for the file.
:ivar ~datetime.datetime last_modified:
A datetime object representing the last time the file was modified.
:ivar ~datetime.datetime creation_time:
Indicates when the file was created, in UTC.
:ivar int size: size of the file
:ivar int remaining_retention_days: The number of days that the file will be retained
before being permanently deleted by the service.
    :ivar ~azure.storage.filedatalake.ContentSettings content_settings: The content settings of the file.
"""
def __init__(self, **kwargs):
self.name = kwargs.get('name')
self.etag = kwargs.get('ETag')
self.deleted = None
self.metadata = kwargs.get('metadata')
self.lease = LeaseProperties(**kwargs)
self.last_modified = kwargs.get('Last-Modified')
self.creation_time = kwargs.get('x-ms-creation-time')
self.size = kwargs.get('Content-Length')
self.deleted_time = None
self.expiry_time = kwargs.get("x-ms-expiry-time")
self.remaining_retention_days = None
self.content_settings = ContentSettings(**kwargs)
class PathProperties(object):
"""Path properties listed by get_paths api.
:ivar str name: the full path for a file or directory.
:ivar str owner: The owner of the file or directory.
    :ivar str group: The owning group of the file or directory.
:ivar str permissions: Sets POSIX access permissions for the file
owner, the file owning group, and others. Each class may be granted
read, write, or execute permission. The sticky bit is also supported.
Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
supported.
:ivar datetime last_modified: A datetime object representing the last time the directory/file was modified.
:ivar bool is_directory: is the path a directory or not.
:ivar str etag: The ETag contains a value that you can use to perform operations
conditionally.
    :ivar int content_length: The size of the file if the path is a file.
"""
def __init__(self, **kwargs):
super(PathProperties, self).__init__(
**kwargs
)
self.name = kwargs.pop('name', None)
self.owner = kwargs.get('owner', None)
self.group = kwargs.get('group', None)
self.permissions = kwargs.get('permissions', None)
self.last_modified = kwargs.get('last_modified', None)
self.is_directory = kwargs.get('is_directory', False)
self.etag = kwargs.get('etag', None)
self.content_length = kwargs.get('content_length', None)
@classmethod
def _from_generated(cls, generated):
path_prop = PathProperties()
path_prop.name = generated.name
path_prop.owner = generated.owner
path_prop.group = generated.group
path_prop.permissions = generated.permissions
path_prop.last_modified = generated.last_modified
path_prop.is_directory = bool(generated.is_directory)
path_prop.etag = generated.additional_properties.get('etag')
path_prop.content_length = generated.content_length
return path_prop
class LeaseProperties(BlobLeaseProperties):
"""DataLake Lease Properties.
:ivar str status:
The lease status of the file. Possible values: locked|unlocked
:ivar str state:
Lease state of the file. Possible values: available|leased|expired|breaking|broken
:ivar str duration:
When a file is leased, specifies whether the lease is of infinite or fixed duration.
"""
class ContentSettings(BlobContentSettings):
"""The content settings of a file or directory.
:ivar str content_type:
The content type specified for the file or directory. If no content type was
specified, the default content type is application/octet-stream.
:ivar str content_encoding:
If the content_encoding has previously been set
for the file, that value is stored.
:ivar str content_language:
If the content_language has previously been set
for the file, that value is stored.
:ivar str content_disposition:
content_disposition conveys additional information about how to
process the response payload, and also can be used to attach
additional metadata. If content_disposition has previously been set
for the file, that value is stored.
:ivar str cache_control:
If the cache_control has previously been set for
the file, that value is stored.
:ivar str content_md5:
If the content_md5 has been set for the file, this response
header is stored so that the client can check for message content
integrity.
:keyword str content_type:
The content type specified for the file or directory. If no content type was
specified, the default content type is application/octet-stream.
:keyword str content_encoding:
If the content_encoding has previously been set
for the file, that value is stored.
:keyword str content_language:
If the content_language has previously been set
for the file, that value is stored.
:keyword str content_disposition:
content_disposition conveys additional information about how to
process the response payload, and also can be used to attach
additional metadata. If content_disposition has previously been set
for the file, that value is stored.
:keyword str cache_control:
If the cache_control has previously been set for
the file, that value is stored.
:keyword str content_md5:
If the content_md5 has been set for the file, this response
header is stored so that the client can check for message content
integrity.
"""
def __init__(
self, **kwargs):
super(ContentSettings, self).__init__(
**kwargs
)
class AccountSasPermissions(BlobAccountSasPermissions):
def __init__(self, read=False, write=False, delete=False, list=False, # pylint: disable=redefined-builtin
create=False):
super(AccountSasPermissions, self).__init__(
read=read, create=create, write=write, list=list,
delete=delete
)
class FileSystemSasPermissions(object):
"""FileSystemSasPermissions class to be used with the
:func:`~azure.storage.filedatalake.generate_file_system_sas` function.
:param bool read:
Read the content, properties, metadata etc.
:param bool write:
Create or write content, properties, metadata. Lease the file system.
:param bool delete:
Delete the file system.
:param bool list:
List paths in the file system.
:keyword bool move:
Move any file in the directory to a new location.
Note the move operation can optionally be restricted to the child file or directory owner or
the parent directory owner if the saoid parameter is included in the token and the sticky bit is set
on the parent directory.
:keyword bool execute:
Get the status (system defined properties) and ACL of any file in the directory.
If the caller is the owner, set access control on any file in the directory.
:keyword bool manage_ownership:
Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory
within a folder that has the sticky bit set.
:keyword bool manage_access_control:
Allows the user to set permissions and POSIX ACLs on files and directories.
"""
def __init__(self, read=False, write=False, delete=False, list=False, # pylint: disable=redefined-builtin
**kwargs):
self.read = read
self.write = write
self.delete = delete
self.list = list
self.move = kwargs.pop('move', None)
self.execute = kwargs.pop('execute', None)
self.manage_ownership = kwargs.pop('manage_ownership', None)
self.manage_access_control = kwargs.pop('manage_access_control', None)
self._str = (('r' if self.read else '') +
('w' if self.write else '') +
('d' if self.delete else '') +
('l' if self.list else '') +
('m' if self.move else '') +
('e' if self.execute else '') +
('o' if self.manage_ownership else '') +
('p' if self.manage_access_control else ''))
def __str__(self):
return self._str
@classmethod
def from_string(cls, permission):
"""Create a FileSystemSasPermissions from a string.
To specify read, write, or delete permissions you need only to
include the first letter of the word in the string. E.g. For read and
write permissions, you would provide a string "rw".
:param str permission: The string which dictates the read, add, create,
write, or delete permissions.
:return: A FileSystemSasPermissions object
        :rtype: ~azure.storage.filedatalake.FileSystemSasPermissions
"""
p_read = 'r' in permission
p_write = 'w' in permission
p_delete = 'd' in permission
p_list = 'l' in permission
p_move = 'm' in permission
p_execute = 'e' in permission
p_manage_ownership = 'o' in permission
p_manage_access_control = 'p' in permission
parsed = cls(read=p_read, write=p_write, delete=p_delete,
list=p_list, move=p_move, execute=p_execute, manage_ownership=p_manage_ownership,
manage_access_control=p_manage_access_control)
return parsed
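# Editor's note -- illustrative usage sketch, not part of the SDK source. The permission
# string "rwdl" is just an example of the one-letter encoding described in the
# from_string docstring above.
def _example_file_system_permissions_roundtrip():
    perms = FileSystemSasPermissions.from_string("rwdl")
    assert perms.read and perms.write and perms.delete and perms.list
    # __str__ re-assembles the same flags in canonical order.
    assert str(perms) == "rwdl"
    return perms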
class DirectorySasPermissions(object):
"""DirectorySasPermissions class to be used with the
:func:`~azure.storage.filedatalake.generate_directory_sas` function.
:param bool read:
Read the content, properties, metadata etc.
:param bool create:
Create a new directory
:param bool write:
Create or write content, properties, metadata. Lease the directory.
:param bool delete:
Delete the directory.
:keyword bool list:
List any files in the directory. Implies Execute.
:keyword bool move:
Move any file in the directory to a new location.
Note the move operation can optionally be restricted to the child file or directory owner or
the parent directory owner if the saoid parameter is included in the token and the sticky bit is set
on the parent directory.
:keyword bool execute:
Get the status (system defined properties) and ACL of any file in the directory.
If the caller is the owner, set access control on any file in the directory.
:keyword bool manage_ownership:
Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory
within a folder that has the sticky bit set.
:keyword bool manage_access_control:
Allows the user to set permissions and POSIX ACLs on files and directories.
"""
def __init__(self, read=False, create=False, write=False,
delete=False, **kwargs):
self.read = read
self.create = create
self.write = write
self.delete = delete
self.list = kwargs.pop('list', None)
self.move = kwargs.pop('move', None)
self.execute = kwargs.pop('execute', None)
self.manage_ownership = kwargs.pop('manage_ownership', None)
self.manage_access_control = kwargs.pop('manage_access_control', None)
self._str = (('r' if self.read else '') +
('c' if self.create else '') +
('w' if self.write else '') +
('d' if self.delete else '') +
('l' if self.list else '') +
('m' if self.move else '') +
('e' if self.execute else '') +
('o' if self.manage_ownership else '') +
('p' if self.manage_access_control else ''))
def __str__(self):
return self._str
@classmethod
def from_string(cls, permission):
"""Create a DirectorySasPermissions from a string.
To specify read, create, write, or delete permissions you need only to
include the first letter of the word in the string. E.g. For read and
write permissions, you would provide a string "rw".
:param str permission: The string which dictates the read, add, create,
write, or delete permissions.
:return: A DirectorySasPermissions object
:rtype: ~azure.storage.filedatalake.DirectorySasPermissions
"""
p_read = 'r' in permission
p_create = 'c' in permission
p_write = 'w' in permission
p_delete = 'd' in permission
p_list = 'l' in permission
p_move = 'm' in permission
p_execute = 'e' in permission
p_manage_ownership = 'o' in permission
p_manage_access_control = 'p' in permission
parsed = cls(read=p_read, create=p_create, write=p_write, delete=p_delete,
list=p_list, move=p_move, execute=p_execute, manage_ownership=p_manage_ownership,
manage_access_control=p_manage_access_control)
return parsed
class FileSasPermissions(object):
"""FileSasPermissions class to be used with the
:func:`~azure.storage.filedatalake.generate_file_sas` function.
:param bool read:
Read the content, properties, metadata etc. Use the file as
the source of a read operation.
:param bool create:
Write a new file
:param bool write:
Create or write content, properties, metadata. Lease the file.
:param bool delete:
Delete the file.
:keyword bool move:
Move any file in the directory to a new location.
Note the move operation can optionally be restricted to the child file or directory owner or
the parent directory owner if the saoid parameter is included in the token and the sticky bit is set
on the parent directory.
:keyword bool execute:
Get the status (system defined properties) and ACL of any file in the directory.
If the caller is the owner, set access control on any file in the directory.
:keyword bool manage_ownership:
Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory
within a folder that has the sticky bit set.
:keyword bool manage_access_control:
Allows the user to set permissions and POSIX ACLs on files and directories.
"""
def __init__(self, read=False, create=False, write=False, delete=False, **kwargs):
self.read = read
self.create = create
self.write = write
self.delete = delete
        self.list = kwargs.pop('list', None)
self.move = kwargs.pop('move', None)
self.execute = kwargs.pop('execute', None)
self.manage_ownership = kwargs.pop('manage_ownership', None)
self.manage_access_control = kwargs.pop('manage_access_control', None)
self._str = (('r' if self.read else '') +
('c' if self.create else '') +
('w' if self.write else '') +
('d' if self.delete else '') +
('m' if self.move else '') +
('e' if self.execute else '') +
('o' if self.manage_ownership else '') +
('p' if self.manage_access_control else ''))
def __str__(self):
return self._str
@classmethod
def from_string(cls, permission):
"""Create a FileSasPermissions from a string.
To specify read, write, or delete permissions you need only to
include the first letter of the word in the string. E.g. For read and
write permissions, you would provide a string "rw".
:param str permission: The string which dictates the read, add, create,
write, or delete permissions.
:return: A FileSasPermissions object
        :rtype: ~azure.storage.filedatalake.FileSasPermissions
"""
p_read = 'r' in permission
p_create = 'c' in permission
p_write = 'w' in permission
p_delete = 'd' in permission
p_move = 'm' in permission
p_execute = 'e' in permission
p_manage_ownership = 'o' in permission
p_manage_access_control = 'p' in permission
parsed = cls(read=p_read, create=p_create, write=p_write, delete=p_delete,
move=p_move, execute=p_execute, manage_ownership=p_manage_ownership,
manage_access_control=p_manage_access_control)
return parsed
class AccessPolicy(BlobAccessPolicy):
"""Access Policy class used by the set and get access policy methods in each service.
A stored access policy can specify the start time, expiry time, and
permissions for the Shared Access Signatures with which it's associated.
Depending on how you want to control access to your resource, you can
specify all of these parameters within the stored access policy, and omit
them from the URL for the Shared Access Signature. Doing so permits you to
modify the associated signature's behavior at any time, as well as to revoke
it. Or you can specify one or more of the access policy parameters within
the stored access policy, and the others on the URL. Finally, you can
specify all of the parameters on the URL. In this case, you can use the
stored access policy to revoke the signature, but not to modify its behavior.
Together the Shared Access Signature and the stored access policy must
include all fields required to authenticate the signature. If any required
fields are missing, the request will fail. Likewise, if a field is specified
both in the Shared Access Signature URL and in the stored access policy, the
request will fail with status code 400 (Bad Request).
:param permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:type permission: str or ~azure.storage.datalake.FileSystemSasPermissions
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: ~datetime.datetime or str
:keyword start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:paramtype start: ~datetime.datetime or str
"""
def __init__(self, permission=None, expiry=None, **kwargs):
super(AccessPolicy, self).__init__(
permission=permission, expiry=expiry, start=kwargs.pop('start', None)
)
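# Editor's note -- illustrative sketch, not part of the SDK source: building a stored
# access policy granting read + list access for one day. The exact permissions and expiry
# window are arbitrary example values.
def _example_access_policy():
    from datetime import datetime, timedelta
    return AccessPolicy(
        permission=FileSystemSasPermissions(read=True, list=True),
        expiry=datetime.utcnow() + timedelta(days=1),
        start=datetime.utcnow(),
    )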
class ResourceTypes(BlobResourceTypes):
"""
Specifies the resource types that are accessible with the account SAS.
:param bool service:
Access to service-level APIs (e.g.List File Systems)
:param bool file_system:
Access to file_system-level APIs (e.g., Create/Delete file system,
List Directories/Files)
:param bool object:
Access to object-level APIs for
files(e.g. Create File, etc.)
"""
def __init__(self, service=False, file_system=False, object=False # pylint: disable=redefined-builtin
):
super(ResourceTypes, self).__init__(service=service, container=file_system, object=object)
class UserDelegationKey(BlobUserDelegationKey):
"""
Represents a user delegation key, provided to the user by Azure Storage
based on their Azure Active Directory access token.
The fields are saved as simple strings since the user does not have to interact with this object;
to generate an identity SAS, the user can simply pass it to the right API.
:ivar str signed_oid:
Object ID of this token.
:ivar str signed_tid:
Tenant ID of the tenant that issued this token.
:ivar str signed_start:
The datetime this token becomes valid.
:ivar str signed_expiry:
The datetime this token expires.
:ivar str signed_service:
What service this key is valid for.
:ivar str signed_version:
The version identifier of the REST service that created this token.
:ivar str value:
The user delegation key.
"""
@classmethod
def _from_generated(cls, generated):
delegation_key = cls()
delegation_key.signed_oid = generated.signed_oid
delegation_key.signed_tid = generated.signed_tid
delegation_key.signed_start = generated.signed_start
delegation_key.signed_expiry = generated.signed_expiry
delegation_key.signed_service = generated.signed_service
delegation_key.signed_version = generated.signed_version
delegation_key.value = generated.value
return delegation_key
class PublicAccess(str, Enum):
"""
Specifies whether data in the file system may be accessed publicly and the level of access.
"""
File = 'blob'
"""
Specifies public read access for files. File data within this file system can be read
via anonymous request, but file system data is not available. Clients cannot enumerate
files within the file system via anonymous request.
"""
FileSystem = 'container'
"""
Specifies full public read access for file system and file data. Clients can enumerate
files within the file system via anonymous request, but cannot enumerate file systems
within the storage account.
"""
@classmethod
def _from_generated(cls, public_access):
if public_access == "blob": # pylint:disable=no-else-return
return cls.File
elif public_access == "container":
return cls.FileSystem
return None
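# Illustrative sketch (the service client call below is an assumption, shown only
# to clarify how the enum values above map to service-side access levels):
#
#     service_client.create_file_system(
#         "myfilesystem", public_access=PublicAccess.FileSystem)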
class LocationMode(object):
"""
Specifies the location the request should be sent to. This mode only applies
for RA-GRS accounts which allow secondary read access. All other account types
must use PRIMARY.
"""
PRIMARY = 'primary' #: Requests should be sent to the primary location.
SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible.
class DelimitedJsonDialect(BlobDelimitedJSON):
"""Defines the input or output JSON serialization for a datalake query.
:keyword str delimiter: The line separator character, default value is '\n'
"""
class DelimitedTextDialect(BlobDelimitedTextDialect):
"""Defines the input or output delimited (CSV) serialization for a datalake query request.
:keyword str delimiter:
Column separator, defaults to ','.
:keyword str quotechar:
Field quote, defaults to '"'.
:keyword str lineterminator:
Record separator, defaults to '\n'.
:keyword str escapechar:
Escape char, defaults to empty.
:keyword bool has_header:
Whether the blob data includes headers in the first line. The default value is False, meaning that the
data will be returned inclusive of the first line. If set to True, the data will be returned exclusive
of the first line.
"""
class ArrowDialect(BlobArrowDialect):
"""field of an arrow schema.
All required parameters must be populated in order to send to Azure.
:param str type: Required.
:keyword str name: The name of the field.
:keyword int precision: The precision of the field.
:keyword int scale: The scale of the field.
"""
class ArrowType(str, Enum):
INT64 = "int64"
BOOL = "bool"
TIMESTAMP_MS = "timestamp[ms]"
STRING = "string"
DOUBLE = "double"
DECIMAL = 'decimal'
class DataLakeFileQueryError(object):
"""The error happened during quick query operation.
:ivar str error:
The name of the error.
:ivar bool is_fatal:
If true, this error prevents further query processing. More result data may be returned,
but there is no guarantee that all of the original data will be processed.
If false, this error does not prevent further query processing.
:ivar str description:
A description of the error.
:ivar int position:
The blob offset at which the error occurred.
"""
def __init__(self, error=None, is_fatal=False, description=None, position=None):
self.error = error
self.is_fatal = is_fatal
self.description = description
self.position = position
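# Illustrative sketch (the query call and its ``on_error`` keyword are assumptions,
# shown only to illustrate how DataLakeFileQueryError instances are surfaced):
#
#     def on_error(error):
#         # ``error`` is a DataLakeFileQueryError
#         print(error.error, error.is_fatal, error.description, error.position)
#
#     file_client.query_file("SELECT * from DataLakeStorage", on_error=on_error)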
class AccessControlChangeCounters(DictMixin):
"""
AccessControlChangeCounters contains counts of operations that change Access Control Lists recursively.
:ivar int directories_successful:
Number of directories where Access Control List has been updated successfully.
:ivar int files_successful:
Number of files where Access Control List has been updated successfully.
:ivar int failure_count:
Number of paths where Access Control List update has failed.
"""
def __init__(self, directories_successful, files_successful, failure_count):
self.directories_successful = directories_successful
self.files_successful = files_successful
self.failure_count = failure_count
class AccessControlChangeResult(DictMixin):
"""
AccessControlChangeResult contains result of operations that change Access Control Lists recursively.
:ivar ~azure.storage.filedatalake.AccessControlChangeCounters counters:
Contains counts of paths changed from start of the operation.
:ivar str continuation:
Optional continuation token.
Value is present when operation is split into multiple batches and can be used to resume progress.
"""
def __init__(self, counters, continuation):
self.counters = counters
self.continuation = continuation
class AccessControlChangeFailure(DictMixin):
"""
Represents an entry that failed to update Access Control List.
:ivar str name:
Name of the entry.
:ivar bool is_directory:
Indicates whether the entry is a directory.
:ivar str error_message:
Indicates the reason why the entry failed to update.
"""
def __init__(self, name, is_directory, error_message):
self.name = name
self.is_directory = is_directory
self.error_message = error_message
class AccessControlChanges(DictMixin):
"""
AccessControlChanges contains batch and cumulative counts of operations
that change Access Control Lists recursively.
Additionally it exposes path entries that failed to update while these operations progress.
:ivar ~azure.storage.filedatalake.AccessControlChangeCounters batch_counters:
Contains counts of paths changed within single batch.
:ivar ~azure.storage.filedatalake.AccessControlChangeCounters aggregate_counters:
Contains counts of paths changed from start of the operation.
:ivar list(~azure.storage.filedatalake.AccessControlChangeFailure) batch_failures:
List of path entries that failed to update Access Control List within single batch.
:ivar str continuation:
An opaque continuation token that may be used to resume the operations in case of failures.
"""
def __init__(self, batch_counters, aggregate_counters, batch_failures, continuation):
self.batch_counters = batch_counters
self.aggregate_counters = aggregate_counters
self.batch_failures = batch_failures
self.continuation = continuation
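# Illustrative sketch (the directory client method and ``progress_hook`` keyword
# are assumptions, used only to show how AccessControlChanges is consumed):
#
#     def report_progress(changes):
#         # ``changes`` is an AccessControlChanges instance
#         totals = changes.aggregate_counters
#         print(totals.directories_successful, totals.files_successful,
#               totals.failure_count)
#         if changes.continuation:
#             pass  # opaque token that could be stored to resume after a failure
#
#     directory_client.set_access_control_recursive(
#         acl="user::rwx,group::r-x,other::r--", progress_hook=report_progress)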
class DataLakeAclChangeFailedError(Exception):
"""The error happened during set/update/remove acl recursive operation.
:ivar ~azure.core.exceptions.AzureError error:
The exception.
:ivar str description:
A description of the error.
:ivar str continuation:
An opaque continuation token that may be used to resume the operations in case of failures.
"""
def __init__(self, error, description, continuation):
self.error = error
self.description = description
self.continuation = continuation
| 42.770883
| 117
| 0.685843
|
from enum import Enum
from azure.storage.blob import LeaseProperties as BlobLeaseProperties
from azure.storage.blob import AccountSasPermissions as BlobAccountSasPermissions
from azure.storage.blob import ResourceTypes as BlobResourceTypes
from azure.storage.blob import UserDelegationKey as BlobUserDelegationKey
from azure.storage.blob import ContentSettings as BlobContentSettings
from azure.storage.blob import AccessPolicy as BlobAccessPolicy
from azure.storage.blob import DelimitedTextDialect as BlobDelimitedTextDialect
from azure.storage.blob import DelimitedJsonDialect as BlobDelimitedJSON
from azure.storage.blob import ArrowDialect as BlobArrowDialect
from azure.storage.blob._models import ContainerPropertiesPaged
from ._shared.models import DictMixin
class FileSystemProperties(object):
def __init__(self):
self.name = None
self.last_modified = None
self.etag = None
self.lease = None
self.public_access = None
self.has_immutability_policy = None
self.has_legal_hold = None
self.metadata = None
@classmethod
def _from_generated(cls, generated):
props = cls()
props.name = generated.name
props.last_modified = generated.properties.last_modified
props.etag = generated.properties.etag
props.lease = LeaseProperties._from_generated(generated)
props.public_access = PublicAccess._from_generated(
generated.properties.public_access)
props.has_immutability_policy = generated.properties.has_immutability_policy
props.has_legal_hold = generated.properties.has_legal_hold
props.metadata = generated.metadata
return props
@classmethod
def _convert_from_container_props(cls, container_properties):
container_properties.__class__ = cls
container_properties.public_access = PublicAccess._from_generated(
container_properties.public_access)
container_properties.lease.__class__ = LeaseProperties
return container_properties
class FileSystemPropertiesPaged(ContainerPropertiesPaged):
def __init__(self, *args, **kwargs):
super(FileSystemPropertiesPaged, self).__init__(
*args,
**kwargs
)
@staticmethod
def _build_item(item):
return FileSystemProperties._from_generated(item)
class DirectoryProperties(DictMixin):
def __init__(self, **kwargs):
self.name = kwargs.get('name')
self.etag = kwargs.get('ETag')
self.deleted = None
self.metadata = kwargs.get('metadata')
self.lease = LeaseProperties(**kwargs)
self.last_modified = kwargs.get('Last-Modified')
self.creation_time = kwargs.get('x-ms-creation-time')
self.deleted_time = None
self.remaining_retention_days = None
class FileProperties(DictMixin):
def __init__(self, **kwargs):
self.name = kwargs.get('name')
self.etag = kwargs.get('ETag')
self.deleted = None
self.metadata = kwargs.get('metadata')
self.lease = LeaseProperties(**kwargs)
self.last_modified = kwargs.get('Last-Modified')
self.creation_time = kwargs.get('x-ms-creation-time')
self.size = kwargs.get('Content-Length')
self.deleted_time = None
self.expiry_time = kwargs.get("x-ms-expiry-time")
self.remaining_retention_days = None
self.content_settings = ContentSettings(**kwargs)
class PathProperties(object):
def __init__(self, **kwargs):
super(PathProperties, self).__init__(
**kwargs
)
self.name = kwargs.pop('name', None)
self.owner = kwargs.get('owner', None)
self.group = kwargs.get('group', None)
self.permissions = kwargs.get('permissions', None)
self.last_modified = kwargs.get('last_modified', None)
self.is_directory = kwargs.get('is_directory', False)
self.etag = kwargs.get('etag', None)
self.content_length = kwargs.get('content_length', None)
@classmethod
def _from_generated(cls, generated):
path_prop = PathProperties()
path_prop.name = generated.name
path_prop.owner = generated.owner
path_prop.group = generated.group
path_prop.permissions = generated.permissions
path_prop.last_modified = generated.last_modified
path_prop.is_directory = bool(generated.is_directory)
path_prop.etag = generated.additional_properties.get('etag')
path_prop.content_length = generated.content_length
return path_prop
class LeaseProperties(BlobLeaseProperties):
class ContentSettings(BlobContentSettings):
def __init__(
self, **kwargs):
super(ContentSettings, self).__init__(
**kwargs
)
class AccountSasPermissions(BlobAccountSasPermissions):
def __init__(self, read=False, write=False, delete=False, list=False,
create=False):
super(AccountSasPermissions, self).__init__(
read=read, create=create, write=write, list=list,
delete=delete
)
class FileSystemSasPermissions(object):
def __init__(self, read=False, write=False, delete=False, list=False,
**kwargs):
self.read = read
self.write = write
self.delete = delete
self.list = list
self.move = kwargs.pop('move', None)
self.execute = kwargs.pop('execute', None)
self.manage_ownership = kwargs.pop('manage_ownership', None)
self.manage_access_control = kwargs.pop('manage_access_control', None)
self._str = (('r' if self.read else '') +
('w' if self.write else '') +
('d' if self.delete else '') +
('l' if self.list else '') +
('m' if self.move else '') +
('e' if self.execute else '') +
('o' if self.manage_ownership else '') +
('p' if self.manage_access_control else ''))
def __str__(self):
return self._str
@classmethod
def from_string(cls, permission):
p_read = 'r' in permission
p_write = 'w' in permission
p_delete = 'd' in permission
p_list = 'l' in permission
p_move = 'm' in permission
p_execute = 'e' in permission
p_manage_ownership = 'o' in permission
p_manage_access_control = 'p' in permission
parsed = cls(read=p_read, write=p_write, delete=p_delete,
list=p_list, move=p_move, execute=p_execute, manage_ownership=p_manage_ownership,
manage_access_control=p_manage_access_control)
return parsed
class DirectorySasPermissions(object):
def __init__(self, read=False, create=False, write=False,
delete=False, **kwargs):
self.read = read
self.create = create
self.write = write
self.delete = delete
self.list = kwargs.pop('list', None)
self.move = kwargs.pop('move', None)
self.execute = kwargs.pop('execute', None)
self.manage_ownership = kwargs.pop('manage_ownership', None)
self.manage_access_control = kwargs.pop('manage_access_control', None)
self._str = (('r' if self.read else '') +
('c' if self.create else '') +
('w' if self.write else '') +
('d' if self.delete else '') +
('l' if self.list else '') +
('m' if self.move else '') +
('e' if self.execute else '') +
('o' if self.manage_ownership else '') +
('p' if self.manage_access_control else ''))
def __str__(self):
return self._str
@classmethod
def from_string(cls, permission):
p_read = 'r' in permission
p_create = 'c' in permission
p_write = 'w' in permission
p_delete = 'd' in permission
p_list = 'l' in permission
p_move = 'm' in permission
p_execute = 'e' in permission
p_manage_ownership = 'o' in permission
p_manage_access_control = 'p' in permission
parsed = cls(read=p_read, create=p_create, write=p_write, delete=p_delete,
list=p_list, move=p_move, execute=p_execute, manage_ownership=p_manage_ownership,
manage_access_control=p_manage_access_control)
return parsed
class FileSasPermissions(object):
def __init__(self, read=False, create=False, write=False, delete=False, **kwargs):
self.read = read
self.create = create
self.write = write
self.delete = delete
self.list = kwargs.pop('list', None)
self.move = kwargs.pop('move', None)
self.execute = kwargs.pop('execute', None)
self.manage_ownership = kwargs.pop('manage_ownership', None)
self.manage_access_control = kwargs.pop('manage_access_control', None)
self._str = (('r' if self.read else '') +
('c' if self.create else '') +
('w' if self.write else '') +
('d' if self.delete else '') +
('m' if self.move else '') +
('e' if self.execute else '') +
('o' if self.manage_ownership else '') +
('p' if self.manage_access_control else ''))
def __str__(self):
return self._str
@classmethod
def from_string(cls, permission):
p_read = 'r' in permission
p_create = 'c' in permission
p_write = 'w' in permission
p_delete = 'd' in permission
p_move = 'm' in permission
p_execute = 'e' in permission
p_manage_ownership = 'o' in permission
p_manage_access_control = 'p' in permission
parsed = cls(read=p_read, create=p_create, write=p_write, delete=p_delete,
move=p_move, execute=p_execute, manage_ownership=p_manage_ownership,
manage_access_control=p_manage_access_control)
return parsed
class AccessPolicy(BlobAccessPolicy):
def __init__(self, permission=None, expiry=None, **kwargs):
super(AccessPolicy, self).__init__(
permission=permission, expiry=expiry, start=kwargs.pop('start', None)
)
class ResourceTypes(BlobResourceTypes):
def __init__(self, service=False, file_system=False, object=False
):
super(ResourceTypes, self).__init__(service=service, container=file_system, object=object)
class UserDelegationKey(BlobUserDelegationKey):
@classmethod
def _from_generated(cls, generated):
delegation_key = cls()
delegation_key.signed_oid = generated.signed_oid
delegation_key.signed_tid = generated.signed_tid
delegation_key.signed_start = generated.signed_start
delegation_key.signed_expiry = generated.signed_expiry
delegation_key.signed_service = generated.signed_service
delegation_key.signed_version = generated.signed_version
delegation_key.value = generated.value
return delegation_key
class PublicAccess(str, Enum):
File = 'blob'
FileSystem = 'container'
@classmethod
def _from_generated(cls, public_access):
if public_access == "blob":
return cls.File
elif public_access == "container":
return cls.FileSystem
return None
class LocationMode(object):
PRIMARY = 'primary'
SECONDARY = 'secondary'
class DelimitedJsonDialect(BlobDelimitedJSON):
class DelimitedTextDialect(BlobDelimitedTextDialect):
class ArrowDialect(BlobArrowDialect):
class ArrowType(str, Enum):
INT64 = "int64"
BOOL = "bool"
TIMESTAMP_MS = "timestamp[ms]"
STRING = "string"
DOUBLE = "double"
DECIMAL = 'decimal'
class DataLakeFileQueryError(object):
def __init__(self, error=None, is_fatal=False, description=None, position=None):
self.error = error
self.is_fatal = is_fatal
self.description = description
self.position = position
class AccessControlChangeCounters(DictMixin):
def __init__(self, directories_successful, files_successful, failure_count):
self.directories_successful = directories_successful
self.files_successful = files_successful
self.failure_count = failure_count
class AccessControlChangeResult(DictMixin):
def __init__(self, counters, continuation):
self.counters = counters
self.continuation = continuation
class AccessControlChangeFailure(DictMixin):
def __init__(self, name, is_directory, error_message):
self.name = name
self.is_directory = is_directory
self.error_message = error_message
class AccessControlChanges(DictMixin):
def __init__(self, batch_counters, aggregate_counters, batch_failures, continuation):
self.batch_counters = batch_counters
self.aggregate_counters = aggregate_counters
self.batch_failures = batch_failures
self.continuation = continuation
class DataLakeAclChangeFailedError(Exception):
def __init__(self, error, description, continuation):
self.error = error
self.description = description
self.continuation = continuation
| true
| true
|
79075282ff1af64fd6ff8b4851528927b9ed050a
| 19,538
|
py
|
Python
|
make_train_test_split.py
|
jnwei/deep-molecular-massspec
|
f82884a076a00fd45185dc303cd67efbc0c26bac
|
[
"Apache-2.0"
] | 79
|
2018-06-28T15:54:10.000Z
|
2022-03-27T12:41:57.000Z
|
make_train_test_split.py
|
jingxual/deep-molecular-massspec
|
b82b40b57441b939da5899dfb575b284d20cea8e
|
[
"Apache-2.0"
] | 17
|
2018-08-20T17:44:05.000Z
|
2021-02-25T15:48:02.000Z
|
make_train_test_split.py
|
jingxual/deep-molecular-massspec
|
b82b40b57441b939da5899dfb575b284d20cea8e
|
[
"Apache-2.0"
] | 32
|
2018-05-31T14:56:37.000Z
|
2022-03-27T12:41:58.000Z
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Creates datasets from the NIST sdf files and makes experiment setup jsons.
This module first breaks up the main NIST library dataset into a train/
validation/test set, and the replicates library into a validation and test set.
As all the molecules in the replicates file are also in the main NIST library,
the mainlib datasets will exclude inchikeys from the replicates library. All the
molecules in both datasets are to be included in one of these datasets, unless
an argument is passed for mainlib_maximum_num_molecules_to_use or
replicates_maximum_num_molecules_to_use.
The component datasets are saved as TFRecords, by the names defined in
dataset_setup_constants and the library from which the data came
(e.g. mainlib_train_from_mainlib.tfrecord). This will result in 7 TFRecord files
total, one each for the train/validation/test splits from the main library, and
two each for the replicates validation/test splits, one with its data from the
mainlib NIST file, and the other from the replicates file.
For each experiment setup included in
dataset_setup_constants.EXPERIMENT_SETUPS_LIST, a json file is written. This
json file names the files to be used for each part of the experiment, i.e.
library matching, spectra prediction.
Note: Reading sdf files from cns is currently not supported.
Example usage:
make_train_test_split.py \
--main_sdf_name=testdata/test_14_mend.sdf
--replicates_sdf_name=testdata/test_2_mend.sdf \
--output_master_dir=<output_dir_name>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import random
from absl import app
from absl import flags
import dataset_setup_constants as ds_constants
import mass_spec_constants as ms_constants
import parse_sdf_utils
import train_test_split_utils
import six
import tensorflow as tf
FLAGS = flags.FLAGS
flags.DEFINE_string(
'main_sdf_name', 'testdata/test_14_mend.sdf',
'specify full path of sdf file to parse, to be used for'
' training sets, and validation/test sets')
flags.DEFINE_string(
'replicates_sdf_name',
'testdata/test_2_mend.sdf',
'specify full path of a second sdf file to parse, to be'
' used for the validation/test set. Molecules in this sdf'
' will be excluded from the main train/val/test sets.')
# Note: For family based splitting, all molecules passing the filter will be
# placed in validation/test datasets, and then split according to the relative
# ratio between the validation/test fractions. If these are both equal to 0.0,
# these values will be overwritten to 0.5 and 0.5.
flags.DEFINE_list(
'main_train_val_test_fractions', '1.0,0.0,0.0',
'specify how large to make the train, val, and test sets'
' as a fraction of the whole dataset.')
flags.DEFINE_integer('mainlib_maximum_num_molecules_to_use', None,
'specify how many total samples to use for parsing')
flags.DEFINE_integer('replicates_maximum_num_molecules_to_use', None,
'specify how many total samples to use for parsing')
flags.DEFINE_list(
'replicates_train_val_test_fractions', '0.0,0.5,0.5',
'specify fraction of replicates molecules to use'
' for the three replicates sample files.')
flags.DEFINE_enum(
'splitting_type', 'random', ['random', 'steroid', 'diazo'],
'specify splitting method to use for creating '
'training/validation/test sets')
flags.DEFINE_string('output_master_dir', '/tmp/output_dataset_dir',
'specify directory to save records')
flags.DEFINE_integer('max_atoms', ms_constants.MAX_ATOMS,
'specify maximum number of atoms to allow')
flags.DEFINE_integer('max_mass_spec_peak_loc', ms_constants.MAX_PEAK_LOC,
'specify greatest m/z spectrum peak to allow')
INCHIKEY_FILENAME_END = '.inchikey.txt'
TFRECORD_FILENAME_END = '.tfrecord'
NP_LIBRARY_ARRAY_END = '.spectra_library.npy'
FROM_MAINLIB_FILENAME_MODIFIER = '_from_mainlib'
FROM_REPLICATES_FILENAME_MODIFIER = '_from_replicates'
def make_mainlib_replicates_train_test_split(
mainlib_mol_list,
replicates_mol_list,
splitting_type,
mainlib_fractions,
replicates_fractions,
mainlib_maximum_num_molecules_to_use=None,
replicates_maximum_num_molecules_to_use=None,
rseed=42):
"""Makes train/validation/test inchikey lists from two lists of rdkit.Mol.
Args:
mainlib_mol_list : list of molecules from main library
replicates_mol_list : list of molecules from replicates library
splitting_type : type of splitting to use for validation splits.
mainlib_fractions : TrainValTestFractions namedtuple
holding desired fractions for train/val/test split of mainlib
replicates_fractions : TrainValTestFractions namedtuple
holding desired fractions for train/val/test split of replicates.
For the replicates set, the train fraction should be set to 0.
mainlib_maximum_num_molecules_to_use : Largest number of molecules to use
when making datasets from mainlib
replicates_maximum_num_molecules_to_use : Largest number of molecules to use
when making datasets from replicates
rseed : random seed for shuffling
Returns:
main_inchikey_dict : Dict that is keyed by inchikey, containing a list of
rdkit.Mol objects corresponding to that inchikey from the mainlib
replicates_inchikey_dict : Dict that is keyed by inchikey, containing a list
of rdkit.Mol objects corresponding to that inchikey from the replicates
library
main_replicates_split_inchikey_lists_dict : dict with keys :
'mainlib_train', 'mainlib_validation', 'mainlib_test',
'replicates_train', 'replicates_validation', 'replicates_test'
Values are lists of inchikeys corresponding to each dataset.
"""
random.seed(rseed)
main_inchikey_dict = train_test_split_utils.make_inchikey_dict(
mainlib_mol_list)
main_inchikey_list = main_inchikey_dict.keys()
if six.PY3:
main_inchikey_list = list(main_inchikey_list)
if mainlib_maximum_num_molecules_to_use is not None:
main_inchikey_list = random.sample(main_inchikey_list,
mainlib_maximum_num_molecules_to_use)
replicates_inchikey_dict = train_test_split_utils.make_inchikey_dict(
replicates_mol_list)
replicates_inchikey_list = replicates_inchikey_dict.keys()
if six.PY3:
replicates_inchikey_list = list(replicates_inchikey_list)
if replicates_maximum_num_molecules_to_use is not None:
replicates_inchikey_list = random.sample(
replicates_inchikey_list, replicates_maximum_num_molecules_to_use)
# Make train/val/test splits for main dataset.
main_train_validation_test_inchikeys = (
train_test_split_utils.make_train_val_test_split_inchikey_lists(
main_inchikey_list,
main_inchikey_dict,
mainlib_fractions,
holdout_inchikey_list=replicates_inchikey_list,
splitting_type=splitting_type))
# Make train/val/test splits for replicates dataset.
replicates_validation_test_inchikeys = (
train_test_split_utils.make_train_val_test_split_inchikey_lists(
replicates_inchikey_list,
replicates_inchikey_dict,
replicates_fractions,
splitting_type=splitting_type))
component_inchikey_dict = {
ds_constants.MAINLIB_TRAIN_BASENAME:
main_train_validation_test_inchikeys.train,
ds_constants.MAINLIB_VALIDATION_BASENAME:
main_train_validation_test_inchikeys.validation,
ds_constants.MAINLIB_TEST_BASENAME:
main_train_validation_test_inchikeys.test,
ds_constants.REPLICATES_TRAIN_BASENAME:
replicates_validation_test_inchikeys.train,
ds_constants.REPLICATES_VALIDATION_BASENAME:
replicates_validation_test_inchikeys.validation,
ds_constants.REPLICATES_TEST_BASENAME:
replicates_validation_test_inchikeys.test
}
train_test_split_utils.assert_all_lists_mutally_exclusive(
list(component_inchikey_dict.values()))
# Test that the union of the component inchikey lists is equal to the set of
# inchikeys in the main and replicates libraries.
all_inchikeys_in_components = []
for ikey_list in list(component_inchikey_dict.values()):
for ikey in ikey_list:
all_inchikeys_in_components.append(ikey)
assert set(main_inchikey_list + replicates_inchikey_list) == set(
all_inchikeys_in_components
), ('The inchikeys in the original inchikey dictionary are not all included'
' in the train/val/test component libraries')
return (main_inchikey_dict, replicates_inchikey_dict, component_inchikey_dict)
def write_list_of_inchikeys(inchikey_list, base_name, output_dir):
"""Write list of inchikeys as a text file."""
inchikey_list_name = base_name + INCHIKEY_FILENAME_END
with tf.gfile.Open(os.path.join(output_dir, inchikey_list_name),
'w') as writer:
for inchikey in inchikey_list:
writer.write('%s\n' % inchikey)
def write_all_dataset_files(inchikey_dict,
inchikey_list,
base_name,
output_dir,
max_atoms,
max_mass_spec_peak_loc,
make_library_array=False):
"""Helper function for writing all the files associated with a TFRecord.
Args:
inchikey_dict : Full dictionary keyed by inchikey containing lists of
rdkit.Mol objects
inchikey_list : List of inchikeys to include in dataset
base_name : Base name for the dataset
output_dir : Path for saving all TFRecord files
max_atoms : Maximum number of atoms to include for a given molecule
max_mass_spec_peak_loc : Largest m/z peak to include in a spectrum.
make_library_array : Flag for whether to make library array
Returns:
Saves 3 files:
basename.tfrecord : a TFRecord file,
basename.inchikey.txt : a text file with all the inchikeys in the dataset
basename.tfrecord.info: a text file with one line describing
the length of the TFRecord file.
Also saves if make_library_array is set:
basename.spectra_library.npy : see parse_sdf_utils.write_dicts_to_example
"""
record_name = base_name + TFRECORD_FILENAME_END
mol_list = train_test_split_utils.make_mol_list_from_inchikey_dict(
inchikey_dict, inchikey_list)
if make_library_array:
library_array_pathname = base_name + NP_LIBRARY_ARRAY_END
parse_sdf_utils.write_dicts_to_example(
mol_list, os.path.join(output_dir, record_name),
max_atoms, max_mass_spec_peak_loc,
os.path.join(output_dir, library_array_pathname))
else:
parse_sdf_utils.write_dicts_to_example(
mol_list, os.path.join(output_dir, record_name), max_atoms,
max_mass_spec_peak_loc)
write_list_of_inchikeys(inchikey_list, base_name, output_dir)
parse_sdf_utils.write_info_file(mol_list, os.path.join(
output_dir, record_name))
def write_mainlib_split_datasets(component_inchikey_dict, mainlib_inchikey_dict,
output_dir, max_atoms, max_mass_spec_peak_loc):
"""Write all train/val/test set TFRecords from main NIST sdf file."""
for component_kwarg in component_inchikey_dict.keys():
component_mainlib_filename = (
component_kwarg + FROM_MAINLIB_FILENAME_MODIFIER)
if component_kwarg == ds_constants.MAINLIB_TRAIN_BASENAME:
write_all_dataset_files(
mainlib_inchikey_dict,
component_inchikey_dict[component_kwarg],
component_mainlib_filename,
output_dir,
max_atoms,
max_mass_spec_peak_loc,
make_library_array=True)
else:
write_all_dataset_files(mainlib_inchikey_dict,
component_inchikey_dict[component_kwarg],
component_mainlib_filename, output_dir, max_atoms,
max_mass_spec_peak_loc)
def write_replicates_split_datasets(component_inchikey_dict,
replicates_inchikey_dict, output_dir,
max_atoms, max_mass_spec_peak_loc):
"""Write replicates val/test set TFRecords from replicates sdf file."""
for component_kwarg in [
ds_constants.REPLICATES_VALIDATION_BASENAME,
ds_constants.REPLICATES_TEST_BASENAME
]:
component_replicates_filename = (
component_kwarg + FROM_REPLICATES_FILENAME_MODIFIER)
write_all_dataset_files(replicates_inchikey_dict,
component_inchikey_dict[component_kwarg],
component_replicates_filename, output_dir,
max_atoms, max_mass_spec_peak_loc)
def combine_inchikey_sets(dataset_subdivision_list, dataset_split_dict):
"""A function to combine lists of inchikeys that are values from a dict.
Args:
dataset_subdivision_list: List of keys in dataset_split_dict to combine
into one list
dataset_split_dict: dict containing keys in dataset_subdivision_list, with
lists of inchikeys as values.
Returns:
A list of inchikeys.
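Example (illustrative only; the keys and inchikeys below are made up):
combine_inchikey_sets(['mainlib_train', 'mainlib_validation'],
{'mainlib_train': ['KEYA'], 'mainlib_validation': ['KEYB']})
returns ['KEYA', 'KEYB'].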
"""
dataset_inchikey_list = []
for dataset_subdivision_name in dataset_subdivision_list:
dataset_inchikey_list.extend(dataset_split_dict[dataset_subdivision_name])
return dataset_inchikey_list
def check_experiment_setup(experiment_setup_dict, component_inchikey_dict):
"""Validates experiment setup for given lists of inchikeys."""
# Check that the union of the library matching observed and library
# matching predicted sets are equal to the set of inchikeys in the
# mainlib_inchikey_dict
all_inchikeys_in_library = (
combine_inchikey_sets(
experiment_setup_dict[ds_constants.LIBRARY_MATCHING_OBSERVED_KEY],
component_inchikey_dict) +
combine_inchikey_sets(
experiment_setup_dict[ds_constants.LIBRARY_MATCHING_PREDICTED_KEY],
component_inchikey_dict))
all_inchikeys_in_use = []
for kwarg in component_inchikey_dict.keys():
all_inchikeys_in_use.extend(component_inchikey_dict[kwarg])
assert set(all_inchikeys_in_use) == set(all_inchikeys_in_library), (
'Inchikeys in library for library matching do not match full dataset.')
# Check that all inchikeys in query are found in full library of inchikeys.
assert set(
combine_inchikey_sets(
experiment_setup_dict[ds_constants.LIBRARY_MATCHING_QUERY_KEY],
component_inchikey_dict)).issubset(set(all_inchikeys_in_library)), (
'Inchikeys in query set for library matching not '
'found in library.')
def write_json_for_experiment(experiment_setup, output_dir):
"""Writes json for experiment, recording relevant files for each component.
Writes a json containing a list of TFRecord file names to read
for each experiment component, i.e. spectrum_prediction, library_matching.
Args:
experiment_setup: A dataset_setup_constants.ExperimentSetup tuple
output_dir: directory to write json
Returns:
Writes json recording which files to load for each component
of the experiment
Raises:
ValueError: if the experiment component is not specified to be taken from
either the main NIST library or the replicates library.
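Example of the written json (the dataset keys below are hypothetical; only the
file-name suffixes follow the constants defined in this module):
{"spectrum_prediction": ["mainlib_train_from_mainlib.tfrecord"],
"library_matching": ["replicates_validation_from_replicates.tfrecord"],
"training_spectra_array": "mainlib_train_from_mainlib.spectra_library.npy"}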
"""
experiment_json_dict = {}
for dataset_kwarg in experiment_setup.experiment_setup_dataset_dict:
if dataset_kwarg in experiment_setup.data_to_get_from_mainlib:
experiment_json_dict[dataset_kwarg] = [
(component_basename + FROM_MAINLIB_FILENAME_MODIFIER +
TFRECORD_FILENAME_END) for component_basename in
experiment_setup.experiment_setup_dataset_dict[dataset_kwarg]
]
elif dataset_kwarg in experiment_setup.data_to_get_from_replicates:
experiment_json_dict[dataset_kwarg] = [
(component_basename + FROM_REPLICATES_FILENAME_MODIFIER +
TFRECORD_FILENAME_END) for component_basename in
experiment_setup.experiment_setup_dataset_dict[dataset_kwarg]
]
else:
raise ValueError('Did not specify origin for {}.'.format(dataset_kwarg))
training_spectra_filename = (
ds_constants.MAINLIB_TRAIN_BASENAME + FROM_MAINLIB_FILENAME_MODIFIER +
NP_LIBRARY_ARRAY_END)
experiment_json_dict[
ds_constants.TRAINING_SPECTRA_ARRAY_KEY] = training_spectra_filename
with tf.gfile.Open(os.path.join(output_dir, experiment_setup.json_name),
'w') as writer:
experiment_json = json.dumps(experiment_json_dict)
writer.write(experiment_json)
def main(_):
tf.gfile.MkDir(FLAGS.output_master_dir)
main_train_val_test_fractions_tuple = tuple(
[float(elem) for elem in FLAGS.main_train_val_test_fractions])
main_train_val_test_fractions = train_test_split_utils.TrainValTestFractions(
*main_train_val_test_fractions_tuple)
replicates_train_val_test_fractions_tuple = tuple(
[float(elem) for elem in FLAGS.replicates_train_val_test_fractions])
replicates_train_val_test_fractions = (
train_test_split_utils.TrainValTestFractions(
*replicates_train_val_test_fractions_tuple))
mainlib_mol_list = parse_sdf_utils.get_sdf_to_mol(
FLAGS.main_sdf_name, max_atoms=FLAGS.max_atoms)
replicates_mol_list = parse_sdf_utils.get_sdf_to_mol(
FLAGS.replicates_sdf_name, max_atoms=FLAGS.max_atoms)
# Breaks the inchikeys lists into train/validation/test splits.
(mainlib_inchikey_dict, replicates_inchikey_dict, component_inchikey_dict) = (
make_mainlib_replicates_train_test_split(
mainlib_mol_list,
replicates_mol_list,
FLAGS.splitting_type,
main_train_val_test_fractions,
replicates_train_val_test_fractions,
mainlib_maximum_num_molecules_to_use=FLAGS.
mainlib_maximum_num_molecules_to_use,
replicates_maximum_num_molecules_to_use=FLAGS.
replicates_maximum_num_molecules_to_use))
# Writes TFRecords for each component using info from the main library file
write_mainlib_split_datasets(component_inchikey_dict, mainlib_inchikey_dict,
FLAGS.output_master_dir, FLAGS.max_atoms,
FLAGS.max_mass_spec_peak_loc)
# Writes TFRecords for each component using info from the replicates file
write_replicates_split_datasets(
component_inchikey_dict, replicates_inchikey_dict,
FLAGS.output_master_dir, FLAGS.max_atoms, FLAGS.max_mass_spec_peak_loc)
for experiment_setup in ds_constants.EXPERIMENT_SETUPS_LIST:
# Check that experiment setup is valid.
check_experiment_setup(experiment_setup.experiment_setup_dataset_dict,
component_inchikey_dict)
# Write a json for the experiment setups, pointing to local files.
write_json_for_experiment(experiment_setup, FLAGS.output_master_dir)
if __name__ == '__main__':
app.run(main)
| 42.566449
| 80
| 0.749718
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import random
from absl import app
from absl import flags
import dataset_setup_constants as ds_constants
import mass_spec_constants as ms_constants
import parse_sdf_utils
import train_test_split_utils
import six
import tensorflow as tf
FLAGS = flags.FLAGS
flags.DEFINE_string(
'main_sdf_name', 'testdata/test_14_mend.sdf',
'specify full path of sdf file to parse, to be used for'
' training sets, and validation/test sets')
flags.DEFINE_string(
'replicates_sdf_name',
'testdata/test_2_mend.sdf',
'specify full path of a second sdf file to parse, to be'
' used for the validation/test set. Molecules in this sdf'
' will be excluded from the main train/val/test sets.')
flags.DEFINE_list(
'main_train_val_test_fractions', '1.0,0.0,0.0',
'specify how large to make the train, val, and test sets'
' as a fraction of the whole dataset.')
flags.DEFINE_integer('mainlib_maximum_num_molecules_to_use', None,
'specify how many total samples to use for parsing')
flags.DEFINE_integer('replicates_maximum_num_molecules_to_use', None,
'specify how many total samples to use for parsing')
flags.DEFINE_list(
'replicates_train_val_test_fractions', '0.0,0.5,0.5',
'specify fraction of replicates molecules to use'
' for the three replicates sample files.')
flags.DEFINE_enum(
'splitting_type', 'random', ['random', 'steroid', 'diazo'],
'specify splitting method to use for creating '
'training/validation/test sets')
flags.DEFINE_string('output_master_dir', '/tmp/output_dataset_dir',
'specify directory to save records')
flags.DEFINE_integer('max_atoms', ms_constants.MAX_ATOMS,
'specify maximum number of atoms to allow')
flags.DEFINE_integer('max_mass_spec_peak_loc', ms_constants.MAX_PEAK_LOC,
'specify greatest m/z spectrum peak to allow')
INCHIKEY_FILENAME_END = '.inchikey.txt'
TFRECORD_FILENAME_END = '.tfrecord'
NP_LIBRARY_ARRAY_END = '.spectra_library.npy'
FROM_MAINLIB_FILENAME_MODIFIER = '_from_mainlib'
FROM_REPLICATES_FILENAME_MODIFIER = '_from_replicates'
def make_mainlib_replicates_train_test_split(
mainlib_mol_list,
replicates_mol_list,
splitting_type,
mainlib_fractions,
replicates_fractions,
mainlib_maximum_num_molecules_to_use=None,
replicates_maximum_num_molecules_to_use=None,
rseed=42):
random.seed(rseed)
main_inchikey_dict = train_test_split_utils.make_inchikey_dict(
mainlib_mol_list)
main_inchikey_list = main_inchikey_dict.keys()
if six.PY3:
main_inchikey_list = list(main_inchikey_list)
if mainlib_maximum_num_molecules_to_use is not None:
main_inchikey_list = random.sample(main_inchikey_list,
mainlib_maximum_num_molecules_to_use)
replicates_inchikey_dict = train_test_split_utils.make_inchikey_dict(
replicates_mol_list)
replicates_inchikey_list = replicates_inchikey_dict.keys()
if six.PY3:
replicates_inchikey_list = list(replicates_inchikey_list)
if replicates_maximum_num_molecules_to_use is not None:
replicates_inchikey_list = random.sample(
replicates_inchikey_list, replicates_maximum_num_molecules_to_use)
main_train_validation_test_inchikeys = (
train_test_split_utils.make_train_val_test_split_inchikey_lists(
main_inchikey_list,
main_inchikey_dict,
mainlib_fractions,
holdout_inchikey_list=replicates_inchikey_list,
splitting_type=splitting_type))
replicates_validation_test_inchikeys = (
train_test_split_utils.make_train_val_test_split_inchikey_lists(
replicates_inchikey_list,
replicates_inchikey_dict,
replicates_fractions,
splitting_type=splitting_type))
component_inchikey_dict = {
ds_constants.MAINLIB_TRAIN_BASENAME:
main_train_validation_test_inchikeys.train,
ds_constants.MAINLIB_VALIDATION_BASENAME:
main_train_validation_test_inchikeys.validation,
ds_constants.MAINLIB_TEST_BASENAME:
main_train_validation_test_inchikeys.test,
ds_constants.REPLICATES_TRAIN_BASENAME:
replicates_validation_test_inchikeys.train,
ds_constants.REPLICATES_VALIDATION_BASENAME:
replicates_validation_test_inchikeys.validation,
ds_constants.REPLICATES_TEST_BASENAME:
replicates_validation_test_inchikeys.test
}
train_test_split_utils.assert_all_lists_mutally_exclusive(
list(component_inchikey_dict.values()))
all_inchikeys_in_components = []
for ikey_list in list(component_inchikey_dict.values()):
for ikey in ikey_list:
all_inchikeys_in_components.append(ikey)
assert set(main_inchikey_list + replicates_inchikey_list) == set(
all_inchikeys_in_components
), ('The inchikeys in the original inchikey dictionary are not all included'
' in the train/val/test component libraries')
return (main_inchikey_dict, replicates_inchikey_dict, component_inchikey_dict)
def write_list_of_inchikeys(inchikey_list, base_name, output_dir):
inchikey_list_name = base_name + INCHIKEY_FILENAME_END
with tf.gfile.Open(os.path.join(output_dir, inchikey_list_name),
'w') as writer:
for inchikey in inchikey_list:
writer.write('%s\n' % inchikey)
def write_all_dataset_files(inchikey_dict,
inchikey_list,
base_name,
output_dir,
max_atoms,
max_mass_spec_peak_loc,
make_library_array=False):
record_name = base_name + TFRECORD_FILENAME_END
mol_list = train_test_split_utils.make_mol_list_from_inchikey_dict(
inchikey_dict, inchikey_list)
if make_library_array:
library_array_pathname = base_name + NP_LIBRARY_ARRAY_END
parse_sdf_utils.write_dicts_to_example(
mol_list, os.path.join(output_dir, record_name),
max_atoms, max_mass_spec_peak_loc,
os.path.join(output_dir, library_array_pathname))
else:
parse_sdf_utils.write_dicts_to_example(
mol_list, os.path.join(output_dir, record_name), max_atoms,
max_mass_spec_peak_loc)
write_list_of_inchikeys(inchikey_list, base_name, output_dir)
parse_sdf_utils.write_info_file(mol_list, os.path.join(
output_dir, record_name))
def write_mainlib_split_datasets(component_inchikey_dict, mainlib_inchikey_dict,
output_dir, max_atoms, max_mass_spec_peak_loc):
for component_kwarg in component_inchikey_dict.keys():
component_mainlib_filename = (
component_kwarg + FROM_MAINLIB_FILENAME_MODIFIER)
if component_kwarg == ds_constants.MAINLIB_TRAIN_BASENAME:
write_all_dataset_files(
mainlib_inchikey_dict,
component_inchikey_dict[component_kwarg],
component_mainlib_filename,
output_dir,
max_atoms,
max_mass_spec_peak_loc,
make_library_array=True)
else:
write_all_dataset_files(mainlib_inchikey_dict,
component_inchikey_dict[component_kwarg],
component_mainlib_filename, output_dir, max_atoms,
max_mass_spec_peak_loc)
def write_replicates_split_datasets(component_inchikey_dict,
replicates_inchikey_dict, output_dir,
max_atoms, max_mass_spec_peak_loc):
for component_kwarg in [
ds_constants.REPLICATES_VALIDATION_BASENAME,
ds_constants.REPLICATES_TEST_BASENAME
]:
component_replicates_filename = (
component_kwarg + FROM_REPLICATES_FILENAME_MODIFIER)
write_all_dataset_files(replicates_inchikey_dict,
component_inchikey_dict[component_kwarg],
component_replicates_filename, output_dir,
max_atoms, max_mass_spec_peak_loc)
def combine_inchikey_sets(dataset_subdivision_list, dataset_split_dict):
dataset_inchikey_list = []
for dataset_subdivision_name in dataset_subdivision_list:
dataset_inchikey_list.extend(dataset_split_dict[dataset_subdivision_name])
return dataset_inchikey_list
def check_experiment_setup(experiment_setup_dict, component_inchikey_dict):
all_inchikeys_in_library = (
combine_inchikey_sets(
experiment_setup_dict[ds_constants.LIBRARY_MATCHING_OBSERVED_KEY],
component_inchikey_dict) +
combine_inchikey_sets(
experiment_setup_dict[ds_constants.LIBRARY_MATCHING_PREDICTED_KEY],
component_inchikey_dict))
all_inchikeys_in_use = []
for kwarg in component_inchikey_dict.keys():
all_inchikeys_in_use.extend(component_inchikey_dict[kwarg])
assert set(all_inchikeys_in_use) == set(all_inchikeys_in_library), (
'Inchikeys in library for library matching do not match full dataset.')
assert set(
combine_inchikey_sets(
experiment_setup_dict[ds_constants.LIBRARY_MATCHING_QUERY_KEY],
component_inchikey_dict)).issubset(set(all_inchikeys_in_library)), (
'Inchikeys in query set for library matching not '
'found in library.')
def write_json_for_experiment(experiment_setup, output_dir):
experiment_json_dict = {}
for dataset_kwarg in experiment_setup.experiment_setup_dataset_dict:
if dataset_kwarg in experiment_setup.data_to_get_from_mainlib:
experiment_json_dict[dataset_kwarg] = [
(component_basename + FROM_MAINLIB_FILENAME_MODIFIER +
TFRECORD_FILENAME_END) for component_basename in
experiment_setup.experiment_setup_dataset_dict[dataset_kwarg]
]
elif dataset_kwarg in experiment_setup.data_to_get_from_replicates:
experiment_json_dict[dataset_kwarg] = [
(component_basename + FROM_REPLICATES_FILENAME_MODIFIER +
TFRECORD_FILENAME_END) for component_basename in
experiment_setup.experiment_setup_dataset_dict[dataset_kwarg]
]
else:
raise ValueError('Did not specify origin for {}.'.format(dataset_kwarg))
training_spectra_filename = (
ds_constants.MAINLIB_TRAIN_BASENAME + FROM_MAINLIB_FILENAME_MODIFIER +
NP_LIBRARY_ARRAY_END)
experiment_json_dict[
ds_constants.TRAINING_SPECTRA_ARRAY_KEY] = training_spectra_filename
with tf.gfile.Open(os.path.join(output_dir, experiment_setup.json_name),
'w') as writer:
experiment_json = json.dumps(experiment_json_dict)
writer.write(experiment_json)
def main(_):
tf.gfile.MkDir(FLAGS.output_master_dir)
main_train_val_test_fractions_tuple = tuple(
[float(elem) for elem in FLAGS.main_train_val_test_fractions])
main_train_val_test_fractions = train_test_split_utils.TrainValTestFractions(
*main_train_val_test_fractions_tuple)
replicates_train_val_test_fractions_tuple = tuple(
[float(elem) for elem in FLAGS.replicates_train_val_test_fractions])
replicates_train_val_test_fractions = (
train_test_split_utils.TrainValTestFractions(
*replicates_train_val_test_fractions_tuple))
mainlib_mol_list = parse_sdf_utils.get_sdf_to_mol(
FLAGS.main_sdf_name, max_atoms=FLAGS.max_atoms)
replicates_mol_list = parse_sdf_utils.get_sdf_to_mol(
FLAGS.replicates_sdf_name, max_atoms=FLAGS.max_atoms)
(mainlib_inchikey_dict, replicates_inchikey_dict, component_inchikey_dict) = (
make_mainlib_replicates_train_test_split(
mainlib_mol_list,
replicates_mol_list,
FLAGS.splitting_type,
main_train_val_test_fractions,
replicates_train_val_test_fractions,
mainlib_maximum_num_molecules_to_use=FLAGS.
mainlib_maximum_num_molecules_to_use,
replicates_maximum_num_molecules_to_use=FLAGS.
replicates_maximum_num_molecules_to_use))
write_mainlib_split_datasets(component_inchikey_dict, mainlib_inchikey_dict,
FLAGS.output_master_dir, FLAGS.max_atoms,
FLAGS.max_mass_spec_peak_loc)
write_replicates_split_datasets(
component_inchikey_dict, replicates_inchikey_dict,
FLAGS.output_master_dir, FLAGS.max_atoms, FLAGS.max_mass_spec_peak_loc)
for experiment_setup in ds_constants.EXPERIMENT_SETUPS_LIST:
check_experiment_setup(experiment_setup.experiment_setup_dataset_dict,
component_inchikey_dict)
write_json_for_experiment(experiment_setup, FLAGS.output_master_dir)
if __name__ == '__main__':
app.run(main)
| true
| true
|
790753d2a20891f5a1c30a760acc33fc3d5e20b7
| 1,151
|
py
|
Python
|
apis/authentication.py
|
n-ryan/spotify-genius
|
2b209ae30d5ec005bc7c616dc4a5297c996f9c0c
|
[
"MIT"
] | null | null | null |
apis/authentication.py
|
n-ryan/spotify-genius
|
2b209ae30d5ec005bc7c616dc4a5297c996f9c0c
|
[
"MIT"
] | null | null | null |
apis/authentication.py
|
n-ryan/spotify-genius
|
2b209ae30d5ec005bc7c616dc4a5297c996f9c0c
|
[
"MIT"
] | null | null | null |
def path_hack():
import os, sys, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
# print('path added:', sys.path[0])
path_hack()
import traceback
import sys
import urllib.request
from urllib.request import urlopen
import json
from apis import utilities
try:
from apis import my_token
API_TUTOR_TOKEN = my_token.API_TUTOR_TOKEN
except:
title = 'IMPORTANT: You Need an Access Token!'
error_message = '\n\n\n' + '*' * len(title) + '\n' + \
title + '\n' + '*' * len(title) + \
'\nPlease download the my_token.py file and save it in your apis directory.\n\n'
raise Exception(error_message)
def get_token(url):
try:
response = urlopen(url + '?auth_manager_token=' + API_TUTOR_TOKEN)
data = response.read()
results = data.decode('utf-8', 'ignore')
return json.loads(results)['token']
except urllib.error.HTTPError as e:
# give a good error message:
error = utilities.get_error_message(e, url)
raise Exception(error)
| 30.289474
| 92
| 0.667246
|
def path_hack():
import os, sys, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
path_hack()
import traceback
import sys
import urllib.request
from urllib.request import urlopen
import json
from apis import utilities
try:
from apis import my_token
API_TUTOR_TOKEN = my_token.API_TUTOR_TOKEN
except:
title = 'IMPORTANT: You Need an Access Token!'
error_message = '\n\n\n' + '*' * len(title) + '\n' + \
title + '\n' + '*' * len(title) + \
'\nPlease download the my_token.py file and save it in your apis directory.\n\n'
raise Exception(error_message)
def get_token(url):
try:
response = urlopen(url + '?auth_manager_token=' + API_TUTOR_TOKEN)
data = response.read()
results = data.decode('utf-8', 'ignore')
return json.loads(results)['token']
except urllib.error.HTTPError as e:
error = utilities.get_error_message(e, url)
raise Exception(error)
| true
| true
|
790753ff87e4b849763a8b5830d1c2c2af71a7d8
| 157
|
py
|
Python
|
src/car.py
|
TestowanieAutomatyczneUG/laboratorium-9-Sienkowski99
|
d5167aaf4fe2af9771c65479b2dc3ea025f0cb97
|
[
"MIT"
] | null | null | null |
src/car.py
|
TestowanieAutomatyczneUG/laboratorium-9-Sienkowski99
|
d5167aaf4fe2af9771c65479b2dc3ea025f0cb97
|
[
"MIT"
] | null | null | null |
src/car.py
|
TestowanieAutomatyczneUG/laboratorium-9-Sienkowski99
|
d5167aaf4fe2af9771c65479b2dc3ea025f0cb97
|
[
"MIT"
] | null | null | null |
class Car:
def needFuel(self):
pass
def getEngineTemperature(self):
pass
def driveTo(self, destination):
pass
| 15.7
| 36
| 0.55414
|
class Car:
def needFuel(self):
pass
def getEngineTemperature(self):
pass
def driveTo(self, destination):
pass
| true
| true
|
790754d59ff89f42e48d824a964cfb37783fd6de
| 565
|
py
|
Python
|
confab/generate.py
|
locationlabs/confab
|
a39c3d7aae11b2f373b8911b4f3caa75548a00c6
|
[
"Apache-2.0"
] | 3
|
2015-01-23T09:39:25.000Z
|
2022-02-25T15:47:26.000Z
|
confab/generate.py
|
locationlabs/confab
|
a39c3d7aae11b2f373b8911b4f3caa75548a00c6
|
[
"Apache-2.0"
] | null | null | null |
confab/generate.py
|
locationlabs/confab
|
a39c3d7aae11b2f373b8911b4f3caa75548a00c6
|
[
"Apache-2.0"
] | 1
|
2021-09-08T09:52:11.000Z
|
2021-09-08T09:52:11.000Z
|
"""
Generate configuration files into :ref:`generated_dir<directories>`.
"""
from fabric.api import task
from gusset.output import status
from gusset.validation import with_validation
from confab.iter import iter_conffiles
@task
@with_validation
def generate(directory=None):
"""
Generate configuration files.
"""
for conffiles in iter_conffiles(directory):
status("Generating templates for '{environment}' and '{role}'",
environment=conffiles.environment,
role=conffiles.role)
conffiles.generate()
| 24.565217
| 71
| 0.707965
|
from fabric.api import task
from gusset.output import status
from gusset.validation import with_validation
from confab.iter import iter_conffiles
@task
@with_validation
def generate(directory=None):
for conffiles in iter_conffiles(directory):
status("Generating templates for '{environment}' and '{role}'",
environment=conffiles.environment,
role=conffiles.role)
conffiles.generate()
| true
| true
|
79075683e9254d743d0fbbc09e7d2e31b9cfca13
| 92,842
|
py
|
Python
|
ibis/expr/operations.py
|
patcao/ibis
|
661bbd20081285f3c29267793f3d070d0c8a0db8
|
[
"Apache-2.0"
] | null | null | null |
ibis/expr/operations.py
|
patcao/ibis
|
661bbd20081285f3c29267793f3d070d0c8a0db8
|
[
"Apache-2.0"
] | null | null | null |
ibis/expr/operations.py
|
patcao/ibis
|
661bbd20081285f3c29267793f3d070d0c8a0db8
|
[
"Apache-2.0"
] | null | null | null |
import collections
import functools
import itertools
import operator
from contextlib import suppress
from typing import Any, Dict, List
import numpy as np
import toolz
from cached_property import cached_property
import ibis.common.exceptions as com
import ibis.expr.datatypes as dt
import ibis.expr.rules as rlz
import ibis.expr.schema as sch
import ibis.expr.types as ir
from ibis import util
from ibis.expr.schema import HasSchema, Schema
from ibis.expr.signature import Annotable
from ibis.expr.signature import Argument as Arg
def _safe_repr(x, memo=None):
return x._repr(memo=memo) if isinstance(x, (ir.Expr, Node)) else repr(x)
# TODO: move to analysis
def distinct_roots(*expressions):
roots = toolz.concat(expr.op().root_tables() for expr in expressions)
return list(toolz.unique(roots))
class Node(Annotable):
__slots__ = '_expr_cached', '_hash'
def __repr__(self):
return self._repr()
def _repr(self, memo=None):
if memo is None:
from ibis.expr.format import FormatMemo
memo = FormatMemo()
opname = type(self).__name__
pprint_args = []
def _pp(x):
return _safe_repr(x, memo=memo)
for x in self.args:
if isinstance(x, (tuple, list)):
pp = repr(list(map(_pp, x)))
else:
pp = _pp(x)
pprint_args.append(pp)
return '{}({})'.format(opname, ', '.join(pprint_args))
def __getstate__(self) -> Dict[str, Any]:
"""The attributes _expr_cached and _hash are
used as caches; they can be excluded from
serialization without affecting correctness.
Excluding _expr_cached and _hash from serialization
will allow the serialized bytes to be the same for
equivalent Node objects.
Returns
-------
Dict[str, Any]
A dictionary storing the objects attributes.
"""
excluded_slots = {'_expr_cached', '_hash'}
return {
slot: getattr(self, slot)
for slot in self.__slots__
if slot not in excluded_slots
}
def __setstate__(self, state: Dict[str, Any]) -> None:
"""
Parameters
----------
state: Dict[str, Any]
A dictionary storing the objects attributes.
"""
for slot in state:
setattr(self, slot, state[slot])
@property
def inputs(self):
return tuple(self.args)
def blocks(self):
# The contents of this node are referentially distinct and may not be
# analyzed deeper
return False
def flat_args(self):
for arg in self.args:
if not isinstance(arg, str) and isinstance(
arg, collections.abc.Iterable
):
for x in arg:
yield x
else:
yield arg
def __hash__(self):
if not hasattr(self, '_hash'):
self._hash = hash(
(type(self),)
+ tuple(
element.op() if isinstance(element, ir.Expr) else element
for element in self.flat_args()
)
)
return self._hash
def __eq__(self, other):
return self.equals(other)
def equals(self, other, cache=None):
if cache is None:
cache = {}
key = self, other
try:
return cache[key]
except KeyError:
cache[key] = result = self is other or (
type(self) == type(other)
and all_equal(self.args, other.args, cache=cache)
)
return result
def compatible_with(self, other):
return self.equals(other)
def is_ancestor(self, other):
if isinstance(other, ir.Expr):
other = other.op()
return self.equals(other)
def to_expr(self):
if not hasattr(self, '_expr_cached'):
self._expr_cached = self._make_expr()
return self._expr_cached
def _make_expr(self):
klass = self.output_type()
return klass(self)
def output_type(self):
"""
This function must resolve the output type of the expression and return
the node wrapped in the appropriate ValueExpr type.
"""
raise NotImplementedError
class ValueOp(Node):
def root_tables(self):
exprs = [arg for arg in self.args if isinstance(arg, ir.Expr)]
return distinct_roots(*exprs)
def resolve_name(self):
raise com.ExpressionError(f'Expression is not named: {type(self)}')
def has_resolved_name(self):
return False
def all_equal(left, right, cache=None):
"""Check whether two objects `left` and `right` are equal.
Parameters
----------
left : Union[object, Expr, Node]
right : Union[object, Expr, Node]
cache : Optional[Dict[Tuple[Node, Node], bool]]
A dictionary indicating whether two Nodes are equal
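Examples
--------
Plain objects and equal-length iterables compare element-wise, for example
``all_equal([1, 2], [1, 2])`` is ``True`` while ``all_equal([1], [1, 2])``
is ``False``.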
"""
if cache is None:
cache = {}
if util.is_iterable(left):
# check that left and right are equal length iterables and that all
# of their elements are equal
return (
util.is_iterable(right)
and len(left) == len(right)
and all(
itertools.starmap(
functools.partial(all_equal, cache=cache), zip(left, right)
)
)
)
if hasattr(left, 'equals'):
return left.equals(right, cache=cache)
return left == right
_table_names = ('unbound_table_{:d}'.format(i) for i in itertools.count())
def genname():
return next(_table_names)
class TableNode(Node):
def get_type(self, name):
return self.schema[name]
def output_type(self):
return ir.TableExpr
def aggregate(self, this, metrics, by=None, having=None):
return Aggregation(this, metrics, by=by, having=having)
def sort_by(self, expr, sort_exprs):
return Selection(expr, [], sort_keys=sort_exprs)
def is_ancestor(self, other):
import ibis.expr.lineage as lin
if isinstance(other, ir.Expr):
other = other.op()
if self.equals(other):
return True
fn = lambda e: (lin.proceed, e.op()) # noqa: E731
expr = self.to_expr()
for child in lin.traverse(fn, expr):
if child.equals(other):
return True
return False
class TableColumn(ValueOp):
"""Selects a column from a TableExpr"""
name = Arg((str, int))
table = Arg(ir.TableExpr)
def __init__(self, name, table):
schema = table.schema()
if isinstance(name, int):
name = schema.name_at_position(name)
super().__init__(name, table)
def _validate(self):
if self.name not in self.table.schema():
raise com.IbisTypeError(
"'{}' is not a field in {}".format(
self.name, self.table.columns
)
)
def parent(self):
return self.table
def resolve_name(self):
return self.name
def has_resolved_name(self):
return True
def root_tables(self):
return self.table.op().root_tables()
def _make_expr(self):
dtype = self.table._get_type(self.name)
klass = dtype.column_type()
return klass(self, name=self.name)
class RowID(ValueOp):
"""The row number (an autonumeric) of the returned result."""
def output_type(self):
return dt.int64.column_type()
def resolve_name(self):
return 'rowid'
def has_resolved_name(self):
return True
def find_all_base_tables(expr, memo=None):
if memo is None:
memo = {}
node = expr.op()
if isinstance(expr, ir.TableExpr) and node.blocks():
        if node not in memo:
memo[node] = expr
return memo
for arg in expr.op().flat_args():
if isinstance(arg, ir.Expr):
find_all_base_tables(arg, memo)
return memo
class PhysicalTable(TableNode, HasSchema):
def blocks(self):
return True
class UnboundTable(PhysicalTable):
schema = Arg(sch.Schema)
name = Arg(str, default=genname)
class DatabaseTable(PhysicalTable):
name = Arg(str)
schema = Arg(sch.Schema)
source = Arg(rlz.client)
def change_name(self, new_name):
return type(self)(new_name, self.args[1], self.source)
class SQLQueryResult(TableNode, HasSchema):
"""A table sourced from the result set of a select query"""
query = Arg(rlz.noop)
schema = Arg(sch.Schema)
source = Arg(rlz.client)
def blocks(self):
return True
class TableArrayView(ValueOp):
"""
(Temporary?) Helper operation class for SQL translation (fully formed table
subqueries to be viewed as arrays)
"""
table = Arg(ir.TableExpr)
name = Arg(str)
def __init__(self, table):
schema = table.schema()
if len(schema) > 1:
raise com.ExpressionError('Table can only have a single column')
name = schema.names[0]
return super().__init__(table, name)
def _make_expr(self):
ctype = self.table._get_type(self.name)
klass = ctype.column_type()
return klass(self, name=self.name)
class UnaryOp(ValueOp):
arg = Arg(rlz.any)
class BinaryOp(ValueOp):
"""A binary operation"""
left = Arg(rlz.any)
right = Arg(rlz.any)
class Cast(ValueOp):
arg = Arg(rlz.any)
to = Arg(dt.dtype)
# see #396 for the issue preventing this
# def resolve_name(self):
# return self.args[0].get_name()
def output_type(self):
return rlz.shape_like(self.arg, dtype=self.to)
class TypeOf(UnaryOp):
output_type = rlz.shape_like('arg', dt.string)
class Negate(UnaryOp):
arg = Arg(rlz.one_of((rlz.numeric(), rlz.interval())))
output_type = rlz.typeof('arg')
class IsNull(UnaryOp):
"""Returns true if values are null
Returns
-------
isnull : boolean with dimension of caller
"""
output_type = rlz.shape_like('arg', dt.boolean)
class NotNull(UnaryOp):
"""Returns true if values are not null
Returns
-------
notnull : boolean with dimension of caller
"""
output_type = rlz.shape_like('arg', dt.boolean)
class ZeroIfNull(UnaryOp):
output_type = rlz.typeof('arg')
class IfNull(ValueOp):
"""Equivalent to (but perhaps implemented differently):
case().when(expr.notnull(), expr)
.else_(null_substitute_expr)
"""
arg = Arg(rlz.any)
ifnull_expr = Arg(rlz.any)
output_type = rlz.shape_like('args')
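# Rough usage sketch for the null-substitution ops above (assumes a
# hypothetical table ``t`` with a nullable int64 column ``x``); this mirrors
# the case() equivalence given in the IfNull docstring.
#
#   t = ibis.table([('x', 'int64')], name='t')
#   filled = t.x.case().when(t.x.notnull(), t.x).else_(0).end()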
class NullIf(ValueOp):
"""Set values to NULL if they equal the null_if_expr"""
arg = Arg(rlz.any)
null_if_expr = Arg(rlz.any)
output_type = rlz.shape_like('args')
class NullIfZero(ValueOp):
"""
    Set values to NULL if they are equal to zero. Commonly used in cases where
divide-by-zero would produce an overflow or infinity.
Equivalent to (value == 0).ifelse(ibis.NA, value)
Returns
-------
maybe_nulled : type of caller
"""
arg = Arg(rlz.numeric)
output_type = rlz.typeof('arg')
class IsNan(ValueOp):
arg = Arg(rlz.floating)
output_type = rlz.shape_like('arg', dt.boolean)
class IsInf(ValueOp):
arg = Arg(rlz.floating)
output_type = rlz.shape_like('arg', dt.boolean)
class CoalesceLike(ValueOp):
# According to Impala documentation:
# Return type: same as the initial argument value, except that integer
# values are promoted to BIGINT and floating-point values are promoted to
# DOUBLE; use CAST() when inserting into a smaller numeric column
arg = Arg(rlz.list_of(rlz.any))
def output_type(self):
first = self.arg[0]
if isinstance(first, (ir.IntegerValue, ir.FloatingValue)):
dtype = first.type().largest
else:
dtype = first.type()
# self.arg is a list of value expressions
return rlz.shape_like(self.arg, dtype)
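# Rough sketch of the promotion rule above (hypothetical table ``t``): when
# the first argument is an integer or floating column, the result uses the
# "largest" variant of that type; otherwise the first argument's type is kept.
#
#   t = ibis.table([('a', 'int16'), ('b', 'int16')], name='t')
#   expr = ibis.coalesce(t.a, t.b, 0)   # int16 arguments promote, roughly to int64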
class Coalesce(CoalesceLike):
pass
class Greatest(CoalesceLike):
pass
class Least(CoalesceLike):
pass
class Abs(UnaryOp):
"""Absolute value"""
output_type = rlz.typeof('arg')
class Ceil(UnaryOp):
"""
Round up to the nearest integer value greater than or equal to this value
Returns
-------
ceiled : type depending on input
Decimal values: yield decimal
      Other numeric values: yield integer (int64)
"""
arg = Arg(rlz.numeric)
def output_type(self):
if isinstance(self.arg.type(), dt.Decimal):
return self.arg._factory
return rlz.shape_like(self.arg, dt.int64)
class Floor(UnaryOp):
"""
Round down to the nearest integer value less than or equal to this value
Returns
-------
floored : type depending on input
Decimal values: yield decimal
      Other numeric values: yield integer (int64)
"""
arg = Arg(rlz.numeric)
def output_type(self):
if isinstance(self.arg.type(), dt.Decimal):
return self.arg._factory
return rlz.shape_like(self.arg, dt.int64)
class Round(ValueOp):
arg = Arg(rlz.numeric)
digits = Arg(rlz.numeric, default=None)
def output_type(self):
if isinstance(self.arg, ir.DecimalValue):
return self.arg._factory
elif self.digits is None:
return rlz.shape_like(self.arg, dt.int64)
else:
return rlz.shape_like(self.arg, dt.double)
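# Rough sketch of Round's type rules above (hypothetical table ``t``):
# decimals keep their decimal type, rounding with no digits yields an
# integer, and rounding to a fixed number of digits yields a double.
#
#   t = ibis.table([('x', 'double')], name='t')
#   t.x.round()      # int64-typed expression
#   t.x.round(2)     # double-typed expression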
class Clip(ValueOp):
arg = Arg(rlz.strict_numeric)
lower = Arg(rlz.strict_numeric, default=None)
upper = Arg(rlz.strict_numeric, default=None)
output_type = rlz.typeof('arg')
class BaseConvert(ValueOp):
arg = Arg(rlz.one_of([rlz.integer, rlz.string]))
from_base = Arg(rlz.integer)
to_base = Arg(rlz.integer)
def output_type(self):
return rlz.shape_like(tuple(self.flat_args()), dt.string)
class MathUnaryOp(UnaryOp):
arg = Arg(rlz.numeric)
def output_type(self):
arg = self.arg
if isinstance(self.arg, ir.DecimalValue):
dtype = arg.type()
else:
dtype = dt.double
return rlz.shape_like(arg, dtype)
class ExpandingTypeMathUnaryOp(MathUnaryOp):
def output_type(self):
if not isinstance(self.arg, ir.DecimalValue):
return super().output_type()
arg = self.arg
return rlz.shape_like(arg, arg.type().largest)
class Exp(ExpandingTypeMathUnaryOp):
pass
class Sign(UnaryOp):
arg = Arg(rlz.numeric)
output_type = rlz.typeof('arg')
class Sqrt(MathUnaryOp):
pass
class Logarithm(MathUnaryOp):
arg = Arg(rlz.strict_numeric)
class Log(Logarithm):
arg = Arg(rlz.strict_numeric)
base = Arg(rlz.strict_numeric, default=None)
class Ln(Logarithm):
"""Natural logarithm"""
class Log2(Logarithm):
"""Logarithm base 2"""
class Log10(Logarithm):
"""Logarithm base 10"""
class Degrees(ExpandingTypeMathUnaryOp):
"""Converts radians to degrees"""
arg = Arg(rlz.numeric)
class Radians(MathUnaryOp):
"""Converts degrees to radians"""
arg = Arg(rlz.numeric)
# TRIGONOMETRIC OPERATIONS
class TrigonometricUnary(MathUnaryOp):
"""Trigonometric base unary"""
arg = Arg(rlz.numeric)
class TrigonometricBinary(BinaryOp):
"""Trigonometric base binary"""
left = Arg(rlz.numeric)
right = Arg(rlz.numeric)
output_type = rlz.shape_like('args', dt.float64)
class Acos(TrigonometricUnary):
"""Returns the arc cosine of x"""
class Asin(TrigonometricUnary):
"""Returns the arc sine of x"""
class Atan(TrigonometricUnary):
"""Returns the arc tangent of x"""
class Atan2(TrigonometricBinary):
"""Returns the arc tangent of x and y"""
class Cos(TrigonometricUnary):
"""Returns the cosine of x"""
class Cot(TrigonometricUnary):
"""Returns the cotangent of x"""
class Sin(TrigonometricUnary):
"""Returns the sine of x"""
class Tan(TrigonometricUnary):
"""Returns the tangent of x"""
class StringUnaryOp(UnaryOp):
arg = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class Uppercase(StringUnaryOp):
"""Convert string to all uppercase"""
class Lowercase(StringUnaryOp):
"""Convert string to all lowercase"""
class Reverse(StringUnaryOp):
"""Reverse string"""
class Strip(StringUnaryOp):
"""Remove whitespace from left and right sides of string"""
class LStrip(StringUnaryOp):
"""Remove whitespace from left side of string"""
class RStrip(StringUnaryOp):
"""Remove whitespace from right side of string"""
class Capitalize(StringUnaryOp):
"""Return a capitalized version of input string"""
class Substring(ValueOp):
arg = Arg(rlz.string)
start = Arg(rlz.integer)
length = Arg(rlz.integer, default=None)
output_type = rlz.shape_like('arg', dt.string)
class StrRight(ValueOp):
arg = Arg(rlz.string)
nchars = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.string)
class Repeat(ValueOp):
arg = Arg(rlz.string)
times = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.string)
class StringFind(ValueOp):
arg = Arg(rlz.string)
substr = Arg(rlz.string)
start = Arg(rlz.integer, default=None)
end = Arg(rlz.integer, default=None)
output_type = rlz.shape_like('arg', dt.int64)
class Translate(ValueOp):
arg = Arg(rlz.string)
from_str = Arg(rlz.string)
to_str = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class LPad(ValueOp):
arg = Arg(rlz.string)
length = Arg(rlz.integer)
pad = Arg(rlz.string, default=None)
output_type = rlz.shape_like('arg', dt.string)
class RPad(ValueOp):
arg = Arg(rlz.string)
length = Arg(rlz.integer)
pad = Arg(rlz.string, default=None)
output_type = rlz.shape_like('arg', dt.string)
class FindInSet(ValueOp):
needle = Arg(rlz.string)
values = Arg(rlz.list_of(rlz.string, min_length=1))
output_type = rlz.shape_like('needle', dt.int64)
class StringJoin(ValueOp):
sep = Arg(rlz.string)
arg = Arg(rlz.list_of(rlz.string, min_length=1))
def output_type(self):
return rlz.shape_like(tuple(self.flat_args()), dt.string)
class StartsWith(ValueOp):
arg = Arg(rlz.string)
start = Arg(rlz.string)
output_type = rlz.shape_like("arg", dt.boolean)
class EndsWith(ValueOp):
arg = Arg(rlz.string)
end = Arg(rlz.string)
output_type = rlz.shape_like("arg", dt.boolean)
class BooleanValueOp:
pass
class FuzzySearch(ValueOp, BooleanValueOp):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.boolean)
class StringSQLLike(FuzzySearch):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
escape = Arg(str, default=None)
class StringSQLILike(StringSQLLike):
"""SQL ilike operation"""
class RegexSearch(FuzzySearch):
pass
class RegexExtract(ValueOp):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
index = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.string)
class RegexReplace(ValueOp):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
replacement = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class StringReplace(ValueOp):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
replacement = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class StringSplit(ValueOp):
arg = Arg(rlz.string)
delimiter = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.Array(dt.string))
class StringConcat(ValueOp):
arg = Arg(rlz.list_of(rlz.string))
output_type = rlz.shape_like('arg', dt.string)
class ParseURL(ValueOp):
arg = Arg(rlz.string)
extract = Arg(
rlz.isin(
{
'PROTOCOL',
'HOST',
'PATH',
'REF',
'AUTHORITY',
'FILE',
'USERINFO',
'QUERY',
}
)
)
key = Arg(rlz.string, default=None)
output_type = rlz.shape_like('arg', dt.string)
class StringLength(UnaryOp):
"""
Compute length of strings
Returns
-------
length : int32
"""
output_type = rlz.shape_like('arg', dt.int32)
class StringAscii(UnaryOp):
output_type = rlz.shape_like('arg', dt.int32)
# ----------------------------------------------------------------------
class Reduction(ValueOp):
_reduction = True
class Count(Reduction):
arg = Arg((ir.ColumnExpr, ir.TableExpr))
where = Arg(rlz.boolean, default=None)
def output_type(self):
return functools.partial(ir.IntegerScalar, dtype=dt.int64)
class Arbitrary(Reduction):
arg = Arg(rlz.column(rlz.any))
how = Arg(rlz.isin({'first', 'last', 'heavy'}), default=None)
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class BitAnd(Reduction):
"""Aggregate bitwise AND operation.
All elements in an integer column are ANDed together. This can be used
to determine which bit flags are set on all elements.
Resources:
* `BigQuery BIT_AND
<https://cloud.google.com/bigquery/docs/reference/standard-sql/aggregate_functions#bit_and>`_
* `MySQL BIT_AND
<https://dev.mysql.com/doc/refman/5.7/en/aggregate-functions.html#function_bit-and>`_
"""
arg = Arg(rlz.column(rlz.integer))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
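# Rough usage sketch for the bitwise-aggregate reductions (hypothetical table
# ``t`` with an integer column ``flags``); the column-method names are
# assumed to follow the op names and may differ by ibis version.
#
#   t = ibis.table([('flags', 'int64')], name='t')
#   t.flags.bit_and()   # flags common to every row
#   t.flags.bit_or()    # flags set on at least one row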
class BitOr(Reduction):
"""Aggregate bitwise OR operation.
All elements in an integer column are ORed together. This can be used
to determine which bit flags are set on any element.
Resources:
* `BigQuery BIT_OR
<https://cloud.google.com/bigquery/docs/reference/standard-sql/aggregate_functions#bit_or>`_
* `MySQL BIT_OR
<https://dev.mysql.com/doc/refman/5.7/en/aggregate-functions.html#function_bit-or>`_
"""
arg = Arg(rlz.column(rlz.integer))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class BitXor(Reduction):
"""Aggregate bitwise XOR operation.
All elements in an integer column are XORed together. This can be used
as a parity checksum of element values.
Resources:
* `BigQuery BIT_XOR
<https://cloud.google.com/bigquery/docs/reference/standard-sql/aggregate_functions#bit_xor>`_
* `MySQL BIT_XOR
<https://dev.mysql.com/doc/refman/5.7/en/aggregate-functions.html#function_bit-xor>`_
"""
arg = Arg(rlz.column(rlz.integer))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class Sum(Reduction):
arg = Arg(rlz.column(rlz.numeric))
where = Arg(rlz.boolean, default=None)
def output_type(self):
if isinstance(self.arg, ir.BooleanValue):
dtype = dt.int64
else:
dtype = self.arg.type().largest
return dtype.scalar_type()
class Mean(Reduction):
arg = Arg(rlz.column(rlz.numeric))
where = Arg(rlz.boolean, default=None)
def output_type(self):
if isinstance(self.arg, ir.DecimalValue):
dtype = self.arg.type()
else:
dtype = dt.float64
return dtype.scalar_type()
class Quantile(Reduction):
arg = Arg(rlz.any)
quantile = Arg(rlz.strict_numeric)
interpolation = Arg(
rlz.isin({'linear', 'lower', 'higher', 'midpoint', 'nearest'}),
default='linear',
)
def output_type(self):
return dt.float64.scalar_type()
class MultiQuantile(Quantile):
arg = Arg(rlz.any)
quantile = Arg(rlz.value(dt.Array(dt.float64)))
interpolation = Arg(
rlz.isin({'linear', 'lower', 'higher', 'midpoint', 'nearest'}),
default='linear',
)
def output_type(self):
return dt.Array(dt.float64).scalar_type()
class VarianceBase(Reduction):
arg = Arg(rlz.column(rlz.numeric))
how = Arg(rlz.isin({'sample', 'pop'}), default=None)
where = Arg(rlz.boolean, default=None)
def output_type(self):
if isinstance(self.arg, ir.DecimalValue):
dtype = self.arg.type().largest
else:
dtype = dt.float64
return dtype.scalar_type()
class StandardDev(VarianceBase):
pass
class Variance(VarianceBase):
pass
class Correlation(Reduction):
"""Coefficient of correlation of a set of number pairs."""
left = Arg(rlz.column(rlz.numeric))
right = Arg(rlz.column(rlz.numeric))
how = Arg(rlz.isin({'sample', 'pop'}), default=None)
where = Arg(rlz.boolean, default=None)
def output_type(self):
return dt.float64.scalar_type()
class Covariance(Reduction):
"""Covariance of a set of number pairs."""
left = Arg(rlz.column(rlz.numeric))
right = Arg(rlz.column(rlz.numeric))
how = Arg(rlz.isin({'sample', 'pop'}), default=None)
where = Arg(rlz.boolean, default=None)
def output_type(self):
return dt.float64.scalar_type()
class Max(Reduction):
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class Min(Reduction):
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class HLLCardinality(Reduction):
"""Approximate number of unique values using HyperLogLog algorithm.
Impala offers the NDV built-in function for this.
"""
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
def output_type(self):
# Impala 2.0 and higher returns a DOUBLE
# return ir.DoubleScalar
return functools.partial(ir.IntegerScalar, dtype=dt.int64)
class GroupConcat(Reduction):
arg = Arg(rlz.column(rlz.any))
sep = Arg(rlz.string, default=',')
where = Arg(rlz.boolean, default=None)
def output_type(self):
return dt.string.scalar_type()
class CMSMedian(Reduction):
"""
Compute the approximate median of a set of comparable values using the
Count-Min-Sketch algorithm. Exposed in Impala using APPX_MEDIAN.
"""
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
# ----------------------------------------------------------------------
# Analytic functions
class AnalyticOp(ValueOp):
pass
class WindowOp(ValueOp):
expr = Arg(rlz.noop)
window = Arg(rlz.noop)
output_type = rlz.array_like('expr')
display_argnames = False
def __init__(self, expr, window):
from ibis.expr.analysis import is_analytic
from ibis.expr.window import propagate_down_window
if not is_analytic(expr):
raise com.IbisInputError(
'Expression does not contain a valid window operation'
)
table = ir.find_base_table(expr)
if table is not None:
window = window.bind(table)
if window.max_lookback is not None:
error_msg = (
"'max lookback' windows must be ordered "
"by a timestamp column"
)
if len(window._order_by) != 1:
raise com.IbisInputError(error_msg)
order_var = window._order_by[0].op().args[0]
if not isinstance(order_var.type(), dt.Timestamp):
raise com.IbisInputError(error_msg)
expr = propagate_down_window(expr, window)
super().__init__(expr, window)
def over(self, window):
new_window = self.window.combine(window)
return WindowOp(self.expr, new_window)
@property
def inputs(self):
return self.expr.op().inputs[0], self.window
def root_tables(self):
return distinct_roots(
self.expr, *self.window._order_by, *self.window._group_by
)
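# Rough sketch of how a WindowOp is typically reached (hypothetical table
# ``t``): an analytic or reduction expression combined with a window via
# ``.over``, matching the RowNumber docstring example further below.
#
#   t = ibis.table([('g', 'string'), ('x', 'double')], name='t')
#   w = ibis.window(group_by=t.g, order_by=t.x)
#   expr = t.x.sum().over(w)   # wrapped in a WindowOp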
class ShiftBase(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
offset = Arg(rlz.one_of((rlz.integer, rlz.interval)), default=None)
default = Arg(rlz.any, default=None)
output_type = rlz.typeof('arg')
class Lag(ShiftBase):
pass
class Lead(ShiftBase):
pass
class RankBase(AnalyticOp):
def output_type(self):
return dt.int64.column_type()
class MinRank(RankBase):
"""
Compute position of first element within each equal-value group in sorted
order.
Examples
--------
values ranks
1 0
1 0
2 2
2 2
2 2
3 5
Returns
-------
ranks : Int64Column, starting from 0
"""
# Equivalent to SQL RANK()
arg = Arg(rlz.column(rlz.any))
class DenseRank(RankBase):
"""
Compute position of first element within each equal-value group in sorted
order, ignoring duplicate values.
Examples
--------
values ranks
1 0
1 0
2 1
2 1
2 1
3 2
Returns
-------
ranks : Int64Column, starting from 0
"""
# Equivalent to SQL DENSE_RANK()
arg = Arg(rlz.column(rlz.any))
class RowNumber(RankBase):
"""
Compute row number starting from 0 after sorting by column expression
Examples
--------
>>> import ibis
>>> t = ibis.table([('values', dt.int64)])
>>> w = ibis.window(order_by=t.values)
>>> row_num = ibis.row_number().over(w)
>>> result = t[t.values, row_num.name('row_num')]
Returns
-------
row_number : Int64Column, starting from 0
"""
# Equivalent to SQL ROW_NUMBER()
class CumulativeOp(AnalyticOp):
pass
class CumulativeSum(CumulativeOp):
"""Cumulative sum. Requires an order window."""
arg = Arg(rlz.column(rlz.numeric))
def output_type(self):
if isinstance(self.arg, ir.BooleanValue):
dtype = dt.int64
else:
dtype = self.arg.type().largest
return dtype.column_type()
class CumulativeMean(CumulativeOp):
"""Cumulative mean. Requires an order window."""
arg = Arg(rlz.column(rlz.numeric))
def output_type(self):
if isinstance(self.arg, ir.DecimalValue):
dtype = self.arg.type().largest
else:
dtype = dt.float64
return dtype.column_type()
class CumulativeMax(CumulativeOp):
"""Cumulative max. Requires an order window."""
arg = Arg(rlz.column(rlz.any))
output_type = rlz.array_like('arg')
class CumulativeMin(CumulativeOp):
"""Cumulative min. Requires an order window."""
arg = Arg(rlz.column(rlz.any))
output_type = rlz.array_like('arg')
class PercentRank(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
output_type = rlz.shape_like('arg', dt.double)
class NTile(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
buckets = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.int64)
class FirstValue(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
output_type = rlz.typeof('arg')
class LastValue(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
output_type = rlz.typeof('arg')
class NthValue(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
nth = Arg(rlz.integer)
output_type = rlz.typeof('arg')
# ----------------------------------------------------------------------
# Distinct stuff
class Distinct(TableNode, HasSchema):
"""
    Distinct is a table-level deduplication operation.
In SQL, you might have:
SELECT DISTINCT foo
FROM table
SELECT DISTINCT foo, bar
FROM table
"""
table = Arg(ir.TableExpr)
def _validate(self):
# check whether schema has overlapping columns or not
assert self.schema
@cached_property
def schema(self):
return self.table.schema()
def blocks(self):
return True
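# Rough usage sketch (hypothetical table ``t``): the table-level Distinct op
# above backs ``t.distinct()``, while the column-level DistinctColumn below
# supports the ``distinct().count()`` pattern mentioned in its docstring.
#
#   t = ibis.table([('foo', 'string'), ('bar', 'int64')], name='t')
#   t.distinct()              # SELECT DISTINCT foo, bar FROM t
#   t.foo.distinct().count()  # COUNT(DISTINCT foo)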
class DistinctColumn(ValueOp):
"""
    COUNT(DISTINCT ...) is really just syntactic sugar, but we provide a
distinct().count() nicety for users nonetheless.
For all intents and purposes, like Distinct, but can be distinguished later
for evaluation if the result should be array-like versus table-like. Also
for calling count()
"""
arg = Arg(rlz.noop)
output_type = rlz.typeof('arg')
def count(self):
"""Only valid if the distinct contains a single column"""
return CountDistinct(self.arg)
class CountDistinct(Reduction):
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
def output_type(self):
return dt.int64.scalar_type()
# ---------------------------------------------------------------------
# Boolean reductions and semi/anti join support
class Any(ValueOp):
# Depending on the kind of input boolean array, the result might either be
# array-like (an existence-type predicate) or scalar (a reduction)
arg = Arg(rlz.column(rlz.boolean))
@property
def _reduction(self):
roots = self.arg.op().root_tables()
return len(roots) < 2
def output_type(self):
if self._reduction:
return dt.boolean.scalar_type()
else:
return dt.boolean.column_type()
def negate(self):
return NotAny(self.arg)
class All(ValueOp):
arg = Arg(rlz.column(rlz.boolean))
output_type = rlz.scalar_like('arg')
_reduction = True
def negate(self):
return NotAll(self.arg)
class NotAny(Any):
def negate(self):
return Any(self.arg)
class NotAll(All):
def negate(self):
return All(self.arg)
class CumulativeAny(CumulativeOp):
arg = Arg(rlz.column(rlz.boolean))
output_type = rlz.typeof('arg')
class CumulativeAll(CumulativeOp):
arg = Arg(rlz.column(rlz.boolean))
output_type = rlz.typeof('arg')
# ---------------------------------------------------------------------
class TypedCaseBuilder:
__slots__ = ()
def type(self):
types = [result.type() for result in self.results]
return dt.highest_precedence(types)
def else_(self, result_expr):
"""
        Specify the default result when no case predicate evaluates to true.
Returns
-------
builder : CaseBuilder
"""
kwargs = {
slot: getattr(self, slot)
for slot in self.__slots__
if slot != 'default'
}
result_expr = ir.as_value_expr(result_expr)
kwargs['default'] = result_expr
# Maintain immutability
return type(self)(**kwargs)
def end(self):
default = self.default
if default is None:
default = ir.null().cast(self.type())
args = [
getattr(self, slot) for slot in self.__slots__ if slot != 'default'
]
args.append(default)
op = self.__class__.case_op(*args)
return op.to_expr()
class SimpleCase(ValueOp):
base = Arg(rlz.any)
cases = Arg(rlz.list_of(rlz.any))
results = Arg(rlz.list_of(rlz.any))
default = Arg(rlz.any)
def _validate(self):
assert len(self.cases) == len(self.results)
def root_tables(self):
return distinct_roots(
*itertools.chain(
[self.base],
self.cases,
self.results,
[] if self.default is None else [self.default],
)
)
def output_type(self):
exprs = self.results + [self.default]
return rlz.shape_like(self.base, dtype=exprs.type())
class SimpleCaseBuilder(TypedCaseBuilder):
__slots__ = 'base', 'cases', 'results', 'default'
case_op = SimpleCase
def __init__(self, base, cases=None, results=None, default=None):
self.base = base
self.cases = list(cases if cases is not None else [])
self.results = list(results if results is not None else [])
self.default = default
def when(self, case_expr, result_expr):
"""
Add a new case-result pair.
Parameters
----------
case : Expr
Expression to equality-compare with base expression. Must be
comparable with the base.
result : Expr
Value when the case predicate evaluates to true.
Returns
-------
builder : CaseBuilder
"""
case_expr = ir.as_value_expr(case_expr)
result_expr = ir.as_value_expr(result_expr)
if not rlz.comparable(self.base, case_expr):
raise TypeError(
                'Base expression and passed case are not comparable'
)
cases = list(self.cases)
cases.append(case_expr)
results = list(self.results)
results.append(result_expr)
# Maintain immutability
return type(self)(self.base, cases, results, self.default)
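# Rough usage sketch of the simple-case builder above (hypothetical table
# ``t``); each ``when`` adds a case/result pair and ``end`` (from
# TypedCaseBuilder) materializes the SimpleCase op.
#
#   t = ibis.table([('size', 'string')], name='t')
#   expr = (
#       t.size.case()
#       .when('S', 1)
#       .when('M', 2)
#       .else_(0)
#       .end()
#   )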
class SearchedCase(ValueOp):
cases = Arg(rlz.list_of(rlz.boolean))
results = Arg(rlz.list_of(rlz.any))
default = Arg(rlz.any)
def _validate(self):
assert len(self.cases) == len(self.results)
def root_tables(self):
cases, results, default = self.args
return distinct_roots(
*itertools.chain(
cases.values,
results.values,
[] if default is None else [default],
)
)
def output_type(self):
exprs = self.results + [self.default]
dtype = rlz.highest_precedence_dtype(exprs)
return rlz.shape_like(self.cases, dtype)
class SearchedCaseBuilder(TypedCaseBuilder):
__slots__ = 'cases', 'results', 'default'
case_op = SearchedCase
def __init__(self, cases=None, results=None, default=None):
self.cases = list(cases if cases is not None else [])
self.results = list(results if results is not None else [])
self.default = default
def when(self, case_expr, result_expr):
"""
Add a new case-result pair.
Parameters
----------
case : Expr
Expression to equality-compare with base expression. Must be
comparable with the base.
result : Expr
Value when the case predicate evaluates to true.
Returns
-------
builder : CaseBuilder
"""
case_expr = ir.as_value_expr(case_expr)
result_expr = ir.as_value_expr(result_expr)
if not isinstance(case_expr, ir.BooleanValue):
raise TypeError(case_expr)
cases = list(self.cases)
cases.append(case_expr)
results = list(self.results)
results.append(result_expr)
# Maintain immutability
return type(self)(cases, results, self.default)
class Where(ValueOp):
"""
Ternary case expression, equivalent to
bool_expr.case()
.when(True, true_expr)
.else_(false_or_null_expr)
"""
bool_expr = Arg(rlz.boolean)
true_expr = Arg(rlz.any)
false_null_expr = Arg(rlz.any)
def output_type(self):
return rlz.shape_like(self.bool_expr, self.true_expr.type())
def _validate_join_tables(left, right):
if not isinstance(left, ir.TableExpr):
raise TypeError(
'Can only join table expressions, got {} for '
'left table'.format(type(left).__name__)
)
if not isinstance(right, ir.TableExpr):
raise TypeError(
'Can only join table expressions, got {} for '
'right table'.format(type(right).__name__)
)
def _make_distinct_join_predicates(left, right, predicates):
# see GH #667
# If left and right table have a common parent expression (e.g. they
# have different filters), must add a self-reference and make the
# appropriate substitution in the join predicates
if left.equals(right):
right = right.view()
predicates = _clean_join_predicates(left, right, predicates)
return left, right, predicates
def _clean_join_predicates(left, right, predicates):
import ibis.expr.analysis as L
result = []
if not isinstance(predicates, (list, tuple)):
predicates = [predicates]
for pred in predicates:
if isinstance(pred, tuple):
if len(pred) != 2:
                raise com.ExpressionError('Join key tuple must be length 2')
lk, rk = pred
lk = left._ensure_expr(lk)
rk = right._ensure_expr(rk)
pred = lk == rk
elif isinstance(pred, str):
pred = left[pred] == right[pred]
elif not isinstance(pred, ir.Expr):
raise NotImplementedError
if not isinstance(pred, ir.BooleanColumn):
raise com.ExpressionError('Join predicate must be comparison')
preds = L.flatten_predicate(pred)
result.extend(preds)
_validate_join_predicates(left, right, result)
return result
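# Rough sketch of the predicate forms accepted above (hypothetical tables
# ``a`` and ``b``): a shared column name, a (left, right) name pair, or an
# explicit boolean expression all normalize to boolean column predicates.
#
#   a = ibis.table([('key', 'int64'), ('v', 'double')], name='a')
#   b = ibis.table([('key', 'int64'), ('w', 'double')], name='b')
#   a.join(b, 'key')                 # string -> a.key == b.key
#   a.join(b, [('key', 'key')])      # tuple of (left, right) keys
#   a.join(b, a.key == b.key)        # explicit boolean predicate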
def _validate_join_predicates(left, right, predicates):
from ibis.expr.analysis import fully_originate_from
# Validate join predicates. Each predicate must be valid jointly when
# considering the roots of each input table
for predicate in predicates:
if not fully_originate_from(predicate, [left, right]):
raise com.RelationError(
'The expression {!r} does not fully '
'originate from dependencies of the table '
'expression.'.format(predicate)
)
class Join(TableNode):
left = Arg(rlz.noop)
right = Arg(rlz.noop)
predicates = Arg(rlz.noop)
def __init__(self, left, right, predicates):
_validate_join_tables(left, right)
left, right, predicates = _make_distinct_join_predicates(
left, right, predicates
)
super().__init__(left, right, predicates)
def _get_schema(self):
# For joins retaining both table schemas, merge them together here
left = self.left
right = self.right
if not left._is_materialized():
left = left.materialize()
if not right._is_materialized():
right = right.materialize()
sleft = left.schema()
sright = right.schema()
overlap = set(sleft.names) & set(sright.names)
if overlap:
raise com.RelationError(
'Joined tables have overlapping names: %s' % str(list(overlap))
)
return sleft.append(sright)
def has_schema(self):
return False
def root_tables(self):
if util.all_of([self.left.op(), self.right.op()], (Join, Selection)):
# Unraveling is not possible
return [self.left.op(), self.right.op()]
else:
return distinct_roots(self.left, self.right)
class InnerJoin(Join):
pass
class LeftJoin(Join):
pass
class RightJoin(Join):
pass
class OuterJoin(Join):
pass
class AnyInnerJoin(Join):
pass
class AnyLeftJoin(Join):
pass
class LeftSemiJoin(Join):
def _get_schema(self):
return self.left.schema()
class LeftAntiJoin(Join):
def _get_schema(self):
return self.left.schema()
class MaterializedJoin(TableNode, HasSchema):
join = Arg(ir.TableExpr)
def _validate(self):
assert isinstance(self.join.op(), Join)
# check whether the underlying schema has overlapping columns or not
assert self.schema
@cached_property
def schema(self):
return self.join.op()._get_schema()
def root_tables(self):
return self.join.op().root_tables()
def blocks(self):
return True
class CrossJoin(InnerJoin):
"""
    Some databases have a CROSS JOIN operator that may be preferable to an
    INNER JOIN with no predicates.
"""
def __init__(self, *args, **kwargs):
if 'prefixes' in kwargs:
raise NotImplementedError
if len(args) < 2:
raise com.IbisInputError('Must pass at least 2 tables')
left = args[0]
right = args[1]
for t in args[2:]:
right = right.cross_join(t)
InnerJoin.__init__(self, left, right, [])
class AsOfJoin(Join):
left = Arg(rlz.noop)
right = Arg(rlz.noop)
predicates = Arg(rlz.noop)
by = Arg(rlz.noop, default=None)
tolerance = Arg(rlz.interval(), default=None)
def __init__(self, left, right, predicates, by, tolerance):
super().__init__(left, right, predicates)
self.by = _clean_join_predicates(self.left, self.right, by)
self.tolerance = tolerance
self._validate_args(['by', 'tolerance'])
def _validate_args(self, args: List[str]):
for arg in args:
argument = self.signature[arg]
value = argument.validate(getattr(self, arg))
setattr(self, arg, value)
class SetOp(TableNode, HasSchema):
left = Arg(rlz.noop)
right = Arg(rlz.noop)
def _validate(self):
if not self.left.schema().equals(self.right.schema()):
raise com.RelationError(
'Table schemas must be equal for set operations'
)
@cached_property
def schema(self):
return self.left.schema()
def blocks(self):
return True
class Union(SetOp):
distinct = Arg(rlz.validator(bool), default=False)
class Intersection(SetOp):
pass
class Difference(SetOp):
pass
class Limit(TableNode):
table = Arg(ir.TableExpr)
n = Arg(rlz.validator(int))
offset = Arg(rlz.validator(int))
def blocks(self):
return True
@property
def schema(self):
return self.table.schema()
def has_schema(self):
return self.table.op().has_schema()
def root_tables(self):
return [self]
# --------------------------------------------------------------------
# Sorting
def to_sort_key(table, key):
if isinstance(key, DeferredSortKey):
key = key.resolve(table)
if isinstance(key, ir.SortExpr):
return key
if isinstance(key, (tuple, list)):
key, sort_order = key
else:
sort_order = True
if not isinstance(key, ir.Expr):
key = table._ensure_expr(key)
if isinstance(key, (ir.SortExpr, DeferredSortKey)):
return to_sort_key(table, key)
if isinstance(sort_order, str):
if sort_order.lower() in ('desc', 'descending'):
sort_order = False
elif not isinstance(sort_order, bool):
sort_order = bool(sort_order)
return SortKey(key, ascending=sort_order).to_expr()
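# Rough sketch of the key forms handled by ``to_sort_key`` above
# (hypothetical table ``t``): column names, (key, ascending) pairs, and
# expressions are all normalized to SortKey expressions.
#
#   t = ibis.table([('a', 'int64')], name='t')
#   t.sort_by('a')             # ascending by column name
#   t.sort_by(('a', False))    # descending via (key, ascending) pair
#   t.sort_by(t.a)             # ascending by expression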
class SortKey(Node):
expr = Arg(rlz.column(rlz.any))
ascending = Arg(rlz.validator(bool), default=True)
def __repr__(self):
# Temporary
rows = [
'Sort key:',
' ascending: {0!s}'.format(self.ascending),
util.indent(_safe_repr(self.expr), 2),
]
return '\n'.join(rows)
def output_type(self):
return ir.SortExpr
def root_tables(self):
return self.expr.op().root_tables()
def equals(self, other, cache=None):
# TODO: might generalize this equals based on fields
# requires a proxy class with equals for non expr values
return (
isinstance(other, SortKey)
and self.expr.equals(other.expr, cache=cache)
and self.ascending == other.ascending
)
def resolve_name(self):
return self.expr.get_name()
class DeferredSortKey:
def __init__(self, what, ascending=True):
self.what = what
self.ascending = ascending
def resolve(self, parent):
what = parent._ensure_expr(self.what)
return SortKey(what, ascending=self.ascending).to_expr()
class SelfReference(TableNode, HasSchema):
table = Arg(ir.TableExpr)
@cached_property
def schema(self):
return self.table.schema()
def root_tables(self):
# The dependencies of this operation are not walked, which makes the
# table expression holding this relationally distinct from other
# expressions, so things like self-joins are possible
return [self]
def blocks(self):
return True
class Selection(TableNode, HasSchema):
table = Arg(ir.TableExpr)
selections = Arg(rlz.noop, default=None)
predicates = Arg(rlz.noop, default=None)
sort_keys = Arg(rlz.noop, default=None)
def __init__(
self, table, selections=None, predicates=None, sort_keys=None
):
import ibis.expr.analysis as L
# Argument cleaning
selections = util.promote_list(
selections if selections is not None else []
)
projections = []
for selection in selections:
if isinstance(selection, str):
projection = table[selection]
else:
projection = selection
projections.append(projection)
sort_keys = [
to_sort_key(table, k)
for k in util.promote_list(
sort_keys if sort_keys is not None else []
)
]
predicates = list(
toolz.concat(
map(
L.flatten_predicate,
predicates if predicates is not None else [],
)
)
)
super().__init__(
table=table,
selections=projections,
predicates=predicates,
sort_keys=sort_keys,
)
def _validate(self):
from ibis.expr.analysis import FilterValidator
# Need to validate that the column expressions are compatible with the
# input table; this means they must either be scalar expressions or
# array expressions originating from the same root table expression
dependent_exprs = self.selections + self.sort_keys
self.table._assert_valid(dependent_exprs)
# Validate predicates
validator = FilterValidator([self.table])
validator.validate_all(self.predicates)
# Validate no overlapping columns in schema
assert self.schema
@cached_property
def schema(self):
# Resolve schema and initialize
if not self.selections:
return self.table.schema()
types = []
names = []
for projection in self.selections:
if isinstance(projection, ir.DestructColumn):
# If this is a destruct, then we destructure
# the result and assign to multiple columns
struct_type = projection.type()
for name in struct_type.names:
names.append(name)
types.append(struct_type[name])
elif isinstance(projection, ir.ValueExpr):
names.append(projection.get_name())
types.append(projection.type())
elif isinstance(projection, ir.TableExpr):
schema = projection.schema()
names.extend(schema.names)
types.extend(schema.types)
return Schema(names, types)
def blocks(self):
return bool(self.selections)
def substitute_table(self, table_expr):
return Selection(table_expr, self.selections)
def root_tables(self):
return [self]
def can_add_filters(self, wrapped_expr, predicates):
pass
@staticmethod
def empty_or_equal(lefts, rights):
return not lefts or not rights or all_equal(lefts, rights)
def compatible_with(self, other):
        # self and other are equivalent except for predicates, selections, or
        # sort keys, any of which is allowed to be empty. If both are
        # non-empty then they must be equal
if self.equals(other):
return True
if not isinstance(other, type(self)):
return False
return self.table.equals(other.table) and (
self.empty_or_equal(self.predicates, other.predicates)
and self.empty_or_equal(self.selections, other.selections)
and self.empty_or_equal(self.sort_keys, other.sort_keys)
)
# Operator combination / fusion logic
def aggregate(self, this, metrics, by=None, having=None):
if len(self.selections) > 0:
return Aggregation(this, metrics, by=by, having=having)
else:
helper = AggregateSelection(this, metrics, by, having)
return helper.get_result()
def sort_by(self, expr, sort_exprs):
sort_exprs = util.promote_list(sort_exprs)
if not self.blocks():
resolved_keys = _maybe_convert_sort_keys(self.table, sort_exprs)
if resolved_keys and self.table._is_valid(resolved_keys):
return Selection(
self.table,
self.selections,
predicates=self.predicates,
sort_keys=self.sort_keys + resolved_keys,
)
return Selection(expr, [], sort_keys=sort_exprs)
class AggregateSelection:
# sort keys cannot be discarded because of order-dependent
# aggregate functions like GROUP_CONCAT
def __init__(self, parent, metrics, by, having):
self.parent = parent
self.op = parent.op()
self.metrics = metrics
self.by = by
self.having = having
def get_result(self):
if self.op.blocks():
return self._plain_subquery()
else:
return self._attempt_pushdown()
def _plain_subquery(self):
return Aggregation(
self.parent, self.metrics, by=self.by, having=self.having
)
def _attempt_pushdown(self):
metrics_valid, lowered_metrics = self._pushdown_exprs(self.metrics)
by_valid, lowered_by = self._pushdown_exprs(self.by)
having_valid, lowered_having = self._pushdown_exprs(
self.having or None
)
if metrics_valid and by_valid and having_valid:
return Aggregation(
self.op.table,
lowered_metrics,
by=lowered_by,
having=lowered_having,
predicates=self.op.predicates,
sort_keys=self.op.sort_keys,
)
else:
return self._plain_subquery()
def _pushdown_exprs(self, exprs):
import ibis.expr.analysis as L
if exprs is None:
return True, []
resolved = self.op.table._resolve(exprs)
subbed_exprs = []
valid = False
if resolved:
for x in util.promote_list(resolved):
subbed = L.sub_for(x, [(self.parent, self.op.table)])
subbed_exprs.append(subbed)
valid = self.op.table._is_valid(subbed_exprs)
else:
valid = False
return valid, subbed_exprs
def _maybe_convert_sort_keys(table, exprs):
try:
return [to_sort_key(table, k) for k in util.promote_list(exprs)]
except com.IbisError:
return None
class Aggregation(TableNode, HasSchema):
"""
metrics : per-group scalar aggregates
by : group expressions
having : post-aggregation predicate
    where : pre-aggregation predicate
        (TODO: not yet part of the aggregate operation)
"""
table = Arg(ir.TableExpr)
metrics = Arg(rlz.noop)
by = Arg(rlz.noop)
having = Arg(rlz.noop, default=None)
predicates = Arg(rlz.noop, default=None)
sort_keys = Arg(rlz.noop, default=None)
def __init__(
self,
table,
metrics,
by=None,
having=None,
predicates=None,
sort_keys=None,
):
# For tables, like joins, that are not materialized
metrics = self._rewrite_exprs(table, metrics)
by = [] if by is None else by
by = table._resolve(by)
having = [] if having is None else having
predicates = [] if predicates is None else predicates
# order by only makes sense with group by in an aggregation
sort_keys = [] if not by or sort_keys is None else sort_keys
sort_keys = [
to_sort_key(table, k) for k in util.promote_list(sort_keys)
]
by = self._rewrite_exprs(table, by)
having = self._rewrite_exprs(table, having)
predicates = self._rewrite_exprs(table, predicates)
sort_keys = self._rewrite_exprs(table, sort_keys)
super().__init__(
table=table,
metrics=metrics,
by=by,
having=having,
predicates=predicates,
sort_keys=sort_keys,
)
def _validate(self):
from ibis.expr.analysis import FilterValidator, is_reduction
# All aggregates are valid
for expr in self.metrics:
if not isinstance(expr, ir.ScalarExpr) or not is_reduction(expr):
raise TypeError(
'Passed a non-aggregate expression: %s' % _safe_repr(expr)
)
for expr in self.having:
if not isinstance(expr, ir.BooleanScalar):
raise com.ExpressionError(
'Having clause must be boolean '
'expression, was: {0!s}'.format(_safe_repr(expr))
)
# All non-scalar refs originate from the input table
all_exprs = self.metrics + self.by + self.having + self.sort_keys
self.table._assert_valid(all_exprs)
# Validate predicates
validator = FilterValidator([self.table])
validator.validate_all(self.predicates)
# Validate schema has no overlapping columns
assert self.schema
def _rewrite_exprs(self, table, what):
what = util.promote_list(what)
all_exprs = []
for expr in what:
if isinstance(expr, ir.ExprList):
all_exprs.extend(expr.exprs())
else:
bound_expr = ir.bind_expr(table, expr)
all_exprs.append(bound_expr)
return all_exprs
# TODO - #2832
# this optimization becomes O(n^2) when it calls into
# _lift_TableColumn in analysis.py, which itself is O(n) and is
# called on each input to the aggregation - thus creating the
# aggregation expression can be extremely slow on wide tables
# that contain a Selection.
# return [
# substitute_parents(x, past_projection=False) for x in all_exprs
# ]
def blocks(self):
return True
def substitute_table(self, table_expr):
return Aggregation(
table_expr, self.metrics, by=self.by, having=self.having
)
@cached_property
def schema(self):
names = []
types = []
for e in self.by + self.metrics:
if isinstance(e, ir.DestructValue):
# If this is a destruct, then we destructure
# the result and assign to multiple columns
struct_type = e.type()
for name in struct_type.names:
names.append(name)
types.append(struct_type[name])
else:
names.append(e.get_name())
types.append(e.type())
return Schema(names, types)
def sort_by(self, expr, sort_exprs):
sort_exprs = util.promote_list(sort_exprs)
resolved_keys = _maybe_convert_sort_keys(self.table, sort_exprs)
if resolved_keys and self.table._is_valid(resolved_keys):
return Aggregation(
self.table,
self.metrics,
by=self.by,
having=self.having,
predicates=self.predicates,
sort_keys=self.sort_keys + resolved_keys,
)
return Selection(expr, [], sort_keys=sort_exprs)
class NumericBinaryOp(BinaryOp):
left = Arg(rlz.numeric)
right = Arg(rlz.numeric)
class Add(NumericBinaryOp):
output_type = rlz.numeric_like('args', operator.add)
class Multiply(NumericBinaryOp):
output_type = rlz.numeric_like('args', operator.mul)
class Power(NumericBinaryOp):
def output_type(self):
if util.all_of(self.args, ir.IntegerValue):
return rlz.shape_like(self.args, dt.float64)
else:
return rlz.shape_like(self.args)
class Subtract(NumericBinaryOp):
output_type = rlz.numeric_like('args', operator.sub)
class Divide(NumericBinaryOp):
output_type = rlz.shape_like('args', dt.float64)
class FloorDivide(Divide):
output_type = rlz.shape_like('args', dt.int64)
class LogicalBinaryOp(BinaryOp):
left = Arg(rlz.boolean)
right = Arg(rlz.boolean)
output_type = rlz.shape_like('args', dt.boolean)
class Not(UnaryOp):
arg = Arg(rlz.boolean)
output_type = rlz.shape_like('arg', dt.boolean)
class Modulus(NumericBinaryOp):
output_type = rlz.numeric_like('args', operator.mod)
class And(LogicalBinaryOp):
pass
class Or(LogicalBinaryOp):
pass
class Xor(LogicalBinaryOp):
pass
class Comparison(BinaryOp, BooleanValueOp):
left = Arg(rlz.any)
right = Arg(rlz.any)
def __init__(self, left, right):
"""
Casting rules for type promotions (for resolving the output type) may
depend in some cases on the target backend.
TODO: how will overflows be handled? Can we provide anything useful in
Ibis to help the user avoid them?
:param left:
:param right:
"""
super().__init__(*self._maybe_cast_args(left, right))
def _maybe_cast_args(self, left, right):
# it might not be necessary?
with suppress(com.IbisTypeError):
return left, rlz.cast(right, left)
with suppress(com.IbisTypeError):
return rlz.cast(left, right), right
return left, right
def output_type(self):
if not rlz.comparable(self.left, self.right):
raise TypeError(
'Arguments with datatype {} and {} are '
'not comparable'.format(self.left.type(), self.right.type())
)
return rlz.shape_like(self.args, dt.boolean)
class Equals(Comparison):
pass
class NotEquals(Comparison):
pass
class GreaterEqual(Comparison):
pass
class Greater(Comparison):
pass
class LessEqual(Comparison):
pass
class Less(Comparison):
pass
class IdenticalTo(Comparison):
pass
class Between(ValueOp, BooleanValueOp):
arg = Arg(rlz.any)
lower_bound = Arg(rlz.any)
upper_bound = Arg(rlz.any)
def output_type(self):
arg, lower, upper = self.args
if not (rlz.comparable(arg, lower) and rlz.comparable(arg, upper)):
raise TypeError('Arguments are not comparable')
return rlz.shape_like(self.args, dt.boolean)
class BetweenTime(Between):
arg = Arg(rlz.one_of([rlz.timestamp, rlz.time]))
lower_bound = Arg(rlz.one_of([rlz.time, rlz.string]))
upper_bound = Arg(rlz.one_of([rlz.time, rlz.string]))
class Contains(ValueOp, BooleanValueOp):
value = Arg(rlz.any)
options = Arg(
rlz.one_of(
[
rlz.list_of(rlz.any),
rlz.set_,
rlz.column(rlz.any),
rlz.array_of(rlz.any),
]
)
)
def __init__(self, value, options):
# it can be a single expression, like a column
if not isinstance(options, ir.Expr):
if util.any_of(options, ir.Expr):
# or a list of expressions
options = ir.sequence(options)
else:
# or a set of scalar values
options = frozenset(options)
super().__init__(value, options)
def output_type(self):
all_args = [self.value]
if isinstance(self.options, ir.ListExpr):
all_args += self.options
else:
all_args += [self.options]
return rlz.shape_like(all_args, dt.boolean)
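# Rough usage sketch of Contains/NotContains (hypothetical table ``t``): the
# options may be a list of scalar values, another expression, or a column.
#
#   t = ibis.table([('x', 'int64')], name='t')
#   t.x.isin([1, 2, 3])     # Contains
#   t.x.notin([1, 2, 3])    # NotContains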
class NotContains(Contains):
pass
class ReplaceValues(ValueOp):
"""
Apply a multi-value replacement on a particular column. As an example from
    SQL, given DAYOFWEEK(timestamp_col), map 1 through 5 to "WEEKDAY" and 6
    and 7 to "WEEKEND"
"""
pass
class SummaryFilter(ValueOp):
expr = Arg(rlz.noop)
def output_type(self):
return dt.boolean.column_type()
class TopK(ValueOp):
arg = Arg(rlz.noop)
k = Arg(int)
by = Arg(rlz.noop)
def __init__(self, arg, k, by=None):
if by is None:
by = arg.count()
if not isinstance(arg, ir.ColumnExpr):
raise TypeError(arg)
if not isinstance(k, int) or k < 0:
            raise ValueError('k must be a positive integer, was: {0}'.format(k))
super().__init__(arg, k, by)
def output_type(self):
return ir.TopKExpr
def blocks(self):
return True
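# Rough usage sketch for TopK above (hypothetical table ``t``): by default
# the ranking metric is the count of the column, but any reduction can be
# passed as ``by``.
#
#   t = ibis.table([('city', 'string'), ('pop', 'int64')], name='t')
#   t.city.topk(5)                    # five most frequent cities
#   t.city.topk(5, by=t.pop.mean())   # five cities by mean population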
class Constant(ValueOp):
pass
class TimestampNow(Constant):
def output_type(self):
return dt.timestamp.scalar_type()
class RandomScalar(Constant):
def output_type(self):
return dt.float64.scalar_type()
class E(Constant):
def output_type(self):
return functools.partial(ir.FloatingScalar, dtype=dt.float64)
class Pi(Constant):
"""
The constant pi
"""
def output_type(self):
return functools.partial(ir.FloatingScalar, dtype=dt.float64)
class TemporalUnaryOp(UnaryOp):
arg = Arg(rlz.temporal)
class TimestampUnaryOp(UnaryOp):
arg = Arg(rlz.timestamp)
_date_units = {
'Y': 'Y',
'y': 'Y',
'year': 'Y',
'YEAR': 'Y',
'YYYY': 'Y',
'SYYYY': 'Y',
'YYY': 'Y',
'YY': 'Y',
'Q': 'Q',
'q': 'Q',
'quarter': 'Q',
'QUARTER': 'Q',
'M': 'M',
'month': 'M',
'MONTH': 'M',
'w': 'W',
'W': 'W',
'week': 'W',
'WEEK': 'W',
'd': 'D',
'D': 'D',
'J': 'D',
'day': 'D',
'DAY': 'D',
}
_time_units = {
'h': 'h',
'H': 'h',
'HH24': 'h',
'hour': 'h',
'HOUR': 'h',
'm': 'm',
'MI': 'm',
'minute': 'm',
'MINUTE': 'm',
's': 's',
'second': 's',
'SECOND': 's',
'ms': 'ms',
'millisecond': 'ms',
'MILLISECOND': 'ms',
'us': 'us',
    'microsecond': 'us',
    'MICROSECOND': 'us',
'ns': 'ns',
'nanosecond': 'ns',
'NANOSECOND': 'ns',
}
_timestamp_units = toolz.merge(_date_units, _time_units)
class TimestampTruncate(ValueOp):
arg = Arg(rlz.timestamp)
unit = Arg(rlz.isin(_timestamp_units))
output_type = rlz.shape_like('arg', dt.timestamp)
class DateTruncate(ValueOp):
arg = Arg(rlz.date)
unit = Arg(rlz.isin(_date_units))
output_type = rlz.shape_like('arg', dt.date)
class TimeTruncate(ValueOp):
arg = Arg(rlz.time)
unit = Arg(rlz.isin(_time_units))
output_type = rlz.shape_like('arg', dt.time)
class Strftime(ValueOp):
arg = Arg(rlz.temporal)
format_str = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class StringToTimestamp(ValueOp):
arg = Arg(rlz.string)
format_str = Arg(rlz.string)
timezone = Arg(rlz.string, default=None)
output_type = rlz.shape_like('arg', dt.Timestamp(timezone='UTC'))
class ExtractTemporalField(TemporalUnaryOp):
output_type = rlz.shape_like('arg', dt.int32)
ExtractTimestampField = ExtractTemporalField
class ExtractDateField(ExtractTemporalField):
arg = Arg(rlz.one_of([rlz.date, rlz.timestamp]))
class ExtractTimeField(ExtractTemporalField):
arg = Arg(rlz.one_of([rlz.time, rlz.timestamp]))
class ExtractYear(ExtractDateField):
pass
class ExtractMonth(ExtractDateField):
pass
class ExtractDay(ExtractDateField):
pass
class ExtractDayOfYear(ExtractDateField):
pass
class ExtractQuarter(ExtractDateField):
pass
class ExtractEpochSeconds(ExtractDateField):
pass
class ExtractWeekOfYear(ExtractDateField):
pass
class ExtractHour(ExtractTimeField):
pass
class ExtractMinute(ExtractTimeField):
pass
class ExtractSecond(ExtractTimeField):
pass
class ExtractMillisecond(ExtractTimeField):
pass
class DayOfWeekIndex(UnaryOp):
arg = Arg(rlz.one_of([rlz.date, rlz.timestamp]))
output_type = rlz.shape_like('arg', dt.int16)
class DayOfWeekName(UnaryOp):
arg = Arg(rlz.one_of([rlz.date, rlz.timestamp]))
output_type = rlz.shape_like('arg', dt.string)
class DayOfWeekNode(Node):
arg = Arg(rlz.one_of([rlz.date, rlz.timestamp]))
def output_type(self):
return ir.DayOfWeek
class Time(UnaryOp):
output_type = rlz.shape_like('arg', dt.time)
class Date(UnaryOp):
output_type = rlz.shape_like('arg', dt.date)
class TimestampFromUNIX(ValueOp):
arg = Arg(rlz.any)
# Only pandas-based backends support 'ns'
unit = Arg(rlz.isin({'s', 'ms', 'us', 'ns'}))
output_type = rlz.shape_like('arg', dt.timestamp)
class DecimalUnaryOp(UnaryOp):
arg = Arg(rlz.decimal)
class DecimalPrecision(DecimalUnaryOp):
output_type = rlz.shape_like('arg', dt.int32)
class DecimalScale(UnaryOp):
output_type = rlz.shape_like('arg', dt.int32)
class Hash(ValueOp):
arg = Arg(rlz.any)
how = Arg(rlz.isin({'fnv', 'farm_fingerprint'}))
output_type = rlz.shape_like('arg', dt.int64)
class HashBytes(ValueOp):
arg = Arg(rlz.one_of({rlz.value(dt.string), rlz.value(dt.binary)}))
how = Arg(rlz.isin({'md5', 'sha1', 'sha256', 'sha512'}))
output_type = rlz.shape_like('arg', dt.binary)
class DateAdd(BinaryOp):
left = Arg(rlz.date)
right = Arg(rlz.interval(units={'Y', 'Q', 'M', 'W', 'D'}))
output_type = rlz.shape_like('left')
class DateSub(BinaryOp):
left = Arg(rlz.date)
right = Arg(rlz.interval(units={'Y', 'Q', 'M', 'W', 'D'}))
output_type = rlz.shape_like('left')
class DateDiff(BinaryOp):
left = Arg(rlz.date)
right = Arg(rlz.date)
output_type = rlz.shape_like('left', dt.Interval('D'))
class TimeAdd(BinaryOp):
left = Arg(rlz.time)
right = Arg(rlz.interval(units={'h', 'm', 's', 'ms', 'us', 'ns'}))
output_type = rlz.shape_like('left')
class TimeSub(BinaryOp):
left = Arg(rlz.time)
right = Arg(rlz.interval(units={'h', 'm', 's', 'ms', 'us', 'ns'}))
output_type = rlz.shape_like('left')
class TimeDiff(BinaryOp):
left = Arg(rlz.time)
right = Arg(rlz.time)
output_type = rlz.shape_like('left', dt.Interval('s'))
class TimestampAdd(BinaryOp):
left = Arg(rlz.timestamp)
right = Arg(
rlz.interval(
units={'Y', 'Q', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns'}
)
)
output_type = rlz.shape_like('left')
class TimestampSub(BinaryOp):
left = Arg(rlz.timestamp)
right = Arg(
rlz.interval(
units={'Y', 'Q', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns'}
)
)
output_type = rlz.shape_like('left')
class TimestampDiff(BinaryOp):
left = Arg(rlz.timestamp)
right = Arg(rlz.timestamp)
output_type = rlz.shape_like('left', dt.Interval('s'))
class IntervalBinaryOp(BinaryOp):
def output_type(self):
args = [
arg.cast(arg.type().value_type)
if isinstance(arg.type(), dt.Interval)
else arg
for arg in self.args
]
expr = rlz.numeric_like(args, self.__class__.op)(self)
left_dtype = self.left.type()
dtype_type = type(left_dtype)
additional_args = {
attr: getattr(left_dtype, attr)
for attr in dtype_type.__slots__
if attr not in {'unit', 'value_type'}
}
dtype = dtype_type(left_dtype.unit, expr.type(), **additional_args)
return rlz.shape_like(self.args, dtype=dtype)
class IntervalAdd(IntervalBinaryOp):
left = Arg(rlz.interval)
right = Arg(rlz.interval)
op = operator.add
class IntervalSubtract(IntervalBinaryOp):
left = Arg(rlz.interval)
right = Arg(rlz.interval)
op = operator.sub
class IntervalMultiply(IntervalBinaryOp):
left = Arg(rlz.interval)
right = Arg(rlz.numeric)
op = operator.mul
class IntervalFloorDivide(IntervalBinaryOp):
left = Arg(rlz.interval)
right = Arg(rlz.numeric)
op = operator.floordiv
class IntervalFromInteger(ValueOp):
arg = Arg(rlz.integer)
unit = Arg(
rlz.isin({'Y', 'Q', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns'})
)
@property
def resolution(self):
return dt.Interval(self.unit).resolution
def output_type(self):
dtype = dt.Interval(self.unit, self.arg.type())
return rlz.shape_like(self.arg, dtype=dtype)
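# Rough sketch of IntervalFromInteger's type rule above (hypothetical table
# ``t``): an integer expression tagged with a unit produces an interval whose
# value type is the integer's type, e.g. an interval('D') of int64.
#
#   t = ibis.table([('n', 'int64')], name='t')
#   expr = IntervalFromInteger(t.n, 'D').to_expr()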
class ArrayColumn(ValueOp):
cols = Arg(rlz.list_of(rlz.column(rlz.any), min_length=1))
def _validate(self):
if len({col.type() for col in self.cols}) > 1:
raise com.IbisTypeError(
f'The types of all input columns must match exactly in a '
f'{type(self).__name__} operation.'
)
def output_type(self):
first_dtype = self.cols[0].type()
return dt.Array(first_dtype).column_type()
class ArrayLength(UnaryOp):
arg = Arg(rlz.array)
output_type = rlz.shape_like('arg', dt.int64)
class ArraySlice(ValueOp):
arg = Arg(rlz.array)
start = Arg(rlz.integer)
stop = Arg(rlz.integer, default=None)
output_type = rlz.typeof('arg')
class ArrayIndex(ValueOp):
arg = Arg(rlz.array)
index = Arg(rlz.integer)
def output_type(self):
value_dtype = self.arg.type().value_type
return rlz.shape_like(self.arg, value_dtype)
class ArrayConcat(ValueOp):
left = Arg(rlz.array)
right = Arg(rlz.array)
output_type = rlz.shape_like('left')
def _validate(self):
left_dtype, right_dtype = self.left.type(), self.right.type()
if left_dtype != right_dtype:
raise com.IbisTypeError(
'Array types must match exactly in a {} operation. '
'Left type {} != Right type {}'.format(
type(self).__name__, left_dtype, right_dtype
)
)
class ArrayRepeat(ValueOp):
arg = Arg(rlz.array)
times = Arg(rlz.integer)
output_type = rlz.typeof('arg')
class ArrayCollect(Reduction):
arg = Arg(rlz.column(rlz.any))
def output_type(self):
dtype = dt.Array(self.arg.type())
return dtype.scalar_type()
class MapLength(ValueOp):
arg = Arg(rlz.mapping)
output_type = rlz.shape_like('arg', dt.int64)
class MapValueForKey(ValueOp):
arg = Arg(rlz.mapping)
key = Arg(rlz.one_of([rlz.string, rlz.integer]))
def output_type(self):
return rlz.shape_like(tuple(self.args), self.arg.type().value_type)
class MapValueOrDefaultForKey(ValueOp):
arg = Arg(rlz.mapping)
key = Arg(rlz.one_of([rlz.string, rlz.integer]))
default = Arg(rlz.any)
def output_type(self):
arg = self.arg
default = self.default
map_type = arg.type()
value_type = map_type.value_type
default_type = default.type()
if default is not None and not dt.same_kind(default_type, value_type):
raise com.IbisTypeError(
"Default value\n{}\nof type {} cannot be cast to map's value "
"type {}".format(default, default_type, value_type)
)
result_type = dt.highest_precedence((default_type, value_type))
return rlz.shape_like(tuple(self.args), result_type)
class MapKeys(ValueOp):
arg = Arg(rlz.mapping)
def output_type(self):
arg = self.arg
return rlz.shape_like(arg, dt.Array(arg.type().key_type))
class MapValues(ValueOp):
arg = Arg(rlz.mapping)
def output_type(self):
arg = self.arg
return rlz.shape_like(arg, dt.Array(arg.type().value_type))
class MapConcat(ValueOp):
left = Arg(rlz.mapping)
right = Arg(rlz.mapping)
output_type = rlz.typeof('left')
class StructField(ValueOp):
arg = Arg(rlz.struct)
field = Arg(str)
def output_type(self):
struct_dtype = self.arg.type()
value_dtype = struct_dtype[self.field]
return rlz.shape_like(self.arg, value_dtype)
class Literal(ValueOp):
value = Arg(rlz.noop)
dtype = Arg(dt.dtype)
def __repr__(self):
return '{}({})'.format(
type(self).__name__, ', '.join(map(repr, self.args))
)
def equals(self, other, cache=None):
# Check types
if not (
isinstance(other, Literal)
and isinstance(other.value, type(self.value))
and self.dtype == other.dtype
):
return False
# Check values
if isinstance(self.value, np.ndarray):
return np.array_equal(self.value, other.value)
else:
return self.value == other.value
def output_type(self):
return self.dtype.scalar_type()
def root_tables(self):
return []
def __hash__(self) -> int:
"""Return the hash of a literal value.
We override this method to make sure that we can handle things that
aren't eminently hashable like an ``array<array<int64>>``.
"""
return hash(self.dtype._literal_value_hash_key(self.value))
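# Note on Literal: equality compares the dtype as well as the value, using
# np.array_equal for ndarray values (plain `==` would broadcast elementwise),
# and hashing is delegated to the dtype so nested values such as
# array<array<int64>> remain hashable.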
class NullLiteral(Literal):
"""Typeless NULL literal"""
value = Arg(type(None), default=None)
dtype = Arg(dt.Null, default=dt.null)
class ScalarParameter(ValueOp):
_counter = itertools.count()
dtype = Arg(dt.dtype)
counter = Arg(int, default=lambda: next(ScalarParameter._counter))
def resolve_name(self):
return 'param_{:d}'.format(self.counter)
def __repr__(self):
return '{}(type={})'.format(type(self).__name__, self.dtype)
def __hash__(self):
return hash((self.dtype, self.counter))
def output_type(self):
return self.dtype.scalar_type()
def equals(self, other, cache=None):
return (
isinstance(other, ScalarParameter)
and self.counter == other.counter
and self.dtype.equals(other.dtype, cache=cache)
)
@property
def inputs(self):
return ()
def root_tables(self):
return []
class ExpressionList(Node):
"""Data structure for a list of arbitrary expressions"""
exprs = Arg(rlz.noop)
def __init__(self, values):
super().__init__(list(map(rlz.any, values)))
@property
def inputs(self):
return (tuple(self.exprs),)
def root_tables(self):
return distinct_roots(self.exprs)
def output_type(self):
return ir.ExprList
class ValueList(ValueOp):
"""Data structure for a list of value expressions"""
values = Arg(rlz.noop)
display_argnames = False # disable showing argnames in repr
def __init__(self, values):
super().__init__(tuple(map(rlz.any, values)))
def output_type(self):
dtype = rlz.highest_precedence_dtype(self.values)
return functools.partial(ir.ListExpr, dtype=dtype)
def root_tables(self):
return distinct_roots(*self.values)
# ----------------------------------------------------------------------
# GeoSpatial operations
class GeoSpatialBinOp(BinaryOp):
"""Geo Spatial base binary"""
left = Arg(rlz.geospatial)
right = Arg(rlz.geospatial)
class GeoSpatialUnOp(UnaryOp):
"""Geo Spatial base unary"""
arg = Arg(rlz.geospatial)
class GeoDistance(GeoSpatialBinOp):
"""Returns minimum distance between two geo spatial data"""
output_type = rlz.shape_like('args', dt.float64)
class GeoContains(GeoSpatialBinOp):
"""Check if the first geo spatial data contains the second one"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoContainsProperly(GeoSpatialBinOp):
"""Check if the first geo spatial data contains the second one,
and no boundary points are shared."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoCovers(GeoSpatialBinOp):
"""Returns True if no point in Geometry B is outside Geometry A"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoCoveredBy(GeoSpatialBinOp):
"""Returns True if no point in Geometry/Geography A is
outside Geometry/Geography B"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoCrosses(GeoSpatialBinOp):
"""Returns True if the supplied geometries have some, but not all,
interior points in common."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoDisjoint(GeoSpatialBinOp):
"""Returns True if the Geometries do not “spatially intersect” -
if they do not share any space together."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoEquals(GeoSpatialBinOp):
"""Returns True if the given geometries represent the same geometry."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoGeometryN(GeoSpatialUnOp):
"""Returns the Nth Geometry of a Multi geometry."""
n = Arg(rlz.integer)
output_type = rlz.shape_like('args', dt.geometry)
class GeoGeometryType(GeoSpatialUnOp):
"""Returns the type of the geometry."""
output_type = rlz.shape_like('args', dt.string)
class GeoIntersects(GeoSpatialBinOp):
"""Returns True if the Geometries/Geography “spatially intersect in 2D”
- (share any portion of space) and False if they don’t (they are Disjoint).
"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoIsValid(GeoSpatialUnOp):
"""Returns true if the geometry is well-formed."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoLineLocatePoint(GeoSpatialBinOp):
"""
Locate the distance a point falls along the length of a line.
Returns a float between zero and one representing the location of the
closest point on the linestring to the given point, as a fraction of the
total 2d line length.
"""
left = Arg(rlz.linestring)
right = Arg(rlz.point)
output_type = rlz.shape_like('args', dt.halffloat)
class GeoLineMerge(GeoSpatialUnOp):
"""
Merge a MultiLineString into a LineString.
Returns a (set of) LineString(s) formed by sewing together the
constituent line work of a multilinestring. If a geometry other than
a linestring or multilinestring is given, this will return an empty
geometry collection.
"""
output_type = rlz.shape_like('args', dt.geometry)
class GeoLineSubstring(GeoSpatialUnOp):
"""
Clip a substring from a LineString.
Returns a linestring that is a substring of the input one, starting
and ending at the given fractions of the total 2d length. The second
and third arguments are floating point values between zero and one.
This only works with linestrings.
"""
arg = Arg(rlz.linestring)
start = Arg(rlz.floating)
end = Arg(rlz.floating)
output_type = rlz.shape_like('args', dt.linestring)
class GeoOrderingEquals(GeoSpatialBinOp):
"""
Check if two geometries are equal and have the same point ordering.
Returns true if the two geometries are equal and the coordinates
are in the same order.
"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoOverlaps(GeoSpatialBinOp):
"""Returns True if the Geometries share space, are of the same dimension,
but are not completely contained by each other."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoTouches(GeoSpatialBinOp):
"""Returns True if the geometries have at least one point in common,
but their interiors do not intersect."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoUnaryUnion(Reduction):
"""Returns the pointwise union of the geometries in the column."""
arg = Arg(rlz.column(rlz.geospatial))
def output_type(self):
return dt.geometry.scalar_type()
class GeoUnion(GeoSpatialBinOp):
"""Returns the pointwise union of the two geometries."""
output_type = rlz.shape_like('args', dt.geometry)
class GeoArea(GeoSpatialUnOp):
"""Area of the geo spatial data"""
output_type = rlz.shape_like('args', dt.float64)
class GeoPerimeter(GeoSpatialUnOp):
"""Perimeter of the geo spatial data"""
output_type = rlz.shape_like('args', dt.float64)
class GeoLength(GeoSpatialUnOp):
"""Length of geo spatial data"""
output_type = rlz.shape_like('args', dt.float64)
class GeoMaxDistance(GeoSpatialBinOp):
"""Returns the 2-dimensional maximum distance between two geometries in
projected units. If g1 and g2 is the same geometry the function will
return the distance between the two vertices most far from each other
in that geometry
"""
output_type = rlz.shape_like('args', dt.float64)
class GeoX(GeoSpatialUnOp):
"""Return the X coordinate of the point, or NULL if not available.
Input must be a point
"""
output_type = rlz.shape_like('args', dt.float64)
class GeoY(GeoSpatialUnOp):
"""Return the Y coordinate of the point, or NULL if not available.
Input must be a point
"""
output_type = rlz.shape_like('args', dt.float64)
class GeoXMin(GeoSpatialUnOp):
"""Returns Y minima of a bounding box 2d or 3d or a geometry"""
output_type = rlz.shape_like('args', dt.float64)
class GeoXMax(GeoSpatialUnOp):
"""Returns X maxima of a bounding box 2d or 3d or a geometry"""
output_type = rlz.shape_like('args', dt.float64)
class GeoYMin(GeoSpatialUnOp):
"""Returns Y minima of a bounding box 2d or 3d or a geometry"""
output_type = rlz.shape_like('args', dt.float64)
class GeoYMax(GeoSpatialUnOp):
"""Returns Y maxima of a bounding box 2d or 3d or a geometry"""
output_type = rlz.shape_like('args', dt.float64)
class GeoStartPoint(GeoSpatialUnOp):
"""Returns the first point of a LINESTRING geometry as a POINT or
NULL if the input parameter is not a LINESTRING
"""
output_type = rlz.shape_like('arg', dt.point)
class GeoEndPoint(GeoSpatialUnOp):
"""Returns the last point of a LINESTRING geometry as a POINT or
NULL if the input parameter is not a LINESTRING
"""
output_type = rlz.shape_like('arg', dt.point)
class GeoPoint(GeoSpatialBinOp):
"""
Return a point constructed on the fly from the provided coordinate values.
Constant coordinates result in construction of a POINT literal.
"""
left = Arg(rlz.numeric)
right = Arg(rlz.numeric)
output_type = rlz.shape_like('args', dt.point)
class GeoPointN(GeoSpatialUnOp):
"""Return the Nth point in a single linestring in the geometry.
Negative values are counted backwards from the end of the LineString,
so that -1 is the last point. Returns NULL if there is no linestring in
the geometry
"""
n = Arg(rlz.integer)
output_type = rlz.shape_like('args', dt.point)
class GeoNPoints(GeoSpatialUnOp):
"""Return the number of points in a geometry. Works for all geometries"""
output_type = rlz.shape_like('args', dt.int64)
class GeoNRings(GeoSpatialUnOp):
"""If the geometry is a polygon or multi-polygon returns the number of
rings. It counts the outer rings as well
"""
output_type = rlz.shape_like('args', dt.int64)
class GeoSRID(GeoSpatialUnOp):
"""Returns the spatial reference identifier for the ST_Geometry."""
output_type = rlz.shape_like('args', dt.int64)
class GeoSetSRID(GeoSpatialUnOp):
"""Set the spatial reference identifier for the ST_Geometry."""
srid = Arg(rlz.integer)
output_type = rlz.shape_like('args', dt.geometry)
class GeoBuffer(GeoSpatialUnOp):
"""Returns a geometry that represents all points whose distance from this
Geometry is less than or equal to distance. Calculations are in the
Spatial Reference System of this Geometry.
"""
radius = Arg(rlz.floating)
output_type = rlz.shape_like('args', dt.geometry)
class GeoCentroid(GeoSpatialUnOp):
"""Returns the geometric center of a geometry."""
output_type = rlz.shape_like('arg', dt.point)
class GeoDFullyWithin(GeoSpatialBinOp):
"""Returns True if the geometries are fully within the specified distance
of one another.
"""
distance = Arg(rlz.floating)
output_type = rlz.shape_like('args', dt.boolean)
class GeoDWithin(GeoSpatialBinOp):
"""Returns True if the geometries are within the specified distance
of one another.
"""
distance = Arg(rlz.floating)
output_type = rlz.shape_like('args', dt.boolean)
class GeoEnvelope(GeoSpatialUnOp):
"""Returns a geometry representing the boundingbox of the supplied geometry.
"""
output_type = rlz.shape_like('arg', dt.polygon)
class GeoAzimuth(GeoSpatialBinOp):
"""Returns the angle in radians from the horizontal of the vector defined
by pointA and pointB. Angle is computed clockwise from down-to-up:
on the clock: 12=0; 3=PI/2; 6=PI; 9=3PI/2.
"""
left = Arg(rlz.point)
right = Arg(rlz.point)
output_type = rlz.shape_like('args', dt.float64)
class GeoWithin(GeoSpatialBinOp):
"""Returns True if the geometry A is completely inside geometry B"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoIntersection(GeoSpatialBinOp):
"""Returns a geometry that represents the point set intersection
of the Geometries.
"""
output_type = rlz.shape_like('args', dt.geometry)
class GeoDifference(GeoSpatialBinOp):
"""Returns a geometry that represents that part of geometry A
that does not intersect with geometry B
"""
output_type = rlz.shape_like('args', dt.geometry)
class GeoSimplify(GeoSpatialUnOp):
"""Returns a simplified version of the given geometry."""
tolerance = Arg(rlz.floating)
preserve_collapsed = Arg(rlz.boolean)
output_type = rlz.shape_like('arg', dt.geometry)
class GeoTransform(GeoSpatialUnOp):
"""Returns a transformed version of the given geometry into a new SRID."""
srid = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.geometry)
class GeoAsBinary(GeoSpatialUnOp):
"""Return the Well-Known Binary (WKB) representation of the
geometry/geography without SRID meta data.
"""
output_type = rlz.shape_like('arg', dt.binary)
class GeoAsEWKB(GeoSpatialUnOp):
"""Return the Well-Known Binary (WKB) representation of the
geometry/geography with SRID meta data.
"""
output_type = rlz.shape_like('arg', dt.binary)
class GeoAsEWKT(GeoSpatialUnOp):
"""Return the Well-Known Text (WKT) representation of the
geometry/geography with SRID meta data.
"""
output_type = rlz.shape_like('arg', dt.string)
class GeoAsText(GeoSpatialUnOp):
"""Return the Well-Known Text (WKT) representation of the
geometry/geography without SRID metadata.
"""
output_type = rlz.shape_like('arg', dt.string)
class ElementWiseVectorizedUDF(ValueOp):
"""Node for element wise UDF."""
func = Arg(callable)
func_args = Arg(tuple)
input_type = Arg(rlz.shape_like('func_args'))
_output_type = Arg(rlz.noop)
def __init__(self, func, args, input_type, output_type):
self.func = func
self.func_args = args
self.input_type = input_type
self._output_type = output_type
@property
def inputs(self):
return self.func_args
def output_type(self):
return self._output_type.column_type()
def root_tables(self):
return distinct_roots(*self.func_args)
class ReductionVectorizedUDF(Reduction):
"""Node for reduction UDF."""
func = Arg(callable)
func_args = Arg(tuple)
input_type = Arg(rlz.shape_like('func_args'))
_output_type = Arg(rlz.noop)
def __init__(self, func, args, input_type, output_type):
self.func = func
self.func_args = args
self.input_type = input_type
self._output_type = output_type
@property
def inputs(self):
return self.func_args
def output_type(self):
return self._output_type.scalar_type()
def root_tables(self):
return distinct_roots(*self.func_args)
class AnalyticVectorizedUDF(AnalyticOp):
"""Node for analytics UDF."""
func = Arg(callable)
func_args = Arg(tuple)
input_type = Arg(rlz.shape_like('func_args'))
_output_type = Arg(rlz.noop)
def __init__(self, func, args, input_type, output_type):
self.func = func
self.func_args = args
self.input_type = input_type
self._output_type = output_type
@property
def inputs(self):
return self.func_args
def output_type(self):
return self._output_type.column_type()
def root_tables(self):
return distinct_roots(*self.func_args)
class ExistsSubquery(Node):
"""Helper class"""
foreign_table = Arg(rlz.noop)
predicates = Arg(rlz.noop)
def output_type(self):
return ir.ExistsExpr
class NotExistsSubquery(Node):
foreign_table = Arg(rlz.noop)
predicates = Arg(rlz.noop)
def output_type(self):
return ir.ExistsExpr
| 24.984392
| 99
| 0.624954
|
import collections
import functools
import itertools
import operator
from contextlib import suppress
from typing import Any, Dict, List
import numpy as np
import toolz
from cached_property import cached_property
import ibis.common.exceptions as com
import ibis.expr.datatypes as dt
import ibis.expr.rules as rlz
import ibis.expr.schema as sch
import ibis.expr.types as ir
from ibis import util
from ibis.expr.schema import HasSchema, Schema
from ibis.expr.signature import Annotable
from ibis.expr.signature import Argument as Arg
def _safe_repr(x, memo=None):
return x._repr(memo=memo) if isinstance(x, (ir.Expr, Node)) else repr(x)
def distinct_roots(*expressions):
roots = toolz.concat(expr.op().root_tables() for expr in expressions)
return list(toolz.unique(roots))
class Node(Annotable):
__slots__ = '_expr_cached', '_hash'
def __repr__(self):
return self._repr()
def _repr(self, memo=None):
if memo is None:
from ibis.expr.format import FormatMemo
memo = FormatMemo()
opname = type(self).__name__
pprint_args = []
def _pp(x):
return _safe_repr(x, memo=memo)
for x in self.args:
if isinstance(x, (tuple, list)):
pp = repr(list(map(_pp, x)))
else:
pp = _pp(x)
pprint_args.append(pp)
return '{}({})'.format(opname, ', '.join(pprint_args))
def __getstate__(self) -> Dict[str, Any]:
excluded_slots = {'_expr_cached', '_hash'}
return {
slot: getattr(self, slot)
for slot in self.__slots__
if slot not in excluded_slots
}
def __setstate__(self, state: Dict[str, Any]) -> None:
for slot in state:
setattr(self, slot, state[slot])
@property
def inputs(self):
return tuple(self.args)
def blocks(self):
return False
def flat_args(self):
for arg in self.args:
if not isinstance(arg, str) and isinstance(
arg, collections.abc.Iterable
):
for x in arg:
yield x
else:
yield arg
def __hash__(self):
if not hasattr(self, '_hash'):
self._hash = hash(
(type(self),)
+ tuple(
element.op() if isinstance(element, ir.Expr) else element
for element in self.flat_args()
)
)
return self._hash
def __eq__(self, other):
return self.equals(other)
def equals(self, other, cache=None):
if cache is None:
cache = {}
key = self, other
try:
return cache[key]
except KeyError:
cache[key] = result = self is other or (
type(self) == type(other)
and all_equal(self.args, other.args, cache=cache)
)
return result
def compatible_with(self, other):
return self.equals(other)
def is_ancestor(self, other):
if isinstance(other, ir.Expr):
other = other.op()
return self.equals(other)
def to_expr(self):
if not hasattr(self, '_expr_cached'):
self._expr_cached = self._make_expr()
return self._expr_cached
def _make_expr(self):
klass = self.output_type()
return klass(self)
def output_type(self):
raise NotImplementedError
class ValueOp(Node):
def root_tables(self):
exprs = [arg for arg in self.args if isinstance(arg, ir.Expr)]
return distinct_roots(*exprs)
def resolve_name(self):
raise com.ExpressionError(f'Expression is not named: {type(self)}')
def has_resolved_name(self):
return False
def all_equal(left, right, cache=None):
if cache is None:
cache = {}
if util.is_iterable(left):
return (
util.is_iterable(right)
and len(left) == len(right)
and all(
itertools.starmap(
functools.partial(all_equal, cache=cache), zip(left, right)
)
)
)
if hasattr(left, 'equals'):
return left.equals(right, cache=cache)
return left == right
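# all_equal recurses into iterables pairwise and otherwise defers to an
# `equals(other, cache=...)` method when one is available; the shared cache
# dict keeps repeated comparisons of the same node pair from being recomputed
# while walking large expression trees.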
_table_names = ('unbound_table_{:d}'.format(i) for i in itertools.count())
def genname():
return next(_table_names)
class TableNode(Node):
def get_type(self, name):
return self.schema[name]
def output_type(self):
return ir.TableExpr
def aggregate(self, this, metrics, by=None, having=None):
return Aggregation(this, metrics, by=by, having=having)
def sort_by(self, expr, sort_exprs):
return Selection(expr, [], sort_keys=sort_exprs)
def is_ancestor(self, other):
import ibis.expr.lineage as lin
if isinstance(other, ir.Expr):
other = other.op()
if self.equals(other):
return True
fn = lambda e: (lin.proceed, e.op())
expr = self.to_expr()
for child in lin.traverse(fn, expr):
if child.equals(other):
return True
return False
class TableColumn(ValueOp):
name = Arg((str, int))
table = Arg(ir.TableExpr)
def __init__(self, name, table):
schema = table.schema()
if isinstance(name, int):
name = schema.name_at_position(name)
super().__init__(name, table)
def _validate(self):
if self.name not in self.table.schema():
raise com.IbisTypeError(
"'{}' is not a field in {}".format(
self.name, self.table.columns
)
)
def parent(self):
return self.table
def resolve_name(self):
return self.name
def has_resolved_name(self):
return True
def root_tables(self):
return self.table.op().root_tables()
def _make_expr(self):
dtype = self.table._get_type(self.name)
klass = dtype.column_type()
return klass(self, name=self.name)
class RowID(ValueOp):
def output_type(self):
return dt.int64.column_type()
def resolve_name(self):
return 'rowid'
def has_resolved_name(self):
return True
def find_all_base_tables(expr, memo=None):
if memo is None:
memo = {}
node = expr.op()
if isinstance(expr, ir.TableExpr) and node.blocks():
if expr not in memo:
memo[node] = expr
return memo
for arg in expr.op().flat_args():
if isinstance(arg, ir.Expr):
find_all_base_tables(arg, memo)
return memo
class PhysicalTable(TableNode, HasSchema):
def blocks(self):
return True
class UnboundTable(PhysicalTable):
schema = Arg(sch.Schema)
name = Arg(str, default=genname)
class DatabaseTable(PhysicalTable):
name = Arg(str)
schema = Arg(sch.Schema)
source = Arg(rlz.client)
def change_name(self, new_name):
return type(self)(new_name, self.args[1], self.source)
class SQLQueryResult(TableNode, HasSchema):
query = Arg(rlz.noop)
schema = Arg(sch.Schema)
source = Arg(rlz.client)
def blocks(self):
return True
class TableArrayView(ValueOp):
table = Arg(ir.TableExpr)
name = Arg(str)
def __init__(self, table):
schema = table.schema()
if len(schema) > 1:
raise com.ExpressionError('Table can only have a single column')
name = schema.names[0]
return super().__init__(table, name)
def _make_expr(self):
ctype = self.table._get_type(self.name)
klass = ctype.column_type()
return klass(self, name=self.name)
class UnaryOp(ValueOp):
arg = Arg(rlz.any)
class BinaryOp(ValueOp):
left = Arg(rlz.any)
right = Arg(rlz.any)
class Cast(ValueOp):
arg = Arg(rlz.any)
to = Arg(dt.dtype)
    def output_type(self):
        return rlz.shape_like(self.arg, dtype=self.to)
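# A cast only changes the dtype, not the shape: casting a column produces a
# column of the target type and casting a scalar produces a scalar, e.g.
# (roughly) an int32 column cast to 'string' yields a string column.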
class TypeOf(UnaryOp):
output_type = rlz.shape_like('arg', dt.string)
class Negate(UnaryOp):
arg = Arg(rlz.one_of((rlz.numeric(), rlz.interval())))
output_type = rlz.typeof('arg')
class IsNull(UnaryOp):
output_type = rlz.shape_like('arg', dt.boolean)
class NotNull(UnaryOp):
output_type = rlz.shape_like('arg', dt.boolean)
class ZeroIfNull(UnaryOp):
output_type = rlz.typeof('arg')
class IfNull(ValueOp):
arg = Arg(rlz.any)
ifnull_expr = Arg(rlz.any)
output_type = rlz.shape_like('args')
class NullIf(ValueOp):
arg = Arg(rlz.any)
null_if_expr = Arg(rlz.any)
output_type = rlz.shape_like('args')
class NullIfZero(ValueOp):
arg = Arg(rlz.numeric)
output_type = rlz.typeof('arg')
class IsNan(ValueOp):
arg = Arg(rlz.floating)
output_type = rlz.shape_like('arg', dt.boolean)
class IsInf(ValueOp):
arg = Arg(rlz.floating)
output_type = rlz.shape_like('arg', dt.boolean)
class CoalesceLike(ValueOp):
arg = Arg(rlz.list_of(rlz.any))
def output_type(self):
first = self.arg[0]
if isinstance(first, (ir.IntegerValue, ir.FloatingValue)):
dtype = first.type().largest
else:
dtype = first.type()
return rlz.shape_like(self.arg, dtype)
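# Coalesce, Greatest and Least below share this output rule: the result dtype
# comes from the first argument, promoted to its largest variant for integer
# and floating inputs (roughly, an int32 first argument gives an int64
# result), and the result shape follows the whole argument list.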
class Coalesce(CoalesceLike):
pass
class Greatest(CoalesceLike):
pass
class Least(CoalesceLike):
pass
class Abs(UnaryOp):
output_type = rlz.typeof('arg')
class Ceil(UnaryOp):
arg = Arg(rlz.numeric)
def output_type(self):
if isinstance(self.arg.type(), dt.Decimal):
return self.arg._factory
return rlz.shape_like(self.arg, dt.int64)
class Floor(UnaryOp):
arg = Arg(rlz.numeric)
def output_type(self):
if isinstance(self.arg.type(), dt.Decimal):
return self.arg._factory
return rlz.shape_like(self.arg, dt.int64)
class Round(ValueOp):
arg = Arg(rlz.numeric)
digits = Arg(rlz.numeric, default=None)
def output_type(self):
if isinstance(self.arg, ir.DecimalValue):
return self.arg._factory
elif self.digits is None:
return rlz.shape_like(self.arg, dt.int64)
else:
return rlz.shape_like(self.arg, dt.double)
class Clip(ValueOp):
arg = Arg(rlz.strict_numeric)
lower = Arg(rlz.strict_numeric, default=None)
upper = Arg(rlz.strict_numeric, default=None)
output_type = rlz.typeof('arg')
class BaseConvert(ValueOp):
arg = Arg(rlz.one_of([rlz.integer, rlz.string]))
from_base = Arg(rlz.integer)
to_base = Arg(rlz.integer)
def output_type(self):
return rlz.shape_like(tuple(self.flat_args()), dt.string)
class MathUnaryOp(UnaryOp):
arg = Arg(rlz.numeric)
def output_type(self):
arg = self.arg
if isinstance(self.arg, ir.DecimalValue):
dtype = arg.type()
else:
dtype = dt.double
return rlz.shape_like(arg, dtype)
class ExpandingTypeMathUnaryOp(MathUnaryOp):
def output_type(self):
if not isinstance(self.arg, ir.DecimalValue):
return super().output_type()
arg = self.arg
return rlz.shape_like(arg, arg.type().largest)
class Exp(ExpandingTypeMathUnaryOp):
pass
class Sign(UnaryOp):
arg = Arg(rlz.numeric)
output_type = rlz.typeof('arg')
class Sqrt(MathUnaryOp):
pass
class Logarithm(MathUnaryOp):
arg = Arg(rlz.strict_numeric)
class Log(Logarithm):
arg = Arg(rlz.strict_numeric)
base = Arg(rlz.strict_numeric, default=None)
class Ln(Logarithm):
    pass
class Log2(Logarithm):
    pass
class Log10(Logarithm):
    pass
class Degrees(ExpandingTypeMathUnaryOp):
arg = Arg(rlz.numeric)
class Radians(MathUnaryOp):
arg = Arg(rlz.numeric)
class TrigonometricUnary(MathUnaryOp):
arg = Arg(rlz.numeric)
class TrigonometricBinary(BinaryOp):
left = Arg(rlz.numeric)
right = Arg(rlz.numeric)
output_type = rlz.shape_like('args', dt.float64)
class Acos(TrigonometricUnary):
    pass
class Asin(TrigonometricUnary):
    pass
class Atan(TrigonometricUnary):
    pass
class Atan2(TrigonometricBinary):
    pass
class Cos(TrigonometricUnary):
    pass
class Cot(TrigonometricUnary):
    pass
class Sin(TrigonometricUnary):
    pass
class Tan(TrigonometricUnary):
    pass
class StringUnaryOp(UnaryOp):
arg = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class Uppercase(StringUnaryOp):
    pass
class Lowercase(StringUnaryOp):
    pass
class Reverse(StringUnaryOp):
    pass
class Strip(StringUnaryOp):
    pass
class LStrip(StringUnaryOp):
    pass
class RStrip(StringUnaryOp):
    pass
class Capitalize(StringUnaryOp):
    pass
class Substring(ValueOp):
arg = Arg(rlz.string)
start = Arg(rlz.integer)
length = Arg(rlz.integer, default=None)
output_type = rlz.shape_like('arg', dt.string)
class StrRight(ValueOp):
arg = Arg(rlz.string)
nchars = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.string)
class Repeat(ValueOp):
arg = Arg(rlz.string)
times = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.string)
class StringFind(ValueOp):
arg = Arg(rlz.string)
substr = Arg(rlz.string)
start = Arg(rlz.integer, default=None)
end = Arg(rlz.integer, default=None)
output_type = rlz.shape_like('arg', dt.int64)
class Translate(ValueOp):
arg = Arg(rlz.string)
from_str = Arg(rlz.string)
to_str = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class LPad(ValueOp):
arg = Arg(rlz.string)
length = Arg(rlz.integer)
pad = Arg(rlz.string, default=None)
output_type = rlz.shape_like('arg', dt.string)
class RPad(ValueOp):
arg = Arg(rlz.string)
length = Arg(rlz.integer)
pad = Arg(rlz.string, default=None)
output_type = rlz.shape_like('arg', dt.string)
class FindInSet(ValueOp):
needle = Arg(rlz.string)
values = Arg(rlz.list_of(rlz.string, min_length=1))
output_type = rlz.shape_like('needle', dt.int64)
class StringJoin(ValueOp):
sep = Arg(rlz.string)
arg = Arg(rlz.list_of(rlz.string, min_length=1))
def output_type(self):
return rlz.shape_like(tuple(self.flat_args()), dt.string)
class StartsWith(ValueOp):
arg = Arg(rlz.string)
start = Arg(rlz.string)
output_type = rlz.shape_like("arg", dt.boolean)
class EndsWith(ValueOp):
arg = Arg(rlz.string)
end = Arg(rlz.string)
output_type = rlz.shape_like("arg", dt.boolean)
class BooleanValueOp:
pass
class FuzzySearch(ValueOp, BooleanValueOp):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.boolean)
class StringSQLLike(FuzzySearch):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
escape = Arg(str, default=None)
class StringSQLILike(StringSQLLike):
    pass
class RegexSearch(FuzzySearch):
pass
class RegexExtract(ValueOp):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
index = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.string)
class RegexReplace(ValueOp):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
replacement = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class StringReplace(ValueOp):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
replacement = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class StringSplit(ValueOp):
arg = Arg(rlz.string)
delimiter = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.Array(dt.string))
class StringConcat(ValueOp):
arg = Arg(rlz.list_of(rlz.string))
output_type = rlz.shape_like('arg', dt.string)
class ParseURL(ValueOp):
arg = Arg(rlz.string)
extract = Arg(
rlz.isin(
{
'PROTOCOL',
'HOST',
'PATH',
'REF',
'AUTHORITY',
'FILE',
'USERINFO',
'QUERY',
}
)
)
key = Arg(rlz.string, default=None)
output_type = rlz.shape_like('arg', dt.string)
class StringLength(UnaryOp):
output_type = rlz.shape_like('arg', dt.int32)
class StringAscii(UnaryOp):
output_type = rlz.shape_like('arg', dt.int32)
class Reduction(ValueOp):
_reduction = True
class Count(Reduction):
arg = Arg((ir.ColumnExpr, ir.TableExpr))
where = Arg(rlz.boolean, default=None)
def output_type(self):
return functools.partial(ir.IntegerScalar, dtype=dt.int64)
class Arbitrary(Reduction):
arg = Arg(rlz.column(rlz.any))
how = Arg(rlz.isin({'first', 'last', 'heavy'}), default=None)
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class BitAnd(Reduction):
arg = Arg(rlz.column(rlz.integer))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class BitOr(Reduction):
arg = Arg(rlz.column(rlz.integer))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class BitXor(Reduction):
arg = Arg(rlz.column(rlz.integer))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class Sum(Reduction):
arg = Arg(rlz.column(rlz.numeric))
where = Arg(rlz.boolean, default=None)
def output_type(self):
if isinstance(self.arg, ir.BooleanValue):
dtype = dt.int64
else:
dtype = self.arg.type().largest
return dtype.scalar_type()
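# Sum over a boolean column effectively counts True values, so its result
# type is int64; other numeric inputs are widened to the largest variant of
# their own type to limit overflow (roughly, the sum of an int32 column is an
# int64 scalar).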
class Mean(Reduction):
arg = Arg(rlz.column(rlz.numeric))
where = Arg(rlz.boolean, default=None)
def output_type(self):
if isinstance(self.arg, ir.DecimalValue):
dtype = self.arg.type()
else:
dtype = dt.float64
return dtype.scalar_type()
class Quantile(Reduction):
arg = Arg(rlz.any)
quantile = Arg(rlz.strict_numeric)
interpolation = Arg(
rlz.isin({'linear', 'lower', 'higher', 'midpoint', 'nearest'}),
default='linear',
)
def output_type(self):
return dt.float64.scalar_type()
class MultiQuantile(Quantile):
arg = Arg(rlz.any)
quantile = Arg(rlz.value(dt.Array(dt.float64)))
interpolation = Arg(
rlz.isin({'linear', 'lower', 'higher', 'midpoint', 'nearest'}),
default='linear',
)
def output_type(self):
return dt.Array(dt.float64).scalar_type()
class VarianceBase(Reduction):
arg = Arg(rlz.column(rlz.numeric))
how = Arg(rlz.isin({'sample', 'pop'}), default=None)
where = Arg(rlz.boolean, default=None)
def output_type(self):
if isinstance(self.arg, ir.DecimalValue):
dtype = self.arg.type().largest
else:
dtype = dt.float64
return dtype.scalar_type()
class StandardDev(VarianceBase):
pass
class Variance(VarianceBase):
pass
class Correlation(Reduction):
left = Arg(rlz.column(rlz.numeric))
right = Arg(rlz.column(rlz.numeric))
how = Arg(rlz.isin({'sample', 'pop'}), default=None)
where = Arg(rlz.boolean, default=None)
def output_type(self):
return dt.float64.scalar_type()
class Covariance(Reduction):
left = Arg(rlz.column(rlz.numeric))
right = Arg(rlz.column(rlz.numeric))
how = Arg(rlz.isin({'sample', 'pop'}), default=None)
where = Arg(rlz.boolean, default=None)
def output_type(self):
return dt.float64.scalar_type()
class Max(Reduction):
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class Min(Reduction):
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class HLLCardinality(Reduction):
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
def output_type(self):
return functools.partial(ir.IntegerScalar, dtype=dt.int64)
class GroupConcat(Reduction):
arg = Arg(rlz.column(rlz.any))
sep = Arg(rlz.string, default=',')
where = Arg(rlz.boolean, default=None)
def output_type(self):
return dt.string.scalar_type()
class CMSMedian(Reduction):
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class AnalyticOp(ValueOp):
pass
class WindowOp(ValueOp):
expr = Arg(rlz.noop)
window = Arg(rlz.noop)
output_type = rlz.array_like('expr')
display_argnames = False
def __init__(self, expr, window):
from ibis.expr.analysis import is_analytic
from ibis.expr.window import propagate_down_window
if not is_analytic(expr):
raise com.IbisInputError(
'Expression does not contain a valid window operation'
)
table = ir.find_base_table(expr)
if table is not None:
window = window.bind(table)
if window.max_lookback is not None:
error_msg = (
"'max lookback' windows must be ordered "
"by a timestamp column"
)
if len(window._order_by) != 1:
raise com.IbisInputError(error_msg)
order_var = window._order_by[0].op().args[0]
if not isinstance(order_var.type(), dt.Timestamp):
raise com.IbisInputError(error_msg)
expr = propagate_down_window(expr, window)
super().__init__(expr, window)
def over(self, window):
new_window = self.window.combine(window)
return WindowOp(self.expr, new_window)
@property
def inputs(self):
return self.expr.op().inputs[0], self.window
def root_tables(self):
return distinct_roots(
self.expr, *self.window._order_by, *self.window._group_by
)
class ShiftBase(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
offset = Arg(rlz.one_of((rlz.integer, rlz.interval)), default=None)
default = Arg(rlz.any, default=None)
output_type = rlz.typeof('arg')
class Lag(ShiftBase):
pass
class Lead(ShiftBase):
pass
class RankBase(AnalyticOp):
def output_type(self):
return dt.int64.column_type()
class MinRank(RankBase):
arg = Arg(rlz.column(rlz.any))
class DenseRank(RankBase):
arg = Arg(rlz.column(rlz.any))
class RowNumber(RankBase):
    pass
class CumulativeOp(AnalyticOp):
pass
class CumulativeSum(CumulativeOp):
arg = Arg(rlz.column(rlz.numeric))
def output_type(self):
if isinstance(self.arg, ir.BooleanValue):
dtype = dt.int64
else:
dtype = self.arg.type().largest
return dtype.column_type()
class CumulativeMean(CumulativeOp):
arg = Arg(rlz.column(rlz.numeric))
def output_type(self):
if isinstance(self.arg, ir.DecimalValue):
dtype = self.arg.type().largest
else:
dtype = dt.float64
return dtype.column_type()
class CumulativeMax(CumulativeOp):
arg = Arg(rlz.column(rlz.any))
output_type = rlz.array_like('arg')
class CumulativeMin(CumulativeOp):
arg = Arg(rlz.column(rlz.any))
output_type = rlz.array_like('arg')
class PercentRank(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
output_type = rlz.shape_like('arg', dt.double)
class NTile(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
buckets = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.int64)
class FirstValue(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
output_type = rlz.typeof('arg')
class LastValue(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
output_type = rlz.typeof('arg')
class NthValue(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
nth = Arg(rlz.integer)
output_type = rlz.typeof('arg')
class Distinct(TableNode, HasSchema):
table = Arg(ir.TableExpr)
def _validate(self):
assert self.schema
@cached_property
def schema(self):
return self.table.schema()
def blocks(self):
return True
class DistinctColumn(ValueOp):
arg = Arg(rlz.noop)
output_type = rlz.typeof('arg')
def count(self):
return CountDistinct(self.arg)
class CountDistinct(Reduction):
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
def output_type(self):
return dt.int64.scalar_type()
class Any(ValueOp):
arg = Arg(rlz.column(rlz.boolean))
@property
def _reduction(self):
roots = self.arg.op().root_tables()
return len(roots) < 2
def output_type(self):
if self._reduction:
return dt.boolean.scalar_type()
else:
return dt.boolean.column_type()
def negate(self):
return NotAny(self.arg)
class All(ValueOp):
arg = Arg(rlz.column(rlz.boolean))
output_type = rlz.scalar_like('arg')
_reduction = True
def negate(self):
return NotAll(self.arg)
class NotAny(Any):
def negate(self):
return Any(self.arg)
class NotAll(All):
def negate(self):
return All(self.arg)
class CumulativeAny(CumulativeOp):
arg = Arg(rlz.column(rlz.boolean))
output_type = rlz.typeof('arg')
class CumulativeAll(CumulativeOp):
arg = Arg(rlz.column(rlz.boolean))
output_type = rlz.typeof('arg')
class TypedCaseBuilder:
__slots__ = ()
def type(self):
types = [result.type() for result in self.results]
return dt.highest_precedence(types)
def else_(self, result_expr):
kwargs = {
slot: getattr(self, slot)
for slot in self.__slots__
if slot != 'default'
}
result_expr = ir.as_value_expr(result_expr)
kwargs['default'] = result_expr
return type(self)(**kwargs)
def end(self):
default = self.default
if default is None:
default = ir.null().cast(self.type())
args = [
getattr(self, slot) for slot in self.__slots__ if slot != 'default'
]
args.append(default)
op = self.__class__.case_op(*args)
return op.to_expr()
class SimpleCase(ValueOp):
base = Arg(rlz.any)
cases = Arg(rlz.list_of(rlz.any))
results = Arg(rlz.list_of(rlz.any))
default = Arg(rlz.any)
def _validate(self):
assert len(self.cases) == len(self.results)
def root_tables(self):
return distinct_roots(
*itertools.chain(
[self.base],
self.cases,
self.results,
[] if self.default is None else [self.default],
)
)
def output_type(self):
exprs = self.results + [self.default]
return rlz.shape_like(self.base, dtype=exprs.type())
class SimpleCaseBuilder(TypedCaseBuilder):
__slots__ = 'base', 'cases', 'results', 'default'
case_op = SimpleCase
def __init__(self, base, cases=None, results=None, default=None):
self.base = base
self.cases = list(cases if cases is not None else [])
self.results = list(results if results is not None else [])
self.default = default
def when(self, case_expr, result_expr):
case_expr = ir.as_value_expr(case_expr)
result_expr = ir.as_value_expr(result_expr)
if not rlz.comparable(self.base, case_expr):
raise TypeError(
'Base expression and passed case are not ' 'comparable'
)
cases = list(self.cases)
cases.append(case_expr)
results = list(self.results)
results.append(result_expr)
return type(self)(self.base, cases, results, self.default)
class SearchedCase(ValueOp):
cases = Arg(rlz.list_of(rlz.boolean))
results = Arg(rlz.list_of(rlz.any))
default = Arg(rlz.any)
def _validate(self):
assert len(self.cases) == len(self.results)
def root_tables(self):
cases, results, default = self.args
return distinct_roots(
*itertools.chain(
cases.values,
results.values,
[] if default is None else [default],
)
)
def output_type(self):
exprs = self.results + [self.default]
dtype = rlz.highest_precedence_dtype(exprs)
return rlz.shape_like(self.cases, dtype)
class SearchedCaseBuilder(TypedCaseBuilder):
__slots__ = 'cases', 'results', 'default'
case_op = SearchedCase
def __init__(self, cases=None, results=None, default=None):
self.cases = list(cases if cases is not None else [])
self.results = list(results if results is not None else [])
self.default = default
def when(self, case_expr, result_expr):
case_expr = ir.as_value_expr(case_expr)
result_expr = ir.as_value_expr(result_expr)
if not isinstance(case_expr, ir.BooleanValue):
raise TypeError(case_expr)
cases = list(self.cases)
cases.append(case_expr)
results = list(self.results)
results.append(result_expr)
return type(self)(cases, results, self.default)
class Where(ValueOp):
bool_expr = Arg(rlz.boolean)
true_expr = Arg(rlz.any)
false_null_expr = Arg(rlz.any)
def output_type(self):
return rlz.shape_like(self.bool_expr, self.true_expr.type())
def _validate_join_tables(left, right):
if not isinstance(left, ir.TableExpr):
raise TypeError(
'Can only join table expressions, got {} for '
'left table'.format(type(left).__name__)
)
if not isinstance(right, ir.TableExpr):
raise TypeError(
'Can only join table expressions, got {} for '
'right table'.format(type(right).__name__)
)
def _make_distinct_join_predicates(left, right, predicates):
if left.equals(right):
right = right.view()
predicates = _clean_join_predicates(left, right, predicates)
return left, right, predicates
def _clean_join_predicates(left, right, predicates):
import ibis.expr.analysis as L
result = []
if not isinstance(predicates, (list, tuple)):
predicates = [predicates]
for pred in predicates:
if isinstance(pred, tuple):
if len(pred) != 2:
raise com.ExpressionError('Join key tuple must be ' 'length 2')
lk, rk = pred
lk = left._ensure_expr(lk)
rk = right._ensure_expr(rk)
pred = lk == rk
elif isinstance(pred, str):
pred = left[pred] == right[pred]
elif not isinstance(pred, ir.Expr):
raise NotImplementedError
if not isinstance(pred, ir.BooleanColumn):
raise com.ExpressionError('Join predicate must be comparison')
preds = L.flatten_predicate(pred)
result.extend(preds)
_validate_join_predicates(left, right, result)
return result
def _validate_join_predicates(left, right, predicates):
from ibis.expr.analysis import fully_originate_from
for predicate in predicates:
if not fully_originate_from(predicate, [left, right]):
raise com.RelationError(
'The expression {!r} does not fully '
'originate from dependencies of the table '
'expression.'.format(predicate)
)
class Join(TableNode):
left = Arg(rlz.noop)
right = Arg(rlz.noop)
predicates = Arg(rlz.noop)
def __init__(self, left, right, predicates):
_validate_join_tables(left, right)
left, right, predicates = _make_distinct_join_predicates(
left, right, predicates
)
super().__init__(left, right, predicates)
def _get_schema(self):
left = self.left
right = self.right
if not left._is_materialized():
left = left.materialize()
if not right._is_materialized():
right = right.materialize()
sleft = left.schema()
sright = right.schema()
overlap = set(sleft.names) & set(sright.names)
if overlap:
raise com.RelationError(
'Joined tables have overlapping names: %s' % str(list(overlap))
)
return sleft.append(sright)
def has_schema(self):
return False
def root_tables(self):
if util.all_of([self.left.op(), self.right.op()], (Join, Selection)):
return [self.left.op(), self.right.op()]
else:
return distinct_roots(self.left, self.right)
class InnerJoin(Join):
pass
class LeftJoin(Join):
pass
class RightJoin(Join):
pass
class OuterJoin(Join):
pass
class AnyInnerJoin(Join):
pass
class AnyLeftJoin(Join):
pass
class LeftSemiJoin(Join):
def _get_schema(self):
return self.left.schema()
class LeftAntiJoin(Join):
def _get_schema(self):
return self.left.schema()
class MaterializedJoin(TableNode, HasSchema):
join = Arg(ir.TableExpr)
def _validate(self):
assert isinstance(self.join.op(), Join)
assert self.schema
@cached_property
def schema(self):
return self.join.op()._get_schema()
def root_tables(self):
return self.join.op().root_tables()
def blocks(self):
return True
class CrossJoin(InnerJoin):
def __init__(self, *args, **kwargs):
if 'prefixes' in kwargs:
raise NotImplementedError
if len(args) < 2:
raise com.IbisInputError('Must pass at least 2 tables')
left = args[0]
right = args[1]
for t in args[2:]:
right = right.cross_join(t)
InnerJoin.__init__(self, left, right, [])
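# CrossJoin over more than two tables folds the extra tables into the right
# operand (right becomes right.cross_join(t) for each remaining table), so
# the outermost node is always a two-sided InnerJoin with no predicates.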
class AsOfJoin(Join):
left = Arg(rlz.noop)
right = Arg(rlz.noop)
predicates = Arg(rlz.noop)
by = Arg(rlz.noop, default=None)
tolerance = Arg(rlz.interval(), default=None)
def __init__(self, left, right, predicates, by, tolerance):
super().__init__(left, right, predicates)
self.by = _clean_join_predicates(self.left, self.right, by)
self.tolerance = tolerance
self._validate_args(['by', 'tolerance'])
def _validate_args(self, args: List[str]):
for arg in args:
argument = self.signature[arg]
value = argument.validate(getattr(self, arg))
setattr(self, arg, value)
class SetOp(TableNode, HasSchema):
left = Arg(rlz.noop)
right = Arg(rlz.noop)
def _validate(self):
if not self.left.schema().equals(self.right.schema()):
raise com.RelationError(
'Table schemas must be equal for set operations'
)
@cached_property
def schema(self):
return self.left.schema()
def blocks(self):
return True
class Union(SetOp):
distinct = Arg(rlz.validator(bool), default=False)
class Intersection(SetOp):
pass
class Difference(SetOp):
pass
class Limit(TableNode):
table = Arg(ir.TableExpr)
n = Arg(rlz.validator(int))
offset = Arg(rlz.validator(int))
def blocks(self):
return True
@property
def schema(self):
return self.table.schema()
def has_schema(self):
return self.table.op().has_schema()
def root_tables(self):
return [self]
def to_sort_key(table, key):
if isinstance(key, DeferredSortKey):
key = key.resolve(table)
if isinstance(key, ir.SortExpr):
return key
if isinstance(key, (tuple, list)):
key, sort_order = key
else:
sort_order = True
if not isinstance(key, ir.Expr):
key = table._ensure_expr(key)
if isinstance(key, (ir.SortExpr, DeferredSortKey)):
return to_sort_key(table, key)
if isinstance(sort_order, str):
if sort_order.lower() in ('desc', 'descending'):
sort_order = False
elif not isinstance(sort_order, bool):
sort_order = bool(sort_order)
return SortKey(key, ascending=sort_order).to_expr()
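# to_sort_key accepts several spellings for a sort key: an existing SortExpr,
# a DeferredSortKey, a bare column name or expression (ascending by default),
# or a (key, order) pair where order may be a bool or a string such as
# 'desc'/'descending'; everything is normalized to a SortKey expression.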
class SortKey(Node):
expr = Arg(rlz.column(rlz.any))
ascending = Arg(rlz.validator(bool), default=True)
def __repr__(self):
rows = [
'Sort key:',
' ascending: {0!s}'.format(self.ascending),
util.indent(_safe_repr(self.expr), 2),
]
return '\n'.join(rows)
def output_type(self):
return ir.SortExpr
def root_tables(self):
return self.expr.op().root_tables()
def equals(self, other, cache=None):
return (
isinstance(other, SortKey)
and self.expr.equals(other.expr, cache=cache)
and self.ascending == other.ascending
)
def resolve_name(self):
return self.expr.get_name()
class DeferredSortKey:
def __init__(self, what, ascending=True):
self.what = what
self.ascending = ascending
def resolve(self, parent):
what = parent._ensure_expr(self.what)
return SortKey(what, ascending=self.ascending).to_expr()
class SelfReference(TableNode, HasSchema):
table = Arg(ir.TableExpr)
@cached_property
def schema(self):
return self.table.schema()
def root_tables(self):
return [self]
def blocks(self):
return True
class Selection(TableNode, HasSchema):
table = Arg(ir.TableExpr)
selections = Arg(rlz.noop, default=None)
predicates = Arg(rlz.noop, default=None)
sort_keys = Arg(rlz.noop, default=None)
def __init__(
self, table, selections=None, predicates=None, sort_keys=None
):
import ibis.expr.analysis as L
selections = util.promote_list(
selections if selections is not None else []
)
projections = []
for selection in selections:
if isinstance(selection, str):
projection = table[selection]
else:
projection = selection
projections.append(projection)
sort_keys = [
to_sort_key(table, k)
for k in util.promote_list(
sort_keys if sort_keys is not None else []
)
]
predicates = list(
toolz.concat(
map(
L.flatten_predicate,
predicates if predicates is not None else [],
)
)
)
super().__init__(
table=table,
selections=projections,
predicates=predicates,
sort_keys=sort_keys,
)
def _validate(self):
from ibis.expr.analysis import FilterValidator
dependent_exprs = self.selections + self.sort_keys
self.table._assert_valid(dependent_exprs)
validator = FilterValidator([self.table])
validator.validate_all(self.predicates)
assert self.schema
@cached_property
def schema(self):
if not self.selections:
return self.table.schema()
types = []
names = []
for projection in self.selections:
if isinstance(projection, ir.DestructColumn):
struct_type = projection.type()
for name in struct_type.names:
names.append(name)
types.append(struct_type[name])
elif isinstance(projection, ir.ValueExpr):
names.append(projection.get_name())
types.append(projection.type())
elif isinstance(projection, ir.TableExpr):
schema = projection.schema()
names.extend(schema.names)
types.extend(schema.types)
return Schema(names, types)
def blocks(self):
return bool(self.selections)
def substitute_table(self, table_expr):
return Selection(table_expr, self.selections)
def root_tables(self):
return [self]
def can_add_filters(self, wrapped_expr, predicates):
pass
@staticmethod
def empty_or_equal(lefts, rights):
return not lefts or not rights or all_equal(lefts, rights)
def compatible_with(self, other):
if self.equals(other):
return True
if not isinstance(other, type(self)):
return False
return self.table.equals(other.table) and (
self.empty_or_equal(self.predicates, other.predicates)
and self.empty_or_equal(self.selections, other.selections)
and self.empty_or_equal(self.sort_keys, other.sort_keys)
)
def aggregate(self, this, metrics, by=None, having=None):
if len(self.selections) > 0:
return Aggregation(this, metrics, by=by, having=having)
else:
helper = AggregateSelection(this, metrics, by, having)
return helper.get_result()
def sort_by(self, expr, sort_exprs):
sort_exprs = util.promote_list(sort_exprs)
if not self.blocks():
resolved_keys = _maybe_convert_sort_keys(self.table, sort_exprs)
if resolved_keys and self.table._is_valid(resolved_keys):
return Selection(
self.table,
self.selections,
predicates=self.predicates,
sort_keys=self.sort_keys + resolved_keys,
)
return Selection(expr, [], sort_keys=sort_exprs)
class AggregateSelection:
def __init__(self, parent, metrics, by, having):
self.parent = parent
self.op = parent.op()
self.metrics = metrics
self.by = by
self.having = having
def get_result(self):
if self.op.blocks():
return self._plain_subquery()
else:
return self._attempt_pushdown()
def _plain_subquery(self):
return Aggregation(
self.parent, self.metrics, by=self.by, having=self.having
)
def _attempt_pushdown(self):
metrics_valid, lowered_metrics = self._pushdown_exprs(self.metrics)
by_valid, lowered_by = self._pushdown_exprs(self.by)
having_valid, lowered_having = self._pushdown_exprs(
self.having or None
)
if metrics_valid and by_valid and having_valid:
return Aggregation(
self.op.table,
lowered_metrics,
by=lowered_by,
having=lowered_having,
predicates=self.op.predicates,
sort_keys=self.op.sort_keys,
)
else:
return self._plain_subquery()
def _pushdown_exprs(self, exprs):
import ibis.expr.analysis as L
if exprs is None:
return True, []
resolved = self.op.table._resolve(exprs)
subbed_exprs = []
valid = False
if resolved:
for x in util.promote_list(resolved):
subbed = L.sub_for(x, [(self.parent, self.op.table)])
subbed_exprs.append(subbed)
valid = self.op.table._is_valid(subbed_exprs)
else:
valid = False
return valid, subbed_exprs
def _maybe_convert_sort_keys(table, exprs):
try:
return [to_sort_key(table, k) for k in util.promote_list(exprs)]
except com.IbisError:
return None
class Aggregation(TableNode, HasSchema):
table = Arg(ir.TableExpr)
metrics = Arg(rlz.noop)
by = Arg(rlz.noop)
having = Arg(rlz.noop, default=None)
predicates = Arg(rlz.noop, default=None)
sort_keys = Arg(rlz.noop, default=None)
def __init__(
self,
table,
metrics,
by=None,
having=None,
predicates=None,
sort_keys=None,
):
metrics = self._rewrite_exprs(table, metrics)
by = [] if by is None else by
by = table._resolve(by)
having = [] if having is None else having
predicates = [] if predicates is None else predicates
sort_keys = [] if not by or sort_keys is None else sort_keys
sort_keys = [
to_sort_key(table, k) for k in util.promote_list(sort_keys)
]
by = self._rewrite_exprs(table, by)
having = self._rewrite_exprs(table, having)
predicates = self._rewrite_exprs(table, predicates)
sort_keys = self._rewrite_exprs(table, sort_keys)
super().__init__(
table=table,
metrics=metrics,
by=by,
having=having,
predicates=predicates,
sort_keys=sort_keys,
)
def _validate(self):
from ibis.expr.analysis import FilterValidator, is_reduction
for expr in self.metrics:
if not isinstance(expr, ir.ScalarExpr) or not is_reduction(expr):
raise TypeError(
'Passed a non-aggregate expression: %s' % _safe_repr(expr)
)
for expr in self.having:
if not isinstance(expr, ir.BooleanScalar):
raise com.ExpressionError(
'Having clause must be boolean '
'expression, was: {0!s}'.format(_safe_repr(expr))
)
all_exprs = self.metrics + self.by + self.having + self.sort_keys
self.table._assert_valid(all_exprs)
validator = FilterValidator([self.table])
validator.validate_all(self.predicates)
assert self.schema
def _rewrite_exprs(self, table, what):
what = util.promote_list(what)
all_exprs = []
for expr in what:
if isinstance(expr, ir.ExprList):
all_exprs.extend(expr.exprs())
else:
bound_expr = ir.bind_expr(table, expr)
all_exprs.append(bound_expr)
return all_exprs
def blocks(self):
return True
def substitute_table(self, table_expr):
return Aggregation(
table_expr, self.metrics, by=self.by, having=self.having
)
@cached_property
def schema(self):
names = []
types = []
for e in self.by + self.metrics:
if isinstance(e, ir.DestructValue):
struct_type = e.type()
for name in struct_type.names:
names.append(name)
types.append(struct_type[name])
else:
names.append(e.get_name())
types.append(e.type())
return Schema(names, types)
def sort_by(self, expr, sort_exprs):
sort_exprs = util.promote_list(sort_exprs)
resolved_keys = _maybe_convert_sort_keys(self.table, sort_exprs)
if resolved_keys and self.table._is_valid(resolved_keys):
return Aggregation(
self.table,
self.metrics,
by=self.by,
having=self.having,
predicates=self.predicates,
sort_keys=self.sort_keys + resolved_keys,
)
return Selection(expr, [], sort_keys=sort_exprs)
class NumericBinaryOp(BinaryOp):
left = Arg(rlz.numeric)
right = Arg(rlz.numeric)
class Add(NumericBinaryOp):
output_type = rlz.numeric_like('args', operator.add)
class Multiply(NumericBinaryOp):
output_type = rlz.numeric_like('args', operator.mul)
class Power(NumericBinaryOp):
def output_type(self):
if util.all_of(self.args, ir.IntegerValue):
return rlz.shape_like(self.args, dt.float64)
else:
return rlz.shape_like(self.args)
class Subtract(NumericBinaryOp):
output_type = rlz.numeric_like('args', operator.sub)
class Divide(NumericBinaryOp):
output_type = rlz.shape_like('args', dt.float64)
class FloorDivide(Divide):
output_type = rlz.shape_like('args', dt.int64)
class LogicalBinaryOp(BinaryOp):
left = Arg(rlz.boolean)
right = Arg(rlz.boolean)
output_type = rlz.shape_like('args', dt.boolean)
class Not(UnaryOp):
arg = Arg(rlz.boolean)
output_type = rlz.shape_like('arg', dt.boolean)
class Modulus(NumericBinaryOp):
output_type = rlz.numeric_like('args', operator.mod)
class And(LogicalBinaryOp):
pass
class Or(LogicalBinaryOp):
pass
class Xor(LogicalBinaryOp):
pass
class Comparison(BinaryOp, BooleanValueOp):
left = Arg(rlz.any)
right = Arg(rlz.any)
def __init__(self, left, right):
super().__init__(*self._maybe_cast_args(left, right))
def _maybe_cast_args(self, left, right):
with suppress(com.IbisTypeError):
return left, rlz.cast(right, left)
with suppress(com.IbisTypeError):
return rlz.cast(left, right), right
return left, right
def output_type(self):
if not rlz.comparable(self.left, self.right):
raise TypeError(
'Arguments with datatype {} and {} are '
'not comparable'.format(self.left.type(), self.right.type())
)
return rlz.shape_like(self.args, dt.boolean)
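# Comparisons try to reconcile operand types up front: the right operand is
# first cast to the left operand's type, then the reverse; if neither
# implicit cast applies, the raw operands are kept and output_type raises a
# TypeError for incomparable types. The result is always boolean, shaped like
# the operands.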
class Equals(Comparison):
pass
class NotEquals(Comparison):
pass
class GreaterEqual(Comparison):
pass
class Greater(Comparison):
pass
class LessEqual(Comparison):
pass
class Less(Comparison):
pass
class IdenticalTo(Comparison):
pass
class Between(ValueOp, BooleanValueOp):
arg = Arg(rlz.any)
lower_bound = Arg(rlz.any)
upper_bound = Arg(rlz.any)
def output_type(self):
arg, lower, upper = self.args
if not (rlz.comparable(arg, lower) and rlz.comparable(arg, upper)):
raise TypeError('Arguments are not comparable')
return rlz.shape_like(self.args, dt.boolean)
class BetweenTime(Between):
arg = Arg(rlz.one_of([rlz.timestamp, rlz.time]))
lower_bound = Arg(rlz.one_of([rlz.time, rlz.string]))
upper_bound = Arg(rlz.one_of([rlz.time, rlz.string]))
class Contains(ValueOp, BooleanValueOp):
value = Arg(rlz.any)
options = Arg(
rlz.one_of(
[
rlz.list_of(rlz.any),
rlz.set_,
rlz.column(rlz.any),
rlz.array_of(rlz.any),
]
)
)
def __init__(self, value, options):
if not isinstance(options, ir.Expr):
if util.any_of(options, ir.Expr):
options = ir.sequence(options)
else:
options = frozenset(options)
super().__init__(value, options)
def output_type(self):
all_args = [self.value]
if isinstance(self.options, ir.ListExpr):
all_args += self.options
else:
all_args += [self.options]
return rlz.shape_like(all_args, dt.boolean)
class NotContains(Contains):
pass
class ReplaceValues(ValueOp):
pass
class SummaryFilter(ValueOp):
expr = Arg(rlz.noop)
def output_type(self):
return dt.boolean.column_type()
class TopK(ValueOp):
arg = Arg(rlz.noop)
k = Arg(int)
by = Arg(rlz.noop)
def __init__(self, arg, k, by=None):
if by is None:
by = arg.count()
if not isinstance(arg, ir.ColumnExpr):
raise TypeError(arg)
if not isinstance(k, int) or k < 0:
raise ValueError('k must be positive integer, was: {0}'.format(k))
super().__init__(arg, k, by)
def output_type(self):
return ir.TopKExpr
def blocks(self):
return True
class Constant(ValueOp):
pass
class TimestampNow(Constant):
def output_type(self):
return dt.timestamp.scalar_type()
class RandomScalar(Constant):
def output_type(self):
return dt.float64.scalar_type()
class E(Constant):
def output_type(self):
return functools.partial(ir.FloatingScalar, dtype=dt.float64)
class Pi(Constant):
def output_type(self):
return functools.partial(ir.FloatingScalar, dtype=dt.float64)
class TemporalUnaryOp(UnaryOp):
arg = Arg(rlz.temporal)
class TimestampUnaryOp(UnaryOp):
arg = Arg(rlz.timestamp)
_date_units = {
'Y': 'Y',
'y': 'Y',
'year': 'Y',
'YEAR': 'Y',
'YYYY': 'Y',
'SYYYY': 'Y',
'YYY': 'Y',
'YY': 'Y',
'Q': 'Q',
'q': 'Q',
'quarter': 'Q',
'QUARTER': 'Q',
'M': 'M',
'month': 'M',
'MONTH': 'M',
'w': 'W',
'W': 'W',
'week': 'W',
'WEEK': 'W',
'd': 'D',
'D': 'D',
'J': 'D',
'day': 'D',
'DAY': 'D',
}
_time_units = {
'h': 'h',
'H': 'h',
'HH24': 'h',
'hour': 'h',
'HOUR': 'h',
'm': 'm',
'MI': 'm',
'minute': 'm',
'MINUTE': 'm',
's': 's',
'second': 's',
'SECOND': 's',
'ms': 'ms',
'millisecond': 'ms',
'MILLISECOND': 'ms',
'us': 'us',
    'microsecond': 'us',
    'MICROSECOND': 'us',
'ns': 'ns',
'nanosecond': 'ns',
'NANOSECOND': 'ns',
}
_timestamp_units = toolz.merge(_date_units, _time_units)
class TimestampTruncate(ValueOp):
arg = Arg(rlz.timestamp)
unit = Arg(rlz.isin(_timestamp_units))
output_type = rlz.shape_like('arg', dt.timestamp)
class DateTruncate(ValueOp):
arg = Arg(rlz.date)
unit = Arg(rlz.isin(_date_units))
output_type = rlz.shape_like('arg', dt.date)
class TimeTruncate(ValueOp):
arg = Arg(rlz.time)
unit = Arg(rlz.isin(_time_units))
output_type = rlz.shape_like('arg', dt.time)
class Strftime(ValueOp):
arg = Arg(rlz.temporal)
format_str = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class StringToTimestamp(ValueOp):
arg = Arg(rlz.string)
format_str = Arg(rlz.string)
timezone = Arg(rlz.string, default=None)
output_type = rlz.shape_like('arg', dt.Timestamp(timezone='UTC'))
class ExtractTemporalField(TemporalUnaryOp):
output_type = rlz.shape_like('arg', dt.int32)
ExtractTimestampField = ExtractTemporalField
class ExtractDateField(ExtractTemporalField):
arg = Arg(rlz.one_of([rlz.date, rlz.timestamp]))
class ExtractTimeField(ExtractTemporalField):
arg = Arg(rlz.one_of([rlz.time, rlz.timestamp]))
class ExtractYear(ExtractDateField):
pass
class ExtractMonth(ExtractDateField):
pass
class ExtractDay(ExtractDateField):
pass
class ExtractDayOfYear(ExtractDateField):
pass
class ExtractQuarter(ExtractDateField):
pass
class ExtractEpochSeconds(ExtractDateField):
pass
class ExtractWeekOfYear(ExtractDateField):
pass
class ExtractHour(ExtractTimeField):
pass
class ExtractMinute(ExtractTimeField):
pass
class ExtractSecond(ExtractTimeField):
pass
class ExtractMillisecond(ExtractTimeField):
pass
class DayOfWeekIndex(UnaryOp):
arg = Arg(rlz.one_of([rlz.date, rlz.timestamp]))
output_type = rlz.shape_like('arg', dt.int16)
class DayOfWeekName(UnaryOp):
arg = Arg(rlz.one_of([rlz.date, rlz.timestamp]))
output_type = rlz.shape_like('arg', dt.string)
class DayOfWeekNode(Node):
arg = Arg(rlz.one_of([rlz.date, rlz.timestamp]))
def output_type(self):
return ir.DayOfWeek
class Time(UnaryOp):
output_type = rlz.shape_like('arg', dt.time)
class Date(UnaryOp):
output_type = rlz.shape_like('arg', dt.date)
class TimestampFromUNIX(ValueOp):
arg = Arg(rlz.any)
unit = Arg(rlz.isin({'s', 'ms', 'us', 'ns'}))
output_type = rlz.shape_like('arg', dt.timestamp)
class DecimalUnaryOp(UnaryOp):
arg = Arg(rlz.decimal)
class DecimalPrecision(DecimalUnaryOp):
output_type = rlz.shape_like('arg', dt.int32)
class DecimalScale(UnaryOp):
output_type = rlz.shape_like('arg', dt.int32)
class Hash(ValueOp):
arg = Arg(rlz.any)
how = Arg(rlz.isin({'fnv', 'farm_fingerprint'}))
output_type = rlz.shape_like('arg', dt.int64)
class HashBytes(ValueOp):
arg = Arg(rlz.one_of({rlz.value(dt.string), rlz.value(dt.binary)}))
how = Arg(rlz.isin({'md5', 'sha1', 'sha256', 'sha512'}))
output_type = rlz.shape_like('arg', dt.binary)
class DateAdd(BinaryOp):
left = Arg(rlz.date)
right = Arg(rlz.interval(units={'Y', 'Q', 'M', 'W', 'D'}))
output_type = rlz.shape_like('left')
class DateSub(BinaryOp):
left = Arg(rlz.date)
right = Arg(rlz.interval(units={'Y', 'Q', 'M', 'W', 'D'}))
output_type = rlz.shape_like('left')
class DateDiff(BinaryOp):
left = Arg(rlz.date)
right = Arg(rlz.date)
output_type = rlz.shape_like('left', dt.Interval('D'))
class TimeAdd(BinaryOp):
left = Arg(rlz.time)
right = Arg(rlz.interval(units={'h', 'm', 's', 'ms', 'us', 'ns'}))
output_type = rlz.shape_like('left')
class TimeSub(BinaryOp):
left = Arg(rlz.time)
right = Arg(rlz.interval(units={'h', 'm', 's', 'ms', 'us', 'ns'}))
output_type = rlz.shape_like('left')
class TimeDiff(BinaryOp):
left = Arg(rlz.time)
right = Arg(rlz.time)
output_type = rlz.shape_like('left', dt.Interval('s'))
class TimestampAdd(BinaryOp):
left = Arg(rlz.timestamp)
right = Arg(
rlz.interval(
units={'Y', 'Q', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns'}
)
)
output_type = rlz.shape_like('left')
class TimestampSub(BinaryOp):
left = Arg(rlz.timestamp)
right = Arg(
rlz.interval(
units={'Y', 'Q', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns'}
)
)
output_type = rlz.shape_like('left')
class TimestampDiff(BinaryOp):
left = Arg(rlz.timestamp)
right = Arg(rlz.timestamp)
output_type = rlz.shape_like('left', dt.Interval('s'))
class IntervalBinaryOp(BinaryOp):
def output_type(self):
args = [
arg.cast(arg.type().value_type)
if isinstance(arg.type(), dt.Interval)
else arg
for arg in self.args
]
expr = rlz.numeric_like(args, self.__class__.op)(self)
left_dtype = self.left.type()
dtype_type = type(left_dtype)
additional_args = {
attr: getattr(left_dtype, attr)
for attr in dtype_type.__slots__
if attr not in {'unit', 'value_type'}
}
dtype = dtype_type(left_dtype.unit, expr.type(), **additional_args)
return rlz.shape_like(self.args, dtype=dtype)
class IntervalAdd(IntervalBinaryOp):
left = Arg(rlz.interval)
right = Arg(rlz.interval)
op = operator.add
class IntervalSubtract(IntervalBinaryOp):
left = Arg(rlz.interval)
right = Arg(rlz.interval)
op = operator.sub
class IntervalMultiply(IntervalBinaryOp):
left = Arg(rlz.interval)
right = Arg(rlz.numeric)
op = operator.mul
class IntervalFloorDivide(IntervalBinaryOp):
left = Arg(rlz.interval)
right = Arg(rlz.numeric)
op = operator.floordiv
class IntervalFromInteger(ValueOp):
arg = Arg(rlz.integer)
unit = Arg(
rlz.isin({'Y', 'Q', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns'})
)
@property
def resolution(self):
return dt.Interval(self.unit).resolution
def output_type(self):
dtype = dt.Interval(self.unit, self.arg.type())
return rlz.shape_like(self.arg, dtype=dtype)
class ArrayColumn(ValueOp):
cols = Arg(rlz.list_of(rlz.column(rlz.any), min_length=1))
def _validate(self):
if len({col.type() for col in self.cols}) > 1:
raise com.IbisTypeError(
f'The types of all input columns must match exactly in a '
f'{type(self).__name__} operation.'
)
def output_type(self):
first_dtype = self.cols[0].type()
return dt.Array(first_dtype).column_type()
class ArrayLength(UnaryOp):
arg = Arg(rlz.array)
output_type = rlz.shape_like('arg', dt.int64)
class ArraySlice(ValueOp):
arg = Arg(rlz.array)
start = Arg(rlz.integer)
stop = Arg(rlz.integer, default=None)
output_type = rlz.typeof('arg')
class ArrayIndex(ValueOp):
arg = Arg(rlz.array)
index = Arg(rlz.integer)
def output_type(self):
value_dtype = self.arg.type().value_type
return rlz.shape_like(self.arg, value_dtype)
class ArrayConcat(ValueOp):
left = Arg(rlz.array)
right = Arg(rlz.array)
output_type = rlz.shape_like('left')
def _validate(self):
left_dtype, right_dtype = self.left.type(), self.right.type()
if left_dtype != right_dtype:
raise com.IbisTypeError(
'Array types must match exactly in a {} operation. '
'Left type {} != Right type {}'.format(
type(self).__name__, left_dtype, right_dtype
)
)
class ArrayRepeat(ValueOp):
arg = Arg(rlz.array)
times = Arg(rlz.integer)
output_type = rlz.typeof('arg')
class ArrayCollect(Reduction):
arg = Arg(rlz.column(rlz.any))
def output_type(self):
dtype = dt.Array(self.arg.type())
return dtype.scalar_type()
class MapLength(ValueOp):
arg = Arg(rlz.mapping)
output_type = rlz.shape_like('arg', dt.int64)
class MapValueForKey(ValueOp):
arg = Arg(rlz.mapping)
key = Arg(rlz.one_of([rlz.string, rlz.integer]))
def output_type(self):
return rlz.shape_like(tuple(self.args), self.arg.type().value_type)
class MapValueOrDefaultForKey(ValueOp):
arg = Arg(rlz.mapping)
key = Arg(rlz.one_of([rlz.string, rlz.integer]))
default = Arg(rlz.any)
def output_type(self):
arg = self.arg
default = self.default
map_type = arg.type()
value_type = map_type.value_type
default_type = default.type()
if default is not None and not dt.same_kind(default_type, value_type):
raise com.IbisTypeError(
"Default value\n{}\nof type {} cannot be cast to map's value "
"type {}".format(default, default_type, value_type)
)
result_type = dt.highest_precedence((default_type, value_type))
return rlz.shape_like(tuple(self.args), result_type)
class MapKeys(ValueOp):
arg = Arg(rlz.mapping)
def output_type(self):
arg = self.arg
return rlz.shape_like(arg, dt.Array(arg.type().key_type))
class MapValues(ValueOp):
arg = Arg(rlz.mapping)
def output_type(self):
arg = self.arg
return rlz.shape_like(arg, dt.Array(arg.type().value_type))
class MapConcat(ValueOp):
left = Arg(rlz.mapping)
right = Arg(rlz.mapping)
output_type = rlz.typeof('left')
class StructField(ValueOp):
arg = Arg(rlz.struct)
field = Arg(str)
def output_type(self):
struct_dtype = self.arg.type()
value_dtype = struct_dtype[self.field]
return rlz.shape_like(self.arg, value_dtype)
class Literal(ValueOp):
value = Arg(rlz.noop)
dtype = Arg(dt.dtype)
def __repr__(self):
return '{}({})'.format(
type(self).__name__, ', '.join(map(repr, self.args))
)
def equals(self, other, cache=None):
# Check types
if not (
isinstance(other, Literal)
and isinstance(other.value, type(self.value))
and self.dtype == other.dtype
):
return False
# Check values
if isinstance(self.value, np.ndarray):
return np.array_equal(self.value, other.value)
else:
return self.value == other.value
def output_type(self):
return self.dtype.scalar_type()
def root_tables(self):
return []
def __hash__(self) -> int:
return hash(self.dtype._literal_value_hash_key(self.value))
class NullLiteral(Literal):
value = Arg(type(None), default=None)
dtype = Arg(dt.Null, default=dt.null)
class ScalarParameter(ValueOp):
_counter = itertools.count()
dtype = Arg(dt.dtype)
counter = Arg(int, default=lambda: next(ScalarParameter._counter))
def resolve_name(self):
return 'param_{:d}'.format(self.counter)
def __repr__(self):
return '{}(type={})'.format(type(self).__name__, self.dtype)
def __hash__(self):
return hash((self.dtype, self.counter))
def output_type(self):
return self.dtype.scalar_type()
def equals(self, other, cache=None):
return (
isinstance(other, ScalarParameter)
and self.counter == other.counter
and self.dtype.equals(other.dtype, cache=cache)
)
@property
def inputs(self):
return ()
def root_tables(self):
return []
class ExpressionList(Node):
exprs = Arg(rlz.noop)
def __init__(self, values):
super().__init__(list(map(rlz.any, values)))
@property
def inputs(self):
return (tuple(self.exprs),)
def root_tables(self):
return distinct_roots(self.exprs)
def output_type(self):
return ir.ExprList
class ValueList(ValueOp):
values = Arg(rlz.noop)
display_argnames = False # disable showing argnames in repr
def __init__(self, values):
super().__init__(tuple(map(rlz.any, values)))
def output_type(self):
dtype = rlz.highest_precedence_dtype(self.values)
return functools.partial(ir.ListExpr, dtype=dtype)
def root_tables(self):
return distinct_roots(*self.values)
# ----------------------------------------------------------------------
# GeoSpatial operations
class GeoSpatialBinOp(BinaryOp):
left = Arg(rlz.geospatial)
right = Arg(rlz.geospatial)
class GeoSpatialUnOp(UnaryOp):
arg = Arg(rlz.geospatial)
class GeoDistance(GeoSpatialBinOp):
output_type = rlz.shape_like('args', dt.float64)
class GeoContains(GeoSpatialBinOp):
output_type = rlz.shape_like('args', dt.boolean)
class GeoContainsProperly(GeoSpatialBinOp):
output_type = rlz.shape_like('args', dt.boolean)
class GeoCovers(GeoSpatialBinOp):
output_type = rlz.shape_like('args', dt.boolean)
class GeoCoveredBy(GeoSpatialBinOp):
output_type = rlz.shape_like('args', dt.boolean)
class GeoCrosses(GeoSpatialBinOp):
output_type = rlz.shape_like('args', dt.boolean)
class GeoDisjoint(GeoSpatialBinOp):
output_type = rlz.shape_like('args', dt.boolean)
class GeoEquals(GeoSpatialBinOp):
output_type = rlz.shape_like('args', dt.boolean)
class GeoGeometryN(GeoSpatialUnOp):
n = Arg(rlz.integer)
output_type = rlz.shape_like('args', dt.geometry)
class GeoGeometryType(GeoSpatialUnOp):
output_type = rlz.shape_like('args', dt.string)
class GeoIntersects(GeoSpatialBinOp):
output_type = rlz.shape_like('args', dt.boolean)
class GeoIsValid(GeoSpatialUnOp):
output_type = rlz.shape_like('args', dt.boolean)
class GeoLineLocatePoint(GeoSpatialBinOp):
left = Arg(rlz.linestring)
right = Arg(rlz.point)
output_type = rlz.shape_like('args', dt.halffloat)
class GeoLineMerge(GeoSpatialUnOp):
output_type = rlz.shape_like('args', dt.geometry)
class GeoLineSubstring(GeoSpatialUnOp):
arg = Arg(rlz.linestring)
start = Arg(rlz.floating)
end = Arg(rlz.floating)
output_type = rlz.shape_like('args', dt.linestring)
class GeoOrderingEquals(GeoSpatialBinOp):
output_type = rlz.shape_like('args', dt.boolean)
class GeoOverlaps(GeoSpatialBinOp):
output_type = rlz.shape_like('args', dt.boolean)
class GeoTouches(GeoSpatialBinOp):
output_type = rlz.shape_like('args', dt.boolean)
class GeoUnaryUnion(Reduction):
arg = Arg(rlz.column(rlz.geospatial))
def output_type(self):
return dt.geometry.scalar_type()
class GeoUnion(GeoSpatialBinOp):
output_type = rlz.shape_like('args', dt.geometry)
class GeoArea(GeoSpatialUnOp):
output_type = rlz.shape_like('args', dt.float64)
class GeoPerimeter(GeoSpatialUnOp):
output_type = rlz.shape_like('args', dt.float64)
class GeoLength(GeoSpatialUnOp):
output_type = rlz.shape_like('args', dt.float64)
class GeoMaxDistance(GeoSpatialBinOp):
output_type = rlz.shape_like('args', dt.float64)
class GeoX(GeoSpatialUnOp):
output_type = rlz.shape_like('args', dt.float64)
class GeoY(GeoSpatialUnOp):
output_type = rlz.shape_like('args', dt.float64)
class GeoXMin(GeoSpatialUnOp):
output_type = rlz.shape_like('args', dt.float64)
class GeoXMax(GeoSpatialUnOp):
output_type = rlz.shape_like('args', dt.float64)
class GeoYMin(GeoSpatialUnOp):
output_type = rlz.shape_like('args', dt.float64)
class GeoYMax(GeoSpatialUnOp):
output_type = rlz.shape_like('args', dt.float64)
class GeoStartPoint(GeoSpatialUnOp):
output_type = rlz.shape_like('arg', dt.point)
class GeoEndPoint(GeoSpatialUnOp):
output_type = rlz.shape_like('arg', dt.point)
class GeoPoint(GeoSpatialBinOp):
left = Arg(rlz.numeric)
right = Arg(rlz.numeric)
output_type = rlz.shape_like('args', dt.point)
class GeoPointN(GeoSpatialUnOp):
n = Arg(rlz.integer)
output_type = rlz.shape_like('args', dt.point)
class GeoNPoints(GeoSpatialUnOp):
output_type = rlz.shape_like('args', dt.int64)
class GeoNRings(GeoSpatialUnOp):
output_type = rlz.shape_like('args', dt.int64)
class GeoSRID(GeoSpatialUnOp):
output_type = rlz.shape_like('args', dt.int64)
class GeoSetSRID(GeoSpatialUnOp):
srid = Arg(rlz.integer)
output_type = rlz.shape_like('args', dt.geometry)
class GeoBuffer(GeoSpatialUnOp):
radius = Arg(rlz.floating)
output_type = rlz.shape_like('args', dt.geometry)
class GeoCentroid(GeoSpatialUnOp):
output_type = rlz.shape_like('arg', dt.point)
class GeoDFullyWithin(GeoSpatialBinOp):
distance = Arg(rlz.floating)
output_type = rlz.shape_like('args', dt.boolean)
class GeoDWithin(GeoSpatialBinOp):
distance = Arg(rlz.floating)
output_type = rlz.shape_like('args', dt.boolean)
class GeoEnvelope(GeoSpatialUnOp):
output_type = rlz.shape_like('arg', dt.polygon)
class GeoAzimuth(GeoSpatialBinOp):
left = Arg(rlz.point)
right = Arg(rlz.point)
output_type = rlz.shape_like('args', dt.float64)
class GeoWithin(GeoSpatialBinOp):
output_type = rlz.shape_like('args', dt.boolean)
class GeoIntersection(GeoSpatialBinOp):
output_type = rlz.shape_like('args', dt.geometry)
class GeoDifference(GeoSpatialBinOp):
output_type = rlz.shape_like('args', dt.geometry)
class GeoSimplify(GeoSpatialUnOp):
tolerance = Arg(rlz.floating)
preserve_collapsed = Arg(rlz.boolean)
output_type = rlz.shape_like('arg', dt.geometry)
class GeoTransform(GeoSpatialUnOp):
srid = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.geometry)
class GeoAsBinary(GeoSpatialUnOp):
output_type = rlz.shape_like('arg', dt.binary)
class GeoAsEWKB(GeoSpatialUnOp):
output_type = rlz.shape_like('arg', dt.binary)
class GeoAsEWKT(GeoSpatialUnOp):
output_type = rlz.shape_like('arg', dt.string)
class GeoAsText(GeoSpatialUnOp):
output_type = rlz.shape_like('arg', dt.string)
class ElementWiseVectorizedUDF(ValueOp):
func = Arg(callable)
func_args = Arg(tuple)
input_type = Arg(rlz.shape_like('func_args'))
_output_type = Arg(rlz.noop)
def __init__(self, func, args, input_type, output_type):
self.func = func
self.func_args = args
self.input_type = input_type
self._output_type = output_type
@property
def inputs(self):
return self.func_args
def output_type(self):
return self._output_type.column_type()
def root_tables(self):
return distinct_roots(*self.func_args)
class ReductionVectorizedUDF(Reduction):
func = Arg(callable)
func_args = Arg(tuple)
input_type = Arg(rlz.shape_like('func_args'))
_output_type = Arg(rlz.noop)
def __init__(self, func, args, input_type, output_type):
self.func = func
self.func_args = args
self.input_type = input_type
self._output_type = output_type
@property
def inputs(self):
return self.func_args
def output_type(self):
return self._output_type.scalar_type()
def root_tables(self):
return distinct_roots(*self.func_args)
class AnalyticVectorizedUDF(AnalyticOp):
func = Arg(callable)
func_args = Arg(tuple)
input_type = Arg(rlz.shape_like('func_args'))
_output_type = Arg(rlz.noop)
def __init__(self, func, args, input_type, output_type):
self.func = func
self.func_args = args
self.input_type = input_type
self._output_type = output_type
@property
def inputs(self):
return self.func_args
def output_type(self):
return self._output_type.column_type()
def root_tables(self):
return distinct_roots(*self.func_args)
class ExistsSubquery(Node):
foreign_table = Arg(rlz.noop)
predicates = Arg(rlz.noop)
def output_type(self):
return ir.ExistsExpr
class NotExistsSubquery(Node):
foreign_table = Arg(rlz.noop)
predicates = Arg(rlz.noop)
def output_type(self):
return ir.ExistsExpr
| true
| true
|
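The operation classes above (Comparison, Between, the truncate ops, and so on) are normally reached through ibis's public expression API rather than instantiated directly. The lines below are an illustrative sketch only: they assume the standard ibis table/expression methods and made-up column names, and are not part of the file above.

# Illustrative sketch, assuming the public ibis expression API; table and
# column names are invented for the example.
import ibis

t = ibis.table([('a', 'int64'), ('ts', 'timestamp')], name='t')

in_range = t.a.between(1, 10)   # builds a Between op, yielding a boolean column
eq_five = t.a == 5              # builds an Equals op via the Comparison machinery
monthly = t.ts.truncate('M')    # builds TimestampTruncate with unit 'M'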
79075800351ca9f9e53c3cd71af9d1e12f0995ef
| 996
|
py
|
Python
|
ex026.py
|
WesleyOlliver/CursoPython
|
8decdc4f38c25429994c0f9cb8f206e167f161d6
|
[
"MIT"
] | null | null | null |
ex026.py
|
WesleyOlliver/CursoPython
|
8decdc4f38c25429994c0f9cb8f206e167f161d6
|
[
"MIT"
] | null | null | null |
ex026.py
|
WesleyOlliver/CursoPython
|
8decdc4f38c25429994c0f9cb8f206e167f161d6
|
[
"MIT"
] | null | null | null |
cor = {'traço': '\033[35m', 'ex': '\033[4;31m', 'título': '\033[1;34m', 'str': '\033[1;33m', 'reset': '\033[m'}
print('{}-=-{}'.format(cor['traço'], cor['reset'])*18, '{} Exercício 026 {}'.format(cor['ex'], cor['reset']),
'{}-=-{}'.format(cor['traço'], cor['reset'])*18)
print('{}Faça um programa que leia uma frase pelo teclado e mostre quantas vezes aparece a letra "A", em que posição '
'ela aparece a \nprimeira vez e em que posição ela aparece a última vez.{}'.format(cor['título'], cor['reset']))
print('{}-=-{}'.format(cor['traço'], cor['reset'])*42)
frase = str(input('Digite uma frase: ')).strip().upper()
print('A letra "A" aparece {}{}{} vezes na frase.'.format(cor['str'], frase.count('A'), cor['reset']))
print('A primeira vez que a letra "A" apareceu foi na posição {}{}{}.'
.format(cor['str'], frase.find('A') + 1, cor['reset']))
print('A última vez que a letra "A" apareceu foi na posição {}{}{}.'
.format(cor['str'], frase.rfind('A') + 1, cor['reset']))
| 76.615385
| 118
| 0.593373
|
cor = {'traço': '\033[35m', 'ex': '\033[4;31m', 'título': '\033[1;34m', 'str': '\033[1;33m', 'reset': '\033[m'}
print('{}-=-{}'.format(cor['traço'], cor['reset'])*18, '{} Exercício 026 {}'.format(cor['ex'], cor['reset']),
'{}-=-{}'.format(cor['traço'], cor['reset'])*18)
print('{}Faça um programa que leia uma frase pelo teclado e mostre quantas vezes aparece a letra "A", em que posição '
'ela aparece a \nprimeira vez e em que posição ela aparece a última vez.{}'.format(cor['título'], cor['reset']))
print('{}-=-{}'.format(cor['traço'], cor['reset'])*42)
frase = str(input('Digite uma frase: ')).strip().upper()
print('A letra "A" aparece {}{}{} vezes na frase.'.format(cor['str'], frase.count('A'), cor['reset']))
print('A primeira vez que a letra "A" apareceu foi na posição {}{}{}.'
.format(cor['str'], frase.find('A') + 1, cor['reset']))
print('A última vez que a letra "A" apareceu foi na posição {}{}{}.'
.format(cor['str'], frase.rfind('A') + 1, cor['reset']))
| true
| true
|
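For readers who do not speak Portuguese: the exercise above reads a phrase and reports how many times the letter "A" occurs, plus the positions of its first and last occurrence. A minimal English rendering of the same logic follows (ANSI colour handling omitted); it is an illustration, not part of the original file.

# Hypothetical English rendering of ex026.py above, same string methods.
phrase = input('Type a phrase: ').strip().upper()
print('The letter "A" appears {} times in the phrase.'.format(phrase.count('A')))
print('It first appears at position {}.'.format(phrase.find('A') + 1))
print('It last appears at position {}.'.format(phrase.rfind('A') + 1))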
790758766559dd3e8276554f9e97a158f5ec032d
| 216
|
py
|
Python
|
src/spaceone/repository/model/__init__.py
|
whdalsrnt/repository
|
4d019c21508629faae7a7e2789bf540a3bab0e20
|
[
"Apache-2.0"
] | 6
|
2020-06-04T23:00:33.000Z
|
2020-08-10T02:45:43.000Z
|
src/spaceone/repository/model/__init__.py
|
whdalsrnt/repository
|
4d019c21508629faae7a7e2789bf540a3bab0e20
|
[
"Apache-2.0"
] | 4
|
2020-10-09T07:02:27.000Z
|
2022-02-28T04:43:01.000Z
|
src/spaceone/repository/model/__init__.py
|
whdalsrnt/repository
|
4d019c21508629faae7a7e2789bf540a3bab0e20
|
[
"Apache-2.0"
] | 6
|
2020-06-01T10:10:57.000Z
|
2021-10-05T03:03:00.000Z
|
from spaceone.repository.model.plugin_model import *
from spaceone.repository.model.schema_model import *
from spaceone.repository.model.policy_model import *
from spaceone.repository.model.repository_model import *
| 43.2
| 56
| 0.851852
|
from spaceone.repository.model.plugin_model import *
from spaceone.repository.model.schema_model import *
from spaceone.repository.model.policy_model import *
from spaceone.repository.model.repository_model import *
| true
| true
|
790758cf4b935b5fb25f54292538f97f68e0b881
| 299
|
py
|
Python
|
instagram/forms.py
|
cossie14/Slygram
|
9df33f4c50b2b1fd35830637fb637c8c81d603c0
|
[
"MIT"
] | null | null | null |
instagram/forms.py
|
cossie14/Slygram
|
9df33f4c50b2b1fd35830637fb637c8c81d603c0
|
[
"MIT"
] | 4
|
2021-03-19T03:16:45.000Z
|
2021-09-08T01:15:53.000Z
|
instagram/forms.py
|
sylviahjepkosgei/Slygram
|
9df33f4c50b2b1fd35830637fb637c8c81d603c0
|
[
"MIT"
] | 1
|
2020-11-04T08:42:08.000Z
|
2020-11-04T08:42:08.000Z
|
from django import forms
from .models import Image, Comments
#......
class NewStoryForm(forms.ModelForm):
class Meta:
model = Image
fields = ('image', 'image_caption')
class NewCommentForm(forms.ModelForm):
class Meta:
model = Comments
fields = ('comment',)
| 23
| 43
| 0.64214
|
from django import forms
from .models import Image, Comments
class NewStoryForm(forms.ModelForm):
class Meta:
model = Image
fields = ('image', 'image_caption')
class NewCommentForm(forms.ModelForm):
class Meta:
model = Comments
fields = ('comment',)
| true
| true
|
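The two ModelForms above are typically consumed from a view. The sketch below shows the usual request/validate/save cycle; the view function, URL name, and template are assumptions for illustration and do not come from the repository, and a real view would likely attach the logged-in user (e.g. via commit=False) before saving.

# Hypothetical usage sketch; 'index' and 'new_story.html' are assumed names.
from django.shortcuts import redirect, render
from .forms import NewStoryForm

def new_story(request):
    if request.method == 'POST':
        form = NewStoryForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()                 # may need commit=False to set extra fields first
            return redirect('index')
    else:
        form = NewStoryForm()
    return render(request, 'new_story.html', {'form': form})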
7907593c3d7e230e9d0a30def6478592c661d0da
| 5,228
|
py
|
Python
|
test/test_model_conversion.py
|
huggingface/neural-compressor
|
aaad4c357a86914ffa583753c9a26d949838a2a5
|
[
"Apache-2.0"
] | 172
|
2021-09-14T18:34:17.000Z
|
2022-03-30T06:49:53.000Z
|
test/test_model_conversion.py
|
intel/lp-opt-tool
|
130eefa3586b38df6c0ff78cc8807ae273f6a63f
|
[
"Apache-2.0"
] | 40
|
2021-09-14T02:26:12.000Z
|
2022-03-29T08:34:04.000Z
|
test/test_model_conversion.py
|
intel/neural-compressor
|
16a4a12045fcb468da4d33769aff2c1a5e2ba6ba
|
[
"Apache-2.0"
] | 33
|
2021-09-15T07:27:25.000Z
|
2022-03-25T08:30:57.000Z
|
#
# -*- coding: utf-8 -*-
#
import unittest
import os
import shutil
import yaml
import tensorflow as tf
from neural_compressor.experimental import model_conversion
tf.compat.v1.enable_eager_execution()
from tensorflow import keras
from tensorflow.python.framework import graph_util
from neural_compressor.adaptor.tf_utils.util import disable_random
def build_fake_yaml():
fake_yaml = '''
model:
name: fake_yaml
framework: tensorflow
device: cpu
model_conversion:
source: qat
destination: default
'''
y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
with open('fake_yaml.yaml', "w", encoding="utf-8") as f:
yaml.dump(y, f)
f.close()
def prepare_dataset():
# Load MNIST dataset
mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Normalize the input image so that each pixel value is between 0 to 1.
train_images = train_images / 255.0
test_images = test_images / 255.0
return train_images, train_labels
def prepare_model(model_out_path, train_images, train_labels):
# Define the model architecture.
model = keras.Sequential([
keras.layers.InputLayer(input_shape=(28, 28)),
keras.layers.Reshape(target_shape=(28, 28, 1)),
keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation='relu'),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Flatten(),
keras.layers.Dense(10)
])
# Train the digit classification model
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(
train_images,
train_labels,
epochs=1,
validation_split=0.1,
)
model.save(model_out_path)
def prepare_qat_model(model_in_path, model_out_path, train_images, train_labels):
import tensorflow_model_optimization as tfmot
quantize_model = tfmot.quantization.keras.quantize_model
    # q_aware stands for quantization aware.
model = tf.keras.models.load_model(model_in_path)
q_aware_model = quantize_model(model)
# `quantize_model` requires a recompile.
q_aware_model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
train_images_subset = train_images[0:1000] # out of 60000
train_labels_subset = train_labels[0:1000]
q_aware_model.fit(train_images_subset, train_labels_subset,
batch_size=500, epochs=1, validation_split=0.1)
q_aware_model.save(model_out_path)
@unittest.skipIf(tf.version.VERSION < '2.4.0', "Only supports tf 2.4.0 or above")
class TestModelConversion(unittest.TestCase):
@classmethod
def setUpClass(self):
self._baseline_temp_path = './temp_baseline'
self._qat_temp_path = './temp_qat'
self._quantized_temp_path = './temp_quantized'
build_fake_yaml()
train_images, train_labels = prepare_dataset()
prepare_model(self._baseline_temp_path, train_images, train_labels)
prepare_qat_model(self._baseline_temp_path, self._qat_temp_path, train_images, train_labels)
@classmethod
def tearDownClass(self):
os.remove('fake_yaml.yaml')
shutil.rmtree(self._qat_temp_path, ignore_errors=True)
shutil.rmtree(self._baseline_temp_path, ignore_errors=True)
shutil.rmtree(self._quantized_temp_path, ignore_errors=True)
def test_model_conversion(self):
from neural_compressor.experimental import ModelConversion, common
from neural_compressor.conf.config import Conf
conversion = ModelConversion()
conversion.source = 'qat'
conversion.destination = 'default'
conversion.model = self._qat_temp_path
q_model = conversion.fit()
q_model.save(self._quantized_temp_path)
conf = Conf('fake_yaml.yaml')
conversion = ModelConversion(conf)
conversion.source = 'qat'
conversion.destination = 'default'
conversion.model = self._qat_temp_path
q_model = conversion.fit()
conversion = ModelConversion('fake_yaml.yaml')
conversion.source = 'qat'
conversion.destination = 'default'
conversion.model = self._qat_temp_path
q_model = conversion.fit()
graph = tf.compat.v1.Graph()
with graph.as_default():
with tf.compat.v1.Session() as sess:
meta_graph=tf.compat.v1.saved_model.loader.load(sess, [tf.compat.v1.saved_model.tag_constants.SERVING], self._quantized_temp_path)
print(meta_graph.graph_def.node)
for i in meta_graph.graph_def.node:
if 'MatMul' in i.op:
self.assertTrue('QuantizedMatMul' in i.op)
if 'MaxPool' in i.op:
self.assertTrue('QuantizedMaxPool' in i.op)
if 'Conv2D' in i.op:
self.assertTrue('QuantizedConv2D' in i.op)
if __name__ == "__main__":
unittest.main()
| 35.808219
| 146
| 0.666985
|
import unittest
import os
import shutil
import yaml
import tensorflow as tf
from neural_compressor.experimental import model_conversion
tf.compat.v1.enable_eager_execution()
from tensorflow import keras
from tensorflow.python.framework import graph_util
from neural_compressor.adaptor.tf_utils.util import disable_random
def build_fake_yaml():
fake_yaml = '''
model:
name: fake_yaml
framework: tensorflow
device: cpu
model_conversion:
source: qat
destination: default
'''
y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
with open('fake_yaml.yaml', "w", encoding="utf-8") as f:
yaml.dump(y, f)
f.close()
def prepare_dataset():
mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images = train_images / 255.0
test_images = test_images / 255.0
return train_images, train_labels
def prepare_model(model_out_path, train_images, train_labels):
model = keras.Sequential([
keras.layers.InputLayer(input_shape=(28, 28)),
keras.layers.Reshape(target_shape=(28, 28, 1)),
keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation='relu'),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Flatten(),
keras.layers.Dense(10)
])
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(
train_images,
train_labels,
epochs=1,
validation_split=0.1,
)
model.save(model_out_path)
def prepare_qat_model(model_in_path, model_out_path, train_images, train_labels):
import tensorflow_model_optimization as tfmot
quantize_model = tfmot.quantization.keras.quantize_model
model = tf.keras.models.load_model(model_in_path)
q_aware_model = quantize_model(model)
q_aware_model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
train_images_subset = train_images[0:1000]
train_labels_subset = train_labels[0:1000]
q_aware_model.fit(train_images_subset, train_labels_subset,
batch_size=500, epochs=1, validation_split=0.1)
q_aware_model.save(model_out_path)
@unittest.skipIf(tf.version.VERSION < '2.4.0', "Only supports tf 2.4.0 or above")
class TestModelConversion(unittest.TestCase):
@classmethod
def setUpClass(self):
self._baseline_temp_path = './temp_baseline'
self._qat_temp_path = './temp_qat'
self._quantized_temp_path = './temp_quantized'
build_fake_yaml()
train_images, train_labels = prepare_dataset()
prepare_model(self._baseline_temp_path, train_images, train_labels)
prepare_qat_model(self._baseline_temp_path, self._qat_temp_path, train_images, train_labels)
@classmethod
def tearDownClass(self):
os.remove('fake_yaml.yaml')
shutil.rmtree(self._qat_temp_path, ignore_errors=True)
shutil.rmtree(self._baseline_temp_path, ignore_errors=True)
shutil.rmtree(self._quantized_temp_path, ignore_errors=True)
def test_model_conversion(self):
from neural_compressor.experimental import ModelConversion, common
from neural_compressor.conf.config import Conf
conversion = ModelConversion()
conversion.source = 'qat'
conversion.destination = 'default'
conversion.model = self._qat_temp_path
q_model = conversion.fit()
q_model.save(self._quantized_temp_path)
conf = Conf('fake_yaml.yaml')
conversion = ModelConversion(conf)
conversion.source = 'qat'
conversion.destination = 'default'
conversion.model = self._qat_temp_path
q_model = conversion.fit()
conversion = ModelConversion('fake_yaml.yaml')
conversion.source = 'qat'
conversion.destination = 'default'
conversion.model = self._qat_temp_path
q_model = conversion.fit()
graph = tf.compat.v1.Graph()
with graph.as_default():
with tf.compat.v1.Session() as sess:
meta_graph=tf.compat.v1.saved_model.loader.load(sess, [tf.compat.v1.saved_model.tag_constants.SERVING], self._quantized_temp_path)
print(meta_graph.graph_def.node)
for i in meta_graph.graph_def.node:
if 'MatMul' in i.op:
self.assertTrue('QuantizedMatMul' in i.op)
if 'MaxPool' in i.op:
self.assertTrue('QuantizedMaxPool' in i.op)
if 'Conv2D' in i.op:
self.assertTrue('QuantizedConv2D' in i.op)
if __name__ == "__main__":
unittest.main()
| true
| true
|
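Outside the unittest harness, the conversion step exercised by the test boils down to a few lines. The sketch below is distilled from the test itself (same class and attribute names); the model paths are placeholders, not fixed locations.

# Minimal sketch distilled from the test above; paths are placeholders.
from neural_compressor.experimental import ModelConversion

conversion = ModelConversion()
conversion.source = 'qat'            # convert a quantization-aware-trained Keras model
conversion.destination = 'default'   # into a default quantized SavedModel
conversion.model = './temp_qat'      # directory containing the saved QAT model
q_model = conversion.fit()
q_model.save('./temp_quantized')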
79075957d9d4402065718b9f2cbe06ccf8dcc00a
| 2,603
|
py
|
Python
|
MNIST/mnist.py
|
VitorGDellino/Neural-Network
|
fb2ff335656145d385c0fbf6e68b0840efe51dd2
|
[
"MIT"
] | null | null | null |
MNIST/mnist.py
|
VitorGDellino/Neural-Network
|
fb2ff335656145d385c0fbf6e68b0840efe51dd2
|
[
"MIT"
] | null | null | null |
MNIST/mnist.py
|
VitorGDellino/Neural-Network
|
fb2ff335656145d385c0fbf6e68b0840efe51dd2
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
#print(tf.__version__)
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img, cmap=plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array[i], true_label[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color="#777777")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
train_images = train_images / 255.0
test_images = test_images / 255.0
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
plt.show()
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation=tf.nn.relu),
keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer=tf.train.AdamOptimizer(),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=10)
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('Test accuracy:', test_acc)
predictions = model.predict(test_images)
print(predictions[0])
"""num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2*num_cols, 2*i+1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(num_rows, 2*num_cols, 2*i+2)
plot_value_array(i, predictions, test_labels)
plt.show()
"""
| 26.292929
| 84
| 0.684211
|
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img, cmap=plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array[i], true_label[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color="#777777")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
train_images = train_images / 255.0
test_images = test_images / 255.0
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
plt.show()
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation=tf.nn.relu),
keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer=tf.train.AdamOptimizer(),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=10)
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('Test accuracy:', test_acc)
predictions = model.predict(test_images)
print(predictions[0])
| true
| true
|
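Each row of `predictions` in the script above is a softmax vector over the ten classes, which is the shape `plot_image` and `plot_value_array` expect. A short follow-up sketch, using only NumPy and the variables already defined in the script (it is not part of the original file), turns those vectors into class labels and recomputes accuracy by hand.

# Illustrative follow-up, assuming `predictions` and `test_labels` from the script above.
import numpy as np

predicted_labels = np.argmax(predictions, axis=1)        # most probable class per image
manual_accuracy = np.mean(predicted_labels == test_labels)
print('Accuracy recomputed from predictions:', manual_accuracy)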
79075af075309d228e4ce168b304d83bcc1918c6
| 26,784
|
py
|
Python
|
TensorMonitor/control_panel.py
|
octaviaguo/Tensorflow-Visualizing
|
f96356d5fea4c4394193ac098ae75026a3cfb714
|
[
"MIT"
] | 15
|
2018-08-23T18:01:03.000Z
|
2021-10-01T03:05:49.000Z
|
TensorMonitor/control_panel.py
|
octaviaguo/Tensorflow-Visualizing
|
f96356d5fea4c4394193ac098ae75026a3cfb714
|
[
"MIT"
] | null | null | null |
TensorMonitor/control_panel.py
|
octaviaguo/Tensorflow-Visualizing
|
f96356d5fea4c4394193ac098ae75026a3cfb714
|
[
"MIT"
] | null | null | null |
import os
import sys
import copy as copy
from tensor_view_1d import TensorView1D
from tensor_view_2d import TensorView2D
from tensor_view_act import TensorViewAct
from tensor_view_filter import TensorViewFilter
from tensor_data import TensorData
import inspect
from PyQt4 import QtGui, QtCore
from pyqt_env import PyQTEnv
import xml.etree.ElementTree as ET
TEST_WATERFALL_VIEW = False
gui_root_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
class MainWindow(QtGui.QMainWindow):
def __init__(self, args):
super(MainWindow, self).__init__()
self.setGeometry(1400,70,600,370)
self.setWindowTitle("VISUALIZATION")
self.action_cb = args
#self.tensor_input_list = args['tensor_input_list']
quitAction = QtGui.QAction('Quit', self)
quitAction.triggered.connect(self.close_application)
saveAction = QtGui.QAction('Save', self)
saveAction.setShortcut('Ctrl+S')
saveAction.triggered.connect(self.save_WatchList)
loadAction = QtGui.QAction('Open File...', self)
loadAction.setShortcut('Ctrl+O')
loadAction.triggered.connect(self.action_cb['load_WatchList'])
input_file = QtGui.QAction('Open input file', self)
input_file.setShortcut('Ctrl+I')
input_file.triggered.connect(self.open_input_file)
menu = self.menuBar()
filemenu = menu.addMenu('&File')
filemenu.addAction(saveAction)
filemenu.addAction(loadAction)
filemenu.addAction(input_file)
self.toolBar = self.addToolBar("ToolBar")
self.toolBar.addAction(quitAction)
self.create_sub_windows()
def create_sub_windows(self):
pausecheck = QtGui.QCheckBox('Pause', self)
pausecheck.move(520,120)
pausecheck.toggle()
pausecheck.stateChanged.connect(self.action_cb['on_pause'])
self.step_btn = QtGui.QPushButton("Step",self)
self.step_btn.setStyleSheet("color: blue; font: bold 14px")
self.step_btn.resize(50,25)
self.step_btn.move(520,80)
self.step_btn.clicked.connect(self.action_cb['on_step'])
self.watch_com = QtGui.QLabel(self)
self.watch_com.setText('Watch :')
self.watch_com.move(520,244)
self.watch_com.setFont(QtGui.QFont("Times",13,weight=QtGui.QFont.Bold))
self.watch_choice = QtGui.QComboBox(self)
self.watch_choice.setStyleSheet("font: bold 14px")
self.watch_choice.move(520,280)
self.watch_choice.addItem('1-DIM')
self.watch_choice.addItem('2-DIM')
self.watch_choice.addItem('Activation')
self.watch_choice.addItem('Filter')
self.watch_choice.resize(70,30)
self.watch_choice.show()
self.watch_choice.activated[str].connect(self.action_cb['on_add_watch'])
self.showbtn = QtGui.QCheckBox('Show',self)
self.showbtn.move(520,195)
self.showbtn.toggle()
self.showbtn.hide()
self.showbtn.stateChanged.connect(self.action_cb['on_set_show'])
self.show_remove_btn = QtGui.QPushButton("Remove",self)
self.show_remove_btn.setStyleSheet("color: red; font: bold 14px")
self.show_remove_btn.resize(70,30)
self.show_remove_btn.move(520,240)
self.show_remove_btn.hide()
self.show_remove_btn.clicked.connect(self.action_cb['on_remove_watch'])
self.hd_all_btn = QtGui.QPushButton("Hide All",self)
self.hd_all_btn.setStyleSheet("color: red; font: bold 14px")
self.hd_all_btn.resize(84,30)
self.hd_all_btn.move(510,280)
self.hd_all_btn.hide()
self.hd_all_btn.clicked.connect(self.action_cb['on_hide_all'])
self.tensor_label = QtGui.QLabel(self)
self.tensor_label.setAlignment(QtCore.Qt.AlignCenter)
self.tensor_label.setGeometry(QtCore.QRect(80,180,200,20))
self.tensor_label.setFont(QtGui.QFont("Times",12,weight=QtGui.QFont.Bold))
self.tensor_reshape_label = QtGui.QLabel(self)
self.tensor_reshape_label.setAlignment(QtCore.Qt.AlignCenter)
self.tensor_reshape_label.setGeometry(QtCore.QRect(80,220,200,20))
self.tensor_reshape_label.setFont(QtGui.QFont("Times",12,weight=QtGui.QFont.Bold))
self.reshape_inlb = QtGui.QLabel(self)
self.reshape_inlb.move(80,220)
self.reshape_inlb.setText('Reshape: ')
self.reshape_inlb.setFont(QtGui.QFont('Times',12,weight=QtGui.QFont.Bold))
self.tensor_shape_input = QtGui.QLineEdit(self)
self.tensor_shape_input.textChanged.connect(self.action_cb['on_tensor_shape_input'])
self.tensor_shape_input.move(160,220)
self.sourceInput_list = QtGui.QComboBox(self)
self.sourceInput_list.move(160,270)
self.sourceInput_list.activated[str].connect(self.action_cb['on_input_select'])
listcombo = QtGui.QComboBox(self)
listcombo.addItem("Select List")
listcombo.addItem("Watch List")
listcombo.move(50,100)
subcombo = QtGui.QComboBox(self)
subcombo.addItem('USER_LIST')
subcombo.addItem('TRAINABLE_VARIABLES')
subcombo.addItem('ACTIVATIONS')
subcombo.addItem('GLOBAL_VARIABLES')
subcombo.addItem('ALL_OPS')
subcombo.move(180,100)
listcombo.activated[str].connect(self.action_cb['on_list_type_select'])
subcombo.activated[str].connect(self.action_cb['on_filter_type_select'])
self.create_list_view()
fontset = QtGui.QFont()
fontset.setPointSize(12)
self.filter_comment = QtGui.QLabel(self)
self.filter_comment.setText('Search Only in ALL_OPS:')
self.filter_comment.setGeometry(QtCore.QRect(100,34,180,25))
self.filter_comment.setFont(fontset)
self.filter_in = QtGui.QLineEdit(self)
self.filter_in.textChanged.connect(self.action_cb['on_filter_str_input'])
self.filter_in.move(290,30)
self.filter_in.resize(190,40)
self.show()
def create_list_view(self):
self.list_view=QtGui.QListView(self)
self.list_view.main = self
self.list_view.setEditTriggers(QtGui.QListView.NoEditTriggers)
self.list_view.setMouseTracking(True)
self.list_model = QtGui.QStandardItemModel()
self.list_view.setModel(self.list_model)
entries = [str(i) for i in range(50)]
for i in entries:
item = QtGui.QStandardItem(i)
self.list_model.appendRow(item)
self.list_view.setMinimumSize(170,200)
self.list_view.move(310,130)
self.list_view.clicked.connect(self.action_cb['on_tensor_select'])
def close_application(self):
choice = QtGui.QMessageBox.question(self, 'Warning',
"Do you want to quit?",
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
if choice == QtGui.QMessageBox.Yes:
self.action_cb['on_close']()
else:
pass
def save_WatchList(self):
choice = QtGui.QMessageBox.question(self, '',
"Do you want to save the watch_list?",
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
if choice == QtGui.QMessageBox.Yes:
self.action_cb['on_save']()
else:
pass
def update_tensor_list(self, list_type, list, pos, reset_pos):
items_str = [t.disp_name for t in list]
self.list_model.clear()
for text in items_str:
item = QtGui.QStandardItem(text)
self.list_model.appendRow(item)
def open_input_file(self):
name = QtGui.QFileDialog.getOpenFileName(self, 'Open input file')
input_file = open(name, 'r')
DIYname = QtGui.QInputDialog.getText(self, 'Name your input choice', None)
save_name = DIYname[0]
self.action_cb['add_input'](save_name, input_file.name)
def update_input_list(self, input_list):
self.sourceInput_list.clear()
for item in input_list:
self.sourceInput_list.addItem(item.name)
def enable_filter_input(self, enable):
if enable is False:
self.filter_in.setDisabled(True)
else:
self.filter_in.setDisabled(False)
class TensorItem(object):
def __init__(self, name, shape, op, input_name):
self.name = name
self.op = op
self.input_name = input_name
#self.data_source = TensorData(start_step=ControlPanel.step_count)
self.disp_name = name
try:
shape_str = '(' + ', '.join(map(str, shape)) + ')'
self.shape_str = shape_str
self.reshape = []
except: #TypeError: #fix for python3
self.shape_str = ""
self.reshape = []
####
#self.pyqt_window_id = None
#self.view = None
def copy(self, obj):
self.name = copy.copy(obj.name)
self.input_name = copy.copy(obj.input_name)
self.op = obj.op
self.disp_name = copy.copy(obj.disp_name)
self.shape_str = copy.copy(obj.shape_str)
self.reshape = copy.copy(obj.reshape)
def get_reshape_str(self):
return ', '.join(map(str, self.reshape))
class ControlPanel(object):
quit = False
pause = True
single_step_flag = False
step_count = 0
cur_list_type = 0
cur_filter_type_index = 0
tensor_select_list = []
select_list_cur_pos = 0
tensor_watch_list = []
watch_list_cur_pos = 0
tensor_input_list = []
console_cmd_list = []
pyqt_env = None
class TensorSelectItem(TensorItem):
def __init__(self, name, shape, op, input_name):
TensorItem.__init__(self, name, shape, op, input_name)
class TensorWatchItem(TensorItem):
def __init__(self, tensor_select_item):
self.showstate = True
self.copy(tensor_select_item)
self.data_source = TensorData(start_step=ControlPanel.step_count)
self.pyqt_window_id = None
self.picDIM = '1-DIM'
class TensorInputItem(object):
def __init__(self, name, input_obj):
self.name = name
self.input_obj = input_obj
"""
tensor panel
"""
def __open_tensor_view(self, index, text):
tensor_item = self.tensor_watch_list[index]
tensor_item.pyqt_window_id = self.pyqt_env.get_free_identity()
if text == '2-DIM':
self.pyqt_env.create_window(tensor_item.pyqt_window_id, TensorView2D,
{'data_source':tensor_item.data_source, 'name':tensor_item.name, 'shape':tensor_item.shape_str, 'reshape':tensor_item.reshape})
self.tensor_watch_list[index].picDIM = '2-DIM'
elif text == '1-DIM':
self.pyqt_env.create_window(tensor_item.pyqt_window_id, TensorView1D,
{'data_source':tensor_item.data_source, 'name':tensor_item.name})
self.tensor_watch_list[index].picDIM = '1-DIM'
elif text == 'Activation':
self.pyqt_env.create_window(tensor_item.pyqt_window_id, TensorViewAct,
{'data_source':tensor_item.data_source, 'name':tensor_item.name, 'shape':tensor_item.shape_str, 'reshape':tensor_item.reshape})
self.tensor_watch_list[index].picDIM = 'Activation'
elif text == 'Filter':
self.pyqt_env.create_window(tensor_item.pyqt_window_id, TensorViewFilter,
{'data_source':tensor_item.data_source, 'name':tensor_item.name, 'shape':tensor_item.shape_str, 'reshape':tensor_item.reshape})
self.tensor_watch_list[index].picDIM = 'Filter'
def __close_tensor_view(self, index):
tensor_item = self.tensor_watch_list[index]
if tensor_item.pyqt_window_id is not None:
self.pyqt_env.close(tensor_item.pyqt_window_id)
tensor_item.pyqt_window_id = None
def __close_all_tensor_views(self):
for i in range(len(self.tensor_watch_list)):
self.__close_tensor_view(i)
def __on_tensor_shape_input(self, text):
titem = self.tensor_select_list[self.select_list_cur_pos]
dims = text.split(',')
titem.reshape = []
for dim in dims:
try:
titem.reshape.append(int(dim))
except ValueError:
pass
def __on_add_watch(self, text):
titem = self.tensor_select_list[self.select_list_cur_pos]
new_titem = self.TensorWatchItem(titem)
"""
new_titem = copy.copy(titem) #shallow copy
new_titem.reshape = copy.copy(titem.reshape)
"""
self.tensor_watch_list.append(new_titem)
index = len(self.tensor_watch_list)-1
self.__open_tensor_view(index,text)
def __on_remove_watch(self):
self.__close_tensor_view(self.watch_list_cur_pos)
del self.tensor_watch_list[self.watch_list_cur_pos]
item_num = len(self.tensor_watch_list)
if self.watch_list_cur_pos >= item_num and item_num > 0:
self.watch_list_cur_pos = item_num-1
if self.cur_list_type==0:
list = self.tensor_select_list
pos = self.select_list_cur_pos
else:
list = self.tensor_watch_list
pos = self.watch_list_cur_pos
self.main_window.update_tensor_list(list_type=self.cur_list_type, list=list, pos=pos, reset_pos=False)
def __on_set_show(self, state):
if state == QtCore.Qt.Checked and self.tensor_watch_list[self.watch_list_cur_pos].showstate == False:
self.__open_tensor_view(self.watch_list_cur_pos, self.tensor_watch_list[self.watch_list_cur_pos].picDIM)
self.tensor_watch_list[self.watch_list_cur_pos].showstate = True
if state != QtCore.Qt.Checked and self.tensor_watch_list[self.watch_list_cur_pos].showstate == True:
self.__close_tensor_view(self.watch_list_cur_pos)
self.tensor_watch_list[self.watch_list_cur_pos].showstate = False
def __on_input_select(self, text):
titem = self.tensor_select_list[self.select_list_cur_pos]
titem.input_name = text
input_obj = self.__get_input_obj(text)
if input_obj is not None:
input_obj.show()
def __on_tensor_select(self, index):
index = index.row()
if self.cur_list_type == 0:
self.select_list_cur_pos = index
list = self.tensor_select_list
print(list[index].shape_str)
else:
self.watch_list_cur_pos = index
list = self.tensor_watch_list
if self.tensor_watch_list[index].showstate == False:
self.main_window.showbtn.setChecked(False)
else:
self.main_window.showbtn.setChecked(True)
self.main_window.tensor_reshape_label.setText('Reshape: ('+str(list[index].get_reshape_str())+')')
self.main_window.tensor_label.setText('Shape: '+list[index].shape_str)
"""
global control
"""
def __on_list_type_select(self, text):
if text == 'Select List':
index = 0
else:
index = 1
if index != self.cur_list_type:
if index == 0:
self.main_window.enable_filter_input(True)
else:
self.main_window.enable_filter_input(False)
self.cur_list_type = index
self.on_switch_btn(self.cur_list_type)
if self.cur_list_type == 0:
pos = self.select_list_cur_pos
self.main_window.update_tensor_list(list_type=self.cur_list_type, list=self.tensor_select_list, pos=pos, reset_pos=False)
else:
pos = self.watch_list_cur_pos
self.main_window.update_tensor_list(list_type=self.cur_list_type, list=self.tensor_watch_list, pos=pos, reset_pos=False)
def on_switch_btn(self,index):
if index == 0:
self.main_window.watch_choice.show()
self.main_window.show_remove_btn.hide()
self.main_window.hd_all_btn.hide()
self.main_window.showbtn.hide()
self.main_window.watch_com.show()
self.main_window.tensor_label.show()
self.main_window.tensor_label.setText('Shape: '+self.tensor_select_list[0].shape_str)
self.main_window.tensor_shape_input.show()
self.main_window.reshape_inlb.show()
self.main_window.tensor_shape_input.clear()
self.main_window.tensor_reshape_label.hide()
else:
self.main_window.watch_choice.hide()
self.main_window.show_remove_btn.show()
self.main_window.hd_all_btn.show()
self.main_window.watch_com.hide()
self.main_window.tensor_shape_input.hide()
if self.tensor_watch_list != []:
self.main_window.showbtn.show()
self.main_window.tensor_label.show()
self.main_window.tensor_reshape_label.show()
self.main_window.tensor_label.setText('Shape: '+self.tensor_watch_list[0].shape_str)
self.main_window.tensor_reshape_label.setText('Reshape: ('+str(self.tensor_watch_list[0].get_reshape_str())+')')
if self.tensor_watch_list[0].showstate == True:
self.main_window.showbtn.setChecked(True)
else:
self.main_window.showbtn.setChecked(False)
else:
self.main_window.showbtn.hide()
self.main_window.tensor_label.hide()
self.main_window.tensor_reshape_label.hide()
self.main_window.reshape_inlb.hide()
def __on_filter_type_select(self, text):
pwd = {'USER_LIST':0, 'TRAINABLE_VARIABLES':1, 'ACTIVATIONS':2, 'GLOBAL_VARIABLES':3, 'ALL_OPS':4 }
self.cur_filter_type_index = pwd[text]
if pwd[text] == 2:
pass
else:
pass
def __on_filter_str_input(self, text):
text = str(text)
self.filter_str = text.strip()
def __on_pause(self, state):
if state == QtCore.Qt.Checked:
self.pause = True
else:
self.pause = False
print(self.pause)
def __on_step(self):
self.pause = True
self.single_step_flag = True
def __on_hide_all(self):
self.__close_all_tensor_views()
self.main_window.showbtn.hide()
def __on_console_str_input(self):
return
cmd = copy.copy(text.strip())
self.console_cmd_list.append(cmd)
def __on_close(self):
self.quit = True
def __on_save(self):
NoWatchItem = len(self.tensor_watch_list)
watchlist = [None]*NoWatchItem
root = ET.Element('root')
for i in range(NoWatchItem):
watchlist[i] = ET.SubElement(root, 'Item'+str(i+1))
name = ET.SubElement(watchlist[i], 'name')
shape = ET.SubElement(watchlist[i], 'shape')
reshape = ET.SubElement(watchlist[i], 'reshape')
visType = ET.SubElement(watchlist[i], 'visType')
win_x = ET.SubElement(watchlist[i], 'win_x')
win_y = ET.SubElement(watchlist[i], 'win_y')
win_w = ET.SubElement(watchlist[i], 'win_w')
win_h = ET.SubElement(watchlist[i], 'win_h')
name.text = self.tensor_watch_list[i].name
shape.text = self.tensor_watch_list[i].shape_str
reshape.text = self.tensor_watch_list[i].reshape
visType.text = self.tensor_watch_list[i].picDIM
(x,y,w,h) = self.pyqt_env.get_win_pos_size(self.tensor_watch_list[i].pyqt_window_id)
win_x.text = str(x)
win_y.text = str(y)
win_w.text = str(w)
win_h.text = str(h)
my = ET.tostring(root)
myfile = open('Saved_WatchList.xml', 'wb')
myfile.write(my)
def __load_WatchList(self):
tree = ET.parse('Saved_WatchList.xml')
root = tree.getroot()
count = len(self.tensor_watch_list)
print(count)
for elem in root:
n = elem[0].text
for t in self.all_ops:
if t.name == n:
tem_select = self.TensorSelectItem(t.name, t.shape, t.op, self.tensor_input_list[0].name)
new = self.TensorWatchItem(tem_select)
self.tensor_watch_list.append(new)
print('now',len(self.tensor_watch_list), 'but count: ', count)
self.__open_tensor_view(count, elem[3].text)
self.pyqt_env.set_win_pos_size(self.tensor_watch_list[count].pyqt_window_id, \
int(elem[4].text),int(elem[5].text),int(elem[6].text),int(elem[7].text))
break
count += 1
def __create_main_window(self, args):
self.main_window = MainWindow(
{
'filter_type_list':self.filter_type_list,
'tensor_input_list': self.tensor_input_list,
'on_close':self.__on_close,
'on_save':self.__on_save,
# global control
'on_pause':self.__on_pause,
'on_step':self.__on_step,
'on_hide_all':self.__on_hide_all,
'on_console_str_input':self.__on_console_str_input,
'on_filter_type_select':self.__on_filter_type_select,
'on_filter_str_input':self.__on_filter_str_input,
'on_list_type_select':self.__on_list_type_select,
##
'on_tensor_select':self.__on_tensor_select,
# tensor select panel
'on_tensor_shape_input':self.__on_tensor_shape_input,
'on_input_select':self.__on_input_select,
# tensor watch panel
'on_remove_watch':self.__on_remove_watch,
'on_add_watch':self.__on_add_watch,
'on_set_show':self.__on_set_show,
'load_WatchList':self.__load_WatchList,
'add_input':self.__add_input
}
)
return None
def __init__(self, filter_type_list, input_list, loaded_list):
for input_name in input_list:
self.tensor_input_list.append(self.TensorInputItem(input_name, None))
self.filter_str = ""
self.filter_type_list = filter_type_list
self.pyqt_env = PyQTEnv()
self.pyqt_env.run(self.__create_main_window, None)
self.main_window.update_input_list(self.tensor_input_list)
print('control_panel _init')
self.all_ops = loaded_list
### add_input test
#for test/alexnet
#self.__add_input('img_input')
#for test/basic_test
#self.__add_input('test_input')
#self.pyqt_env.run(self.__load_input, None)
'''
def __load_input(self, args):
### add_input test
#for test/alexnet
self.__add_input('my_img_input', '../alexnet/img_input.py')
#for test/basic_test
self.__add_input('test_input', '../basic_test/test_input.py')
'''
def __get_input_obj(self, name):
for input_item in self.tensor_input_list:
if input_item.name == name:
return input_item.input_obj
return None
def __add_input(self, input_name, filename, config_dict={}):
import importlib
try:
placeholder_dict={}
for t in self.all_ops:
if t.op.op.type == 'Placeholder':
placeholder_dict[t.name] = t.op
names = os.path.split(os.path.abspath(filename))
path = names[0]
module_name = names[1].split('.')[-2]
print('* input_name is: %s, filename is: %s'%(input_name, filename))
print('* config_dict is:', config_dict)
print('* module path is: %s, name is: %s'%(path, module_name))
#add module search path
sys.path.append(path)
temp_module = importlib.import_module(module_name)
input_obj = temp_module.TensorInput(placeholder_dict, config_dict)
input_obj.show()
input_item = self.TensorInputItem(input_name, input_obj)
self.tensor_input_list.append(input_item)
self.main_window.update_input_list(self.tensor_input_list)
except Exception as e:
print('Add_input error:', e)
"""
public methods
"""
def update_tensor_list(self, tensor_list):
self.tensor_select_list = []
for t in tensor_list:
if len(self.tensor_input_list)>0:
input_name = self.tensor_input_list[0].name
else:
input_name = ''
self.tensor_select_list.append(self.TensorSelectItem(t[0], t[1], t[2], input_name))
if self.cur_list_type == 0:
self.select_list_cur_pos = 0
self.main_window.update_tensor_list(list_type=self.cur_list_type, list=self.tensor_select_list, pos=0, reset_pos=True)
def get_tensor_watch_list(self):
dict = {}
for input_item in self.tensor_input_list:
list = []
for t in self.tensor_watch_list:
if t.pyqt_window_id is not None and input_item.name == t.input_name:
list.append((t.name, t.reshape, t.op, t.data_source, t.input_name))
if len(list)>0:
dict[input_item] = list
return dict
def beat(self, update_step_flag):
if update_step_flag:
self.single_step_flag = False
ControlPanel.step_count += 1
if self.quit:
self.pyqt_env.quit()
return not self.quit
def is_pause(self):
return self.pause
def is_step(self):
return self.single_step_flag
def get_filter_type(self):
return [self.filter_type_list[self.cur_filter_type_index], self.filter_str]
def get_console_command(self):
if len(self.console_cmd_list)>0:
cmd = self.console_cmd_list.pop()
return cmd
| 40.035874
| 144
| 0.614658
|
import os
import sys
import copy as copy
from tensor_view_1d import TensorView1D
from tensor_view_2d import TensorView2D
from tensor_view_act import TensorViewAct
from tensor_view_filter import TensorViewFilter
from tensor_data import TensorData
import inspect
from PyQt4 import QtGui, QtCore
from pyqt_env import PyQTEnv
import xml.etree.ElementTree as ET
TEST_WATERFALL_VIEW = False
gui_root_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
class MainWindow(QtGui.QMainWindow):
def __init__(self, args):
super(MainWindow, self).__init__()
self.setGeometry(1400,70,600,370)
self.setWindowTitle("VISUALIZATION")
self.action_cb = args
quitAction = QtGui.QAction('Quit', self)
quitAction.triggered.connect(self.close_application)
saveAction = QtGui.QAction('Save', self)
saveAction.setShortcut('Ctrl+S')
saveAction.triggered.connect(self.save_WatchList)
loadAction = QtGui.QAction('Open File...', self)
loadAction.setShortcut('Ctrl+O')
loadAction.triggered.connect(self.action_cb['load_WatchList'])
input_file = QtGui.QAction('Open input file', self)
input_file.setShortcut('Ctrl+I')
input_file.triggered.connect(self.open_input_file)
menu = self.menuBar()
filemenu = menu.addMenu('&File')
filemenu.addAction(saveAction)
filemenu.addAction(loadAction)
filemenu.addAction(input_file)
self.toolBar = self.addToolBar("ToolBar")
self.toolBar.addAction(quitAction)
self.create_sub_windows()
def create_sub_windows(self):
pausecheck = QtGui.QCheckBox('Pause', self)
pausecheck.move(520,120)
pausecheck.toggle()
pausecheck.stateChanged.connect(self.action_cb['on_pause'])
self.step_btn = QtGui.QPushButton("Step",self)
self.step_btn.setStyleSheet("color: blue; font: bold 14px")
self.step_btn.resize(50,25)
self.step_btn.move(520,80)
self.step_btn.clicked.connect(self.action_cb['on_step'])
self.watch_com = QtGui.QLabel(self)
self.watch_com.setText('Watch :')
self.watch_com.move(520,244)
self.watch_com.setFont(QtGui.QFont("Times",13,weight=QtGui.QFont.Bold))
self.watch_choice = QtGui.QComboBox(self)
self.watch_choice.setStyleSheet("font: bold 14px")
self.watch_choice.move(520,280)
self.watch_choice.addItem('1-DIM')
self.watch_choice.addItem('2-DIM')
self.watch_choice.addItem('Activation')
self.watch_choice.addItem('Filter')
self.watch_choice.resize(70,30)
self.watch_choice.show()
self.watch_choice.activated[str].connect(self.action_cb['on_add_watch'])
self.showbtn = QtGui.QCheckBox('Show',self)
self.showbtn.move(520,195)
self.showbtn.toggle()
self.showbtn.hide()
self.showbtn.stateChanged.connect(self.action_cb['on_set_show'])
self.show_remove_btn = QtGui.QPushButton("Remove",self)
self.show_remove_btn.setStyleSheet("color: red; font: bold 14px")
self.show_remove_btn.resize(70,30)
self.show_remove_btn.move(520,240)
self.show_remove_btn.hide()
self.show_remove_btn.clicked.connect(self.action_cb['on_remove_watch'])
self.hd_all_btn = QtGui.QPushButton("Hide All",self)
self.hd_all_btn.setStyleSheet("color: red; font: bold 14px")
self.hd_all_btn.resize(84,30)
self.hd_all_btn.move(510,280)
self.hd_all_btn.hide()
self.hd_all_btn.clicked.connect(self.action_cb['on_hide_all'])
self.tensor_label = QtGui.QLabel(self)
self.tensor_label.setAlignment(QtCore.Qt.AlignCenter)
self.tensor_label.setGeometry(QtCore.QRect(80,180,200,20))
self.tensor_label.setFont(QtGui.QFont("Times",12,weight=QtGui.QFont.Bold))
self.tensor_reshape_label = QtGui.QLabel(self)
self.tensor_reshape_label.setAlignment(QtCore.Qt.AlignCenter)
self.tensor_reshape_label.setGeometry(QtCore.QRect(80,220,200,20))
self.tensor_reshape_label.setFont(QtGui.QFont("Times",12,weight=QtGui.QFont.Bold))
self.reshape_inlb = QtGui.QLabel(self)
self.reshape_inlb.move(80,220)
self.reshape_inlb.setText('Reshape: ')
self.reshape_inlb.setFont(QtGui.QFont('Times',12,weight=QtGui.QFont.Bold))
self.tensor_shape_input = QtGui.QLineEdit(self)
self.tensor_shape_input.textChanged.connect(self.action_cb['on_tensor_shape_input'])
self.tensor_shape_input.move(160,220)
self.sourceInput_list = QtGui.QComboBox(self)
self.sourceInput_list.move(160,270)
self.sourceInput_list.activated[str].connect(self.action_cb['on_input_select'])
listcombo = QtGui.QComboBox(self)
listcombo.addItem("Select List")
listcombo.addItem("Watch List")
listcombo.move(50,100)
subcombo = QtGui.QComboBox(self)
subcombo.addItem('USER_LIST')
subcombo.addItem('TRAINABLE_VARIABLES')
subcombo.addItem('ACTIVATIONS')
subcombo.addItem('GLOBAL_VARIABLES')
subcombo.addItem('ALL_OPS')
subcombo.move(180,100)
listcombo.activated[str].connect(self.action_cb['on_list_type_select'])
subcombo.activated[str].connect(self.action_cb['on_filter_type_select'])
self.create_list_view()
fontset = QtGui.QFont()
fontset.setPointSize(12)
self.filter_comment = QtGui.QLabel(self)
self.filter_comment.setText('Search Only in ALL_OPS:')
self.filter_comment.setGeometry(QtCore.QRect(100,34,180,25))
self.filter_comment.setFont(fontset)
self.filter_in = QtGui.QLineEdit(self)
self.filter_in.textChanged.connect(self.action_cb['on_filter_str_input'])
self.filter_in.move(290,30)
self.filter_in.resize(190,40)
self.show()
def create_list_view(self):
self.list_view=QtGui.QListView(self)
self.list_view.main = self
self.list_view.setEditTriggers(QtGui.QListView.NoEditTriggers)
self.list_view.setMouseTracking(True)
self.list_model = QtGui.QStandardItemModel()
self.list_view.setModel(self.list_model)
entries = [str(i) for i in range(50)]
for i in entries:
item = QtGui.QStandardItem(i)
self.list_model.appendRow(item)
self.list_view.setMinimumSize(170,200)
self.list_view.move(310,130)
self.list_view.clicked.connect(self.action_cb['on_tensor_select'])
def close_application(self):
choice = QtGui.QMessageBox.question(self, 'Warning',
"Do you want to quit?",
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
if choice == QtGui.QMessageBox.Yes:
self.action_cb['on_close']()
else:
pass
def save_WatchList(self):
choice = QtGui.QMessageBox.question(self, '',
"Do you want to save the watch_list?",
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
if choice == QtGui.QMessageBox.Yes:
self.action_cb['on_save']()
else:
pass
def update_tensor_list(self, list_type, list, pos, reset_pos):
items_str = [t.disp_name for t in list]
self.list_model.clear()
for text in items_str:
item = QtGui.QStandardItem(text)
self.list_model.appendRow(item)
def open_input_file(self):
name = QtGui.QFileDialog.getOpenFileName(self, 'Open input file')
input_file = open(name, 'r')
DIYname = QtGui.QInputDialog.getText(self, 'Name your input choice', None)
save_name = DIYname[0]
self.action_cb['add_input'](save_name, input_file.name)
def update_input_list(self, input_list):
self.sourceInput_list.clear()
for item in input_list:
self.sourceInput_list.addItem(item.name)
def enable_filter_input(self, enable):
if enable is False:
self.filter_in.setDisabled(True)
else:
self.filter_in.setDisabled(False)
class TensorItem(object):
def __init__(self, name, shape, op, input_name):
self.name = name
self.op = op
self.input_name = input_name
self.disp_name = name
try:
shape_str = '(' + ', '.join(map(str, shape)) + ')'
self.shape_str = shape_str
self.reshape = []
        except Exception:
            self.shape_str = ""
            self.reshape = []
def copy(self, obj):
self.name = copy.copy(obj.name)
self.input_name = copy.copy(obj.input_name)
self.op = obj.op
self.disp_name = copy.copy(obj.disp_name)
self.shape_str = copy.copy(obj.shape_str)
self.reshape = copy.copy(obj.reshape)
def get_reshape_str(self):
return ', '.join(map(str, self.reshape))
class ControlPanel(object):
quit = False
pause = True
single_step_flag = False
step_count = 0
cur_list_type = 0
cur_filter_type_index = 0
tensor_select_list = []
select_list_cur_pos = 0
tensor_watch_list = []
watch_list_cur_pos = 0
tensor_input_list = []
console_cmd_list = []
pyqt_env = None
class TensorSelectItem(TensorItem):
def __init__(self, name, shape, op, input_name):
TensorItem.__init__(self, name, shape, op, input_name)
class TensorWatchItem(TensorItem):
def __init__(self, tensor_select_item):
self.showstate = True
self.copy(tensor_select_item)
self.data_source = TensorData(start_step=ControlPanel.step_count)
self.pyqt_window_id = None
self.picDIM = '1-DIM'
class TensorInputItem(object):
def __init__(self, name, input_obj):
self.name = name
self.input_obj = input_obj
def __open_tensor_view(self, index, text):
tensor_item = self.tensor_watch_list[index]
tensor_item.pyqt_window_id = self.pyqt_env.get_free_identity()
if text == '2-DIM':
self.pyqt_env.create_window(tensor_item.pyqt_window_id, TensorView2D,
{'data_source':tensor_item.data_source, 'name':tensor_item.name, 'shape':tensor_item.shape_str, 'reshape':tensor_item.reshape})
self.tensor_watch_list[index].picDIM = '2-DIM'
elif text == '1-DIM':
self.pyqt_env.create_window(tensor_item.pyqt_window_id, TensorView1D,
{'data_source':tensor_item.data_source, 'name':tensor_item.name})
self.tensor_watch_list[index].picDIM = '1-DIM'
elif text == 'Activation':
self.pyqt_env.create_window(tensor_item.pyqt_window_id, TensorViewAct,
{'data_source':tensor_item.data_source, 'name':tensor_item.name, 'shape':tensor_item.shape_str, 'reshape':tensor_item.reshape})
self.tensor_watch_list[index].picDIM = 'Activation'
elif text == 'Filter':
self.pyqt_env.create_window(tensor_item.pyqt_window_id, TensorViewFilter,
{'data_source':tensor_item.data_source, 'name':tensor_item.name, 'shape':tensor_item.shape_str, 'reshape':tensor_item.reshape})
self.tensor_watch_list[index].picDIM = 'Filter'
def __close_tensor_view(self, index):
tensor_item = self.tensor_watch_list[index]
if tensor_item.pyqt_window_id is not None:
self.pyqt_env.close(tensor_item.pyqt_window_id)
tensor_item.pyqt_window_id = None
def __close_all_tensor_views(self):
for i in range(len(self.tensor_watch_list)):
self.__close_tensor_view(i)
def __on_tensor_shape_input(self, text):
titem = self.tensor_select_list[self.select_list_cur_pos]
dims = text.split(',')
titem.reshape = []
for dim in dims:
try:
titem.reshape.append(int(dim))
except ValueError:
pass
def __on_add_watch(self, text):
titem = self.tensor_select_list[self.select_list_cur_pos]
new_titem = self.TensorWatchItem(titem)
self.tensor_watch_list.append(new_titem)
index = len(self.tensor_watch_list)-1
self.__open_tensor_view(index,text)
def __on_remove_watch(self):
self.__close_tensor_view(self.watch_list_cur_pos)
del self.tensor_watch_list[self.watch_list_cur_pos]
item_num = len(self.tensor_watch_list)
if self.watch_list_cur_pos >= item_num and item_num > 0:
self.watch_list_cur_pos = item_num-1
if self.cur_list_type==0:
list = self.tensor_select_list
pos = self.select_list_cur_pos
else:
list = self.tensor_watch_list
pos = self.watch_list_cur_pos
self.main_window.update_tensor_list(list_type=self.cur_list_type, list=list, pos=pos, reset_pos=False)
def __on_set_show(self, state):
if state == QtCore.Qt.Checked and self.tensor_watch_list[self.watch_list_cur_pos].showstate == False:
self.__open_tensor_view(self.watch_list_cur_pos, self.tensor_watch_list[self.watch_list_cur_pos].picDIM)
self.tensor_watch_list[self.watch_list_cur_pos].showstate = True
if state != QtCore.Qt.Checked and self.tensor_watch_list[self.watch_list_cur_pos].showstate == True:
self.__close_tensor_view(self.watch_list_cur_pos)
self.tensor_watch_list[self.watch_list_cur_pos].showstate = False
def __on_input_select(self, text):
titem = self.tensor_select_list[self.select_list_cur_pos]
titem.input_name = text
input_obj = self.__get_input_obj(text)
if input_obj is not None:
input_obj.show()
def __on_tensor_select(self, index):
index = index.row()
if self.cur_list_type == 0:
self.select_list_cur_pos = index
list = self.tensor_select_list
print(list[index].shape_str)
else:
self.watch_list_cur_pos = index
list = self.tensor_watch_list
if self.tensor_watch_list[index].showstate == False:
self.main_window.showbtn.setChecked(False)
else:
self.main_window.showbtn.setChecked(True)
self.main_window.tensor_reshape_label.setText('Reshape: ('+str(list[index].get_reshape_str())+')')
self.main_window.tensor_label.setText('Shape: '+list[index].shape_str)
def __on_list_type_select(self, text):
if text == 'Select List':
index = 0
else:
index = 1
if index != self.cur_list_type:
if index == 0:
self.main_window.enable_filter_input(True)
else:
self.main_window.enable_filter_input(False)
self.cur_list_type = index
self.on_switch_btn(self.cur_list_type)
if self.cur_list_type == 0:
pos = self.select_list_cur_pos
self.main_window.update_tensor_list(list_type=self.cur_list_type, list=self.tensor_select_list, pos=pos, reset_pos=False)
else:
pos = self.watch_list_cur_pos
self.main_window.update_tensor_list(list_type=self.cur_list_type, list=self.tensor_watch_list, pos=pos, reset_pos=False)
def on_switch_btn(self,index):
if index == 0:
self.main_window.watch_choice.show()
self.main_window.show_remove_btn.hide()
self.main_window.hd_all_btn.hide()
self.main_window.showbtn.hide()
self.main_window.watch_com.show()
self.main_window.tensor_label.show()
self.main_window.tensor_label.setText('Shape: '+self.tensor_select_list[0].shape_str)
self.main_window.tensor_shape_input.show()
self.main_window.reshape_inlb.show()
self.main_window.tensor_shape_input.clear()
self.main_window.tensor_reshape_label.hide()
else:
self.main_window.watch_choice.hide()
self.main_window.show_remove_btn.show()
self.main_window.hd_all_btn.show()
self.main_window.watch_com.hide()
self.main_window.tensor_shape_input.hide()
if self.tensor_watch_list != []:
self.main_window.showbtn.show()
self.main_window.tensor_label.show()
self.main_window.tensor_reshape_label.show()
self.main_window.tensor_label.setText('Shape: '+self.tensor_watch_list[0].shape_str)
self.main_window.tensor_reshape_label.setText('Reshape: ('+str(self.tensor_watch_list[0].get_reshape_str())+')')
if self.tensor_watch_list[0].showstate == True:
self.main_window.showbtn.setChecked(True)
else:
self.main_window.showbtn.setChecked(False)
else:
self.main_window.showbtn.hide()
self.main_window.tensor_label.hide()
self.main_window.tensor_reshape_label.hide()
self.main_window.reshape_inlb.hide()
def __on_filter_type_select(self, text):
pwd = {'USER_LIST':0, 'TRAINABLE_VARIABLES':1, 'ACTIVATIONS':2, 'GLOBAL_VARIABLES':3, 'ALL_OPS':4 }
self.cur_filter_type_index = pwd[text]
if pwd[text] == 2:
pass
else:
pass
def __on_filter_str_input(self, text):
text = str(text)
self.filter_str = text.strip()
def __on_pause(self, state):
if state == QtCore.Qt.Checked:
self.pause = True
else:
self.pause = False
print(self.pause)
def __on_step(self):
self.pause = True
self.single_step_flag = True
def __on_hide_all(self):
self.__close_all_tensor_views()
self.main_window.showbtn.hide()
def __on_console_str_input(self):
return
cmd = copy.copy(text.strip())
self.console_cmd_list.append(cmd)
def __on_close(self):
self.quit = True
def __on_save(self):
NoWatchItem = len(self.tensor_watch_list)
watchlist = [None]*NoWatchItem
root = ET.Element('root')
for i in range(NoWatchItem):
watchlist[i] = ET.SubElement(root, 'Item'+str(i+1))
name = ET.SubElement(watchlist[i], 'name')
shape = ET.SubElement(watchlist[i], 'shape')
reshape = ET.SubElement(watchlist[i], 'reshape')
visType = ET.SubElement(watchlist[i], 'visType')
win_x = ET.SubElement(watchlist[i], 'win_x')
win_y = ET.SubElement(watchlist[i], 'win_y')
win_w = ET.SubElement(watchlist[i], 'win_w')
win_h = ET.SubElement(watchlist[i], 'win_h')
name.text = self.tensor_watch_list[i].name
shape.text = self.tensor_watch_list[i].shape_str
            reshape.text = self.tensor_watch_list[i].get_reshape_str()
visType.text = self.tensor_watch_list[i].picDIM
(x,y,w,h) = self.pyqt_env.get_win_pos_size(self.tensor_watch_list[i].pyqt_window_id)
win_x.text = str(x)
win_y.text = str(y)
win_w.text = str(w)
win_h.text = str(h)
my = ET.tostring(root)
myfile = open('Saved_WatchList.xml', 'wb')
myfile.write(my)
def __load_WatchList(self):
tree = ET.parse('Saved_WatchList.xml')
root = tree.getroot()
count = len(self.tensor_watch_list)
print(count)
for elem in root:
n = elem[0].text
for t in self.all_ops:
if t.name == n:
tem_select = self.TensorSelectItem(t.name, t.shape, t.op, self.tensor_input_list[0].name)
new = self.TensorWatchItem(tem_select)
self.tensor_watch_list.append(new)
print('now',len(self.tensor_watch_list), 'but count: ', count)
self.__open_tensor_view(count, elem[3].text)
self.pyqt_env.set_win_pos_size(self.tensor_watch_list[count].pyqt_window_id, \
int(elem[4].text),int(elem[5].text),int(elem[6].text),int(elem[7].text))
break
count += 1
def __create_main_window(self, args):
self.main_window = MainWindow(
{
'filter_type_list':self.filter_type_list,
'tensor_input_list': self.tensor_input_list,
'on_close':self.__on_close,
'on_save':self.__on_save,
'on_pause':self.__on_pause,
'on_step':self.__on_step,
'on_hide_all':self.__on_hide_all,
'on_console_str_input':self.__on_console_str_input,
'on_filter_type_select':self.__on_filter_type_select,
'on_filter_str_input':self.__on_filter_str_input,
'on_list_type_select':self.__on_list_type_select,
'on_tensor_select':self.__on_tensor_select,
'on_tensor_shape_input':self.__on_tensor_shape_input,
'on_input_select':self.__on_input_select,
'on_remove_watch':self.__on_remove_watch,
'on_add_watch':self.__on_add_watch,
'on_set_show':self.__on_set_show,
'load_WatchList':self.__load_WatchList,
'add_input':self.__add_input
}
)
return None
def __init__(self, filter_type_list, input_list, loaded_list):
for input_name in input_list:
self.tensor_input_list.append(self.TensorInputItem(input_name, None))
self.filter_str = ""
self.filter_type_list = filter_type_list
self.pyqt_env = PyQTEnv()
self.pyqt_env.run(self.__create_main_window, None)
self.main_window.update_input_list(self.tensor_input_list)
print('control_panel _init')
self.all_ops = loaded_list
def __get_input_obj(self, name):
for input_item in self.tensor_input_list:
if input_item.name == name:
return input_item.input_obj
return None
def __add_input(self, input_name, filename, config_dict={}):
import importlib
try:
placeholder_dict={}
for t in self.all_ops:
if t.op.op.type == 'Placeholder':
placeholder_dict[t.name] = t.op
names = os.path.split(os.path.abspath(filename))
path = names[0]
module_name = names[1].split('.')[-2]
print('* input_name is: %s, filename is: %s'%(input_name, filename))
print('* config_dict is:', config_dict)
print('* module path is: %s, name is: %s'%(path, module_name))
sys.path.append(path)
temp_module = importlib.import_module(module_name)
input_obj = temp_module.TensorInput(placeholder_dict, config_dict)
input_obj.show()
input_item = self.TensorInputItem(input_name, input_obj)
self.tensor_input_list.append(input_item)
self.main_window.update_input_list(self.tensor_input_list)
except Exception as e:
print('Add_input error:', e)
def update_tensor_list(self, tensor_list):
self.tensor_select_list = []
for t in tensor_list:
if len(self.tensor_input_list)>0:
input_name = self.tensor_input_list[0].name
else:
input_name = ''
self.tensor_select_list.append(self.TensorSelectItem(t[0], t[1], t[2], input_name))
if self.cur_list_type == 0:
self.select_list_cur_pos = 0
self.main_window.update_tensor_list(list_type=self.cur_list_type, list=self.tensor_select_list, pos=0, reset_pos=True)
def get_tensor_watch_list(self):
dict = {}
for input_item in self.tensor_input_list:
list = []
for t in self.tensor_watch_list:
if t.pyqt_window_id is not None and input_item.name == t.input_name:
list.append((t.name, t.reshape, t.op, t.data_source, t.input_name))
if len(list)>0:
dict[input_item] = list
return dict
def beat(self, update_step_flag):
if update_step_flag:
self.single_step_flag = False
ControlPanel.step_count += 1
if self.quit:
self.pyqt_env.quit()
return not self.quit
def is_pause(self):
return self.pause
def is_step(self):
return self.single_step_flag
def get_filter_type(self):
return [self.filter_type_list[self.cur_filter_type_index], self.filter_str]
def get_console_command(self):
if len(self.console_cmd_list)>0:
cmd = self.console_cmd_list.pop()
return cmd
| true
| true
|
79075d1fcc5d91a6a0519eb1345af627dcca21c1
| 17,062
|
py
|
Python
|
src/gulpio2/fileio.py
|
kiyoon/GulpIO2
|
143d53dbb7091b0938832415e32e04992439faf6
|
[
"MIT"
] | null | null | null |
src/gulpio2/fileio.py
|
kiyoon/GulpIO2
|
143d53dbb7091b0938832415e32e04992439faf6
|
[
"MIT"
] | null | null | null |
src/gulpio2/fileio.py
|
kiyoon/GulpIO2
|
143d53dbb7091b0938832415e32e04992439faf6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import re
import pickle
import json
import glob
import numpy as np
from abc import ABC, abstractmethod
from concurrent.futures import ProcessPoolExecutor
from contextlib import contextmanager
from collections import namedtuple, OrderedDict
from tqdm import tqdm
from .utils import img_to_jpeg_bytes, jpeg_bytes_to_img, _DEFAULT_JPEG_QUALITY
from pathlib import Path
#from simplejpeg import is_jpeg
def is_jpeg(data):
"""
    Check whether a bytes object (or similar) starts with the JPEG (JFIF) SOI marker.
    Taken from simplejpeg.is_jpeg, but less strict: it does not check the EOI marker,
    so truncated files that still begin with a valid SOI are accepted (most JPEG
    viewers tolerate a missing EOI anyway).
    :param data: JPEG (JFIF) data
    :return: True if the data looks like a JPEG
"""
return data[:2] == b'\xFF\xD8'
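# Usage sketch (added for illustration, not part of the original module): is_jpeg
# only inspects the first two bytes, so literal byte strings are enough to show it.
def _is_jpeg_example():
    assert is_jpeg(b'\xff\xd8\xff\xe0' + b'\x00' * 16)   # starts with the SOI marker
    assert not is_jpeg(b'\x89PNG\r\n\x1a\n')             # PNG signature, not a JPEG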
ImgInfo = namedtuple('ImgInfo', ['loc',
'pad',
'length'])
class FileFormatException(Exception):
pass
class AbstractSerializer(ABC): # pragma: no cover
@abstractmethod
def load(self, file_name):
pass
@abstractmethod
def dump(self, thing, file_name):
pass
class PickleSerializer(AbstractSerializer):
def load(self, file_name):
with open(file_name, 'rb') as file_pointer:
return pickle.load(file_pointer)
def dump(self, thing, file_name):
with open(file_name, 'wb') as file_pointer:
pickle.dump(thing, file_pointer)
class JSONSerializer(AbstractSerializer):
def load(self, file_name):
with open(file_name, 'r') as file_pointer:
return json.load(file_pointer, object_pairs_hook=OrderedDict)
def dump(self, thing, file_name):
with open(file_name, 'w') as file_pointer:
json.dump(thing, file_pointer)
pickle_serializer = PickleSerializer()
json_serializer = JSONSerializer()
def extract_input_for_getitem(element):
if isinstance(element, tuple) and len(element) == 2:
id_, slice_ = element
elif isinstance(element, (int, str)):
id_, slice_ = element, None
else:
raise TypeError("Undefined input type! id or (id, slice) expected")
id_ = str(id_)
return id_, slice_
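# Usage sketch (illustration only): __getitem__ accepts either a bare id or an
# (id, slice) tuple; ids are normalised to strings.
def _extract_input_example():
    assert extract_input_for_getitem('video_1') == ('video_1', None)
    assert extract_input_for_getitem(('video_1', slice(0, 5))) == ('video_1', slice(0, 5))
    assert extract_input_for_getitem(42) == ('42', None)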
class GulpDirectory(object):
""" Represents a directory containing *.gulp and *.gmeta files.
Parameters
----------
output_dir: str
Path to the directory containing the files.
jpeg_decoder: callable that takes a JPEG stored as :py:class:`bytes` and returns
the desired decoded image format (e.g. np.ndarray)
Attributes
----------
all_meta_dicts: list of dicts
All meta dicts from all chunks as a list.
    chunk_lookup: dict: str -> int
        Mapping element id to chunk id.
    chunk_objs_lookup: dict: int -> GulpChunk
        Mapping chunk id to the corresponding GulpChunk object.
merged_meta_dict: dict: id -> meta dict
all meta dicts merged
"""
def __init__(self, output_dir, jpeg_decoder=jpeg_bytes_to_img):
self.output_dir = output_dir
self.jpeg_decoder = jpeg_decoder
self.chunk_objs_lookup = OrderedDict(zip(self._chunk_ids(), self._chunks()))
self.all_meta_dicts = [c.meta_dict for c in self.chunk_objs_lookup.values()]
self.num_chunks = len(self.chunk_objs_lookup)
self.chunk_lookup = {}
for chunk_id, chunk in self.chunk_objs_lookup.items():
for id_ in chunk.meta_dict:
self.chunk_lookup[id_] = chunk_id
self.merged_meta_dict = {}
for d in self.all_meta_dicts:
for k in d.keys():
assert k not in self.merged_meta_dict,\
"Duplicate id detected {}".format(k)
else:
self.merged_meta_dict.update(d)
def __iter__(self):
return iter(self.chunk_objs_lookup.values())
def chunks(self):
""" Return a generator over existing GulpChunk objects which are ready
to be opened and read from. """
return self.__iter__()
def _chunks(self):
return (GulpChunk(*paths, jpeg_decoder=self.jpeg_decoder) for paths in
self._existing_file_paths())
    def new_chunks(self, total_new_chunks):
        """ Return a generator over freshly set up GulpChunk objects which are ready
to be opened and written to.
Parameters
----------
total_new_chunks: int
The total number of new chunks to initialize.
"""
return ((GulpChunk(*paths, jpeg_decoder=self.jpeg_decoder) for paths in
self._allocate_new_file_paths(total_new_chunks)))
def __getitem__(self, element):
id_, _ = extract_input_for_getitem(element)
chunk_id = self.chunk_lookup[id_]
gulp_chunk = self.chunk_objs_lookup[chunk_id]
with gulp_chunk.open():
return gulp_chunk[element]
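    # Usage sketch (illustration only; 'my_gulps' and 'video_1' are hypothetical names):
    #
    #   gd = GulpDirectory('my_gulps')
    #   frames, meta = gd['video_1']                # all frames of one item
    #   frames, meta = gd['video_1', slice(0, 5)]   # only the first five frames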
def _find_existing_data_paths(self):
return sorted(glob.glob(os.path.join(self.output_dir, 'data*.gulp')))
def _find_existing_meta_paths(self):
return sorted(glob.glob(os.path.join(self.output_dir, 'meta*.gmeta')))
def _load_label_dict(self):
return json.load(open(os.path.join(self.output_dir, 'label2idx.json'),
'rb'))
def _existing_file_paths(self):
data_paths = self._find_existing_data_paths()
meta_paths = self._find_existing_meta_paths()
assert len(data_paths) == len(meta_paths)
return zip(data_paths, meta_paths)
def _find_ids_from_paths(self, paths):
return [int(re.findall(r'\d+', os.path.basename(p))[0]) for p in paths]
def _chunk_ids(self):
data_paths = self._find_existing_data_paths()
meta_paths = self._find_existing_meta_paths()
data_ids = self._find_ids_from_paths(data_paths)
meta_ids = self._find_ids_from_paths(meta_paths)
assert data_ids == meta_ids
return data_ids
def _next_chunk_id(self):
existing_chunk_ids = self._chunk_ids()
next_chunk_id = 0
if len(existing_chunk_ids) > 0:
next_chunk_id = max([int(i) for i in existing_chunk_ids]) + 1
return next_chunk_id
def _allocate_new_file_paths(self, total_new_chunks):
next_chunk_id = self._next_chunk_id()
return [self._initialize_filenames(i)
for i in range(next_chunk_id,
next_chunk_id + total_new_chunks)]
def _initialize_filenames(self, chunk_id):
data_file_path = os.path.join(
self.output_dir, 'data_{}.gulp'.format(chunk_id))
meta_file_path = os.path.join(
self.output_dir, 'meta_{}.gmeta'.format(chunk_id))
return data_file_path, meta_file_path
class GulpChunk(object):
""" Represents a gulp chunk on disk.
Parameters
----------
data_file_path: str
Path to the *.gulp file.
meta_file_path: str
Path to the *.gmeta file.
serializer: subclass of AbstractSerializer
The type of serializer to use.
jpeg_decoder: callable that takes a JPEG stored as :py:class:`bytes` and returns
the desired decoded image format (e.g. np.ndarray)
"""
def __init__(self, data_file_path, meta_file_path,
serializer=json_serializer, jpeg_decoder=jpeg_bytes_to_img):
self.jpeg_decoder = jpeg_decoder
self.serializer = serializer
self.data_file_path = data_file_path
self.meta_file_path = meta_file_path
self.meta_dict = self._get_or_create_dict()
self._img_info = {}
self.fp = None
def __contains__(self, id_):
return str(id_) in self.meta_dict
def __getitem__(self, element):
id_, slice_ = extract_input_for_getitem(element)
return self.read_frames(id_, slice_)
def __iter__(self):
return self.iter_all()
def _get_frame_infos(self, id_):
id_ = str(id_)
if id_ in self.meta_dict:
return (self._get_or_create_img_info(id_),
self._copy_meta_data(id_))
def _copy_meta_data(self, id_):
return dict(self.meta_dict[id_]['meta_data'][0])
def _get_or_create_img_info(self, id_):
if id_ not in self._img_info:
self._img_info[id_] = [ImgInfo(*info) for info in self.meta_dict[id_]['frame_info']]
return self._img_info[id_]
def _get_or_create_dict(self):
if os.path.exists(self.meta_file_path):
return self.serializer.load(self.meta_file_path)
else:
return OrderedDict()
@staticmethod
def _default_factory():
return OrderedDict([('frame_info', []), ('meta_data', [])])
@staticmethod
def _pad_image(number):
return (4 - (number % 4)) % 4
def _append_meta(self, id_, meta_data):
id_ = str(id_)
if id_ not in self.meta_dict: # implements an OrderedDefaultDict
self.meta_dict[id_] = self._default_factory()
self.meta_dict[id_]['meta_data'].append(meta_data)
def _write_frame(self, id_, image, jpeg_encode_quality=_DEFAULT_JPEG_QUALITY):
loc = self.fp.tell()
if isinstance(image, (str, Path)):
# If image is a string or pathlib Path, assume that it is a path to a jpeg file
# and add it directly without decoding and encoding it.
with open(str(image), 'rb') as image_file:
img_str = image_file.read()
if not is_jpeg(img_str):
raise FileFormatException(f'Image file from path {image} does not appear to be a JPEG file.')
else: # np.array
img_str = img_to_jpeg_bytes(image, jpeg_encode_quality)
assert len(img_str) > 0
pad = self._pad_image(len(img_str))
record = img_str.ljust(len(img_str) + pad, b'\0')
assert len(record) > 0
img_info = ImgInfo(loc=loc,
length=len(record),
pad=pad)
id_ = str(id_)
if id_ not in self.meta_dict: # implements an OrderedDefaultDict
self.meta_dict[id_] = self._default_factory()
self.meta_dict[id_]['frame_info'].append(img_info)
self.fp.write(record)
def _write_frames(self, id_, frames, jpeg_encode_quality=_DEFAULT_JPEG_QUALITY):
for frame in frames:
self._write_frame(id_, frame, jpeg_encode_quality)
@contextmanager
    def open(self, flag='rb'):
        """Open the gulp chunk for reading, writing, or appending.
Parameters
----------
flag: str
'rb': Read binary
'wb': Write binary
'ab': Append to binary
Notes
-----
Works as a context manager but returns None.
"""
if flag in ['wb', 'rb', 'ab']:
self.fp = open(self.data_file_path, flag)
else:
m = "This file does not support the mode: '{}'".format(flag)
raise NotImplementedError(m)
yield
if flag in ['wb', 'ab']:
self.flush()
self.fp.close()
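    # Usage sketch (illustration only; 'video_1' is a hypothetical id): a chunk must
    # be opened before its frames can be read or written, e.g.
    #
    #   with chunk.open('rb'):
    #       frames, meta = chunk.read_frames('video_1')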
def flush(self):
"""Flush all buffers and write the meta file."""
self.fp.flush()
self.serializer.dump(self.meta_dict, self.meta_file_path)
def append(self, id_, meta_data, frames, jpeg_encode_quality=_DEFAULT_JPEG_QUALITY):
""" Append an item to the gulp.
Parameters
----------
id_ : str
The ID of the item
meta_data: dict
The meta-data associated with the item.
frames: list of numpy arrays
            The frames of the item as a list of numpy arrays consisting
of image pixel values.
"""
self._append_meta(id_, meta_data)
self._write_frames(id_, frames, jpeg_encode_quality=jpeg_encode_quality)
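    # Usage sketch (illustration only): write one item into a chunk opened for
    # writing; 'frames' is assumed to be a list of numpy images or JPEG file paths.
    #
    #   with chunk.open('wb'):
    #       chunk.append('video_1', {'label': 'cat'}, frames)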
def read_frames(self, id_, slice_=None):
""" Read frames for a single item.
Parameters
----------
id_: str
The ID of the item
slice_: slice or list of ints:
A slice or list of indices with which to select frames.
Returns
-------
        frames (list of numpy arrays), meta (dict)
            The frames of the item as a list of numpy arrays consisting of
            image pixel values, and the associated metadata.
"""
frame_infos, meta_data = self._get_frame_infos(id_)
slice_element = slice_ if slice_ is not None else slice(0, len(frame_infos))
def extract_frame(frame_info):
self.fp.seek(frame_info.loc)
record = self.fp.read(frame_info.length)
img_str = record[:len(record)-frame_info.pad]
img = self.jpeg_decoder(img_str)
return img
if isinstance(slice_element, (list, np.ndarray)):
selected_frame_infos = [frame_infos[idx] for idx in slice_element]
else:
selected_frame_infos = frame_infos[slice_element]
frames = [extract_frame(frame_info)
for frame_info in selected_frame_infos]
return frames, meta_data
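    # Usage sketch (illustration only): slices or index lists select a subset of frames.
    #
    #   with chunk.open('rb'):
    #       first_five, meta = chunk.read_frames('video_1', slice(0, 5))
    #       every_other, meta = chunk.read_frames('video_1', slice(0, None, 2))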
def iter_all(self, accepted_ids=None, shuffle=False):
""" Iterate over all frames in the gulp.
Parameters
----------
accepted_ids: list of str
A filter for accepted ids.
shuffle: bool
Shuffle the items or not.
Returns
-------
iterator
            An iterator that yields a series of (frames, meta) tuples. See
`read_frames` for details.
"""
ids = self.meta_dict.keys()
if accepted_ids is not None:
intersection = list(set(ids) & set(accepted_ids))
ids = [id_ for id_ in ids if id_ in intersection]
if shuffle:
ids = list(ids)
np.random.shuffle(ids)
with self.open('rb'):
for id_ in ids:
frames, meta = self.read_frames(id_)
yield frames, meta
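# Usage sketch (added for illustration): iterate over every item stored in a chunk.
# 'chunk' is assumed to be a GulpChunk built from existing *.gulp/*.gmeta files.
def _iterate_chunk_example(chunk):
    for frames, meta in chunk.iter_all(shuffle=False):
        print(meta.get('id', '<no id>'), len(frames))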
class ChunkWriter(object):
"""Can write from an adapter to a gulp chunk.
Parameters
----------
adapter: subclass of AbstractDatasetAdapter
The adapter to get items from.
"""
def __init__(self, adapter):
self.adapter = adapter
def write_chunk(self, output_chunk, input_slice):
"""Write from an input slice in the adapter to an output chunk.
Parameters
----------
output_chunk: GulpChunk
The chunk to write to
input_slice: slice
The slice to use from the adapter.
"""
with output_chunk.open('wb'):
for video in self.adapter.iter_data(input_slice):
id_ = video['id']
meta_data = video['meta']
frames = video['frames']
if len(frames) > 0:
output_chunk.append(id_, meta_data, frames, self.adapter.jpeg_encode_quality())
else:
print("Failed to write video with id: {}; no frames"
.format(id_))
def calculate_chunk_slices(items_per_chunk, num_items):
"""Calculate slices for indexing an adapter.
Parameters
----------
items_per_chunk: int
Approximate number of items per chunk.
num_items: int
Total number of items.
Returns
-------
list of slices
"""
assert items_per_chunk > 0
assert num_items > 0
return [slice(i, min(i + items_per_chunk, num_items))
for i in range(0, num_items, items_per_chunk)]
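# Worked example (added for illustration): five items gulped three-per-chunk give two slices.
def _chunk_slices_example():
    assert calculate_chunk_slices(3, 5) == [slice(0, 3), slice(3, 5)]
    assert calculate_chunk_slices(2, 4) == [slice(0, 2), slice(2, 4)]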
class GulpIngestor(object):
    """Ingest items from an adapter into gulp chunks.
Parameters
----------
adapter: subclass of AbstractDatasetAdapter
The adapter to ingest from.
output_folder: str
The folder/directory to write to.
videos_per_chunk: int
The total number of items per chunk.
num_workers: int
The level of parallelism.
"""
def __init__(self, adapter, output_folder, videos_per_chunk, num_workers):
assert int(num_workers) > 0
self.adapter = adapter
self.output_folder = output_folder
self.videos_per_chunk = int(videos_per_chunk)
self.num_workers = int(num_workers)
def __call__(self):
os.makedirs(self.output_folder, exist_ok=True)
chunk_slices = calculate_chunk_slices(self.videos_per_chunk,
len(self.adapter))
gulp_directory = GulpDirectory(self.output_folder)
new_chunks = gulp_directory.new_chunks(len(chunk_slices))
chunk_writer = ChunkWriter(self.adapter)
with ProcessPoolExecutor(max_workers=self.num_workers) as executor:
result = executor.map(chunk_writer.write_chunk,
new_chunks,
chunk_slices)
for r in tqdm(result,
desc='Chunks finished',
unit='chunk',
dynamic_ncols=True,
total=len(chunk_slices)):
pass
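# Usage sketch (illustration only): ingest a dataset with a hypothetical adapter.
# 'MyAdapter' stands in for a dataset adapter providing __len__, iter_data(slice)
# and jpeg_encode_quality(), as assumed by ChunkWriter above.
#
#   ingestor = GulpIngestor(MyAdapter(), 'output_gulps', videos_per_chunk=100, num_workers=4)
#   ingestor()   # writes data_*.gulp / meta_*.gmeta chunks into 'output_gulps'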
| 32.685824
| 143
| 0.614582
|
import os
import re
import pickle
import json
import glob
import numpy as np
from abc import ABC, abstractmethod
from concurrent.futures import ProcessPoolExecutor
from contextlib import contextmanager
from collections import namedtuple, OrderedDict
from tqdm import tqdm
from .utils import img_to_jpeg_bytes, jpeg_bytes_to_img, _DEFAULT_JPEG_QUALITY
from pathlib import Path
def is_jpeg(data):
return data[:2] == b'\xFF\xD8'
ImgInfo = namedtuple('ImgInfo', ['loc',
'pad',
'length'])
class FileFormatException(Exception):
pass
class AbstractSerializer(ABC):
@abstractmethod
def load(self, file_name):
pass
@abstractmethod
def dump(self, thing, file_name):
pass
class PickleSerializer(AbstractSerializer):
def load(self, file_name):
with open(file_name, 'rb') as file_pointer:
return pickle.load(file_pointer)
def dump(self, thing, file_name):
with open(file_name, 'wb') as file_pointer:
pickle.dump(thing, file_pointer)
class JSONSerializer(AbstractSerializer):
def load(self, file_name):
with open(file_name, 'r') as file_pointer:
return json.load(file_pointer, object_pairs_hook=OrderedDict)
def dump(self, thing, file_name):
with open(file_name, 'w') as file_pointer:
json.dump(thing, file_pointer)
pickle_serializer = PickleSerializer()
json_serializer = JSONSerializer()
def extract_input_for_getitem(element):
if isinstance(element, tuple) and len(element) == 2:
id_, slice_ = element
elif isinstance(element, (int, str)):
id_, slice_ = element, None
else:
raise TypeError("Undefined input type! id or (id, slice) expected")
id_ = str(id_)
return id_, slice_
class GulpDirectory(object):
def __init__(self, output_dir, jpeg_decoder=jpeg_bytes_to_img):
self.output_dir = output_dir
self.jpeg_decoder = jpeg_decoder
self.chunk_objs_lookup = OrderedDict(zip(self._chunk_ids(), self._chunks()))
self.all_meta_dicts = [c.meta_dict for c in self.chunk_objs_lookup.values()]
self.num_chunks = len(self.chunk_objs_lookup)
self.chunk_lookup = {}
for chunk_id, chunk in self.chunk_objs_lookup.items():
for id_ in chunk.meta_dict:
self.chunk_lookup[id_] = chunk_id
self.merged_meta_dict = {}
for d in self.all_meta_dicts:
for k in d.keys():
assert k not in self.merged_meta_dict,\
"Duplicate id detected {}".format(k)
else:
self.merged_meta_dict.update(d)
def __iter__(self):
return iter(self.chunk_objs_lookup.values())
def chunks(self):
return self.__iter__()
def _chunks(self):
return (GulpChunk(*paths, jpeg_decoder=self.jpeg_decoder) for paths in
self._existing_file_paths())
def new_chunks(self, total_new_chunks):
return ((GulpChunk(*paths, jpeg_decoder=self.jpeg_decoder) for paths in
self._allocate_new_file_paths(total_new_chunks)))
def __getitem__(self, element):
id_, _ = extract_input_for_getitem(element)
chunk_id = self.chunk_lookup[id_]
gulp_chunk = self.chunk_objs_lookup[chunk_id]
with gulp_chunk.open():
return gulp_chunk[element]
def _find_existing_data_paths(self):
return sorted(glob.glob(os.path.join(self.output_dir, 'data*.gulp')))
def _find_existing_meta_paths(self):
return sorted(glob.glob(os.path.join(self.output_dir, 'meta*.gmeta')))
def _load_label_dict(self):
return json.load(open(os.path.join(self.output_dir, 'label2idx.json'),
'rb'))
def _existing_file_paths(self):
data_paths = self._find_existing_data_paths()
meta_paths = self._find_existing_meta_paths()
assert len(data_paths) == len(meta_paths)
return zip(data_paths, meta_paths)
def _find_ids_from_paths(self, paths):
return [int(re.findall(r'\d+', os.path.basename(p))[0]) for p in paths]
def _chunk_ids(self):
data_paths = self._find_existing_data_paths()
meta_paths = self._find_existing_meta_paths()
data_ids = self._find_ids_from_paths(data_paths)
meta_ids = self._find_ids_from_paths(meta_paths)
assert data_ids == meta_ids
return data_ids
def _next_chunk_id(self):
existing_chunk_ids = self._chunk_ids()
next_chunk_id = 0
if len(existing_chunk_ids) > 0:
next_chunk_id = max([int(i) for i in existing_chunk_ids]) + 1
return next_chunk_id
def _allocate_new_file_paths(self, total_new_chunks):
next_chunk_id = self._next_chunk_id()
return [self._initialize_filenames(i)
for i in range(next_chunk_id,
next_chunk_id + total_new_chunks)]
def _initialize_filenames(self, chunk_id):
data_file_path = os.path.join(
self.output_dir, 'data_{}.gulp'.format(chunk_id))
meta_file_path = os.path.join(
self.output_dir, 'meta_{}.gmeta'.format(chunk_id))
return data_file_path, meta_file_path
class GulpChunk(object):
def __init__(self, data_file_path, meta_file_path,
serializer=json_serializer, jpeg_decoder=jpeg_bytes_to_img):
self.jpeg_decoder = jpeg_decoder
self.serializer = serializer
self.data_file_path = data_file_path
self.meta_file_path = meta_file_path
self.meta_dict = self._get_or_create_dict()
self._img_info = {}
self.fp = None
def __contains__(self, id_):
return str(id_) in self.meta_dict
def __getitem__(self, element):
id_, slice_ = extract_input_for_getitem(element)
return self.read_frames(id_, slice_)
def __iter__(self):
return self.iter_all()
def _get_frame_infos(self, id_):
id_ = str(id_)
if id_ in self.meta_dict:
return (self._get_or_create_img_info(id_),
self._copy_meta_data(id_))
def _copy_meta_data(self, id_):
return dict(self.meta_dict[id_]['meta_data'][0])
def _get_or_create_img_info(self, id_):
if id_ not in self._img_info:
self._img_info[id_] = [ImgInfo(*info) for info in self.meta_dict[id_]['frame_info']]
return self._img_info[id_]
def _get_or_create_dict(self):
if os.path.exists(self.meta_file_path):
return self.serializer.load(self.meta_file_path)
else:
return OrderedDict()
@staticmethod
def _default_factory():
return OrderedDict([('frame_info', []), ('meta_data', [])])
@staticmethod
def _pad_image(number):
return (4 - (number % 4)) % 4
def _append_meta(self, id_, meta_data):
id_ = str(id_)
if id_ not in self.meta_dict:
self.meta_dict[id_] = self._default_factory()
self.meta_dict[id_]['meta_data'].append(meta_data)
def _write_frame(self, id_, image, jpeg_encode_quality=_DEFAULT_JPEG_QUALITY):
loc = self.fp.tell()
if isinstance(image, (str, Path)):
with open(str(image), 'rb') as image_file:
img_str = image_file.read()
if not is_jpeg(img_str):
raise FileFormatException(f'Image file from path {image} does not appear to be a JPEG file.')
else:
img_str = img_to_jpeg_bytes(image, jpeg_encode_quality)
assert len(img_str) > 0
pad = self._pad_image(len(img_str))
record = img_str.ljust(len(img_str) + pad, b'\0')
assert len(record) > 0
img_info = ImgInfo(loc=loc,
length=len(record),
pad=pad)
id_ = str(id_)
if id_ not in self.meta_dict:
self.meta_dict[id_] = self._default_factory()
self.meta_dict[id_]['frame_info'].append(img_info)
self.fp.write(record)
def _write_frames(self, id_, frames, jpeg_encode_quality=_DEFAULT_JPEG_QUALITY):
for frame in frames:
self._write_frame(id_, frame, jpeg_encode_quality)
@contextmanager
def open(self, flag='rb'):
if flag in ['wb', 'rb', 'ab']:
self.fp = open(self.data_file_path, flag)
else:
m = "This file does not support the mode: '{}'".format(flag)
raise NotImplementedError(m)
yield
if flag in ['wb', 'ab']:
self.flush()
self.fp.close()
def flush(self):
self.fp.flush()
self.serializer.dump(self.meta_dict, self.meta_file_path)
def append(self, id_, meta_data, frames, jpeg_encode_quality=_DEFAULT_JPEG_QUALITY):
self._append_meta(id_, meta_data)
self._write_frames(id_, frames, jpeg_encode_quality=jpeg_encode_quality)
def read_frames(self, id_, slice_=None):
frame_infos, meta_data = self._get_frame_infos(id_)
slice_element = slice_ if slice_ is not None else slice(0, len(frame_infos))
def extract_frame(frame_info):
self.fp.seek(frame_info.loc)
record = self.fp.read(frame_info.length)
img_str = record[:len(record)-frame_info.pad]
img = self.jpeg_decoder(img_str)
return img
if isinstance(slice_element, (list, np.ndarray)):
selected_frame_infos = [frame_infos[idx] for idx in slice_element]
else:
selected_frame_infos = frame_infos[slice_element]
frames = [extract_frame(frame_info)
for frame_info in selected_frame_infos]
return frames, meta_data
def iter_all(self, accepted_ids=None, shuffle=False):
ids = self.meta_dict.keys()
if accepted_ids is not None:
intersection = list(set(ids) & set(accepted_ids))
ids = [id_ for id_ in ids if id_ in intersection]
if shuffle:
ids = list(ids)
np.random.shuffle(ids)
with self.open('rb'):
for id_ in ids:
frames, meta = self.read_frames(id_)
yield frames, meta
class ChunkWriter(object):
def __init__(self, adapter):
self.adapter = adapter
def write_chunk(self, output_chunk, input_slice):
with output_chunk.open('wb'):
for video in self.adapter.iter_data(input_slice):
id_ = video['id']
meta_data = video['meta']
frames = video['frames']
if len(frames) > 0:
output_chunk.append(id_, meta_data, frames, self.adapter.jpeg_encode_quality())
else:
print("Failed to write video with id: {}; no frames"
.format(id_))
def calculate_chunk_slices(items_per_chunk, num_items):
assert items_per_chunk > 0
assert num_items > 0
return [slice(i, min(i + items_per_chunk, num_items))
for i in range(0, num_items, items_per_chunk)]
class GulpIngestor(object):
def __init__(self, adapter, output_folder, videos_per_chunk, num_workers):
assert int(num_workers) > 0
self.adapter = adapter
self.output_folder = output_folder
self.videos_per_chunk = int(videos_per_chunk)
self.num_workers = int(num_workers)
def __call__(self):
os.makedirs(self.output_folder, exist_ok=True)
chunk_slices = calculate_chunk_slices(self.videos_per_chunk,
len(self.adapter))
gulp_directory = GulpDirectory(self.output_folder)
new_chunks = gulp_directory.new_chunks(len(chunk_slices))
chunk_writer = ChunkWriter(self.adapter)
with ProcessPoolExecutor(max_workers=self.num_workers) as executor:
result = executor.map(chunk_writer.write_chunk,
new_chunks,
chunk_slices)
for r in tqdm(result,
desc='Chunks finished',
unit='chunk',
dynamic_ncols=True,
total=len(chunk_slices)):
pass
| true
| true
|
79075d5041a1a9aee84461b99b34b935700b750d
| 9,126
|
py
|
Python
|
sdk/python/pulumi_aws/ecs/tag.py
|
rapzo/pulumi-aws
|
390a098221315d98a54ba97d1559e750dc3053b7
|
[
"ECL-2.0",
"Apache-2.0"
] | 260
|
2018-06-18T14:57:00.000Z
|
2022-03-29T11:41:03.000Z
|
sdk/python/pulumi_aws/ecs/tag.py
|
rapzo/pulumi-aws
|
390a098221315d98a54ba97d1559e750dc3053b7
|
[
"ECL-2.0",
"Apache-2.0"
] | 1,154
|
2018-06-19T20:38:20.000Z
|
2022-03-31T19:48:16.000Z
|
sdk/python/pulumi_aws/ecs/tag.py
|
rapzo/pulumi-aws
|
390a098221315d98a54ba97d1559e750dc3053b7
|
[
"ECL-2.0",
"Apache-2.0"
] | 115
|
2018-06-28T03:20:27.000Z
|
2022-03-29T11:41:06.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['TagArgs', 'Tag']
@pulumi.input_type
class TagArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
resource_arn: pulumi.Input[str],
value: pulumi.Input[str]):
"""
The set of arguments for constructing a Tag resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "resource_arn", resource_arn)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
Tag name.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> pulumi.Input[str]:
"""
Amazon Resource Name (ARN) of the ECS resource to tag.
"""
return pulumi.get(self, "resource_arn")
@resource_arn.setter
def resource_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_arn", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
Tag value.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class _TagState:
def __init__(__self__, *,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Tag resources.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
if key is not None:
pulumi.set(__self__, "key", key)
if resource_arn is not None:
pulumi.set(__self__, "resource_arn", resource_arn)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Tag name.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> Optional[pulumi.Input[str]]:
"""
Amazon Resource Name (ARN) of the ECS resource to tag.
"""
return pulumi.get(self, "resource_arn")
@resource_arn.setter
def resource_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_arn", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
Tag value.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
class Tag(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
## Import
`aws_ecs_tag` can be imported by using the ECS resource identifier and key, separated by a comma (`,`), e.g.
```sh
$ pulumi import aws:ecs/tag:Tag example arn:aws:ecs:us-east-1:123456789012:cluster/example,Name
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: TagArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Import
`aws_ecs_tag` can be imported by using the ECS resource identifier and key, separated by a comma (`,`), e.g.
```sh
$ pulumi import aws:ecs/tag:Tag example arn:aws:ecs:us-east-1:123456789012:cluster/example,Name
```
:param str resource_name: The name of the resource.
:param TagArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TagArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TagArgs.__new__(TagArgs)
if key is None and not opts.urn:
raise TypeError("Missing required property 'key'")
__props__.__dict__["key"] = key
if resource_arn is None and not opts.urn:
raise TypeError("Missing required property 'resource_arn'")
__props__.__dict__["resource_arn"] = resource_arn
if value is None and not opts.urn:
raise TypeError("Missing required property 'value'")
__props__.__dict__["value"] = value
super(Tag, __self__).__init__(
'aws:ecs/tag:Tag',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None) -> 'Tag':
"""
Get an existing Tag resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _TagState.__new__(_TagState)
__props__.__dict__["key"] = key
__props__.__dict__["resource_arn"] = resource_arn
__props__.__dict__["value"] = value
return Tag(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def key(self) -> pulumi.Output[str]:
"""
Tag name.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> pulumi.Output[str]:
"""
Amazon Resource Name (ARN) of the ECS resource to tag.
"""
return pulumi.get(self, "resource_arn")
@property
@pulumi.getter
def value(self) -> pulumi.Output[str]:
"""
Tag value.
"""
return pulumi.get(self, "value")
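# Usage sketch (added for illustration): create an ECS tag from within a Pulumi
# program. The ARN below is the made-up example value from the import docs above.
def _tag_usage_example():
    return Tag("example",
               key="Name",
               resource_arn="arn:aws:ecs:us-east-1:123456789012:cluster/example",
               value="example")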
| 35.509728
| 134
| 0.60103
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['TagArgs', 'Tag']
@pulumi.input_type
class TagArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
resource_arn: pulumi.Input[str],
value: pulumi.Input[str]):
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "resource_arn", resource_arn)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> pulumi.Input[str]:
return pulumi.get(self, "resource_arn")
@resource_arn.setter
def resource_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_arn", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class _TagState:
def __init__(__self__, *,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
if key is not None:
pulumi.set(__self__, "key", key)
if resource_arn is not None:
pulumi.set(__self__, "resource_arn", resource_arn)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "resource_arn")
@resource_arn.setter
def resource_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_arn", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
class Tag(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None,
__props__=None):
...
@overload
def __init__(__self__,
resource_name: str,
args: TagArgs,
opts: Optional[pulumi.ResourceOptions] = None):
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TagArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TagArgs.__new__(TagArgs)
if key is None and not opts.urn:
raise TypeError("Missing required property 'key'")
__props__.__dict__["key"] = key
if resource_arn is None and not opts.urn:
raise TypeError("Missing required property 'resource_arn'")
__props__.__dict__["resource_arn"] = resource_arn
if value is None and not opts.urn:
raise TypeError("Missing required property 'value'")
__props__.__dict__["value"] = value
super(Tag, __self__).__init__(
'aws:ecs/tag:Tag',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None) -> 'Tag':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _TagState.__new__(_TagState)
__props__.__dict__["key"] = key
__props__.__dict__["resource_arn"] = resource_arn
__props__.__dict__["value"] = value
return Tag(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def key(self) -> pulumi.Output[str]:
return pulumi.get(self, "key")
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> pulumi.Output[str]:
return pulumi.get(self, "resource_arn")
@property
@pulumi.getter
def value(self) -> pulumi.Output[str]:
return pulumi.get(self, "value")
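# A minimal usage sketch (assumed, not part of the generated provider code above):
# creating this resource through the pulumi_aws SDK; the cluster ARN is hypothetical.
#
#   import pulumi_aws as aws
#   tag = aws.ecs.Tag("example-tag",
#       resource_arn="arn:aws:ecs:us-east-1:123456789012:cluster/example",
#       key="Environment",
#       value="production")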
| true | true |
79075d735ff575058160ba7a5485b2050d0cf348 | 48,428 | py | Python
super_setup.py | brmscheiner/ibeis | 9bb93a6cd74ac47921e734c80917a38609dfe661 | ["Apache-2.0"] | null | null | null
super_setup.py | brmscheiner/ibeis | 9bb93a6cd74ac47921e734c80917a38609dfe661 | ["Apache-2.0"] | null | null | null
super_setup.py | brmscheiner/ibeis | 9bb93a6cd74ac47921e734c80917a38609dfe661 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
TODO:
* needs to check if required modules are installed (or preferably developed)
* needs to be able to ignore plugins that the user doesn't care about
Super Setup
PREREQ:
git config --global push.default current
export CODE_DIR=~/code
mkdir $CODE_DIR
cd $CODE_DIR
git clone https://github.com/WildbookOrg/ibeis.git
cd ibeis
python super_setup.py --bootstrap
OR (if in virtual environment)
python super_setup.py --bootstrap --nosudo
OR
./_scripts/bootstrap.py
THEN
./_scripts/__install_prereqs__.sh
THEN
./super_setup.py --build --develop
./super_setup.py --build --develop
./super_setup.py --status
# If on the current branch, copy so super_setup isn't overwritten as we go
python -c "import utool as ut; ut.copy('super_setup.py', '_ibeis_setup.py')"
# Status
python _ibeis_setup.py -y --gg "git status"
python _ibeis_setup.py -y --gg "git branch"
# Setup Next
#python _ibeis_setup.py -y --gg "git pull"
#python _ibeis_setup.py -y --gg "git checkout master"
#python _ibeis_setup.py -y --gg "git pull"
#python _ibeis_setup.py -y --gg "git checkout -b next"
#python _ibeis_setup.py -y --gg "git checkout next"
#python _ibeis_setup.py -y --gg "git push -u origin next"
#python _ibeis_setup.py -y --gg "git push remote origin/next"
####python _ibeis_setup.py -y --gg "git merge master"
#python _ibeis_setup.py -y --gg "git checkout ^HEAD"
#python _ibeis_setup.py -y --gg "git checkout master"
#python _ibeis_setup.py -y --gg "git checkout next"
# -- MERGE topic -> next
##python _ibeis_setup.py -y --gg "git checkout topic"
##python _ibeis_setup.py -y --gg "git checkout next"
##python _ibeis_setup.py -y --gg "git merge topic"
# -- MERGE next -> master
python _ibeis_setup.py -y --gg "git checkout master"
python _ibeis_setup.py -y --gg "git merge next"
# -- SAFER MERGE topic -> next
python super_setup.py --checkout next
python super_setup.py --newlocalbranch merge_next_joncrall_dev_branch
python super_setup.py --merge joncrall_dev_branch
./run_tests.py
python super_setup.py --checkout next
python super_setup.py --merge merge_next_joncrall_dev_branch
# Push
python _ibeis_setup.py -y --gg "git push"
#python _ibeis_setup.py -y --gg "git checkout master"
#python _ibeis_setup.py -y --gg "git checkout next"
# MAKE A NEW BRANCH
python super_setup.py --newbranch joncrall_dev_branch
python super_setup.py --checkout joncrall_dev_branch
python super_setup.py --checkout next
python super_setup.py --newbranch jdb
python super_setup.py --checkout jdb
GitReferences:
http://git-scm.com/book/en/v2/Git-Branching-Basic-Branching-and-Merging
FIXME:
graph-viz
pydot
ibeis_cnn
Theano
Lasagne
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from os.path import dirname, realpath
import platform
import sys
import os
#-----------------
# SYSTEM ENTRY POINT, NO UTOOL, BARE PYTHON
#-----------------
USAGE = ('''
--- USAGE ---
IBEIS (IMAGE ANALYSIS) SUPER SETUP
This script is meant to help set up, install, and update the developer
environment for IBEIS Image Analysis.
****
# Step 1 Initial Development Prereqs:
The first thing is to ensure you have a baseline development environment (gcc,
fortran, cmake, blas, git, pip, etc...). This should work well for apt-get,
yum, and macports package managers. It is possible to use Windows MinGW, but
it is not well supported.
The following command outputs the commands to install these prereq packages.
python super_setup.py --bootstrap
****
# Step 2 - utool
Just running the script will download and install utool --- a utility library
used in all aspects of the system.
python super_setup.py
****
# Step 3 - Download / Update Image Analysis Packages
Running the script again once utool is installed will ensure the rest of the
repositories are cloned and on your machine in the directory above this one, or
in a custom location set by your $CODE_DIR environment variable. Running with
the pull command will update the packages as well.
python super_setup.py pull
Note: if you have wildme credentials you can run this to set up git
python super_setup.py pull --move-wildme-ssh
****
# Step 3.5 - Grab and Build Extern libraries with scripts
python super_setup.py --opencv
python super_setup.py --hesaff
python super_setup.py --flann
python super_setup.py --dcnn
python super_setup.py --pydarknet
python super_setup.py --pyqt
python super_setup.py --pyrf
****
# Step 4 - Build C++ components.
Some submodules require C++ libraries. Build them using the following command.
python super_setup.py build
****
# Step 5 - Install the system.
Register these packages with the python environment.
# Install external modules
python super_setup.py --develop
# Install the ibeis module
pip install -e .
--- /USAGE ---
''')
def define_argparse():
""" todo, find a way to use this effectively """
import argparse
parser = argparse.ArgumentParser(description='IBEIS super setup')
# parser.add_argument('command', help='command to run')
def add_flag(group, name, help=None):
group.add_argument(name.replace('--', ''), action='store_true',
default=False, help=help)
# subparsers = parser.add_subparsers()
# subparsers.add_parser('pull', help='pulls IBEIS repos')
# subparsers.add_parser('ensure', help='ensures checkouts of IBEIS repos')
# sub = subparsers.add_parser('move-wildme', help='changes to the wildme repos')
# sub.add_argument('--fmt', dest='fmt', action='store',
# choices=['ssh', 'https'], help='url type')
# # Setup options for parser_a
# # Add nargs="*" for zero or more other commands
# parser.add_argument('extra', nargs = "*", help = 'Other commands')
# parser.add_argument('command', action='store_true', default=False,
# help='outputs commands to install prereqs')
g1 = parser.add_argument_group('setup')
add_flag(g1, 'bootstrap', help='outputs commands to install prereqs')
add_flag(g1, 'ensure', help='ensures that all repos are checked out')
add_flag(g1, 'build', help='builds python packages')
add_flag(g1, 'develop', help='installs packages in developer mode')
add_flag(g1, 'dcnn', help='setup dcnn packages')
g4 = parser.add_argument_group('maintenance')
add_flag(g4, 'pull', help='pulls all IBIES repos')
g3 = parser.add_argument_group('extern')
add_flag(g3, 'no_qt')
add_flag(g3, 'no_gui')
add_flag(g3, 'ignore_opencv')
g2 = parser.add_argument_group('utils')
add_flag(g2, 'move_wildme',
help='changes to the wildme repos')
args = parser.parse_args()
return args
# args = define_argparse()
# print('args = %r' % (args,))
# sys.exit(1)
def get_plat_specifier():
"""
Standard platform specifier used by distutils
"""
import setuptools # NOQA
import distutils
plat_name = distutils.util.get_platform()
plat_specifier = ".%s-%s" % (plat_name, sys.version[0:3])
if hasattr(sys, 'gettotalrefcount'):
plat_specifier += '-pydebug'
return plat_specifier
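# For illustration (values are platform-dependent, not guaranteed): on 64-bit
# Linux with CPython 3.6 this evaluates to something like ".linux-x86_64-3.6",
# which later becomes the cmake build directory suffix, e.g.
# "cmake_builds/build.linux-x86_64-3.6".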
def import_module_from_fpath(module_fpath):
""" imports module from a file path """
import platform
from os.path import basename, splitext
python_version = platform.python_version()
modname = splitext(basename(module_fpath))[0]
if python_version.startswith('2.7'):
import imp
module = imp.load_source(modname, module_fpath)
elif python_version.startswith('3'):
import importlib.machinery
loader = importlib.machinery.SourceFileLoader(modname, module_fpath)
module = loader.load_module()
else:
raise AssertionError('invalid python version')
return module
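# Example of how this helper is used below (same path the script already uses;
# shown here only for reference):
#   mod = import_module_from_fpath(os.path.abspath('_scripts/bootstrap.py'))
#   mod.bootstrap_sysreq()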
def bootstrap(WIN32):
if WIN32:
# need to preinstall parse
win32bootstrap_fpath = os.path.abspath('_scripts/win32bootstrap.py')
win32bootstrap = import_module_from_fpath(win32bootstrap_fpath)
win32bootstrap.bootstrap_sysreq()
else:
#import bootstrap
bootstrap_fpath = os.path.abspath('_scripts/bootstrap.py')
bootstrap = import_module_from_fpath(bootstrap_fpath)
#sys.path.append(os.path.abspath('_scripts'))
bootstrap.bootstrap_sysreq()
sys.exit(0)
#################
# ENSURING UTOOL
#################
def syscmd(cmdstr):
print('RUN> ' + cmdstr)
os.system(cmdstr)
def in_virtual_env():
print('sys.real_prefix=%r' % (getattr(sys, 'real_prefix', None),))
print('sys.base_prefix=%r' % (getattr(sys, 'base_prefix', None),))
print('sys.prefix=%r' % (getattr(sys, 'prefix', None),))
in_venv = False
if hasattr(sys, 'real_prefix'):
# For virtualenv module
in_venv = True
elif hasattr(sys, 'base_prefix'):
# For venv module
in_venv = sys.base_prefix != sys.prefix
return in_venv
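# Behaviour sketch: inside an environment created with `python -m venv`,
# sys.base_prefix differs from sys.prefix, so this returns True; under classic
# virtualenv, sys.real_prefix exists; a plain system interpreter matches
# neither case and the function returns False.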
def ensure_utool(CODE_DIR, pythoncmd):
WIN32 = sys.platform.startswith('win32')
#UTOOL_BRANCH = ' -b <branch> <remote_repo>'
UTOOL_BRANCH = 'next'
UTOOL_REPO = 'https://github.com/WildbookOrg/utool.git'
print('WARNING: utool is not found')
print('Attempting to get utool. Enter (y) to continue')
if '-y' in sys.argv:
ans = 'y'
else:
try:
ans = input('Enter y to continue. Anything else to exit...\n')
except:
ans = raw_input('Enter y to continue. Anything else to exit...\n') # NOQA
if ans != 'y':
print('Please install utool to continue')
sys.exit(0)
cwdpath = os.path.realpath(os.getcwd())
usr_code_dir = os.path.expanduser(CODE_DIR)
os.chdir(usr_code_dir)
print("user code dir = %r" % usr_code_dir)
print('cloning utool')
if not os.path.exists('utool'):
syscmd('git clone ' + UTOOL_REPO + ' -b ' + UTOOL_BRANCH)
os.chdir('utool')
print('pulling utool')
syscmd('git pull')
print('installing utool for development')
cmdstr = '{pythoncmd} -m pip install -e .'.format(pythoncmd=pythoncmd)
# TODO: use pip instead
# cmdstr = '{pythoncmd} -m pip install .'.format(pythoncmd=pythoncmd)
if not WIN32 and not in_virtual_env():
cmdstr = 'sudo ' + cmdstr
syscmd(cmdstr)
os.chdir(cwdpath)
# sys.path.append(usr_code_dir)
print('Please rerun super_setup.py')
print(' '.join(sys.argv))
sys.exit(1)
#-----------------
# UTOOL PYTHON
#-----------------
def initialize_repo_managers(CODE_DIR, pythoncmd, PY2, PY3):
import utool as ut
WITH_CNN = True
#WITH_TPL = True
WITH_QT = not ut.get_argflag('--no-qt')
WITH_GUI = not ut.get_argflag('--no-gui')
WITH_CUSTOM_TPL = True
WITH_PLUGINS = True
#-----------
# IBEIS project repos
#-----------
# if True:
# jon_repo_base = 'https://github.com/WildbookOrg'
# jason_repo_base = 'https://github.com/WildbookOrg'
# else:
# jon_repo_base = 'https://github.com/wildme'
# jason_repo_base = 'https://github.com/wildme'
ibeis_rman = ut.RepoManager([
'https://github.com/WildbookOrg/utool.git',
# 'https://github.com/WildbookOrg/sandbox_utools.git',
'https://github.com/WildbookOrg/vtool.git',
'https://github.com/WildbookOrg/dtool.git',
'https://github.com/Erotemic/ubelt.git',
'https://github.com/WildbookOrg/detecttools.git',
], CODE_DIR, label='core', pythoncmd=pythoncmd)
tpl_rman = ut.RepoManager([], CODE_DIR, label='tpl', pythoncmd=pythoncmd)
if not GET_ARGFLAG('--ignore-opencv'):
cv_repo = ut.Repo('https://github.com/Itseez/opencv.git', CODE_DIR, modname='cv2')
tpl_rman.add_repo(cv_repo)
if WITH_GUI:
ibeis_rman.add_repos([
'https://github.com/WildbookOrg/plottool.git',
])
if WITH_QT:
ibeis_rman.add_repos([
'https://github.com/WildbookOrg/guitool.git',
])
tpl_rman.add_repo(ut.Repo(modname=('PyQt4', 'PyQt5', 'PyQt')))
if WITH_CUSTOM_TPL:
flann_repo = ut.Repo('https://github.com/WildbookOrg/flann.git', CODE_DIR, modname='pyflann')
ibeis_rman.add_repo(flann_repo)
ibeis_rman.add_repos([
'https://github.com/WildbookOrg/hesaff.git',
])
if WITH_CNN:
ibeis_rman.add_repos([
'https://github.com/WildbookOrg/ibeis_cnn.git',
'https://github.com/WildbookOrg/pydarknet.git',
'https://gitlab.com/bluemellophone/lightnet.git',
'https://gitlab.com/bluemellophone/brambox.git',
])
# NEW CNN Dependencies
tpl_rman.add_repos([
'https://github.com/pytorch/pytorch.git',
])
# if GET_ARGFLAG('--libgpuarray'):
tpl_rman.add_repos([
'https://github.com/Theano/libgpuarray.git',
])
# CNN Dependencies
tpl_rman.add_repos([
'https://github.com/Theano/Theano.git',
# 'https://github.com/lisa-lab/pylearn2.git',
'https://github.com/Lasagne/Lasagne.git',
])
if WITH_PLUGINS:
ibeis_rman.add_repos([
'https://github.com/WildbookOrg/ibeis-flukematch-module.git',
'https://github.com/WildbookOrg/ibeis-curvrank-module.git',
'https://github.com/WildbookOrg/ibeis-deepsense-module.git',
'https://github.com/WildbookOrg/ibeis-finfindr-module.git',
'https://github.com/WildbookOrg/ibeis-kaggle7-module.git',
'https://github.com/WildbookOrg/pyrf.git',
])
if False:
        # Deprecated
ibeis_rman.add_repos([
#'https://github.com/WildbookOrg/pybing.git',
#'https://github.com/aweinstock314/cyth.git',
#'https://github.com/hjweide/pygist',
])
# Add main repo (Must be checked last due to dependency issues)
ibeis_rman.add_repos([
'https://github.com/WildbookOrg/ibeis.git',
])
#-----------
# Custom third party build/install scripts
#-----------
define_custom_scripts(tpl_rman, ibeis_rman, PY2, PY3)
return tpl_rman, ibeis_rman
def define_custom_scripts(tpl_rman, ibeis_rman, PY2, PY3):
"""
export THEANO_FLAGS="device=cpu,print_active_device=True,enable_initial_driver_test=True"
set THEANO_FLAGS=device=cpu,print_active_device=True,enable_initial_driver_test=True,print_test_value=True
python -c "import pydot; print(pydot.__file__)"
python -c "import pydot; print(pydot.__version__)"
python -c "import pydot; print(pydot.find_graphviz())"
DEVICE="cuda" python -c "import pygpu;pygpu.test()"
python -c "import theano; print(theano.__file__)"
# python -c "import pylearn2; print(pylearn2.__file__)"
python -c "import lasagne; print(lasagne.__file__)"
python -c "import ibeis_cnn; print(ibeis_cnn.__file__)"
python -c "import detecttools; print(detecttools.__file__)"
# http://stackoverflow.com/questions/18042919/how-to-install-pyqt5-on-a-new-virtualenv-and-work-on-an-idle
pip install vext.pyqt5
sudo apt-get install pyqt5-dev
sudo apt-get install python3-pyqt5
python
python -c "import sip; print('[test] Python can import sip')"
python -c "import sip; print('sip.__file__=%r' % (sip.__file__,))"
python -c "import sip; print('sip.SIP_VERSION=%r' % (sip.SIP_VERSION,))"
python -c "import sip; print('sip.SIP_VERSION_STR=%r' % (sip.SIP_VERSION_STR,))"
ln -s /usr/lib/python3/dist-packages/PyQt5/ /home/joncrall/venv3/lib/python3.4/site-packages/PyQt5
ln -s /usr/lib/python3/dist-packages/sip*.so /home/joncrall/venv3/lib/python3.4/site-packages/
ln -s /usr/lib/python3/dist-packages/sip*.py /home/joncrall/venv3/lib/python3.4/site-packages/
"""
import utool as ut
major = str(sys.version_info.major)
minor = str(sys.version_info.minor)
majorminor = [major, minor]
pyoff = '2' if sys.version_info.major == 3 else '3'
pyon = majorminor[0]
plat_spec = get_plat_specifier()
# build_dname = 'build' + ''.join(majorminor)
build_dname = 'cmake_builds/build' + plat_spec
script_fmtdict = {
'pyexe' : sys.executable,
'pyversion' : 'python' + '.'.join(majorminor),
'pypkg_var' : 'PYTHON' + pyon + '_PACKAGES_PATH',
'build_dname' : build_dname,
'pyoff' : pyoff,
'pyon' : pyon,
'cv_pyon_var' : 'BUILD_opencv_python' + pyon,
'cv_pyoff_var' : 'BUILD_opencv_python' + pyoff,
'plat_spec' : plat_spec,
'source_dpath' : '../..',
'libext' : ut.get_lib_ext(),
}
if os.environ.get('VIRTUAL_ENV', '') == '':
if sys.platform.startswith('darwin'):
local_prefix = '/opt/local'
else:
local_prefix = '/usr/local'
else:
local_prefix = os.environ['VIRTUAL_ENV']
    opencv_dir = os.path.join(local_prefix, 'share', 'OpenCV')
if not os.path.exists(opencv_dir):
if not ut.get_argflag('--opencv'):
opencv_dir = ''
print('OpenCV is not installed in the expected location: {}'.format(opencv_dir))
print('Running this script with --opencv will build and install it there')
# define bash variables for different combinations of python distros and
# virtual environments
python_bash_setup = ut.codeblock(
r'''
# STARTBLOCK bash
if [[ "$VIRTUAL_ENV" == "" ]]; then
            # The case where we are installing system-wide
            # It is recommended that a virtual environment is used instead
export PYTHON_EXECUTABLE=$(which {pyversion})
if [[ '$OSTYPE' == 'darwin'* ]]; then
# Mac system info
export LOCAL_PREFIX=/opt/local
export {pypkg_var}=$($PYTHON_EXECUTABLE -c "import site; print(site.getsitepackages()[0])")
export PYTHON_PACKAGES_PATH=${pypkg_var}
export _SUDO="sudo"
else
# Linux system info
export LOCAL_PREFIX=/usr/local
export {pypkg_var}=$LOCAL_PREFIX/lib/{pyversion}/dist-packages
export PYTHON_PACKAGES_PATH=${pypkg_var}
export _SUDO="sudo"
fi
# No windows support here
else
            # The preferred case where we are in a virtual environment
export PYTHON_EXECUTABLE=$(which python)
# export LOCAL_PREFIX=$VIRTUAL_ENV/local
export LOCAL_PREFIX=$VIRTUAL_ENV
export {pypkg_var}=$LOCAL_PREFIX/lib/{pyversion}/site-packages
export PYTHON_PACKAGES_PATH=${pypkg_var}
export _SUDO=""
fi
echo "LOCAL_PREFIX = $LOCAL_PREFIX"
echo "{pypkg_var} = ${pypkg_var}"
# ENDBLOCK bash
'''
).format(**script_fmtdict)
script_fmtdict['python_bash_setup'] = python_bash_setup
#===================
# PYFLANN SETUP SCRIPTS
#===================
ibeis_rman['pyflann'].add_script('build', ut.codeblock(
r'''
# STARTBLOCK bash
{python_bash_setup}
cd {repo_dir}
mkdir -p {build_dname}
cd {build_dname}
cmake -G "Unix Makefiles" \
-DCMAKE_BUILD_TYPE="Release" \
-DPYTHON_EXECUTABLE=$PYTHON_EXECUTABLE \
-DBUILD_EXAMPLES=Off \
-DBUILD_TESTS=Off \
-DBUILD_PYTHON_BINDINGS=On \
-DBUILD_MATLAB_BINDINGS=Off \
-DBUILD_CUDA_LIB=Off\
-DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX\
{source_dpath}
export NCPUS=$(grep -c ^processor /proc/cpuinfo)
make -j$NCPUS
# ENDBLOCK bash
''').format(repo_dir=ibeis_rman['pyflann'].dpath, **script_fmtdict)
)
ibeis_rman['pyflann'].add_script('install', ut.codeblock(
r'''
# STARTBLOCK bash
# The pyflann source lives here
cd {repo_dir}/src/python
# Need to run build to move the libs to the build directory
python setup.py build
# Use pip to editable install
pip install -e {repo_dir}/src/python
# Old way of doing it
# But the setup script is generated during build
# python {repo_dir}/build/src/python/setup.py develop
python -c "import pyflann; print(pyflann.__file__)" --verb-flann
python -c "import pyflann; print(pyflann)" --verb-flann
# ENDBLOCK bash
''').format(repo_dir=ibeis_rman['pyflann'].dpath)
)
#===================
# HESAFF
#===================
ibeis_rman['hesaff'].add_script('build', ut.codeblock(
r'''
# STARTBLOCK bash
{python_bash_setup}
cd $CODE_DIR/hesaff
mkdir -p {build_dname}
cd {build_dname}
# only specify an explicit opencv directory if we know one exists
if [ -d "$LOCAL_PREFIX/share/OpenCV" ]; then
OPENCV_ARGS="-DOpenCV_DIR=$LOCAL_PREFIX/share/OpenCV"
else
OPENCV_ARGS=""
fi
echo 'Configuring with cmake'
if [[ '$OSTYPE' == 'darwin'* ]]; then
cmake -G "Unix Makefiles" \
-DCMAKE_OSX_ARCHITECTURES=x86_64 \
-DCMAKE_C_COMPILER=clang2 \
-DCMAKE_CXX_COMPILER=clang2++ \
-DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX \
$OPENCV_ARGS \
{source_dpath}
else
cmake -G "Unix Makefiles" \
-DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX \
$OPENCV_ARGS \
{source_dpath}
fi
export NCPUS=$(grep -c ^processor /proc/cpuinfo)
make -j$NCPUS
export MAKE_EXITCODE=$?
echo "MAKE_EXITCODE=$MAKE_EXITCODE"
# Move the compiled library into the source folder
if [[ $MAKE_EXITCODE == 0 ]]; then
#make VERBOSE=1
cp -v libhesaff{libext} {source_dpath}/pyhesaff/libhesaff{plat_spec}{libext}
fi
# ENDBLOCK
''').format(**script_fmtdict))
#===================
# PYDARKNET
#===================
ibeis_rman['pydarknet'].add_script('build', ut.codeblock(
r'''
# STARTBLOCK bash
{python_bash_setup}
cd $CODE_DIR/pydarknet
mkdir -p {build_dname}
cd {build_dname}
if [[ "$(which nvcc)" == "" ]]; then
export CMAKE_CUDA=Off
else
export CMAKE_CUDA=On
fi
# only specify an explicit opencv directory if we know one exists
if [ -d "$LOCAL_PREFIX/share/OpenCV" ]; then
OPENCV_ARGS="-DOpenCV_DIR=$LOCAL_PREFIX/share/OpenCV"
else
OPENCV_ARGS=""
fi
echo 'Configuring with cmake'
if [[ '$OSTYPE' == 'darwin'* ]]; then
export CONFIG="-DCMAKE_OSX_ARCHITECTURES=x86_64 -DCMAKE_C_COMPILER=clang2 -DCMAKE_CXX_COMPILER=clang2++ -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX $OPENCV_ARGS"
else
export CONFIG="-DCMAKE_BUILD_TYPE='Release' -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX $OPENCV_ARGS"
fi
export CONFIG="$CONFIG -DCUDA=$CMAKE_CUDA"
echo "CONFIG = $CONFIG"
cmake $CONFIG -G 'Unix Makefiles' {source_dpath}
#################################
echo 'Building with make'
export NCPUS=$(grep -c ^processor /proc/cpuinfo)
make -j$NCPUS -w
#################################
export MAKE_EXITCODE=$?
echo "MAKE_EXITCODE=$MAKE_EXITCODE"
# Move the compiled library into the source folder
if [[ $MAKE_EXITCODE == 0 ]]; then
echo 'Moving the shared library'
# cp -v lib* ../pydarknet
cp -v lib*{libext} {source_dpath}/pydarknet
# cp -v libdarknet{libext} {source_dpath}/pydarknet/libdarknet{plat_spec}{libext}
fi
# ENDBLOCK
''').format(**script_fmtdict))
#===================
# PYRF
#===================
ibeis_rman['pyrf'].add_script('build', ut.codeblock(
r'''
# STARTBLOCK bash
{python_bash_setup}
cd $CODE_DIR/pyrf
mkdir -p {build_dname}
cd {build_dname}
# only specify an explicit opencv directory if we know one exists
if [ -d "$LOCAL_PREFIX/share/OpenCV" ]; then
OPENCV_ARGS="-DOpenCV_DIR=$LOCAL_PREFIX/share/OpenCV"
else
OPENCV_ARGS=""
fi
echo 'Configuring with cmake'
if [[ '$OSTYPE' == 'darwin'* ]]; then
export CONFIG="-DCMAKE_OSX_ARCHITECTURES=x86_64 -DCMAKE_C_COMPILER=clang2 -DCMAKE_CXX_COMPILER=clang2++ -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX $OPENCV_ARGS"
else
export CONFIG="-DCMAKE_BUILD_TYPE='Release' -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX $OPENCV_ARGS"
fi
echo "CONFIG = $CONFIG"
cmake $CONFIG -G 'Unix Makefiles' {source_dpath}
#################################
echo 'Building with make'
export NCPUS=$(grep -c ^processor /proc/cpuinfo)
make -j$NCPUS -w
#################################
export MAKE_EXITCODE=$?
echo "MAKE_EXITCODE=$MAKE_EXITCODE"
# Move the compiled library into the source folder
if [[ $MAKE_EXITCODE == 0 ]]; then
echo 'Moving the shared library'
# cp -v lib* ../pyrf
cp -v lib*{libext} {source_dpath}/pyrf
# cp -v libpyrf{libext} {source_dpath}/pyrf/libpyrf{plat_spec}{libext}
fi
# ENDBLOCK
''').format(**script_fmtdict))
#===================
# OPENCV SETUP SCRIPTS
#===================
"""
./super_setup.py --dump-scripts
"""
tpl_rman['cv2'].add_script('build', ut.codeblock(
r'''
# STARTBLOCK bash
{python_bash_setup}
# Checkout opencv core
cd $CODE_DIR
# export REPO_DIR=$CODE_DIR/opencv
export REPO_DIR={repo_dpath}
# git clone https://github.com/Itseez/opencv.git
cd $REPO_DIR
# Checkout opencv extras
git clone https://github.com/Itseez/opencv_contrib.git
# cd opencv_contrib
# git pull
# cd ..
# git pull
mkdir -p $REPO_DIR/{build_dname}
cd $REPO_DIR/{build_dname}
cmake -G "Unix Makefiles" \
-D WITH_OPENMP=ON \
-D CMAKE_BUILD_TYPE=RELEASE \
-D {cv_pyoff_var}=Off \
-D {cv_pyon_var}=On \
-D PYTHON_DEFAULT_EXECUTABLE="{pyexe}" \
-D {pypkg_var}=${pypkg_var} \
-D CMAKE_INSTALL_PREFIX=$LOCAL_PREFIX \
-D OPENCV_EXTRA_MODULES_PATH=$REPO_DIR/opencv_contrib/modules \
-D WITH_CUDA=Off \
-D BUILD_opencv_dnn=Off \
-D BUILD_opencv_dnn_modern=Off \
-D WITH_VTK=Off \
-D WITH_CUDA=Off \
-D WITH_MATLAB=Off \
$REPO_DIR
# -D WITH_OPENCL=Off \
# -D BUILD_opencv_face=Off \
# -D BUILD_opencv_objdetect=Off \
# -D BUILD_opencv_video=Off \
# -D BUILD_opencv_videoio=Off \
# -D BUILD_opencv_videostab=Off \
# -D BUILD_opencv_ximgproc=Off \
# -D BUILD_opencv_xobjdetect=Off \
# -D BUILD_opencv_xphoto=Off \
# -D BUILD_opencv_datasets=Off \
# -D CXX_FLAGS="-std=c++11" \ %TODO
export NCPUS=$(grep -c ^processor /proc/cpuinfo)
make -j$NCPUS
# ENDBLOCK
''').format(repo_dpath=ut.unexpanduser(tpl_rman['cv2'].dpath),
**script_fmtdict))
tpl_rman['cv2'].add_script('install', ut.codeblock(
r'''
# STARTBLOCK bash
{python_bash_setup}
cd $CODE_DIR/opencv/{build_dname}
$_SUDO make install
# Hack because cv2 does not want to be installed for some reason
# cp lib/cv2.so $PYTHON_PACKAGES_PATH
# Seems to work now that local is removed from prefix
# cp -v lib/cv2.so $PYTHON_PACKAGES_PATH
        # Test to make sure things are working
python -c "import numpy; print(numpy.__file__)"
python -c "import numpy; print(numpy.__version__)"
python -c "import cv2; print(cv2.__version__)"
python -c "import cv2; print(cv2.__file__)"
#python -c "import vtool"
# Check if we have contrib modules
python -c "import cv2; print(cv2.xfeatures2d)"
# ENDBLOCK
''').format(**script_fmtdict))
# if GET_ARGFLAG('--libgpuarray'):
tpl_rman['libgpuarray'].add_script('build', ut.codeblock(
r'''
# STARTBLOCK bash
# Ensure the repo was checked out
if [ ! -d {repo_dpath} ]; then
git clone https://github.com/Theano/libgpuarray.git {repo_dpath}
fi
{python_bash_setup}
cd {repo_dpath}
# need a specific version of libgpuarray
git checkout tags/v0.6.2 -b v0.6.2
mkdir -p {repo_dpath}/{build_dname}
cd {repo_dpath}/{build_dname}
# First build the C library
cmake {repo_dpath} -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX
export NCPUS=$(grep -c ^processor /proc/cpuinfo)
make -j$NCPUS
$_SUDO make install
        # Now build the python library
cd {repo_dpath}
python setup.py build_ext -L $LOCAL_PREFIX/lib -I $LOCAL_PREFIX/include
python setup.py build
# python setup.py install
$_SUDO pip install -e {repo_dpath}
# DEVICE="<test device>" python -c "import pygpu;pygpu.test()"
# DEVICE="gpu0" python -c "import pygpu;pygpu.test()"
cd ~
$_SUDO pip install nose
DEVICE="cuda" python -c "import pygpu;pygpu.test()"
# pip uninstall pygpu
# ENDBLOCK
''').format(repo_dpath=ut.unexpanduser(tpl_rman['libgpuarray'].dpath),
**script_fmtdict))
#===================
# PYQT SETUP SCRIPTS
#===================
if ut.in_virtual_env():
try:
fmtdict = {
'sys_dist_packages': ut.get_global_dist_packages_dir(),
'venv_site_packages': ut.get_site_packages_dir(),
'pyqt' : 'PyQt4' if PY2 else 'PyQt5',
                # Need the PyQt5 SVG module for IPython to work properly
'debian-python-qt' : (
'python-qt4' if PY2 else
'qt5-default python3-pyqt5 debian-python-qt-svg'),
'pip-python-qt' : 'python-qt4' if PY2 else 'python-qt5'
}
# sys_dist_packages = ut.get_global_dist_packages_dir()
# sys_pyqt_dir = sys_dist_packages + '/{pyqt}'
# Allows us to use a system qt install in a virtual environment.
system_to_venv = ut.codeblock(
r'''
# STARTBLOCK bash
# Creates a symlink to the global PyQt in a virtual env
export GLOBAL_DIST_PACKAGES="{sys_dist_packages}"
export VENV_DIST_PACKAGES="{venv_site_packages}"
if [ -d $GLOBAL_DIST_PACKAGES/{pyqt} ]; then
echo "have qt"
ls $GLOBAL_DIST_PACKAGES/{pyqt}
ls $VENV_DIST_PACKAGES/{pyqt}
else
# Ensure PyQt is installed first (FIXME make this work for non-debian systems)
sudo apt-get install {debian-python-qt}
# pip install {pip-python-qt}
fi
if [ -d $GLOBAL_DIST_PACKAGES/{pyqt} ]; then
                    # Install system pyqt packages into the virtual environment via symlink
ln -s $GLOBAL_DIST_PACKAGES/{pyqt}/ $VENV_DIST_PACKAGES/{pyqt}
ln -s $GLOBAL_DIST_PACKAGES/sip*.so $VENV_DIST_PACKAGES/
ln -s $GLOBAL_DIST_PACKAGES/sip*.py $VENV_DIST_PACKAGES/
else
echo "{pyqt} DOES NOT SEEM TO BE INSTALLED ON THE SYSTEM"
fi
echo "testing"
python -c "import {pyqt}; print({pyqt})"
# ENDBLOCK bash
''').format(**fmtdict)
# TODO: add custom build alternative
tpl_rman['PyQt'].add_script('system_to_venv', system_to_venv)
except NotImplementedError:
pass
#-----------
# Verify TPL Dependencies
#-----------
def GET_ARGFLAG(arg, *args, **kwargs):
import utool as ut
return arg.lstrip('--') in sys.argv or ut.get_argflag(arg, *args, **kwargs)
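# Example: invoking `python super_setup.py --pull` makes GET_ARGFLAG('--pull')
# return True, either via the raw sys.argv check or via utool's argflag parsing.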
def move_wildme(ibeis_rman, fmt):
wildme_user = 'WildbookOrg'
wildme_remote = 'wildme'
for repo in ibeis_rman.repos:
try:
gitrepo = repo.as_gitpython()
except Exception:
repo.change_url_format(fmt)
print('repo {!r} does not exist yet'.format(repo))
continue
wildme_url = repo._new_remote_url(host='github.com', user=wildme_user, fmt=fmt)
remotes = repo.remotes
message = 'Checking %s for move to wildme' % (repo,)
print(message)
incorrect_version = repo._ensure_remote_exists(wildme_remote, wildme_url)
if 'origin' in remotes:
try:
origin = remotes['origin']
origin_protocol = origin['url'].split(':')[0]
origin_user = origin['username']
if origin_user != wildme_user or origin_protocol != fmt or incorrect_version:
if origin_user not in remotes:
# first add a remote that is the original origin
origin_url = origin['url']
print(' * Create remote %r: %r' % (origin_user, origin_url,))
gitrepo.create_remote(origin_user, origin_url)
# change origin to use wildme url
gitorigin = gitrepo.remote('origin')
print(' * Change origin url to %r' % (wildme_url,))
gitorigin.set_url(wildme_url)
except:
print('\tWARNING: COULD NOT MIGRATE REPO = %r' % (repo, ))
repo.change_url_format(fmt)
def execute_commands(tpl_rman, ibeis_rman):
import utool as ut
GET_ARGVAL = ut.get_argval
ut.init_catch_ctrl_c()
if 0:
print('Version Check Source:')
for repo in tpl_rman.repos:
print('python -c "import {0}; print({0}.__file__)"'.format(repo.modname))
print('python -c "import {0}; print({0}.__version__)"'.format(repo.modname))
#-----------
# Execute Commands on Core Repos
#-----------
CODE_DIR, pythoncmd, WIN32, PY2, PY3 = get_sysinfo()
print('ibeis_rman = %r' % (ibeis_rman,))
wildme_ssh_flags = GET_ARGFLAG('--move-wildme') or GET_ARGFLAG('--move-wildme-ssh')
wildme_https_flags = GET_ARGFLAG('--move-wildme-https') or GET_ARGFLAG('--move-wildme-http')
if wildme_ssh_flags or wildme_https_flags:
fmt = 'ssh' if wildme_ssh_flags else 'https'
move_wildme(ibeis_rman, fmt)
# Commands on global git repos
if GET_ARGFLAG('--status'):
ibeis_rman.issue('git status')
sys.exit(0)
ibeis_rman.ensure()
if GET_ARGFLAG('--dump') or GET_ARGFLAG('--dump-scripts'):
dpath = '_super_scripts/' + 'scripts' + get_plat_specifier()
ut.ensuredir(dpath)
dumps = [
(tpl_rman, 'cv2', 'build'),
(tpl_rman, 'cv2', 'install'),
(ibeis_rman, 'flann', 'build'),
(ibeis_rman, 'flann', 'install'),
(ibeis_rman, 'hesaff', 'build'),
(tpl_rman, 'PyQt', 'system_to_venv'),
(tpl_rman, 'libgpuarray', 'build'),
]
for rman, mod, sname in dumps:
from os.path import join
# if mod not in rman:
# print('mod=%r not available in rman=%r' % (mod, rman))
# continue
script = rman[mod].get_script(sname).text
suffix = get_plat_specifier()
sh_fpath = join(dpath, mod + '_' + sname + suffix + '.sh')
ut.write_to(sh_fpath, script)
if GET_ARGFLAG('--requirements'):
ut.cmd('pip install -r requirements.txt')
    # HACKED IN SCRIPTS WHILE I'M STILL FIGURING OUT TPL DEPS
if GET_ARGFLAG('--opencv'):
# There is now a pypi for opencv! Yay
# ut.cmd('pip install opencv-python')
        # Bummer, but we need opencv source for pyhesaff
        # we should just make a wheel for pyhesaff
cv_repo = tpl_rman['cv2']
cv_repo.clone()
script = cv_repo.get_script('build')
script.exec_()
cv_repo = tpl_rman['cv2']
script = cv_repo.get_script('install')
script.exec_()
if GET_ARGFLAG('--flann'):
script = ibeis_rman['flann'].get_script('build')
script.exec_()
script = ibeis_rman['flann'].get_script('install')
script.exec_()
if GET_ARGFLAG('--pyqt'):
script = tpl_rman['PyQt'].get_script('system_to_venv')
script.exec_()
if GET_ARGFLAG('--hesaff'):
script = ibeis_rman['hesaff'].get_script('build')
script.exec_()
if GET_ARGFLAG('--pydarknet'):
script = ibeis_rman['pydarknet'].get_script('build')
script.exec_()
if GET_ARGFLAG('--pyrf'):
script = ibeis_rman['pyrf'].get_script('build')
script.exec_()
if GET_ARGFLAG('--torch'):
        # Theano and lasagne code should be moved to pytorch
tpl_rman['pytorch'].clone(recursive=True)
tpl_rman['pytorch'].issue('git submodule update --init')
        tpl_rman['pytorch'].issue('python setup.py install')
tpl_rman['pytorch'].issue('pip install torchvision')
# tpl_rman['pytorch'].issue('NO_CUDNN=TRUE && python setup install')
# tpl_rman['pytorch'].issue('pip install -e .')
if GET_ARGFLAG('--libgpuarray') or GET_ARGFLAG('--dcnn'):
tpl_rman['libgpuarray'].clone()
script = tpl_rman['libgpuarray'].get_script('build')
script.exec_()
if GET_ARGFLAG('--dcnn'):
tpl_rman['theano'].clone()
# tpl_rman['pylearn2'].clone()
tpl_rman['lasagne'].clone()
tpl_rman['theano'].issue('pip install -e .')
# tpl_rman['pylearn2'].issue('pip install -e .')
tpl_rman['lasagne'].issue('pip install -e .')
# tpl_rman['pylearn2'].python_develop()
# tpl_rman['theano'].python_develop()
# tpl_rman['lasagne'].python_develop()
#_===
if GET_ARGFLAG('--fix') or GET_ARGFLAG('--check'):
missing_dynlib = tpl_rman.check_cpp_build()
missing_dynlib += ibeis_rman.check_cpp_build()
missing_install = tpl_rman.check_installed()
missing_install += ibeis_rman.check_installed()
problems = []
problems += ibeis_rman.check_importable()
problems += tpl_rman.check_importable()
if GET_ARGFLAG('--fix'):
print('Trying to fix problems')
for repo in missing_dynlib:
repo.custom_build()
for repo, recommended_fix in problems:
print('Trying to fix repo = %r' % (repo,))
print(' * recommended_fix = %r' % (recommended_fix,))
if recommended_fix == 'rebuild':
repo.custom_build()
print('Can currently only fix one module at a time. Please re-run')
sys.exit(1)
else:
print('Not sure how to fix %r' % (repo,))
if GET_ARGFLAG('--pull'):
ibeis_rman.issue('git pull')
if GET_ARGFLAG('--build'):
# Build tpl repos
# tpl_rman.custom_build()
# ibeis_rman.custom_build()
# Build only IBEIS repos with setup.py
_rman = ibeis_rman.only_with_pysetup()
_rman.issue('{pythoncmd} setup.py build'.format(pythoncmd=pythoncmd))
# Like install, but better if you are developing
if GET_ARGFLAG('--develop'):
_rman = ibeis_rman.only_with_pysetup()
# # _rman.issue('{pythoncmd} setup.py develop'.format(pythoncmd=pythoncmd),
# # sudo=not ut.in_virtual_env())
_rman.issue('{pythoncmd} -m pip install -e .'.format(pythoncmd=pythoncmd),
sudo=not ut.in_virtual_env())
if GET_ARGFLAG('--clean'):
_rman = ibeis_rman.only_with_pysetup()
_rman.issue('{pythoncmd} setup.py clean'.format(pythoncmd=pythoncmd))
if GET_ARGFLAG('--install'):
        print("WARNING: Don't use install if you are a developer. Use develop instead.")
_rman = ibeis_rman.only_with_pysetup()
        _rman.issue('{pythoncmd} setup.py install'.format(pythoncmd=pythoncmd))
if GET_ARGFLAG('--push'):
ibeis_rman.issue('git push')
if GET_ARGFLAG('--branch'):
ibeis_rman.issue('git branch')
sys.exit(0)
if GET_ARGFLAG('--tag-status'):
ibeis_rman.issue('git tag')
# Tag everything
tag_name = GET_ARGVAL('--newtag', type_=str, default=None)
if tag_name is not None:
ibeis_rman.issue('git tag -a "{tag_name}" -m "super_setup autotag {tag_name}"'.format(**locals()))
ibeis_rman.issue('git push --tags')
if GET_ARGFLAG('--bext'):
ibeis_rman.issue('{pythoncmd} setup.py build_ext --inplace'.format(pythoncmd=pythoncmd))
commit_msg = GET_ARGVAL('--commit', type_=str, default=None)
if commit_msg is not None:
ibeis_rman.issue('git commit -am "{commit_msg}"'.format(**locals()))
# Change Branch
branch_name = GET_ARGVAL('--checkout', type_=str, default=None)
if branch_name is not None:
try:
ibeis_rman.issue('git checkout "{branch_name}"'.format(**locals()))
except Exception:
print('ERROR: Could not checkout branch: %r' % (branch_name, ))
# Creates new branches
newbranch_name = GET_ARGVAL('--newbranch', type_=str, default=None)
if newbranch_name is not None:
#rman.issue('git stash"'.format(**locals()))
ibeis_rman.issue('git checkout -b "{newbranch_name}"'.format(**locals()))
ibeis_rman.issue('git push --set-upstream origin {newbranch_name}'.format(**locals()))
#rman.issue('git stash pop"'.format(**locals()))
# Creates new branches
newlocalbranch_name = GET_ARGVAL('--newlocalbranch', type_=str, default=None)
if newlocalbranch_name is not None:
#rman.issue('git stash"'.format(**locals()))
ibeis_rman.issue('git checkout -b "{newlocalbranch_name}"'.format(**locals()))
#rman.issue('git push --set-upstream origin {newlocalbranch_name}'.format(**locals()))
#rman.issue('git stash pop"'.format(**locals()))
# Creates new branches
mergebranch_name = GET_ARGVAL('--merge', type_=str, default=None)
if mergebranch_name is not None:
ibeis_rman.issue('git merge "{mergebranch_name}"'.format(**locals()))
# Change ownership
if GET_ARGFLAG('--serverchmod'):
ibeis_rman.issue('chmod -R 755 *')
if GET_ARGFLAG('--chown'):
# Fixes problems where repos are checked out as root
username = os.environ.get('USERNAME', ut.get_argval('--username'))
if username is None:
username = os.environ.get('USER', None)
if username is None:
raise AssertionError('cannot find username in commandline or environment vars')
usergroup = username
ibeis_rman.issue('chown -R {username}:{usergroup} *'.format(**locals()),
sudo=True)
upstream_branch = GET_ARGVAL('--set-upstream', type_=str, default=None)
if upstream_branch is not None:
# git 2.0
ibeis_rman.issue('git branch --set-upstream-to=origin/{upstream_branch} {upstream_branch}'.format(**locals()))
upstream_push = GET_ARGVAL('--upstream-push', type_=str, default=None)
if upstream_push is not None:
ibeis_rman.issue('git push --set-upstream origin {upstream_push}'.format(**locals()))
if GET_ARGFLAG('--test'):
failures = []
for repo_dpath in ibeis_rman.repo_dirs:
# ut.getp_
mod_dpaths = ut.get_submodules_from_dpath(repo_dpath, recursive=False,
only_packages=True)
modname_list = ut.lmap(ut.get_modname_from_modpath, mod_dpaths)
print('Checking modules = %r' % (modname_list,))
for modname in modname_list:
try:
ut.import_modname(modname)
print(modname + ' success')
except ImportError as ex:
failures += [modname]
print(modname + ' failure')
print('failures = %s' % (ut.repr3(failures),))
if False:
try:
from six.moves import input
except ImportError:
input = raw_input # NOQA
# General global git command
gg_cmd = GET_ARGVAL('--gg', None) # global command
if gg_cmd is not None:
ans = 'yes' if GET_ARGFLAG('-y') else input('Are you sure you want to run: %r on all directories? ' % (gg_cmd,))
if ans == 'yes':
ibeis_rman.issue(gg_cmd)
def is_running_as_root():
"""
References:
http://stackoverflow.com/questions/5721529/running-python-script-as-root
http://stackoverflow.com/questions/2806897/checking-script-has-root
"""
return os.getenv('USER') == 'root'
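# Note: on POSIX systems `os.geteuid() == 0` is a more direct root check; the
# $USER-based test above follows the referenced StackOverflow answers.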
def get_sysinfo(verbose=0):
if verbose:
print('USER = %r' % os.getenv("USER"))
if is_running_as_root():
print('Do not run super_setup.py as root')
sys.exit(1)
WIN32 = sys.platform.startswith('win32')
if verbose:
print('[super_setup] __IBEIS_SUPER_SETUP__')
if 'CODE_DIR' in os.environ:
CODE_DIR = os.environ.get('CODE_DIR')
else:
CODE_DIR = dirname(dirname(realpath(__file__))) # Home is where the .. is. # '~/code'
if verbose:
print('[super_setup] code_dir: %r' % CODE_DIR)
(DISTRO, DISTRO_VERSION, DISTRO_TAG) = platform.dist()
python_version = platform.python_version()
PY2 = python_version.startswith('2.7')
PY3 = python_version.startswith('3')
# '--py3' in sys.argv
# assert PY3 or
# 'IBEIS currently supports python 2.7, Instead got python=%r. use --py3 to override' % python_version
pythoncmd = sys.executable
# if PY2:
# pythoncmd = 'python' if WIN32 else 'python2.7'
# elif PY3:
# pythoncmd = 'python3'
return CODE_DIR, pythoncmd, WIN32, PY2, PY3
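# Illustrative return value (assumed, not measured): on a Linux dev box this is
# roughly ('/home/user/code', '/usr/bin/python3', False, False, True),
# i.e. (CODE_DIR, pythoncmd, WIN32, PY2, PY3).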
def main():
print('''
IBEIS Image Analysis (IA)
____ _ _ ___ ____ ____ ____ ____ ___ _ _ ___
[__ | | |__] |___ |__/ [__ |___ | | | |__]
___] |__| | |___ | \ ___] |___ | |__| |
Use --help to show usage
''')
show_usage = len(sys.argv) > 1 and sys.argv[1] in ['--help', '-h']
if show_usage:
print(USAGE)
CODE_DIR, pythoncmd, WIN32, PY2, PY3 = get_sysinfo(verbose=1)
try:
import cv2 # NOQA
except ImportError:
print('Need to install OpenCV')
print('python super_setup.py --opencv')
try:
import pyflann # NOQA
except ImportError:
print('Need to install FLANN')
print('python super_setup.py --flann')
try:
import theano, lasagne # NOQA
except ImportError:
print('Need to install Theano/Lasagne/Pylearn2')
print('python super_setup.py --dcnn')
except ValueError as ex:
print(repr(ex))
print('Probably need libgpu array')
print('python super_setup.py --libgpuarray')
try:
try:
import PyQt4 # NOQA
except ImportError:
import PyQt5 # NOQA
except ImportError:
print('Need to install PyQt')
print('python super_setup.py --pyqt')
if '--bootstrap' in sys.argv or 'bootstrap' in sys.argv:
bootstrap(WIN32)
try:
# HACK IN A WAY TO ENSURE UTOOL
print('Checking utool')
import utool as ut # NOQA
except Exception:
ensure_utool(CODE_DIR, pythoncmd)
tpl_rman, ibeis_rman = initialize_repo_managers(CODE_DIR, pythoncmd, PY2, PY3)
execute_commands(tpl_rman, ibeis_rman)
if __name__ == '__main__':
main()
| 34.715412 | 166 | 0.604485 |
from __future__ import absolute_import, division, print_function, unicode_literals
from os.path import dirname, realpath
import platform
import sys
import os
USAGE = ('''
--- USAGE ---
IBEIS (IMAGE ANALYSIS) SUPER SETUP
This script is meant to help set up, install, and update the developer
environment for IBEIS Image Analysis.
****
# Step 1 Initial Development Prereqs:
The first thing is to ensure you have a baseline development environment (gcc,
fortran, cmake, blas, git, pip, etc...). This should work well for apt-get,
yum, and macports package managers. It is possible to use Windows MinGW, but
it is not well supported.
The following command outputs the commands to install these prereq packages.
python super_setup.py --bootstrap
****
# Step 2 - utool
Just running the script will download and install utool --- a utility library
used in all aspects of the system.
python super_setup.py
****
# Step 3 - Download / Update Image Analysis Packages
Running the script again once utool is installed will ensure the rest of the
repositories are cloned and on your machine in the directory above this one, or
in a custom location set by your $CODE_DIR environment variable. Running with
the pull command will update the packages as well.
python super_setup.py pull
Note: if you have wildme credentials you can run this to set up git
python super_setup.py pull --move-wildme-ssh
****
# Step 3.5 - Grab and Build Extern libraries with scripts
python super_setup.py --opencv
python super_setup.py --hesaff
python super_setup.py --flann
python super_setup.py --dcnn
python super_setup.py --pydarknet
python super_setup.py --pyqt
python super_setup.py --pyrf
****
# Step 4 - Build C++ components.
Some submodules require C++ libraries. Build them using the following command.
python super_setup.py build
****
# Step 5 - Install the system.
Register these packages with the python environment.
# Install external modules
python super_setup.py --develop
# Install the ibeis module
pip install -e .
--- /USAGE ---
''')
def define_argparse():
import argparse
parser = argparse.ArgumentParser(description='IBEIS super setup')
def add_flag(group, name, help=None):
group.add_argument(name.replace('--', ''), action='store_true',
default=False, help=help)
    g1 = parser.add_argument_group('setup')
    add_flag(g1, 'bootstrap', help='outputs commands to install prereqs')
add_flag(g1, 'ensure', help='ensures that all repos are checked out')
add_flag(g1, 'build', help='builds python packages')
add_flag(g1, 'develop', help='installs packages in developer mode')
add_flag(g1, 'dcnn', help='setup dcnn packages')
g4 = parser.add_argument_group('maintenance')
add_flag(g4, 'pull', help='pulls all IBIES repos')
g3 = parser.add_argument_group('extern')
add_flag(g3, 'no_qt')
add_flag(g3, 'no_gui')
add_flag(g3, 'ignore_opencv')
g2 = parser.add_argument_group('utils')
add_flag(g2, 'move_wildme',
help='changes to the wildme repos')
args = parser.parse_args()
return args
def get_plat_specifier():
import setuptools
import distutils
plat_name = distutils.util.get_platform()
plat_specifier = ".%s-%s" % (plat_name, sys.version[0:3])
if hasattr(sys, 'gettotalrefcount'):
plat_specifier += '-pydebug'
return plat_specifier
def import_module_from_fpath(module_fpath):
import platform
from os.path import basename, splitext
python_version = platform.python_version()
modname = splitext(basename(module_fpath))[0]
if python_version.startswith('2.7'):
import imp
module = imp.load_source(modname, module_fpath)
elif python_version.startswith('3'):
import importlib.machinery
loader = importlib.machinery.SourceFileLoader(modname, module_fpath)
module = loader.load_module()
else:
raise AssertionError('invalid python version')
return module
def bootstrap(WIN32):
if WIN32:
win32bootstrap_fpath = os.path.abspath('_scripts/win32bootstrap.py')
win32bootstrap = import_module_from_fpath(win32bootstrap_fpath)
win32bootstrap.bootstrap_sysreq()
else:
bootstrap_fpath = os.path.abspath('_scripts/bootstrap.py')
bootstrap = import_module_from_fpath(bootstrap_fpath)
bootstrap.bootstrap_sysreq()
sys.exit(0)
def syscmd(cmdstr):
    print('RUN> ' + cmdstr)
    os.system(cmdstr)
def in_virtual_env():
    print('sys.real_prefix=%r' % (getattr(sys, 'real_prefix', None),))
    print('sys.base_prefix=%r' % (getattr(sys, 'base_prefix', None),))
    print('sys.prefix=%r' % (getattr(sys, 'prefix', None),))
in_venv = False
if hasattr(sys, 'real_prefix'):
in_venv = True
elif hasattr(sys, 'base_prefix'):
in_venv = sys.base_prefix != sys.prefix
return in_venv
def ensure_utool(CODE_DIR, pythoncmd):
WIN32 = sys.platform.startswith('win32')
UTOOL_BRANCH = 'next'
UTOOL_REPO = 'https://github.com/WildbookOrg/utool.git'
print('WARNING: utool is not found')
print('Attempting to get utool. Enter (y) to continue')
if '-y' in sys.argv:
ans = 'y'
else:
try:
ans = input('Enter y to continue. Anything else to exit...\n')
except:
ans = raw_input('Enter y to continue. Anything else to exit...\n')
if ans != 'y':
print('Please install utool to continue')
sys.exit(0)
cwdpath = os.path.realpath(os.getcwd())
usr_code_dir = os.path.expanduser(CODE_DIR)
os.chdir(usr_code_dir)
print("user code dir = %r" % usr_code_dir)
print('cloning utool')
if not os.path.exists('utool'):
syscmd('git clone ' + UTOOL_REPO + ' -b ' + UTOOL_BRANCH)
os.chdir('utool')
print('pulling utool')
syscmd('git pull')
print('installing utool for development')
cmdstr = '{pythoncmd} -m pip install -e .'.format(pythoncmd=pythoncmd)
if not WIN32 and not in_virtual_env():
cmdstr = 'sudo ' + cmdstr
syscmd(cmdstr)
os.chdir(cwdpath)
print('Please rerun super_setup.py')
print(' '.join(sys.argv))
sys.exit(1)
def initialize_repo_managers(CODE_DIR, pythoncmd, PY2, PY3):
import utool as ut
WITH_CNN = True
WITH_QT = not ut.get_argflag('--no-qt')
WITH_GUI = not ut.get_argflag('--no-gui')
WITH_CUSTOM_TPL = True
WITH_PLUGINS = True
ibeis_rman = ut.RepoManager([
'https://github.com/WildbookOrg/utool.git',
'https://github.com/WildbookOrg/vtool.git',
'https://github.com/WildbookOrg/dtool.git',
'https://github.com/Erotemic/ubelt.git',
'https://github.com/WildbookOrg/detecttools.git',
], CODE_DIR, label='core', pythoncmd=pythoncmd)
tpl_rman = ut.RepoManager([], CODE_DIR, label='tpl', pythoncmd=pythoncmd)
if not GET_ARGFLAG('--ignore-opencv'):
cv_repo = ut.Repo('https://github.com/Itseez/opencv.git', CODE_DIR, modname='cv2')
tpl_rman.add_repo(cv_repo)
if WITH_GUI:
ibeis_rman.add_repos([
'https://github.com/WildbookOrg/plottool.git',
])
if WITH_QT:
ibeis_rman.add_repos([
'https://github.com/WildbookOrg/guitool.git',
])
tpl_rman.add_repo(ut.Repo(modname=('PyQt4', 'PyQt5', 'PyQt')))
if WITH_CUSTOM_TPL:
flann_repo = ut.Repo('https://github.com/WildbookOrg/flann.git', CODE_DIR, modname='pyflann')
ibeis_rman.add_repo(flann_repo)
ibeis_rman.add_repos([
'https://github.com/WildbookOrg/hesaff.git',
])
if WITH_CNN:
ibeis_rman.add_repos([
'https://github.com/WildbookOrg/ibeis_cnn.git',
'https://github.com/WildbookOrg/pydarknet.git',
'https://gitlab.com/bluemellophone/lightnet.git',
'https://gitlab.com/bluemellophone/brambox.git',
])
tpl_rman.add_repos([
'https://github.com/pytorch/pytorch.git',
])
tpl_rman.add_repos([
'https://github.com/Theano/libgpuarray.git',
])
tpl_rman.add_repos([
'https://github.com/Theano/Theano.git',
'https://github.com/Lasagne/Lasagne.git',
])
if WITH_PLUGINS:
ibeis_rman.add_repos([
'https://github.com/WildbookOrg/ibeis-flukematch-module.git',
'https://github.com/WildbookOrg/ibeis-curvrank-module.git',
'https://github.com/WildbookOrg/ibeis-deepsense-module.git',
'https://github.com/WildbookOrg/ibeis-finfindr-module.git',
'https://github.com/WildbookOrg/ibeis-kaggle7-module.git',
'https://github.com/WildbookOrg/pyrf.git',
])
if False:
ibeis_rman.add_repos([
])
ibeis_rman.add_repos([
'https://github.com/WildbookOrg/ibeis.git',
])
define_custom_scripts(tpl_rman, ibeis_rman, PY2, PY3)
return tpl_rman, ibeis_rman
def define_custom_scripts(tpl_rman, ibeis_rman, PY2, PY3):
import utool as ut
major = str(sys.version_info.major)
minor = str(sys.version_info.minor)
majorminor = [major, minor]
pyoff = '2' if sys.version_info.major == 3 else '3'
pyon = majorminor[0]
plat_spec = get_plat_specifier()
build_dname = 'cmake_builds/build' + plat_spec
script_fmtdict = {
'pyexe' : sys.executable,
'pyversion' : 'python' + '.'.join(majorminor),
'pypkg_var' : 'PYTHON' + pyon + '_PACKAGES_PATH',
'build_dname' : build_dname,
'pyoff' : pyoff,
'pyon' : pyon,
'cv_pyon_var' : 'BUILD_opencv_python' + pyon,
'cv_pyoff_var' : 'BUILD_opencv_python' + pyoff,
'plat_spec' : plat_spec,
'source_dpath' : '../..',
'libext' : ut.get_lib_ext(),
}
if os.environ.get('VIRTUAL_ENV', '') == '':
if sys.platform.startswith('darwin'):
local_prefix = '/opt/local'
else:
local_prefix = '/usr/local'
else:
local_prefix = os.environ['VIRTUAL_ENV']
    opencv_dir = os.path.join(local_prefix, 'share', 'OpenCV')
if not os.path.exists(opencv_dir):
if not ut.get_argflag('--opencv'):
opencv_dir = ''
print('OpenCV is not installed in the expected location: {}'.format(opencv_dir))
print('Running this script with --opencv will build and install it there')
python_bash_setup = ut.codeblock(
r'''
# STARTBLOCK bash
if [[ "$VIRTUAL_ENV" == "" ]]; then
            # The case where we are installing system-wide
            # It is recommended that a virtual environment is used instead
export PYTHON_EXECUTABLE=$(which {pyversion})
if [[ '$OSTYPE' == 'darwin'* ]]; then
# Mac system info
export LOCAL_PREFIX=/opt/local
export {pypkg_var}=$($PYTHON_EXECUTABLE -c "import site; print(site.getsitepackages()[0])")
export PYTHON_PACKAGES_PATH=${pypkg_var}
export _SUDO="sudo"
else
# Linux system info
export LOCAL_PREFIX=/usr/local
export {pypkg_var}=$LOCAL_PREFIX/lib/{pyversion}/dist-packages
export PYTHON_PACKAGES_PATH=${pypkg_var}
export _SUDO="sudo"
fi
# No windows support here
else
            # The preferred case where we are in a virtual environment
export PYTHON_EXECUTABLE=$(which python)
# export LOCAL_PREFIX=$VIRTUAL_ENV/local
export LOCAL_PREFIX=$VIRTUAL_ENV
export {pypkg_var}=$LOCAL_PREFIX/lib/{pyversion}/site-packages
export PYTHON_PACKAGES_PATH=${pypkg_var}
export _SUDO=""
fi
echo "LOCAL_PREFIX = $LOCAL_PREFIX"
echo "{pypkg_var} = ${pypkg_var}"
# ENDBLOCK bash
'''
).format(**script_fmtdict)
script_fmtdict['python_bash_setup'] = python_bash_setup
ibeis_rman['pyflann'].add_script('build', ut.codeblock(
r'''
# STARTBLOCK bash
{python_bash_setup}
cd {repo_dir}
mkdir -p {build_dname}
cd {build_dname}
cmake -G "Unix Makefiles" \
-DCMAKE_BUILD_TYPE="Release" \
-DPYTHON_EXECUTABLE=$PYTHON_EXECUTABLE \
-DBUILD_EXAMPLES=Off \
-DBUILD_TESTS=Off \
-DBUILD_PYTHON_BINDINGS=On \
-DBUILD_MATLAB_BINDINGS=Off \
-DBUILD_CUDA_LIB=Off\
-DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX\
{source_dpath}
export NCPUS=$(grep -c ^processor /proc/cpuinfo)
make -j$NCPUS
# ENDBLOCK bash
''').format(repo_dir=ibeis_rman['pyflann'].dpath, **script_fmtdict)
)
ibeis_rman['pyflann'].add_script('install', ut.codeblock(
r'''
# STARTBLOCK bash
# The pyflann source lives here
cd {repo_dir}/src/python
# Need to run build to move the libs to the build directory
python setup.py build
# Use pip to editable install
pip install -e {repo_dir}/src/python
# Old way of doing it
# But the setup script is generated during build
# python {repo_dir}/build/src/python/setup.py develop
python -c "import pyflann; print(pyflann.__file__)" --verb-flann
python -c "import pyflann; print(pyflann)" --verb-flann
# ENDBLOCK bash
''').format(repo_dir=ibeis_rman['pyflann'].dpath)
)
ibeis_rman['hesaff'].add_script('build', ut.codeblock(
r'''
# STARTBLOCK bash
{python_bash_setup}
cd $CODE_DIR/hesaff
mkdir -p {build_dname}
cd {build_dname}
# only specify an explicit opencv directory if we know one exists
if [ -d "$LOCAL_PREFIX/share/OpenCV" ]; then
OPENCV_ARGS="-DOpenCV_DIR=$LOCAL_PREFIX/share/OpenCV"
else
OPENCV_ARGS=""
fi
echo 'Configuring with cmake'
if [[ '$OSTYPE' == 'darwin'* ]]; then
cmake -G "Unix Makefiles" \
-DCMAKE_OSX_ARCHITECTURES=x86_64 \
-DCMAKE_C_COMPILER=clang2 \
-DCMAKE_CXX_COMPILER=clang2++ \
-DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX \
$OPENCV_ARGS \
{source_dpath}
else
cmake -G "Unix Makefiles" \
-DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX \
$OPENCV_ARGS \
{source_dpath}
fi
export NCPUS=$(grep -c ^processor /proc/cpuinfo)
make -j$NCPUS
export MAKE_EXITCODE=$?
echo "MAKE_EXITCODE=$MAKE_EXITCODE"
# Move the compiled library into the source folder
if [[ $MAKE_EXITCODE == 0 ]]; then
#make VERBOSE=1
cp -v libhesaff{libext} {source_dpath}/pyhesaff/libhesaff{plat_spec}{libext}
fi
# ENDBLOCK
''').format(**script_fmtdict))
ibeis_rman['pydarknet'].add_script('build', ut.codeblock(
r'''
# STARTBLOCK bash
{python_bash_setup}
cd $CODE_DIR/pydarknet
mkdir -p {build_dname}
cd {build_dname}
if [[ "$(which nvcc)" == "" ]]; then
export CMAKE_CUDA=Off
else
export CMAKE_CUDA=On
fi
# only specify an explicit opencv directory if we know one exists
if [ -d "$LOCAL_PREFIX/share/OpenCV" ]; then
OPENCV_ARGS="-DOpenCV_DIR=$LOCAL_PREFIX/share/OpenCV"
else
OPENCV_ARGS=""
fi
echo 'Configuring with cmake'
if [[ '$OSTYPE' == 'darwin'* ]]; then
export CONFIG="-DCMAKE_OSX_ARCHITECTURES=x86_64 -DCMAKE_C_COMPILER=clang2 -DCMAKE_CXX_COMPILER=clang2++ -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX $OPENCV_ARGS"
else
export CONFIG="-DCMAKE_BUILD_TYPE='Release' -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX $OPENCV_ARGS"
fi
export CONFIG="$CONFIG -DCUDA=$CMAKE_CUDA"
echo "CONFIG = $CONFIG"
cmake $CONFIG -G 'Unix Makefiles' {source_dpath}
#################################
echo 'Building with make'
export NCPUS=$(grep -c ^processor /proc/cpuinfo)
make -j$NCPUS -w
#################################
export MAKE_EXITCODE=$?
echo "MAKE_EXITCODE=$MAKE_EXITCODE"
# Move the compiled library into the source folder
if [[ $MAKE_EXITCODE == 0 ]]; then
echo 'Moving the shared library'
# cp -v lib* ../pydarknet
cp -v lib*{libext} {source_dpath}/pydarknet
# cp -v libdarknet{libext} {source_dpath}/pydarknet/libdarknet{plat_spec}{libext}
fi
# ENDBLOCK
''').format(**script_fmtdict))
ibeis_rman['pyrf'].add_script('build', ut.codeblock(
r'''
# STARTBLOCK bash
{python_bash_setup}
cd $CODE_DIR/pyrf
mkdir -p {build_dname}
cd {build_dname}
# only specify an explicit opencv directory if we know one exists
if [ -d "$LOCAL_PREFIX/share/OpenCV" ]; then
OPENCV_ARGS="-DOpenCV_DIR=$LOCAL_PREFIX/share/OpenCV"
else
OPENCV_ARGS=""
fi
echo 'Configuring with cmake'
if [[ '$OSTYPE' == 'darwin'* ]]; then
export CONFIG="-DCMAKE_OSX_ARCHITECTURES=x86_64 -DCMAKE_C_COMPILER=clang2 -DCMAKE_CXX_COMPILER=clang2++ -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX $OPENCV_ARGS"
else
export CONFIG="-DCMAKE_BUILD_TYPE='Release' -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX $OPENCV_ARGS"
fi
echo "CONFIG = $CONFIG"
cmake $CONFIG -G 'Unix Makefiles' {source_dpath}
#################################
echo 'Building with make'
export NCPUS=$(grep -c ^processor /proc/cpuinfo)
make -j$NCPUS -w
#################################
export MAKE_EXITCODE=$?
echo "MAKE_EXITCODE=$MAKE_EXITCODE"
# Move the compiled library into the source folder
if [[ $MAKE_EXITCODE == 0 ]]; then
echo 'Moving the shared library'
# cp -v lib* ../pyrf
cp -v lib*{libext} {source_dpath}/pyrf
# cp -v libpyrf{libext} {source_dpath}/pyrf/libpyrf{plat_spec}{libext}
fi
# ENDBLOCK
''').format(**script_fmtdict))
tpl_rman['cv2'].add_script('build', ut.codeblock(
r'''
# STARTBLOCK bash
{python_bash_setup}
# Checkout opencv core
cd $CODE_DIR
# export REPO_DIR=$CODE_DIR/opencv
export REPO_DIR={repo_dpath}
# git clone https://github.com/Itseez/opencv.git
cd $REPO_DIR
# Checkout opencv extras
git clone https://github.com/Itseez/opencv_contrib.git
# cd opencv_contrib
# git pull
# cd ..
# git pull
mkdir -p $REPO_DIR/{build_dname}
cd $REPO_DIR/{build_dname}
cmake -G "Unix Makefiles" \
-D WITH_OPENMP=ON \
-D CMAKE_BUILD_TYPE=RELEASE \
-D {cv_pyoff_var}=Off \
-D {cv_pyon_var}=On \
-D PYTHON_DEFAULT_EXECUTABLE="{pyexe}" \
-D {pypkg_var}=${pypkg_var} \
-D CMAKE_INSTALL_PREFIX=$LOCAL_PREFIX \
-D OPENCV_EXTRA_MODULES_PATH=$REPO_DIR/opencv_contrib/modules \
-D WITH_CUDA=Off \
-D BUILD_opencv_dnn=Off \
-D BUILD_opencv_dnn_modern=Off \
-D WITH_VTK=Off \
-D WITH_CUDA=Off \
-D WITH_MATLAB=Off \
$REPO_DIR
# -D WITH_OPENCL=Off \
# -D BUILD_opencv_face=Off \
# -D BUILD_opencv_objdetect=Off \
# -D BUILD_opencv_video=Off \
# -D BUILD_opencv_videoio=Off \
# -D BUILD_opencv_videostab=Off \
# -D BUILD_opencv_ximgproc=Off \
# -D BUILD_opencv_xobjdetect=Off \
# -D BUILD_opencv_xphoto=Off \
# -D BUILD_opencv_datasets=Off \
# -D CXX_FLAGS="-std=c++11" \ %TODO
export NCPUS=$(grep -c ^processor /proc/cpuinfo)
make -j$NCPUS
# ENDBLOCK
''').format(repo_dpath=ut.unexpanduser(tpl_rman['cv2'].dpath),
**script_fmtdict))
tpl_rman['cv2'].add_script('install', ut.codeblock(
r'''
# STARTBLOCK bash
{python_bash_setup}
cd $CODE_DIR/opencv/{build_dname}
$_SUDO make install
# Hack because cv2 does not want to be installed for some reason
# cp lib/cv2.so $PYTHON_PACKAGES_PATH
# Seems to work now that local is removed from prefix
# cp -v lib/cv2.so $PYTHON_PACKAGES_PATH
# Test makesure things working
python -c "import numpy; print(numpy.__file__)"
python -c "import numpy; print(numpy.__version__)"
python -c "import cv2; print(cv2.__version__)"
python -c "import cv2; print(cv2.__file__)"
#python -c "import vtool"
# Check if we have contrib modules
python -c "import cv2; print(cv2.xfeatures2d)"
# ENDBLOCK
''').format(**script_fmtdict))
tpl_rman['libgpuarray'].add_script('build', ut.codeblock(
r'''
# STARTBLOCK bash
# Ensure the repo was checked out
if [ ! -d {repo_dpath} ]; then
git clone https://github.com/Theano/libgpuarray.git {repo_dpath}
fi
{python_bash_setup}
cd {repo_dpath}
# need a specific version of libgpuarray
git checkout tags/v0.6.2 -b v0.6.2
mkdir -p {repo_dpath}/{build_dname}
cd {repo_dpath}/{build_dname}
# First build the C library
cmake {repo_dpath} -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX
export NCPUS=$(grep -c ^processor /proc/cpuinfo)
make -j$NCPUS
$_SUDO make install
# Now build the python libarary
cd {repo_dpath}
python setup.py build_ext -L $LOCAL_PREFIX/lib -I $LOCAL_PREFIX/include
python setup.py build
# python setup.py install
$_SUDO pip install -e {repo_dpath}
# DEVICE="<test device>" python -c "import pygpu;pygpu.test()"
# DEVICE="gpu0" python -c "import pygpu;pygpu.test()"
cd ~
$_SUDO pip install nose
DEVICE="cuda" python -c "import pygpu;pygpu.test()"
# pip uninstall pygpu
# ENDBLOCK
''').format(repo_dpath=ut.unexpanduser(tpl_rman['libgpuarray'].dpath),
**script_fmtdict))
if ut.in_virtual_env():
try:
fmtdict = {
'sys_dist_packages': ut.get_global_dist_packages_dir(),
'venv_site_packages': ut.get_site_packages_dir(),
'pyqt' : 'PyQt4' if PY2 else 'PyQt5',
'debian-python-qt' : (
'python-qt4' if PY2 else
'qt5-default python3-pyqt5 debian-python-qt-svg'),
'pip-python-qt' : 'python-qt4' if PY2 else 'python-qt5'
}
system_to_venv = ut.codeblock(
r'''
# STARTBLOCK bash
# Creates a symlink to the global PyQt in a virtual env
export GLOBAL_DIST_PACKAGES="{sys_dist_packages}"
export VENV_DIST_PACKAGES="{venv_site_packages}"
if [ -d $GLOBAL_DIST_PACKAGES/{pyqt} ]; then
echo "have qt"
ls $GLOBAL_DIST_PACKAGES/{pyqt}
ls $VENV_DIST_PACKAGES/{pyqt}
else
# Ensure PyQt is installed first (FIXME make this work for non-debian systems)
sudo apt-get install {debian-python-qt}
# pip install {pip-python-qt}
fi
if [ -d $GLOBAL_DIST_PACKAGES/{pyqt} ]; then
# Install system pyqt packages to virtual envirment via symlink
ln -s $GLOBAL_DIST_PACKAGES/{pyqt}/ $VENV_DIST_PACKAGES/{pyqt}
ln -s $GLOBAL_DIST_PACKAGES/sip*.so $VENV_DIST_PACKAGES/
ln -s $GLOBAL_DIST_PACKAGES/sip*.py $VENV_DIST_PACKAGES/
else
echo "{pyqt} DOES NOT SEEM TO BE INSTALLED ON THE SYSTEM"
fi
echo "testing"
python -c "import {pyqt}; print({pyqt})"
# ENDBLOCK bash
''').format(**fmtdict)
tpl_rman['PyQt'].add_script('system_to_venv', system_to_venv)
except NotImplementedError:
pass
def GET_ARGFLAG(arg, *args, **kwargs):
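    # Note (added for clarity): a flag counts as set when it appears either
    # without dashes in sys.argv (e.g. 'status') or as a regular utool
    # command-line flag (e.g. '--status').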
import utool as ut
return arg.lstrip('--') in sys.argv or ut.get_argflag(arg, *args, **kwargs)
def move_wildme(ibeis_rman, fmt):
wildme_user = 'WildbookOrg'
wildme_remote = 'wildme'
for repo in ibeis_rman.repos:
try:
gitrepo = repo.as_gitpython()
except Exception:
repo.change_url_format(fmt)
print('repo {!r} does not exist yet'.format(repo))
continue
wildme_url = repo._new_remote_url(host='github.com', user=wildme_user, fmt=fmt)
remotes = repo.remotes
message = 'Checking %s for move to wildme' % (repo,)
print(message)
incorrect_version = repo._ensure_remote_exists(wildme_remote, wildme_url)
if 'origin' in remotes:
try:
origin = remotes['origin']
origin_protocol = origin['url'].split(':')[0]
origin_user = origin['username']
if origin_user != wildme_user or origin_protocol != fmt or incorrect_version:
if origin_user not in remotes:
origin_url = origin['url']
print(' * Create remote %r: %r' % (origin_user, origin_url,))
gitrepo.create_remote(origin_user, origin_url)
gitorigin = gitrepo.remote('origin')
print(' * Change origin url to %r' % (wildme_url,))
gitorigin.set_url(wildme_url)
except:
print('\tWARNING: COULD NOT MIGRATE REPO = %r' % (repo, ))
repo.change_url_format(fmt)
def execute_commands(tpl_rman, ibeis_rman):
import utool as ut
GET_ARGVAL = ut.get_argval
ut.init_catch_ctrl_c()
if 0:
print('Version Check Source:')
for repo in tpl_rman.repos:
print('python -c "import {0}; print({0}.__file__)"'.format(repo.modname))
print('python -c "import {0}; print({0}.__version__)"'.format(repo.modname))
CODE_DIR, pythoncmd, WIN32, PY2, PY3 = get_sysinfo()
print('ibeis_rman = %r' % (ibeis_rman,))
wildme_ssh_flags = GET_ARGFLAG('--move-wildme') or GET_ARGFLAG('--move-wildme-ssh')
wildme_https_flags = GET_ARGFLAG('--move-wildme-https') or GET_ARGFLAG('--move-wildme-http')
if wildme_ssh_flags or wildme_https_flags:
fmt = 'ssh' if wildme_ssh_flags else 'https'
move_wildme(ibeis_rman, fmt)
if GET_ARGFLAG('--status'):
ibeis_rman.issue('git status')
sys.exit(0)
ibeis_rman.ensure()
if GET_ARGFLAG('--dump') or GET_ARGFLAG('--dump-scripts'):
dpath = '_super_scripts/' + 'scripts' + get_plat_specifier()
ut.ensuredir(dpath)
dumps = [
(tpl_rman, 'cv2', 'build'),
(tpl_rman, 'cv2', 'install'),
(ibeis_rman, 'flann', 'build'),
(ibeis_rman, 'flann', 'install'),
(ibeis_rman, 'hesaff', 'build'),
(tpl_rman, 'PyQt', 'system_to_venv'),
(tpl_rman, 'libgpuarray', 'build'),
]
for rman, mod, sname in dumps:
from os.path import join
script = rman[mod].get_script(sname).text
suffix = get_plat_specifier()
sh_fpath = join(dpath, mod + '_' + sname + suffix + '.sh')
ut.write_to(sh_fpath, script)
if GET_ARGFLAG('--requirements'):
ut.cmd('pip install -r requirements.txt')
if GET_ARGFLAG('--opencv'):
cv_repo = tpl_rman['cv2']
cv_repo.clone()
script = cv_repo.get_script('build')
script.exec_()
cv_repo = tpl_rman['cv2']
script = cv_repo.get_script('install')
script.exec_()
if GET_ARGFLAG('--flann'):
script = ibeis_rman['flann'].get_script('build')
script.exec_()
script = ibeis_rman['flann'].get_script('install')
script.exec_()
if GET_ARGFLAG('--pyqt'):
script = tpl_rman['PyQt'].get_script('system_to_venv')
script.exec_()
if GET_ARGFLAG('--hesaff'):
script = ibeis_rman['hesaff'].get_script('build')
script.exec_()
if GET_ARGFLAG('--pydarknet'):
script = ibeis_rman['pydarknet'].get_script('build')
script.exec_()
if GET_ARGFLAG('--pyrf'):
script = ibeis_rman['pyrf'].get_script('build')
script.exec_()
if GET_ARGFLAG('--torch'):
tpl_rman['pytorch'].clone(recursive=True)
tpl_rman['pytorch'].issue('git submodule update --init')
        tpl_rman['pytorch'].issue('python setup.py install')
tpl_rman['pytorch'].issue('pip install torchvision')
if GET_ARGFLAG('--libgpuarray') or GET_ARGFLAG('--dcnn'):
tpl_rman['libgpuarray'].clone()
script = tpl_rman['libgpuarray'].get_script('build')
script.exec_()
if GET_ARGFLAG('--dcnn'):
tpl_rman['theano'].clone()
tpl_rman['lasagne'].clone()
tpl_rman['theano'].issue('pip install -e .')
tpl_rman['lasagne'].issue('pip install -e .')
if GET_ARGFLAG('--fix') or GET_ARGFLAG('--check'):
missing_dynlib = tpl_rman.check_cpp_build()
missing_dynlib += ibeis_rman.check_cpp_build()
missing_install = tpl_rman.check_installed()
missing_install += ibeis_rman.check_installed()
problems = []
problems += ibeis_rman.check_importable()
problems += tpl_rman.check_importable()
if GET_ARGFLAG('--fix'):
print('Trying to fix problems')
for repo in missing_dynlib:
repo.custom_build()
for repo, recommended_fix in problems:
print('Trying to fix repo = %r' % (repo,))
print(' * recommended_fix = %r' % (recommended_fix,))
if recommended_fix == 'rebuild':
repo.custom_build()
print('Can currently only fix one module at a time. Please re-run')
sys.exit(1)
else:
print('Not sure how to fix %r' % (repo,))
if GET_ARGFLAG('--pull'):
ibeis_rman.issue('git pull')
if GET_ARGFLAG('--build'):
_rman = ibeis_rman.only_with_pysetup()
_rman.issue('{pythoncmd} setup.py build'.format(pythoncmd=pythoncmd))
if GET_ARGFLAG('--develop'):
_rman = ibeis_rman.only_with_pysetup()
        _rman.issue('{pythoncmd} setup.py develop'.format(pythoncmd=pythoncmd),
                    sudo=not ut.in_virtual_env())
if GET_ARGFLAG('--clean'):
_rman = ibeis_rman.only_with_pysetup()
_rman.issue('{pythoncmd} setup.py clean'.format(pythoncmd=pythoncmd))
if GET_ARGFLAG('--install'):
        print('WARNING: Do not use install if you are a developer. Use develop instead.')
        _rman = ibeis_rman.only_with_pysetup()
        _rman.issue('{pythoncmd} setup.py install'.format(pythoncmd=pythoncmd))
if GET_ARGFLAG('--push'):
ibeis_rman.issue('git push')
if GET_ARGFLAG('--branch'):
ibeis_rman.issue('git branch')
sys.exit(0)
if GET_ARGFLAG('--tag-status'):
ibeis_rman.issue('git tag')
tag_name = GET_ARGVAL('--newtag', type_=str, default=None)
if tag_name is not None:
ibeis_rman.issue('git tag -a "{tag_name}" -m "super_setup autotag {tag_name}"'.format(**locals()))
ibeis_rman.issue('git push --tags')
if GET_ARGFLAG('--bext'):
ibeis_rman.issue('{pythoncmd} setup.py build_ext --inplace'.format(pythoncmd=pythoncmd))
commit_msg = GET_ARGVAL('--commit', type_=str, default=None)
if commit_msg is not None:
ibeis_rman.issue('git commit -am "{commit_msg}"'.format(**locals()))
branch_name = GET_ARGVAL('--checkout', type_=str, default=None)
if branch_name is not None:
try:
ibeis_rman.issue('git checkout "{branch_name}"'.format(**locals()))
except Exception:
print('ERROR: Could not checkout branch: %r' % (branch_name, ))
newbranch_name = GET_ARGVAL('--newbranch', type_=str, default=None)
if newbranch_name is not None:
ibeis_rman.issue('git checkout -b "{newbranch_name}"'.format(**locals()))
ibeis_rman.issue('git push --set-upstream origin {newbranch_name}'.format(**locals()))
#rman.issue('git stash pop"'.format(**locals()))
newlocalbranch_name = GET_ARGVAL('--newlocalbranch', type_=str, default=None)
if newlocalbranch_name is not None:
ibeis_rman.issue('git checkout -b "{newlocalbranch_name}"'.format(**locals()))
#rman.issue('git push --set-upstream origin {newlocalbranch_name}'.format(**locals()))
#rman.issue('git stash pop"'.format(**locals()))
mergebranch_name = GET_ARGVAL('--merge', type_=str, default=None)
if mergebranch_name is not None:
ibeis_rman.issue('git merge "{mergebranch_name}"'.format(**locals()))
if GET_ARGFLAG('--serverchmod'):
ibeis_rman.issue('chmod -R 755 *')
if GET_ARGFLAG('--chown'):
username = os.environ.get('USERNAME', ut.get_argval('--username'))
if username is None:
username = os.environ.get('USER', None)
if username is None:
raise AssertionError('cannot find username in commandline or environment vars')
usergroup = username
ibeis_rman.issue('chown -R {username}:{usergroup} *'.format(**locals()),
sudo=True)
upstream_branch = GET_ARGVAL('--set-upstream', type_=str, default=None)
if upstream_branch is not None:
ibeis_rman.issue('git branch --set-upstream-to=origin/{upstream_branch} {upstream_branch}'.format(**locals()))
upstream_push = GET_ARGVAL('--upstream-push', type_=str, default=None)
if upstream_push is not None:
ibeis_rman.issue('git push --set-upstream origin {upstream_push}'.format(**locals()))
if GET_ARGFLAG('--test'):
failures = []
for repo_dpath in ibeis_rman.repo_dirs:
mod_dpaths = ut.get_submodules_from_dpath(repo_dpath, recursive=False,
only_packages=True)
modname_list = ut.lmap(ut.get_modname_from_modpath, mod_dpaths)
print('Checking modules = %r' % (modname_list,))
for modname in modname_list:
try:
ut.import_modname(modname)
print(modname + ' success')
except ImportError as ex:
failures += [modname]
print(modname + ' failure')
print('failures = %s' % (ut.repr3(failures),))
if False:
try:
from six.moves import input
except ImportError:
input = raw_input
gg_cmd = GET_ARGVAL('--gg', None)
if gg_cmd is not None:
ans = 'yes' if GET_ARGFLAG('-y') else input('Are you sure you want to run: %r on all directories? ' % (gg_cmd,))
if ans == 'yes':
ibeis_rman.issue(gg_cmd)
def is_running_as_root():
return os.getenv('USER') == 'root'
def get_sysinfo(verbose=0):
if verbose:
print('USER = %r' % os.getenv("USER"))
if is_running_as_root():
print('Do not run super_setup.py as root')
sys.exit(1)
WIN32 = sys.platform.startswith('win32')
if verbose:
print('[super_setup] __IBEIS_SUPER_SETUP__')
if 'CODE_DIR' in os.environ:
CODE_DIR = os.environ.get('CODE_DIR')
else:
        CODE_DIR = dirname(dirname(realpath(__file__)))
    if verbose:
        print('[super_setup] code_dir: %r' % CODE_DIR)
(DISTRO, DISTRO_VERSION, DISTRO_TAG) = platform.dist()
python_version = platform.python_version()
PY2 = python_version.startswith('2.7')
PY3 = python_version.startswith('3')
pythoncmd = sys.executable
return CODE_DIR, pythoncmd, WIN32, PY2, PY3
def main():
print('''
IBEIS Image Analysis (IA)
____ _ _ ___ ____ ____ ____ ____ ___ _ _ ___
[__ | | |__] |___ |__/ [__ |___ | | | |__]
___] |__| | |___ | \ ___] |___ | |__| |
Use --help to show usage
''')
show_usage = len(sys.argv) > 1 and sys.argv[1] in ['--help', '-h']
if show_usage:
print(USAGE)
CODE_DIR, pythoncmd, WIN32, PY2, PY3 = get_sysinfo(verbose=1)
try:
import cv2
except ImportError:
print('Need to install OpenCV')
print('python super_setup.py --opencv')
try:
import pyflann
except ImportError:
print('Need to install FLANN')
print('python super_setup.py --flann')
try:
import theano, lasagne
except ImportError:
print('Need to install Theano/Lasagne/Pylearn2')
print('python super_setup.py --dcnn')
except ValueError as ex:
print(repr(ex))
print('Probably need libgpu array')
print('python super_setup.py --libgpuarray')
try:
try:
import PyQt4
except ImportError:
import PyQt5
except ImportError:
print('Need to install PyQt')
print('python super_setup.py --pyqt')
if '--bootstrap' in sys.argv or 'bootstrap' in sys.argv:
bootstrap(WIN32)
try:
print('Checking utool')
import utool as ut
except Exception:
ensure_utool(CODE_DIR, pythoncmd)
tpl_rman, ibeis_rman = initialize_repo_managers(CODE_DIR, pythoncmd, PY2, PY3)
execute_commands(tpl_rman, ibeis_rman)
if __name__ == '__main__':
main()
| true
| true
|
79075f4f56f8bc844adf569e2ae0dc947f97ad32
| 33,843
|
py
|
Python
|
main.py
|
MartimChaves/ret_detect
|
774521a079be4324d542a841c7b3be808c18356b
|
[
"MIT"
] | null | null | null |
main.py
|
MartimChaves/ret_detect
|
774521a079be4324d542a841c7b3be808c18356b
|
[
"MIT"
] | null | null | null |
main.py
|
MartimChaves/ret_detect
|
774521a079be4324d542a841c7b3be808c18356b
|
[
"MIT"
] | null | null | null |
import cv2.cv2 as cv2
import skimage.io as io
from skimage.transform import downscale_local_mean
import numpy as np
from model import *
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from images_to_arr import *
import pickle
import csv
def removeBackground(img_in):
Img_backless = np.copy(img_in)
Img_backless = np.subtract(np.multiply(Img_backless,1.11),0.11)
Img_backless[Img_backless < 0] = 0
return Img_backless
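def _example_remove_background():
    # Hedged sketch (added for illustration, not part of the original pipeline):
    # the linear map above stretches normalized intensities and clips the low
    # end, so values below roughly 0.1 become zero while 1.0 maps to 1.0.
    sample = np.array([0.05, 0.10, 0.50, 1.00])
    return removeBackground(sample)  # approximately [0.0, 0.001, 0.445, 1.0]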
def newBBcoords(img_pred_Log,test_image):
# returns coordinates of the bounding box for the region with the largest area
kernel_ones = np.ones([3,3],np.uint8)
closing_Log = cv2.morphologyEx(img_pred_Log, cv2.MORPH_CLOSE, kernel_ones)
labelsLog, numLog = label(closing_Log, neighbors=8, background = 0, return_num = True)
regionsLog = regionprops(labelsLog)
areasLog = [region['area'] for region in regionsLog]
areasLogArr = np.array(areasLog)
maxIndex = np.argmax(areasLogArr)
value = labelsLog[regionsLog[maxIndex]['coords'][0][0],regionsLog[maxIndex]['coords'][0][1]]
labelsLog[labelsLog != value] = 0
labelsLog[labelsLog == value] = 1
labelsImg = np.multiply(np.array(labelsLog, np.uint8),255)
#myShowImage(labelsImg)
sizeBoxX = regionsLog[maxIndex]['bbox'][3]-regionsLog[maxIndex]['bbox'][1]
sizeBoxY = regionsLog[maxIndex]['bbox'][2]-regionsLog[maxIndex]['bbox'][0]
coordsBbox = list(regionsLog[maxIndex]['bbox'])
if sizeBoxX <= 0.5 * img_pred_Log.shape[1]:
newSizeBoxX = 0.3 / (sizeBoxX / img_pred_Log.shape[1])
coordsBbox[1] = coordsBbox[1] - sizeBoxX*(0.5*(newSizeBoxX-1))
coordsBbox[3] = coordsBbox[3] + sizeBoxX*(0.5*(newSizeBoxX-1))
if sizeBoxY <= 0.5 * img_pred_Log.shape[0]:
newSizeBoxY = 0.5 / (sizeBoxY / img_pred_Log.shape[0])
coordsBbox[0] = coordsBbox[0] - sizeBoxY*(0.5*(newSizeBoxY-1))
coordsBbox[2] = coordsBbox[2] + sizeBoxY*(0.5*(newSizeBoxY-1))
if coordsBbox[0] < 0:
coordsBbox[0] = 0
if coordsBbox[1] < 0:
coordsBbox[1] = 0
if coordsBbox[2] > test_image.shape[0]:
coordsBbox[2] = test_image.shape[0] - 1
if coordsBbox[3] > test_image.shape[1]:
coordsBbox[3] = test_image.shape[1] - 1
coordsBboxInt = [round(x) for x in coordsBbox]
return coordsBboxInt
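def _example_bbox_expansion(sizeBoxX=20, img_width=200):
    # Hedged illustration (added, not from the original) of the widening rule
    # in newBBcoords above: when the detected box spans at most half the image
    # width, it is grown symmetrically until it covers roughly 30% of it.
    newSizeBoxX = 0.3 / (sizeBoxX / img_width)   # 0.3 / 0.1 = 3.0
    grow = sizeBoxX * (0.5 * (newSizeBoxX - 1))  # 20 * 1.0 = 20 px per side
    return sizeBoxX + 2 * grow                   # 60 px == 30% of 200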
def getLargestAreaEcentroid(img_pred_Log):
# returns mask with the regions with the largest area, coords of centroid and radius
kernel_ones = np.ones([3,3],np.uint8)
closing_Log = cv2.morphologyEx(img_pred_Log, cv2.MORPH_CLOSE, kernel_ones)
labelsLog, numLog = label(closing_Log, neighbors=8, background = 0, return_num = True)
regionsLog = regionprops(labelsLog)
areasLog = [region['area'] for region in regionsLog]
areasLogArr = np.array(areasLog)
maxIndex = np.argmax(areasLogArr)
value = labelsLog[regionsLog[maxIndex]['coords'][0][0],regionsLog[maxIndex]['coords'][0][1]]
labelsLog[labelsLog != value] = 0
labelsLog[labelsLog == value] = 1
centreCoords = np.round(regionsLog[maxIndex]['centroid'])
centreCoords = centreCoords.astype(np.uint)
radius = (regionsLog[maxIndex]['major_axis_length'] + regionsLog[maxIndex]['minor_axis_length']) / 4
colsCoord = [regionsLog[maxIndex]['bbox'][1],regionsLog[maxIndex]['bbox'][3]]
labelsArr = np.array(labelsLog)
return labelsArr, centreCoords, radius, colsCoord
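def _example_radius_estimate(major_axis=30.0, minor_axis=20.0):
    # Hedged note (added for illustration): the radius returned above is the
    # mean of the region's two semi-axes, i.e. (major + minor) / 4.
    return (major_axis + minor_axis) / 4  # 12.5 for a 30x20 fitted ellipse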
image_arr = np.load('image_arr.npy')
mask_arr = np.load('mask_arr.npy')
image_arr_red_channels = np.load('image_arr_red_channels.npy')
image_arr_green_channels = np.load('image_arr_green_channels.npy')
image_arr_blue_channels = np.load('image_arr_blue_channels.npy')
entropy = np.load('entropy_arr.npy')
elips = np.load('elips_arr.npy')
vessels = np.load('vessels_arr.npy')
test_image = np.zeros(image_arr[0].shape)
test_image_mask = np.zeros(mask_arr[0].shape)
test_img_RC = np.zeros(image_arr[0].shape)
test_img_GC = np.zeros(image_arr[0].shape)
test_img_BC = np.zeros(image_arr[0].shape)
entropy_arr = np.zeros(image_arr[0].shape)
elips_arr = np.zeros(image_arr[0].shape)
ODROILog = []
ODROIBay = []
getClassifiers = False
if getClassifiers:
X_train = np.zeros([image_arr[0].shape[0]*image_arr[0].shape[1]*40,4])
Y_train = np.zeros([image_arr[0].shape[0]*image_arr[0].shape[1]*40,1])
for j in range(0,40):
for i in range(0,40): # Get train data
if i == j:
continue
test_image = image_arr[i]
test_image_mask = mask_arr[i]
labels, num = label(test_image_mask, neighbors=8, background = 0, return_num = True)
regions = regionprops(labels)
centreCoords = np.round(regions[0]['centroid'])
centreCoords = centreCoords.astype(np.uint)
centreMask = np.zeros(test_image_mask.shape)
centreMask[centreCoords[0],centreCoords[1]] = 1
#Change here!
#test_image_mask = centreMask
test_image_RC = image_arr_red_channels[i]
test_image_GC = image_arr_green_channels[i]
test_image_BC = image_arr_blue_channels[i]
entropy_arr = entropy[i]
elips_arr = elips[i]
#test_image_RC = removeBackground(test_image_RC)
#test_image = removeBackground(test_image)
imageIndxs = np.where(test_image != 0)
intensityColumn_Arr = np.squeeze(test_image.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
intensityColumn_Arr = (intensityColumn_Arr-np.average(intensityColumn_Arr)) / np.std(intensityColumn_Arr)
redChannel_Arr = np.squeeze(test_image_RC.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
redChannel_Arr = (redChannel_Arr-np.average(redChannel_Arr)) / np.std(redChannel_Arr)
entropy_arr = np.squeeze(entropy_arr.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
#entropy_arr = (entropy_arr-np.average(entropy_arr)) / np.std(entropy_arr)
# Distance Array
indices_Arr = np.indices((test_image.shape[0],test_image.shape[1])).transpose((1,2,0))
centreCoords = np.array([test_image.shape[0]/2,test_image.shape[1]/2])
distance_Arr = np.sqrt(np.add(np.power(indices_Arr[...,0]-centreCoords[0],2),np.power(indices_Arr[...,1]-centreCoords[1],2)))
normDistance_Arr = distance_Arr / np.max(distance_Arr)
normDistanceColumn_Arr = np.squeeze(normDistance_Arr.reshape([1,normDistance_Arr.shape[0]*normDistance_Arr.shape[1]])).T
X_train[i*image_arr[0].shape[0]*image_arr[0].shape[1]:(i+1)*image_arr[0].shape[0]*image_arr[0].shape[1],...] = np.column_stack((redChannel_Arr,entropy_arr,normDistanceColumn_Arr, intensityColumn_Arr))#,
Y_train[i*image_arr[0].shape[0]*image_arr[0].shape[1]:(i+1)*image_arr[0].shape[0]*image_arr[0].shape[1],0] = np.squeeze(test_image_mask.reshape([1,test_image_mask.shape[0]*test_image_mask.shape[1]])).T
X_train_2 = X_train
y_train_2 = Y_train
clf_bayes = GaussianNB()
clf_bayes.fit(X_train_2,y_train_2)
paramsBayes = clf_bayes.get_params
# Logistic regression
clf_log = LogisticRegression()
clf_log.fit(X_train_2,y_train_2)
log = open('Classifiers/Log/LogClf_excluding_' + str(j) + '.pickle', 'wb')
pickle.dump(clf_log, log)
log.close()
bay = open('Classifiers/Bay/BayClf_excluding_' + str(j) + '.pickle', 'wb')
pickle.dump(clf_bayes, bay)
bay.close()
'''
f = open('my_classifier.pickle', 'rb')
classifier = pickle.load(f)
f.close()
'''
test_image2 = np.zeros(image_arr[0].shape)
test_image_mask2 = np.zeros(mask_arr[0].shape)
test_img_RC2 = np.zeros(image_arr[0].shape)
# test_img_GC2 = np.zeros(image_arr[0].shape)
test_image2 = image_arr[j]
test_image_mask2 = mask_arr[j]
test_image_RC2 = image_arr_red_channels[j]
test_image_GC2 = image_arr_green_channels[j]
test_image_BC2 = image_arr_blue_channels[j]
entropy_arr2 = entropy[j]
intensityColumn_Arr2 = np.squeeze(test_image2.reshape([1,test_image2.shape[0]*test_image2.shape[1]])).T
intensityColumn_Arr2 = (intensityColumn_Arr2-np.average(intensityColumn_Arr2)) / np.std(intensityColumn_Arr2)
redChannel_Arr2 = np.squeeze(test_image_RC2.reshape([1,test_image2.shape[0]*test_image2.shape[1]])).T
redChannel_Arr2 = ( redChannel_Arr2 - np.average(redChannel_Arr2) ) / np.std(redChannel_Arr2)
entropy_arr = np.squeeze(entropy_arr2.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
X_val = np.column_stack((redChannel_Arr2,entropy_arr,normDistanceColumn_Arr,intensityColumn_Arr2))#,,greenChannel_Arr2))
Y_val = np.squeeze(test_image_mask2.reshape([1,test_image_mask2.shape[0]*test_image_mask2.shape[1]])).T
# predicts
predictsBayes = clf_bayes.predict(X_val)
predictsLog = clf_log.predict(X_val)
img_pred_Log = predictsLog.reshape([test_image.shape[0],test_image.shape[1]])
img_pred_Bayes = predictsBayes.reshape([test_image.shape[0],test_image.shape[1]])
# Y_train_reshaped = Y_train.reshape([test_image.shape[0],test_image.shape[1]])
#myShowImage(img_pred_Log,"img_pred_Log_" + str(j))
#myShowImage(img_pred_Bayes,"img_pred_Bayes_" + str(j))
try:
coordsBBLog = newBBcoords(img_pred_Log,test_image)
except:
coordsBBLog = []
try:
coordsBBBay = newBBcoords(img_pred_Bayes,test_image)
except:
coordsBBBay = []
ODROILog.append(coordsBBLog)
ODROIBay.append(coordsBBBay)
ODROILog_Arr = np.array(ODROILog)
ODROIBay_Arr = np.array(ODROIBay)
np.save('ODROILog_Arr.npy',ODROILog_Arr)
np.save('ODROIBay_Arr.npy',ODROIBay_Arr)
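def _example_load_saved_outputs(j=0):
    # Hedged sketch (added for illustration): reload one of the per-fold
    # classifiers pickled above together with the saved OD ROI coordinates.
    # The paths mirror the ones written in the getClassifiers block; adjust
    # them if the folders differ on disk.
    with open('Classifiers/Log/LogClf_excluding_' + str(j) + '.pickle', 'rb') as f:
        clf = pickle.load(f)
    rois = np.load('ODROILog_Arr.npy')
    return clf, rois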
prepareSegments = False
if prepareSegments:
ODROILog_Arr = np.load('ODROILog_Arr.npy')
ODROIBay_Arr = np.load('ODROIBay_Arr.npy')
OD_section = []
OD_mask = []
OD_section_RC = []
lenX_Arr = 0
for i in range(0,40):
try:
coords = ODROILog_Arr[i]
#myShowImage(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]],"LOG" +str(i))
segMask = np.array(mask_arr[i][coords[0]:coords[2],coords[1]:coords[3]])
segRC = np.array(image_arr_red_channels[i][coords[0]:coords[2],coords[1]:coords[3]])
imgSegment = np.array(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]])
vesslesSeg = np.array(vessels[i][coords[0]:coords[2],coords[1]:coords[3]])
kernel_ones = np.ones([3,3],np.uint8)
vesslesSeg = cv2.morphologyEx(vesslesSeg, cv2.MORPH_DILATE, kernel_ones)
indxsVesl = np.where(vesslesSeg != 0)
medianFiltered = median(imgSegment,disk(25))
maxFiltered = maximum_filter(imgSegment, size=15)
smoothVessels = np.copy(imgSegment)
smoothVessels[indxsVesl[0],indxsVesl[1]] = np.multiply(maxFiltered[indxsVesl[0],indxsVesl[1]],0.97)
#smoothDisk = mean(smoothVessels, disk(5))
OD_section.append(smoothVessels)
OD_mask.append(segMask)
OD_section_RC.append(segRC)
lenX_Arr = lenX_Arr + (imgSegment.shape[0]*imgSegment.shape[1])
#coords = ODROIBay_Arr[i]
#myShowImage(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]],"BAY" + str(i))
except:
coords = ODROIBay_Arr[i]
segMask = np.array(mask_arr[i][coords[0]:coords[2],coords[1]:coords[3]])
segRC = np.array(image_arr_red_channels[i][coords[0]:coords[2],coords[1]:coords[3]])
imgSegment = np.array(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]])
vesslesSeg = np.array(vessels[i][coords[0]:coords[2],coords[1]:coords[3]])
kernel_ones = np.ones([3,3],np.uint8)
vesslesSeg = cv2.morphologyEx(vesslesSeg, cv2.MORPH_DILATE, kernel_ones)
indxsVesl = np.where(vesslesSeg != 0)
#medianFiltered = median(imgSegment,disk(25))
maxFiltered = maximum_filter(imgSegment, size=15)
smoothVessels = np.copy(imgSegment)
smoothVessels[indxsVesl[0],indxsVesl[1]] = np.multiply(maxFiltered[indxsVesl[0],indxsVesl[1]],0.97)
#myShowImage(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]],"EXCEPT" + str(i))
OD_section.append(smoothVessels)
OD_mask.append(segMask)
OD_section_RC.append(segRC)
#print('except')
lenX_Arr = lenX_Arr + (imgSegment.shape[0]*imgSegment.shape[1])
#myShowImage(smoothVessels)
OD_section_Arr = np.array(OD_section)
OD_mask_Arr = np.array(OD_mask)
OD_section_RC = np.array(OD_section_RC)
np.save('OD_section_Arr.npy',OD_section_Arr)
np.save('OD_mask_Arr.npy',OD_mask_Arr)
np.save('OD_section_RC.npy',OD_section_RC)
print(lenX_Arr) # len = 4577126
finalSegmentation = False
finalMaskPredicts = []
if finalSegmentation:
OD_section_Arr = np.load('OD_section_Arr.npy')
OD_mask_Arr = np.load('OD_mask_Arr.npy')
OD_section_RC = np.load('OD_section_RC.npy')
clahe = cv2.createCLAHE(clipLimit=1, tileGridSize=(8, 8))
for j in range(0,40):
removeLen = OD_section_Arr[j].shape[0] * OD_section_Arr[j].shape[1]
X_train = np.zeros([4577126-removeLen,2])
Y_train = np.zeros([4577126-removeLen,1])
for i in range(0,40):
if i == j:
continue
test_image = OD_section_Arr[i]
test_image_mask = OD_mask_Arr[i]
segRC = OD_section_RC[i]
clahePrep = np.multiply(np.copy(test_image),255)
clahePrep = clahePrep.astype(np.uint8)
highContrast = clahe.apply(clahePrep)
intensityColumn_Arr = np.squeeze(test_image.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
intensityColumn_Arr = (intensityColumn_Arr-np.average(intensityColumn_Arr)) / np.std(intensityColumn_Arr)
segRC = np.squeeze(segRC.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
#segRC = (segRC-np.average(segRC)) / np.std(segRC)
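            # Note (added for clarity): when i == 0 the slice
            # (i-1)*N:(i)*N would be empty (...:0), so that image's block is
            # written to the tail of X_train / Y_train instead.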
if (i-1)*test_image.shape[0]*test_image.shape[1] < 0 and (i)*test_image.shape[0]*test_image.shape[1] == 0:
X_train[(i-1)*test_image.shape[0]*test_image.shape[1]::,...] = np.column_stack((intensityColumn_Arr,segRC))#,
Y_train[(i-1)*test_image.shape[0]*test_image.shape[1]::,0] = np.squeeze(test_image_mask.reshape([1,test_image_mask.shape[0]*test_image_mask.shape[1]])).T
continue
X_train[(i-1)*test_image.shape[0]*test_image.shape[1]:(i)*test_image.shape[0]*test_image.shape[1],...] = np.column_stack((intensityColumn_Arr,segRC))#,
Y_train[(i-1)*test_image.shape[0]*test_image.shape[1]:(i)*test_image.shape[0]*test_image.shape[1],0] = np.squeeze(test_image_mask.reshape([1,test_image_mask.shape[0]*test_image_mask.shape[1]])).T
X_train_2 = X_train
y_train_2 = Y_train
clf_bayes = GaussianNB()
clf_bayes.fit(X_train_2,y_train_2)
paramsBayes = clf_bayes.get_params
# Logistic regression
clf_log = LogisticRegression()
clf_log.fit(X_train_2,y_train_2)
log = open('Classifiers/Segments/Log/LogClf_excluding_' + str(j) + '.pickle', 'wb')
pickle.dump(clf_log, log)
log.close()
bay = open('Classifiers/Segments/Bay/BayClf_excluding_' + str(j) + '.pickle', 'wb')
pickle.dump(clf_bayes, bay)
bay.close()
test_image = OD_section_Arr[j]
test_image_mask = OD_mask_Arr[j]
segRC = OD_section_RC[j]
clahePrep = np.multiply(np.copy(test_image),255)
clahePrep = clahePrep.astype(np.uint8)
highContrast = clahe.apply(clahePrep)
intensityColumn_Arr = np.squeeze(test_image.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
intensityColumn_Arr = (intensityColumn_Arr-np.average(intensityColumn_Arr)) / np.std(intensityColumn_Arr)
segRC = np.squeeze(segRC.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
#segRC = (segRC-np.average(segRC)) / np.std(segRC)
X_val = np.column_stack((intensityColumn_Arr,segRC))
predictsBayes = clf_bayes.predict(X_val)
predictsLog = clf_log.predict(X_val)
img_pred_Log = predictsLog.reshape([test_image.shape[0],test_image.shape[1]])
img_pred_Bayes = predictsBayes.reshape([test_image.shape[0],test_image.shape[1]])
#myShowImage(img_pred_Log,"Log")
#myShowImage(img_pred_Bayes,"Bayes")
#myShowImage(test_image,"Actual")
finalMaskPredicts.append(predictsBayes)
#print('ok')
finalMaskPredicts_Arr = np.array(finalMaskPredicts)
np.save("finalMaskPredicts_Bayes.npy",finalMaskPredicts_Arr)
loadFinalSegs = False
if loadFinalSegs:
foveaBBoxCoords = []
centroidCoord = []
ODmaskPredicts = []
elips = np.load('elips_arr.npy')
originalDimsBase = np.zeros(image_arr[0].shape)
OD_section_Arr = np.load('OD_section_Arr.npy')
finalMaskPredicts_Arr = np.load("finalMaskPredicts_Bayes.npy")
ODROILog_Arr = np.load('ODROILog_Arr.npy')
ODROIBay_Arr = np.load('ODROIBay_Arr.npy')
for i in range(0,40):
originalDims = np.copy(originalDimsBase)
test_image = OD_section_Arr[i]
maskPred = finalMaskPredicts_Arr[i].reshape([test_image.shape[0],test_image.shape[1]])
finalMask, centroidCoords, radius, colsCoord = getLargestAreaEcentroid(maskPred)
finalMaskImg = np.multiply(finalMask,255)
finalMaskImg[centroidCoords[0],centroidCoords[1]] = 255
try:
coords = ODROILog_Arr[i]
failTest = (coords[2])
except:
coords = ODROIBay_Arr[i]
failTest = (coords[2])
coordsReal =[centroidCoords[0] + coords[0],centroidCoords[1] + coords[1]]
colsCoordReal = [colsCoord[0] + coords[1],colsCoord[1] + coords[1]]
originalDims[coords[0]:coords[2],coords[1]:coords[3]] = finalMaskImg
#originalDims = originalDims or elips[i]
elipsResized = cv2.resize(elips[i], dsize=(originalDims.shape[1],originalDims.shape[0]), interpolation=cv2.INTER_CUBIC)
elipsResized = np.average(elipsResized,axis = 2) # 3 channels -> 1 channel
elipsResized[elipsResized>0.5] = 1
elipsResized[elipsResized<1] = 0
elipsResized = thin(elipsResized)
elipsIndexs = np.where(elipsResized != 0)
originalDims = originalDims.astype(np.uint8)
#originalDims[elipsIndexs] = 255
indexsOD_ELi = np.where(originalDims != 0)
#myShowImage(originalDims,str(i))
checkResults = np.copy(image_arr[i])
checkResults[indexsOD_ELi] = originalDims[indexsOD_ELi]
#checkResults[0::,np.min(elipsIndexs[1])] = 255 # left
#checkResults[0::,np.max(elipsIndexs[1])] = 255 # right
if abs(coordsReal[1]-np.min(elipsIndexs[1])) < abs(coordsReal[1]-np.max(elipsIndexs[1])):
#isleft -> walk right
#relevantColumn = coordsReal[1] + 30 # based on centroid
relevantColumn = colsCoordReal[1] - 10 # based on
columnROI_f = [coordsReal[1] + round(3*radius),coordsReal[1] + round(6*radius)]
else:
#isright -> walk left
#relevantColumn = coordsReal[1] - 30
relevantColumn = colsCoordReal[0] + 10
columnROI_f = [coordsReal[1] - round(6*radius),coordsReal[1] - round(3*radius)]
relevantRows = np.where(elipsResized[...,relevantColumn]!=0)
checkResults[relevantRows[0][0]:relevantRows[0][-1],columnROI_f[0]] = 0 # 1 - columnROI_f[0]
checkResults[relevantRows[0][0]:relevantRows[0][-1],columnROI_f[1]] = 0 # 3 - columnROI_f[1]
checkResults[relevantRows[0][0],columnROI_f[0]:columnROI_f[1]] = 0 # 0 - relevantRows[0][0]
checkResults[relevantRows[0][-1],columnROI_f[0]:columnROI_f[1]] = 0 # 2 - relevantRows[0][-1]
foveaBBoxCoords.append((relevantRows[0][0],columnROI_f[0],relevantRows[0][-1],columnROI_f[1]))
centroidCoord.append(coordsReal)
originalDims = np.divide(originalDims,255)
ODmaskPredicts.append(originalDims)
#myShowImage(originalDims,str(i))
#myShowImage(checkResults,str(i))
foveaBBoxCoords_Arr = np.array(foveaBBoxCoords)
centroidCoord_Arr = np.array(centroidCoord)
ODmaskPredicts_Arr = np.array(ODmaskPredicts)
np.save("bbox_fovea.npy",foveaBBoxCoords_Arr)
np.save("centroidCoord_Arr.npy",centroidCoord_Arr)
np.save("ODmaskPredicts_Arr.npy",ODmaskPredicts_Arr)
getFoveaGTCoords = True
if getFoveaGTCoords:
foveCoordsGT = []
tempCoords =[]
imgNo = 0
with open('Datasets/fovea_location.csv') as f:
reader = csv.reader(f)
next(reader)
for row in reader:
#print(row)
tempCoords.append(float(row[1]))
tempCoords.append(float(row[2]))
foveCoordsGT.append(tempCoords)
tempCoords =[]
imgNo += 1
if imgNo == 40:
break
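def _example_parse_fovea_row(row=('01.jpg', '1234.5', '987.0')):
    # Hedged sketch (added for illustration): the reader above assumes each CSV
    # row is laid out roughly as [image_id, fovea_x, fovea_y]; the row shown
    # here is hypothetical. The coordinates are later divided by 4, which
    # suggests the working images are downscaled copies of the originals.
    return float(row[1]), float(row[2])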
getFoveaCoordsPred = False
'''for i in range(0,40):
myShowImage(image_arr[i])
myShowImage(image_arr_red_channels[i])
myShowImage(image_arr_green_channels[i])
myShowImage(vessels[i])
myShowImage(entropy_arr[i])'''
if getFoveaCoordsPred:
foveaBBoxCoords_Arr = np.load("bbox_fovea.npy")
foveaBBoxCoords_Arr = np.absolute(foveaBBoxCoords_Arr)
removeLen = 0
realCentroidCoords_Arr = []
clahe = cv2.createCLAHE(clipLimit=1, tileGridSize=(8, 8))
for i in range(0,40): # not the best way...
if foveaBBoxCoords_Arr[i][3] < foveaBBoxCoords_Arr[i][1]:
temp = foveaBBoxCoords_Arr[i][1]
foveaBBoxCoords_Arr[i][1] = foveaBBoxCoords_Arr[i][3]
foveaBBoxCoords_Arr[i][3] = temp
if foveaBBoxCoords_Arr[i][2] < foveaBBoxCoords_Arr[i][0]:
temp = foveaBBoxCoords_Arr[i][0]
foveaBBoxCoords_Arr[i][0] = foveaBBoxCoords_Arr[i][2]
foveaBBoxCoords_Arr[i][2] = temp
test_image = image_arr[i]
fovea_region = test_image[foveaBBoxCoords_Arr[i][0]:foveaBBoxCoords_Arr[i][2],foveaBBoxCoords_Arr[i][1]:foveaBBoxCoords_Arr[i][3]]
bboxShape = fovea_region.shape
removeLen += bboxShape[0]*bboxShape[1]
#print(removeLen)
for j in range(0,40):
removeLen = (foveaBBoxCoords_Arr[j][2]-foveaBBoxCoords_Arr[j][0]) * (foveaBBoxCoords_Arr[j][3]-foveaBBoxCoords_Arr[j][1])
X_train = np.zeros([3187816-removeLen,3]) # 3187816 = number of points in all fovea bboxs
Y_train = np.zeros([3187816-removeLen,1])
first = 0
for i in range(0,40):
if i == j:
continue
'''if foveaBBoxCoords_Arr[i][3] < foveaBBoxCoords_Arr[i][1]:
temp = foveaBBoxCoords_Arr[i][1]
foveaBBoxCoords_Arr[i][1] = foveaBBoxCoords_Arr[i][3]
foveaBBoxCoords_Arr[i][3] = temp
if foveaBBoxCoords_Arr[i][2] < foveaBBoxCoords_Arr[i][0]:
temp = foveaBBoxCoords_Arr[i][0]
foveaBBoxCoords_Arr[i][0] = foveaBBoxCoords_Arr[i][2]
foveaBBoxCoords_Arr[i][2] = temp'''
test_image = image_arr[i]
fovea_region = test_image[foveaBBoxCoords_Arr[i][0]:foveaBBoxCoords_Arr[i][2],foveaBBoxCoords_Arr[i][1]:foveaBBoxCoords_Arr[i][3]]
bboxShape = fovea_region.shape
last = bboxShape[0]*bboxShape[1] + first
foveaRegionGC = image_arr_green_channels[i][foveaBBoxCoords_Arr[i][0]:foveaBBoxCoords_Arr[i][2],foveaBBoxCoords_Arr[i][1]:foveaBBoxCoords_Arr[i][3]]
clahePrep = np.multiply(np.copy(foveaRegionGC),255)
clahePrep = clahePrep.astype(np.uint8)
highContrast = clahe.apply(clahePrep)
#mask
maskBig = np.zeros(test_image.shape)
coordsFoveaCenter = [round(foveCoordsGT[i][1]/4),round(foveCoordsGT[i][0]/4)]
maskBig[coordsFoveaCenter[0]-10:coordsFoveaCenter[0]+10,coordsFoveaCenter[1]-10:coordsFoveaCenter[1]+10] = 1
mask = maskBig[foveaBBoxCoords_Arr[i][0]:foveaBBoxCoords_Arr[i][2],foveaBBoxCoords_Arr[i][1]:foveaBBoxCoords_Arr[i][3]]
fovea_region = np.squeeze(fovea_region.reshape([1,bboxShape[0]*bboxShape[1]])).T
fovea_region = (fovea_region-np.average(fovea_region)) / np.std(fovea_region)
foveaRegionGC = np.squeeze(foveaRegionGC.reshape([1,bboxShape[0]*bboxShape[1]])).T
foveaRegionGC = (foveaRegionGC-np.average(foveaRegionGC)) / np.std(foveaRegionGC)
highContrast = np.squeeze(highContrast.reshape([1,bboxShape[0]*bboxShape[1]])).T
highContrast = (highContrast-np.average(highContrast)) / np.std(highContrast)
'''if (i-1)*bboxShape[0]*bboxShape[1] < 0 and (i)*bboxShape[0]*bboxShape[1] == 0:
X_train[(i-1)*bboxShape[0]*bboxShape[1]::,...] = np.column_stack((fovea_region,foveaRegionGC,highContrast))#,
Y_train[(i-1)*bboxShape[0]*bboxShape[1]::,0] = np.squeeze(mask.reshape([1,bboxShape[0]*bboxShape[1]])).T
continue'''
X_train[first:last,...] = np.column_stack((fovea_region,foveaRegionGC,highContrast))#,
Y_train[first:last,0] = np.squeeze(mask.reshape([1,bboxShape[0]*bboxShape[1]])).T
first = last
X_train_2 = X_train
y_train_2 = Y_train
clf_bayes = GaussianNB()
clf_bayes.fit(X_train_2,y_train_2)
paramsBayes = clf_bayes.get_params
# Logistic regression
clf_log = LogisticRegression()
clf_log.fit(X_train_2,y_train_2)
'''log = open('Classifiers/Segments/Log/LogClf_excluding_' + str(j) + '.pickle', 'wb')
pickle.dump(clf_log, log)
log.close()
bay = open('Classifiers/Segments/Bay/BayClf_excluding_' + str(j) + '.pickle', 'wb')
pickle.dump(clf_bayes, bay)
bay.close()'''
test_image = image_arr[j]
fovea_region = test_image[foveaBBoxCoords_Arr[j][0]:foveaBBoxCoords_Arr[j][2],foveaBBoxCoords_Arr[j][1]:foveaBBoxCoords_Arr[j][3]]
bboxShape = fovea_region.shape
foveaRegionGC = image_arr_green_channels[j][foveaBBoxCoords_Arr[j][0]:foveaBBoxCoords_Arr[j][2],foveaBBoxCoords_Arr[j][1]:foveaBBoxCoords_Arr[j][3]]
clahePrep = np.multiply(np.copy(foveaRegionGC),255)
clahePrep = clahePrep.astype(np.uint8)
highContrast = clahe.apply(clahePrep)
fovea_region = np.squeeze(fovea_region.reshape([1,bboxShape[0]*bboxShape[1]])).T
fovea_region = (fovea_region-np.average(fovea_region)) / np.std(fovea_region)
foveaRegionGC = np.squeeze(foveaRegionGC.reshape([1,bboxShape[0]*bboxShape[1]])).T
foveaRegionGC = (foveaRegionGC-np.average(foveaRegionGC)) / np.std(foveaRegionGC)
highContrast = np.squeeze(highContrast.reshape([1,bboxShape[0]*bboxShape[1]])).T
highContrast = (highContrast-np.average(highContrast)) / np.std(highContrast)
X_val = np.column_stack((fovea_region,foveaRegionGC,highContrast))
predictsBayes = clf_bayes.predict(X_val)
predictsLog = clf_log.predict(X_val)
img_pred_Log = predictsLog.reshape(bboxShape)
img_pred_Bayes = predictsBayes.reshape(bboxShape)
try:
finalMask, centroidCoords, radius, colsCoord = getLargestAreaEcentroid(img_pred_Bayes)
if centroidCoords.size == 0:
finalMask = np.zeros(img_pred_Bayes.shape)
finalMask[round(finalMask.shape[0]/2),round(finalMask.shape[1]/2)] = 1
centroidCoords = np.array([round(finalMask.shape[0]/2),round(finalMask.shape[1]/2)])
except:
finalMask = np.zeros(img_pred_Bayes.shape)
finalMask[round(finalMask.shape[0]/2),round(finalMask.shape[1]/2)] = 1
centroidCoords = np.array([round(finalMask.shape[0]/2),round(finalMask.shape[1]/2)])
maskEyes = np.copy(finalMask)
maskEyes = np.multiply(maskEyes,255)
maskEyes = maskEyes.astype(np.uint8)
#myShowImage(test_image[foveaBBoxCoords_Arr[j][0]:foveaBBoxCoords_Arr[j][2],foveaBBoxCoords_Arr[j][1]:foveaBBoxCoords_Arr[j][3]],"fovea")
#myShowImage(maskEyes,"Mask")
#myShowImage(img_pred_Bayes,"Bay")
realCentroidCoords = [centroidCoords[0] + foveaBBoxCoords_Arr[j][0],centroidCoords[1] + foveaBBoxCoords_Arr[j][1]]
realCentroidCoords_Arr.append(realCentroidCoords)
realCentroidCoords_Arr = np.array(realCentroidCoords_Arr)
np.save('fovea_centre_coords.npy',realCentroidCoords_Arr)
#centroidCoord_Arr = np.load("centroidCoord_Arr.npy")
#ODmaskPredicts_Arr = np.load("ODmaskPredicts_Arr.npy")
#for i in range(0,40):
showGraphsClass= False
if showGraphsClass:
import matplotlib.pyplot as plt
from sklearn import svm, datasets
def make_meshgrid(x, y, h=.02):
"""Create a mesh of points to plot in
Parameters
----------
x: data to base x-axis meshgrid on
y: data to base y-axis meshgrid on
h: stepsize for meshgrid, optional
Returns
-------
xx, yy : ndarray
"""
x_min, x_max = x.min() - 1, x.max() + 1
y_min, y_max = y.min() - 1, y.max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
return xx, yy
def plot_contours(ax, clf, xx, yy, proba=False, **params):
"""Plot the decision boundaries for a classifier.
Parameters
----------
ax: matplotlib axes object
clf: a classifier
xx: meshgrid ndarray
yy: meshgrid ndarray
params: dictionary of params to pass to contourf, optional
"""
if proba:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:,-1]
else:
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
out = ax.contourf(xx, yy, Z,20, **params)
return out
## import some data to play with
#iris = datasets.load_iris()
## Take the first two features. We could avoid this by using a two-dim dataset
#X = iris.data[:, :2]
#y = iris.target
X = X_train_2
y = y_train_2
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
models = (clf_bayes, clf_log) #, clf_svm, clf_svm_rbf)
# title for the plots
titles = ('Bayes',
'Logistic regression')
''' ,
'SVC with linear kernel',
'SVM with RBF kernel')'''
# Set-up 2x2 grid for plotting.
#fig, sub =
#plt.subplots_adjust(wspace=0.4, hspace=0.4)
X0, X1 = X[0::500, 0], X[0::500, 1]
xx, yy = make_meshgrid(X0, X1,h=0.005)
'''_,ax_all = plt.subplots(1,2)
ax = ax_all[1]
plot_contours(ax, clf_bayes, xx, yy,
cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X0, X1, c=y[0::500], cmap=plt.cm.coolwarm, s=20)
ax.set_xlim(X0.min(), X0.max())
ax.set_ylim(X1.min(), X1.max())
ax.set_xlabel('Distance')
ax.set_ylabel('Intensity')
ax.set_xticks(())
ax.set_yticks(())
ax.set_title("Bayes")
plt.show()'''
showPlots = False
if showPlots:
for clf, title in zip(models, titles):
_,ax_all = plt.subplots(1,2)
ax = ax_all[0]
plot_contours(ax, clf, xx, yy, proba=True, # changed proba to probability
cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X0, X1, c=y[0::500], cmap=plt.cm.coolwarm, s=20)
ax.set_xlim(X0.min(), X0.max())
ax.set_ylim(X1.min(), X1.max())
ax.set_xlabel('Distance')
ax.set_ylabel('Intensity')
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(title)
ax = ax_all[1]
plot_contours(ax, clf, xx, yy,
cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X0, X1, c=y[0::500], cmap=plt.cm.coolwarm, s=20)
ax.set_xlim(X0.min(), X0.max())
ax.set_ylim(X1.min(), X1.max())
ax.set_xlabel('Distance')
ax.set_ylabel('Intensity')
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(title)
plt.show()
print("Done")
| 40.098341
| 215
| 0.625476
|
import cv2.cv2 as cv2
import skimage.io as io
from skimage.transform import downscale_local_mean
import numpy as np
from model import *
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from images_to_arr import *
import pickle
import csv
def removeBackground(img_in):
Img_backless = np.copy(img_in)
Img_backless = np.subtract(np.multiply(Img_backless,1.11),0.11)
Img_backless[Img_backless < 0] = 0
return Img_backless
def newBBcoords(img_pred_Log,test_image):
kernel_ones = np.ones([3,3],np.uint8)
closing_Log = cv2.morphologyEx(img_pred_Log, cv2.MORPH_CLOSE, kernel_ones)
labelsLog, numLog = label(closing_Log, neighbors=8, background = 0, return_num = True)
regionsLog = regionprops(labelsLog)
areasLog = [region['area'] for region in regionsLog]
areasLogArr = np.array(areasLog)
maxIndex = np.argmax(areasLogArr)
value = labelsLog[regionsLog[maxIndex]['coords'][0][0],regionsLog[maxIndex]['coords'][0][1]]
labelsLog[labelsLog != value] = 0
labelsLog[labelsLog == value] = 1
labelsImg = np.multiply(np.array(labelsLog, np.uint8),255)
sizeBoxX = regionsLog[maxIndex]['bbox'][3]-regionsLog[maxIndex]['bbox'][1]
sizeBoxY = regionsLog[maxIndex]['bbox'][2]-regionsLog[maxIndex]['bbox'][0]
coordsBbox = list(regionsLog[maxIndex]['bbox'])
if sizeBoxX <= 0.5 * img_pred_Log.shape[1]:
newSizeBoxX = 0.3 / (sizeBoxX / img_pred_Log.shape[1])
coordsBbox[1] = coordsBbox[1] - sizeBoxX*(0.5*(newSizeBoxX-1))
coordsBbox[3] = coordsBbox[3] + sizeBoxX*(0.5*(newSizeBoxX-1))
if sizeBoxY <= 0.5 * img_pred_Log.shape[0]:
newSizeBoxY = 0.5 / (sizeBoxY / img_pred_Log.shape[0])
coordsBbox[0] = coordsBbox[0] - sizeBoxY*(0.5*(newSizeBoxY-1))
coordsBbox[2] = coordsBbox[2] + sizeBoxY*(0.5*(newSizeBoxY-1))
if coordsBbox[0] < 0:
coordsBbox[0] = 0
if coordsBbox[1] < 0:
coordsBbox[1] = 0
if coordsBbox[2] > test_image.shape[0]:
coordsBbox[2] = test_image.shape[0] - 1
if coordsBbox[3] > test_image.shape[1]:
coordsBbox[3] = test_image.shape[1] - 1
coordsBboxInt = [round(x) for x in coordsBbox]
return coordsBboxInt
def getLargestAreaEcentroid(img_pred_Log):
kernel_ones = np.ones([3,3],np.uint8)
closing_Log = cv2.morphologyEx(img_pred_Log, cv2.MORPH_CLOSE, kernel_ones)
labelsLog, numLog = label(closing_Log, neighbors=8, background = 0, return_num = True)
regionsLog = regionprops(labelsLog)
areasLog = [region['area'] for region in regionsLog]
areasLogArr = np.array(areasLog)
maxIndex = np.argmax(areasLogArr)
value = labelsLog[regionsLog[maxIndex]['coords'][0][0],regionsLog[maxIndex]['coords'][0][1]]
labelsLog[labelsLog != value] = 0
labelsLog[labelsLog == value] = 1
centreCoords = np.round(regionsLog[maxIndex]['centroid'])
centreCoords = centreCoords.astype(np.uint)
radius = (regionsLog[maxIndex]['major_axis_length'] + regionsLog[maxIndex]['minor_axis_length']) / 4
colsCoord = [regionsLog[maxIndex]['bbox'][1],regionsLog[maxIndex]['bbox'][3]]
labelsArr = np.array(labelsLog)
return labelsArr, centreCoords, radius, colsCoord
image_arr = np.load('image_arr.npy')
mask_arr = np.load('mask_arr.npy')
image_arr_red_channels = np.load('image_arr_red_channels.npy')
image_arr_green_channels = np.load('image_arr_green_channels.npy')
image_arr_blue_channels = np.load('image_arr_blue_channels.npy')
entropy = np.load('entropy_arr.npy')
elips = np.load('elips_arr.npy')
vessels = np.load('vessels_arr.npy')
test_image = np.zeros(image_arr[0].shape)
test_image_mask = np.zeros(mask_arr[0].shape)
test_img_RC = np.zeros(image_arr[0].shape)
test_img_GC = np.zeros(image_arr[0].shape)
test_img_BC = np.zeros(image_arr[0].shape)
entropy_arr = np.zeros(image_arr[0].shape)
elips_arr = np.zeros(image_arr[0].shape)
ODROILog = []
ODROIBay = []
getClassifiers = False
if getClassifiers:
X_train = np.zeros([image_arr[0].shape[0]*image_arr[0].shape[1]*40,4])
Y_train = np.zeros([image_arr[0].shape[0]*image_arr[0].shape[1]*40,1])
for j in range(0,40):
for i in range(0,40):
if i == j:
continue
test_image = image_arr[i]
test_image_mask = mask_arr[i]
labels, num = label(test_image_mask, neighbors=8, background = 0, return_num = True)
regions = regionprops(labels)
centreCoords = np.round(regions[0]['centroid'])
centreCoords = centreCoords.astype(np.uint)
centreMask = np.zeros(test_image_mask.shape)
centreMask[centreCoords[0],centreCoords[1]] = 1
test_image_RC = image_arr_red_channels[i]
test_image_GC = image_arr_green_channels[i]
test_image_BC = image_arr_blue_channels[i]
entropy_arr = entropy[i]
elips_arr = elips[i]
imageIndxs = np.where(test_image != 0)
intensityColumn_Arr = np.squeeze(test_image.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
intensityColumn_Arr = (intensityColumn_Arr-np.average(intensityColumn_Arr)) / np.std(intensityColumn_Arr)
redChannel_Arr = np.squeeze(test_image_RC.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
redChannel_Arr = (redChannel_Arr-np.average(redChannel_Arr)) / np.std(redChannel_Arr)
entropy_arr = np.squeeze(entropy_arr.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
indices_Arr = np.indices((test_image.shape[0],test_image.shape[1])).transpose((1,2,0))
centreCoords = np.array([test_image.shape[0]/2,test_image.shape[1]/2])
distance_Arr = np.sqrt(np.add(np.power(indices_Arr[...,0]-centreCoords[0],2),np.power(indices_Arr[...,1]-centreCoords[1],2)))
normDistance_Arr = distance_Arr / np.max(distance_Arr)
normDistanceColumn_Arr = np.squeeze(normDistance_Arr.reshape([1,normDistance_Arr.shape[0]*normDistance_Arr.shape[1]])).T
X_train[i*image_arr[0].shape[0]*image_arr[0].shape[1]:(i+1)*image_arr[0].shape[0]*image_arr[0].shape[1],...] = np.column_stack((redChannel_Arr,entropy_arr,normDistanceColumn_Arr, intensityColumn_Arr))
Y_train[i*image_arr[0].shape[0]*image_arr[0].shape[1]:(i+1)*image_arr[0].shape[0]*image_arr[0].shape[1],0] = np.squeeze(test_image_mask.reshape([1,test_image_mask.shape[0]*test_image_mask.shape[1]])).T
X_train_2 = X_train
y_train_2 = Y_train
clf_bayes = GaussianNB()
clf_bayes.fit(X_train_2,y_train_2)
paramsBayes = clf_bayes.get_params
clf_log = LogisticRegression()
clf_log.fit(X_train_2,y_train_2)
log = open('Classifiers/Log/LogClf_excluding_' + str(j) + '.pickle', 'wb')
pickle.dump(clf_log, log)
log.close()
bay = open('Classifiers/Bay/BayClf_excluding_' + str(j) + '.pickle', 'wb')
pickle.dump(clf_bayes, bay)
bay.close()
test_image2 = np.zeros(image_arr[0].shape)
test_image_mask2 = np.zeros(mask_arr[0].shape)
test_img_RC2 = np.zeros(image_arr[0].shape)
test_image2 = image_arr[j]
test_image_mask2 = mask_arr[j]
test_image_RC2 = image_arr_red_channels[j]
test_image_GC2 = image_arr_green_channels[j]
test_image_BC2 = image_arr_blue_channels[j]
entropy_arr2 = entropy[j]
intensityColumn_Arr2 = np.squeeze(test_image2.reshape([1,test_image2.shape[0]*test_image2.shape[1]])).T
intensityColumn_Arr2 = (intensityColumn_Arr2-np.average(intensityColumn_Arr2)) / np.std(intensityColumn_Arr2)
redChannel_Arr2 = np.squeeze(test_image_RC2.reshape([1,test_image2.shape[0]*test_image2.shape[1]])).T
redChannel_Arr2 = ( redChannel_Arr2 - np.average(redChannel_Arr2) ) / np.std(redChannel_Arr2)
entropy_arr = np.squeeze(entropy_arr2.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
X_val = np.column_stack((redChannel_Arr2,entropy_arr,normDistanceColumn_Arr,intensityColumn_Arr2))
Y_val = np.squeeze(test_image_mask2.reshape([1,test_image_mask2.shape[0]*test_image_mask2.shape[1]])).T
predictsBayes = clf_bayes.predict(X_val)
predictsLog = clf_log.predict(X_val)
img_pred_Log = predictsLog.reshape([test_image.shape[0],test_image.shape[1]])
img_pred_Bayes = predictsBayes.reshape([test_image.shape[0],test_image.shape[1]])
try:
coordsBBLog = newBBcoords(img_pred_Log,test_image)
except:
coordsBBLog = []
try:
coordsBBBay = newBBcoords(img_pred_Bayes,test_image)
except:
coordsBBBay = []
ODROILog.append(coordsBBLog)
ODROIBay.append(coordsBBBay)
ODROILog_Arr = np.array(ODROILog)
ODROIBay_Arr = np.array(ODROIBay)
np.save('ODROILog_Arr.npy',ODROILog_Arr)
np.save('ODROIBay_Arr.npy',ODROIBay_Arr)
prepareSegments = False
if prepareSegments:
ODROILog_Arr = np.load('ODROILog_Arr.npy')
ODROIBay_Arr = np.load('ODROIBay_Arr.npy')
OD_section = []
OD_mask = []
OD_section_RC = []
lenX_Arr = 0
for i in range(0,40):
try:
coords = ODROILog_Arr[i]
segMask = np.array(mask_arr[i][coords[0]:coords[2],coords[1]:coords[3]])
segRC = np.array(image_arr_red_channels[i][coords[0]:coords[2],coords[1]:coords[3]])
imgSegment = np.array(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]])
vesslesSeg = np.array(vessels[i][coords[0]:coords[2],coords[1]:coords[3]])
kernel_ones = np.ones([3,3],np.uint8)
vesslesSeg = cv2.morphologyEx(vesslesSeg, cv2.MORPH_DILATE, kernel_ones)
indxsVesl = np.where(vesslesSeg != 0)
medianFiltered = median(imgSegment,disk(25))
maxFiltered = maximum_filter(imgSegment, size=15)
smoothVessels = np.copy(imgSegment)
smoothVessels[indxsVesl[0],indxsVesl[1]] = np.multiply(maxFiltered[indxsVesl[0],indxsVesl[1]],0.97)
OD_section.append(smoothVessels)
OD_mask.append(segMask)
OD_section_RC.append(segRC)
lenX_Arr = lenX_Arr + (imgSegment.shape[0]*imgSegment.shape[1])
except:
coords = ODROIBay_Arr[i]
segMask = np.array(mask_arr[i][coords[0]:coords[2],coords[1]:coords[3]])
segRC = np.array(image_arr_red_channels[i][coords[0]:coords[2],coords[1]:coords[3]])
imgSegment = np.array(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]])
vesslesSeg = np.array(vessels[i][coords[0]:coords[2],coords[1]:coords[3]])
kernel_ones = np.ones([3,3],np.uint8)
vesslesSeg = cv2.morphologyEx(vesslesSeg, cv2.MORPH_DILATE, kernel_ones)
indxsVesl = np.where(vesslesSeg != 0)
maxFiltered = maximum_filter(imgSegment, size=15)
smoothVessels = np.copy(imgSegment)
smoothVessels[indxsVesl[0],indxsVesl[1]] = np.multiply(maxFiltered[indxsVesl[0],indxsVesl[1]],0.97)
OD_section.append(smoothVessels)
OD_mask.append(segMask)
OD_section_RC.append(segRC)
lenX_Arr = lenX_Arr + (imgSegment.shape[0]*imgSegment.shape[1])
OD_section_Arr = np.array(OD_section)
OD_mask_Arr = np.array(OD_mask)
OD_section_RC = np.array(OD_section_RC)
np.save('OD_section_Arr.npy',OD_section_Arr)
np.save('OD_mask_Arr.npy',OD_mask_Arr)
np.save('OD_section_RC.npy',OD_section_RC)
print(lenX_Arr)
finalSegmentation = False
finalMaskPredicts = []
if finalSegmentation:
OD_section_Arr = np.load('OD_section_Arr.npy')
OD_mask_Arr = np.load('OD_mask_Arr.npy')
OD_section_RC = np.load('OD_section_RC.npy')
clahe = cv2.createCLAHE(clipLimit=1, tileGridSize=(8, 8))
for j in range(0,40):
removeLen = OD_section_Arr[j].shape[0] * OD_section_Arr[j].shape[1]
X_train = np.zeros([4577126-removeLen,2])
Y_train = np.zeros([4577126-removeLen,1])
for i in range(0,40):
if i == j:
continue
test_image = OD_section_Arr[i]
test_image_mask = OD_mask_Arr[i]
segRC = OD_section_RC[i]
clahePrep = np.multiply(np.copy(test_image),255)
clahePrep = clahePrep.astype(np.uint8)
highContrast = clahe.apply(clahePrep)
intensityColumn_Arr = np.squeeze(test_image.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
intensityColumn_Arr = (intensityColumn_Arr-np.average(intensityColumn_Arr)) / np.std(intensityColumn_Arr)
segRC = np.squeeze(segRC.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
if (i-1)*test_image.shape[0]*test_image.shape[1] < 0 and (i)*test_image.shape[0]*test_image.shape[1] == 0:
X_train[(i-1)*test_image.shape[0]*test_image.shape[1]::,...] = np.column_stack((intensityColumn_Arr,segRC))
Y_train[(i-1)*test_image.shape[0]*test_image.shape[1]::,0] = np.squeeze(test_image_mask.reshape([1,test_image_mask.shape[0]*test_image_mask.shape[1]])).T
continue
X_train[(i-1)*test_image.shape[0]*test_image.shape[1]:(i)*test_image.shape[0]*test_image.shape[1],...] = np.column_stack((intensityColumn_Arr,segRC))
Y_train[(i-1)*test_image.shape[0]*test_image.shape[1]:(i)*test_image.shape[0]*test_image.shape[1],0] = np.squeeze(test_image_mask.reshape([1,test_image_mask.shape[0]*test_image_mask.shape[1]])).T
X_train_2 = X_train
y_train_2 = Y_train
clf_bayes = GaussianNB()
clf_bayes.fit(X_train_2,y_train_2)
paramsBayes = clf_bayes.get_params
clf_log = LogisticRegression()
clf_log.fit(X_train_2,y_train_2)
log = open('Classifiers/Segments/Log/LogClf_excluding_' + str(j) + '.pickle', 'wb')
pickle.dump(clf_log, log)
log.close()
bay = open('Classifiers/Segments/Bay/BayClf_excluding_' + str(j) + '.pickle', 'wb')
pickle.dump(clf_bayes, bay)
bay.close()
test_image = OD_section_Arr[j]
test_image_mask = OD_mask_Arr[j]
segRC = OD_section_RC[j]
clahePrep = np.multiply(np.copy(test_image),255)
clahePrep = clahePrep.astype(np.uint8)
highContrast = clahe.apply(clahePrep)
intensityColumn_Arr = np.squeeze(test_image.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
intensityColumn_Arr = (intensityColumn_Arr-np.average(intensityColumn_Arr)) / np.std(intensityColumn_Arr)
segRC = np.squeeze(segRC.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
X_val = np.column_stack((intensityColumn_Arr,segRC))
predictsBayes = clf_bayes.predict(X_val)
predictsLog = clf_log.predict(X_val)
img_pred_Log = predictsLog.reshape([test_image.shape[0],test_image.shape[1]])
img_pred_Bayes = predictsBayes.reshape([test_image.shape[0],test_image.shape[1]])
finalMaskPredicts.append(predictsBayes)
finalMaskPredicts_Arr = np.array(finalMaskPredicts)
np.save("finalMaskPredicts_Bayes.npy",finalMaskPredicts_Arr)
loadFinalSegs = False
if loadFinalSegs:
foveaBBoxCoords = []
centroidCoord = []
ODmaskPredicts = []
elips = np.load('elips_arr.npy')
originalDimsBase = np.zeros(image_arr[0].shape)
OD_section_Arr = np.load('OD_section_Arr.npy')
finalMaskPredicts_Arr = np.load("finalMaskPredicts_Bayes.npy")
ODROILog_Arr = np.load('ODROILog_Arr.npy')
ODROIBay_Arr = np.load('ODROIBay_Arr.npy')
for i in range(0,40):
originalDims = np.copy(originalDimsBase)
test_image = OD_section_Arr[i]
maskPred = finalMaskPredicts_Arr[i].reshape([test_image.shape[0],test_image.shape[1]])
finalMask, centroidCoords, radius, colsCoord = getLargestAreaEcentroid(maskPred)
finalMaskImg = np.multiply(finalMask,255)
finalMaskImg[centroidCoords[0],centroidCoords[1]] = 255
try:
coords = ODROILog_Arr[i]
failTest = (coords[2])
except:
coords = ODROIBay_Arr[i]
failTest = (coords[2])
coordsReal =[centroidCoords[0] + coords[0],centroidCoords[1] + coords[1]]
colsCoordReal = [colsCoord[0] + coords[1],colsCoord[1] + coords[1]]
originalDims[coords[0]:coords[2],coords[1]:coords[3]] = finalMaskImg
elipsResized = cv2.resize(elips[i], dsize=(originalDims.shape[1],originalDims.shape[0]), interpolation=cv2.INTER_CUBIC)
elipsResized = np.average(elipsResized,axis = 2)
elipsResized[elipsResized>0.5] = 1
elipsResized[elipsResized<1] = 0
elipsResized = thin(elipsResized)
elipsIndexs = np.where(elipsResized != 0)
originalDims = originalDims.astype(np.uint8)
indexsOD_ELi = np.where(originalDims != 0)
checkResults = np.copy(image_arr[i])
checkResults[indexsOD_ELi] = originalDims[indexsOD_ELi]
if abs(coordsReal[1]-np.min(elipsIndexs[1])) < abs(coordsReal[1]-np.max(elipsIndexs[1])):
relevantColumn = colsCoordReal[1] - 10  # was 'tColumn', which is never read below
columnROI_f = [coordsReal[1] + round(3*radius),coordsReal[1] + round(6*radius)]
else:
relevantColumn = colsCoordReal[0] + 10
columnROI_f = [coordsReal[1] - round(6*radius),coordsReal[1] - round(3*radius)]
relevantRows = np.where(elipsResized[...,relevantColumn]!=0)
checkResults[relevantRows[0][0]:relevantRows[0][-1],columnROI_f[0]] = 0
checkResults[relevantRows[0][0]:relevantRows[0][-1],columnROI_f[1]] = 0
checkResults[relevantRows[0][0],columnROI_f[0]:columnROI_f[1]] = 0
checkResults[relevantRows[0][-1],columnROI_f[0]:columnROI_f[1]] = 0
foveaBBoxCoords.append((relevantRows[0][0],columnROI_f[0],relevantRows[0][-1],columnROI_f[1]))
centroidCoord.append(coordsReal)
originalDims = np.divide(originalDims,255)
ODmaskPredicts.append(originalDims)
foveaBBoxCoords_Arr = np.array(foveaBBoxCoords)
centroidCoord_Arr = np.array(centroidCoord)
ODmaskPredicts_Arr = np.array(ODmaskPredicts)
np.save("bbox_fovea.npy",foveaBBoxCoords_Arr)
np.save("centroidCoord_Arr.npy",centroidCoord_Arr)
np.save("ODmaskPredicts_Arr.npy",ODmaskPredicts_Arr)
getFoveaGTCoords = True
if getFoveaGTCoords:
foveCoordsGT = []
tempCoords =[]
imgNo = 0
with open('Datasets/fovea_location.csv') as f:
reader = csv.reader(f)
next(reader)
for row in reader:
tempCoords.append(float(row[1]))
tempCoords.append(float(row[2]))
foveCoordsGT.append(tempCoords)
tempCoords =[]
imgNo += 1
if imgNo == 40:
break
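# Leave-one-out fovea localisation: for each held-out image j, a Gaussian
# Naive Bayes and a logistic regression model are trained on the remaining 39
# fovea ROIs (intensity, green-channel and CLAHE features), the held-out ROI
# is predicted, and the centroid of the largest predicted blob is mapped back
# to full-image coordinates.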
getFoveaCoordsPred = False
if getFoveaCoordsPred:
foveaBBoxCoords_Arr = np.load("bbox_fovea.npy")
foveaBBoxCoords_Arr = np.absolute(foveaBBoxCoords_Arr)
removeLen = 0
realCentroidCoords_Arr = []
clahe = cv2.createCLAHE(clipLimit=1, tileGridSize=(8, 8))
for i in range(0,40):
if foveaBBoxCoords_Arr[i][3] < foveaBBoxCoords_Arr[i][1]:
temp = foveaBBoxCoords_Arr[i][1]
foveaBBoxCoords_Arr[i][1] = foveaBBoxCoords_Arr[i][3]
foveaBBoxCoords_Arr[i][3] = temp
if foveaBBoxCoords_Arr[i][2] < foveaBBoxCoords_Arr[i][0]:
temp = foveaBBoxCoords_Arr[i][0]
foveaBBoxCoords_Arr[i][0] = foveaBBoxCoords_Arr[i][2]
foveaBBoxCoords_Arr[i][2] = temp
test_image = image_arr[i]
fovea_region = test_image[foveaBBoxCoords_Arr[i][0]:foveaBBoxCoords_Arr[i][2],foveaBBoxCoords_Arr[i][1]:foveaBBoxCoords_Arr[i][3]]
bboxShape = fovea_region.shape
removeLen += bboxShape[0]*bboxShape[1]
for j in range(0,40):
removeLen = (foveaBBoxCoords_Arr[j][2]-foveaBBoxCoords_Arr[j][0]) * (foveaBBoxCoords_Arr[j][3]-foveaBBoxCoords_Arr[j][1])
X_train = np.zeros([3187816-removeLen,3])
Y_train = np.zeros([3187816-removeLen,1])
first = 0
for i in range(0,40):
if i == j:
continue
test_image = image_arr[i]
fovea_region = test_image[foveaBBoxCoords_Arr[i][0]:foveaBBoxCoords_Arr[i][2],foveaBBoxCoords_Arr[i][1]:foveaBBoxCoords_Arr[i][3]]
bboxShape = fovea_region.shape
last = bboxShape[0]*bboxShape[1] + first
foveaRegionGC = image_arr_green_channels[i][foveaBBoxCoords_Arr[i][0]:foveaBBoxCoords_Arr[i][2],foveaBBoxCoords_Arr[i][1]:foveaBBoxCoords_Arr[i][3]]
clahePrep = np.multiply(np.copy(foveaRegionGC),255)
clahePrep = clahePrep.astype(np.uint8)
highContrast = clahe.apply(clahePrep)
maskBig = np.zeros(test_image.shape)
coordsFoveaCenter = [round(foveCoordsGT[i][1]/4),round(foveCoordsGT[i][0]/4)]
maskBig[coordsFoveaCenter[0]-10:coordsFoveaCenter[0]+10,coordsFoveaCenter[1]-10:coordsFoveaCenter[1]+10] = 1
mask = maskBig[foveaBBoxCoords_Arr[i][0]:foveaBBoxCoords_Arr[i][2],foveaBBoxCoords_Arr[i][1]:foveaBBoxCoords_Arr[i][3]]
fovea_region = np.squeeze(fovea_region.reshape([1,bboxShape[0]*bboxShape[1]])).T
fovea_region = (fovea_region-np.average(fovea_region)) / np.std(fovea_region)
foveaRegionGC = np.squeeze(foveaRegionGC.reshape([1,bboxShape[0]*bboxShape[1]])).T
foveaRegionGC = (foveaRegionGC-np.average(foveaRegionGC)) / np.std(foveaRegionGC)
highContrast = np.squeeze(highContrast.reshape([1,bboxShape[0]*bboxShape[1]])).T
highContrast = (highContrast-np.average(highContrast)) / np.std(highContrast)
X_train[first:last,...] = np.column_stack((fovea_region,foveaRegionGC,highContrast))
Y_train[first:last,0] = np.squeeze(mask.reshape([1,bboxShape[0]*bboxShape[1]])).T
first = last
X_train_2 = X_train
y_train_2 = Y_train
clf_bayes = GaussianNB()
clf_bayes.fit(X_train_2,y_train_2)
paramsBayes = clf_bayes.get_params
clf_log = LogisticRegression()
clf_log.fit(X_train_2,y_train_2)
test_image = image_arr[j]
fovea_region = test_image[foveaBBoxCoords_Arr[j][0]:foveaBBoxCoords_Arr[j][2],foveaBBoxCoords_Arr[j][1]:foveaBBoxCoords_Arr[j][3]]
bboxShape = fovea_region.shape
foveaRegionGC = image_arr_green_channels[j][foveaBBoxCoords_Arr[j][0]:foveaBBoxCoords_Arr[j][2],foveaBBoxCoords_Arr[j][1]:foveaBBoxCoords_Arr[j][3]]
clahePrep = np.multiply(np.copy(foveaRegionGC),255)
clahePrep = clahePrep.astype(np.uint8)
highContrast = clahe.apply(clahePrep)
fovea_region = np.squeeze(fovea_region.reshape([1,bboxShape[0]*bboxShape[1]])).T
fovea_region = (fovea_region-np.average(fovea_region)) / np.std(fovea_region)
foveaRegionGC = np.squeeze(foveaRegionGC.reshape([1,bboxShape[0]*bboxShape[1]])).T
foveaRegionGC = (foveaRegionGC-np.average(foveaRegionGC)) / np.std(foveaRegionGC)
highContrast = np.squeeze(highContrast.reshape([1,bboxShape[0]*bboxShape[1]])).T
highContrast = (highContrast-np.average(highContrast)) / np.std(highContrast)
X_val = np.column_stack((fovea_region,foveaRegionGC,highContrast))
predictsBayes = clf_bayes.predict(X_val)
predictsLog = clf_log.predict(X_val)
img_pred_Log = predictsLog.reshape(bboxShape)
img_pred_Bayes = predictsBayes.reshape(bboxShape)
try:
finalMask, centroidCoords, radius, colsCoord = getLargestAreaEcentroid(img_pred_Bayes)
if centroidCoords.size == 0:
finalMask = np.zeros(img_pred_Bayes.shape)
finalMask[round(finalMask.shape[0]/2),round(finalMask.shape[1]/2)] = 1
centroidCoords = np.array([round(finalMask.shape[0]/2),round(finalMask.shape[1]/2)])
except Exception:
finalMask = np.zeros(img_pred_Bayes.shape)
finalMask[round(finalMask.shape[0]/2),round(finalMask.shape[1]/2)] = 1
centroidCoords = np.array([round(finalMask.shape[0]/2),round(finalMask.shape[1]/2)])
maskEyes = np.copy(finalMask)
maskEyes = np.multiply(maskEyes,255)
maskEyes = maskEyes.astype(np.uint8)
realCentroidCoords = [centroidCoords[0] + foveaBBoxCoords_Arr[j][0],centroidCoords[1] + foveaBBoxCoords_Arr[j][1]]
realCentroidCoords_Arr.append(realCentroidCoords)
realCentroidCoords_Arr = np.array(realCentroidCoords_Arr)
np.save('fovea_centre_coords.npy',realCentroidCoords_Arr)
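# Optional sanity-check plots (disabled by default): visualise the decision
# surfaces of the trained classifiers over a subsampled feature grid.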
showGraphsClass = False
if showGraphsClass:
import matplotlib.pyplot as plt
from sklearn import svm, datasets
def make_meshgrid(x, y, h=.02):
x_min, x_max = x.min() - 1, x.max() + 1
y_min, y_max = y.min() - 1, y.max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
return xx, yy
def plot_contours(ax, clf, xx, yy, proba=False, **params):
if proba:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:,-1]
else:
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
out = ax.contourf(xx, yy, Z,20, **params)
return out
titles = ('Bayes',
'Logistic regression')
X0, X1 = X[0::500, 0], X[0::500, 1]
xx, yy = make_meshgrid(X0, X1,h=0.005)
showPlots = False
if showPlots:
for clf, title in zip(models, titles):
_,ax_all = plt.subplots(1,2)
ax = ax_all[0]
plot_contours(ax, clf, xx, yy, proba=True,
cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X0, X1, c=y[0::500], cmap=plt.cm.coolwarm, s=20)
ax.set_xlim(X0.min(), X0.max())
ax.set_ylim(X1.min(), X1.max())
ax.set_xlabel('Distance')
ax.set_ylabel('Intensity')
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(title)
ax = ax_all[1]
plot_contours(ax, clf, xx, yy,
cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X0, X1, c=y[0::500], cmap=plt.cm.coolwarm, s=20)
ax.set_xlim(X0.min(), X0.max())
ax.set_ylim(X1.min(), X1.max())
ax.set_xlabel('Distance')
ax.set_ylabel('Intensity')
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(title)
plt.show()
print("Done")
| true
| true
|
79075f56d229c2d2a73820a398f773add23bebeb
| 6,981
|
py
|
Python
|
smacha_ros/doc/conf.py
|
ReconCell/smacha
|
253215a35d2d091bf50c28c1ba876209b82d2400
|
[
"BSD-3-Clause"
] | 16
|
2019-04-16T07:44:30.000Z
|
2022-03-10T08:04:45.000Z
|
smacha_ros/doc/conf.py
|
ReconCell/smacha
|
253215a35d2d091bf50c28c1ba876209b82d2400
|
[
"BSD-3-Clause"
] | 2
|
2019-07-18T09:11:00.000Z
|
2019-09-26T10:21:26.000Z
|
smacha_ros/doc/conf.py
|
ReconCell/smacha
|
253215a35d2d091bf50c28c1ba876209b82d2400
|
[
"BSD-3-Clause"
] | 2
|
2019-08-21T20:14:54.000Z
|
2019-09-19T13:26:34.000Z
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import os
import catkin_pkg.package
catkin_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
catkin_package = catkin_pkg.package.parse_package(os.path.join(catkin_dir, catkin_pkg.package.PACKAGE_MANIFEST_FILENAME))
# -- Project information -----------------------------------------------------
project = 'SMACHA ROS'
copyright = '2019, ReconCell'
author = 'Barry Ridge'
# The short X.Y version
# version = ''
version = catkin_package.version
# The full version, including alpha/beta/rc tags
# release = '0.0.1'
release = catkin_package.version
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinxcontrib.programoutput',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# source_suffix = '.rst'
source_parsers = {
'.md': 'recommonmark.parser.CommonMarkParser',
}
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
html_logo = "_static/logo.png"
html_favicon = "_static/favicon.ico"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_theme_options = {
'canonical_url': 'https://reconcell.gitlab.io/smacha/smacha_ros/'
# 'analytics_id': 'UA-XXXXXXX-1', # Provided by Google in your dashboard
# 'logo_only': False,
# 'display_version': True,
# 'prev_next_buttons_location': 'bottom',
# 'style_external_links': False,
# 'vcs_pageview_mode': '',
# 'style_nav_header_background': 'white',
# # Toc options
# 'collapse_navigation': True,
# 'sticky_navigation': True,
# 'navigation_depth': 4,
# 'includehidden': True,
# 'titles_only': False
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'smacha_rosdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'smacha_ros.tex', 'smacha\\_ros package API',
'Barry Ridge', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'smacha_ros', 'smacha_ros package API',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'smacha_ros', 'smacha_ros package API',
author, 'smacha_ros', 'SMACHA is a meta-scripting, templating, and code generation engine for rapid prototyping of ROS SMACH state machines.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
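# Illustrative only, not part of this project's configuration: further named
# mappings could be added alongside the default one, e.g.
# intersphinx_mapping = {
#     'python': ('https://docs.python.org/3', None),
# }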
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| 30.753304
| 147
| 0.655064
|
import os
import catkin_pkg.package
catkin_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
catkin_package = catkin_pkg.package.parse_package(os.path.join(catkin_dir, catkin_pkg.package.PACKAGE_MANIFEST_FILENAME))
project = 'SMACHA ROS'
copyright = '2019, ReconCell'
author = 'Barry Ridge'
version = catkin_package.version
release = catkin_package.version
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinxcontrib.programoutput',
]
templates_path = ['_templates']
source_suffix = ['.rst', '.md']
source_parsers = {
'.md': 'recommonmark.parser.CommonMarkParser',
}
master_doc = 'index'
language = None
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
pygments_style = None
html_theme = 'sphinx_rtd_theme'
html_logo = "_static/logo.png"
html_favicon = "_static/favicon.ico"
html_theme_options = {
'canonical_url': 'https://reconcell.gitlab.io/smacha/smacha_ros/'
}
html_static_path = ['_static']
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'smacha_rosdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'smacha_ros.tex', 'smacha\\_ros package API',
'Barry Ridge', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'smacha_ros', 'smacha_ros package API',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'smacha_ros', 'smacha_ros package API',
author, 'smacha_ros', 'SMACHA is a meta-scripting, templating, and code generation engine for rapid prototyping of ROS SMACH state machines.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| true
| true
|
790760cb05951c001a9580530426442d4df1318a
| 108
|
py
|
Python
|
topics/05-data-wrangling/consume.py
|
ralec911/missing-course
|
a297768cf7e7162fabd6b8a1d3041c2d3faabc22
|
[
"BSD-3-Clause"
] | 8
|
2021-02-10T15:58:28.000Z
|
2022-01-09T01:55:41.000Z
|
topics/05-data-wrangling/consume.py
|
ralec911/missing-course
|
a297768cf7e7162fabd6b8a1d3041c2d3faabc22
|
[
"BSD-3-Clause"
] | 2
|
2021-01-26T01:35:42.000Z
|
2021-02-27T20:41:38.000Z
|
topics/05-data-wrangling/consume.py
|
ralec911/missing-course
|
a297768cf7e7162fabd6b8a1d3041c2d3faabc22
|
[
"BSD-3-Clause"
] | 27
|
2021-01-11T16:23:24.000Z
|
2022-01-09T02:04:03.000Z
|
#!/usr/bin/env python3 -u
import sys
for value in sys.stdin:
sys.stderr.write(f"consumed {value}\n")
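# Illustrative usage (assumed, not part of the original file): pipe any
# line-oriented producer into this script, e.g.
#   printf 'a\nb\n' | python3 -u consume.py
# Each incoming line (trailing newline included) is echoed to stderr as
# "consumed ...".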
| 13.5
| 43
| 0.675926
|
import sys
for value in sys.stdin:
sys.stderr.write(f"consumed {value}\n")
| true
| true
|
790761dc66f4e5f333d71022468cb222559c8e95
| 69,394
|
py
|
Python
|
keras/preprocessing/image.py
|
mendesmiguel/keras
|
bf1378f39d02b7d0b53ece5458f9275ac8208046
|
[
"MIT"
] | 2
|
2019-09-17T22:01:41.000Z
|
2020-05-30T05:48:14.000Z
|
keras/preprocessing/image.py
|
HangJie720/keras
|
bf1378f39d02b7d0b53ece5458f9275ac8208046
|
[
"MIT"
] | null | null | null |
keras/preprocessing/image.py
|
HangJie720/keras
|
bf1378f39d02b7d0b53ece5458f9275ac8208046
|
[
"MIT"
] | 3
|
2019-08-12T18:15:17.000Z
|
2021-06-20T19:40:13.000Z
|
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.deg2rad(np.random.uniform(-rg, rg))
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
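# Illustrative sketch (not part of the original module): with the default axis
# arguments the input is expected as (channels, rows, cols), e.g.
#   img = np.random.rand(3, 64, 64)
#   rotated = random_rotation(img, 20.)   # same shape, rotated by up to 20 deg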
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.deg2rad(np.random.uniform(-intensity, intensity))
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
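# Illustrative sketch (not part of the original module): zoom_range is a
# (lower, upper) pair of zoom factors, e.g.
#   img = np.random.rand(3, 64, 64)
#   zoomed = random_zoom(img, (0.8, 1.2))   # per-axis zoom factors in [0.8, 1.2]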
def random_channel_shift(x, intensity, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + np.random.uniform(-intensity, intensity),
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def random_brightness(x, brightness_range):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
# Raises
        ValueError: if `brightness_range` isn't a tuple.
"""
if len(brightness_range) != 2:
raise ValueError(
            '`brightness_range` should be a tuple or list of two floats. '
'Received: %s' % brightness_range)
x = array_to_img(x)
    imgenhancer_Brightness = ImageEnhance.Brightness(x)
u = np.random.uniform(brightness_range[0], brightness_range[1])
x = imgenhancer_Brightness.enhance(u)
x = img_to_array(x)
return x
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
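# Note on the composition above: reset_matrix moves the origin to the image
# centre, `matrix` is applied there, and offset_matrix moves the origin back,
# so the resulting transform acts about the centre rather than the (0, 0)
# corner.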
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Applies the image transformation specified by a matrix.
# Arguments
x: 2D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
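# Illustrative sketch (not part of the original module; the path is a
# placeholder): the typical pairing is
#   img = load_img('some_image.png', target_size=(224, 224))
#   x = img_to_array(img)   # shape (224, 224, 3) with "channels_last"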
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
shear_range: Float. Shear Intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
        preprocessing_function: function that will be applied to each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
' Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening` '
'which overrides setting of'
'`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None,
save_to_dir=None, save_prefix='', save_format='png', subset=None):
"""Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
"""
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
            color_mode: One of "grayscale", "rgb". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
def standardize(self, x):
"""Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def random_transform(self, x, seed=None):
"""Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# Use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.deg2rad(np.random.uniform(
-self.rotation_range,
self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform(
-self.shear_range,
self.shear_range))
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
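# --- Illustrative sketch (not part of the original source) ---
# The smallest useful `Iterator` subclass: it yields the selected samples
# unchanged. `data` is a hypothetical indexable array of samples; only
# `_get_batches_of_transformed_samples` needs to be implemented.
class _IdentityIterator(Iterator):
    def __init__(self, data, batch_size=32, shuffle=False, seed=None):
        self.data = np.asarray(data)
        super(_IdentityIterator, self).__init__(len(self.data),
                                                batch_size, shuffle, seed)
    def _get_batches_of_transformed_samples(self, index_array):
        # Return the requested samples without any transformation.
        return self.data[index_array]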
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
If tuple, the second elements is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3 or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
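# --- Illustrative usage sketch (not part of the original source) ---
# Drawing one augmented batch from in-memory arrays; `x_train` and `y_train`
# are hypothetical NumPy arrays with matching first dimensions.
def _example_flow_usage(x_train, y_train):
    datagen = ImageDataGenerator(horizontal_flip=True)
    batches = datagen.flow(x_train, y_train, batch_size=32, seed=1)
    x_batch, y_batch = next(batches)  # one batch of transformed samples
    return x_batch, y_batch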
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
for fname in sorted(files):
for extension in white_list_formats:
if fname.lower().endswith('.tiff'):
warnings.warn('Using \'.tiff\' files with multiple bands '
'will cause distortion. '
'Please verify your output.')
if fname.lower().endswith('.' + extension):
yield root, fname
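# --- Illustrative usage sketch (not part of the original source) ---
# Counting the valid image files under a hypothetical directory with the
# generator above; the extension set is an assumption for the example.
def _example_count_images(directory):
    formats = {'png', 'jpg', 'jpeg'}
    return sum(1 for _ in _iter_valid_files(directory, formats,
                                            follow_links=False))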
def _count_valid_files_in_directory(directory,
white_list_formats,
split,
follow_links):
"""Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
"""
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
start, stop = int(split[0] * num_files), int(split[1] * num_files)
else:
start, stop = 0, num_files
return stop - start
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
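# --- Illustrative usage sketch (not part of the original source) ---
# Listing the files of a single class directory; `class_indices` must map the
# directory's basename to an integer index (both arguments are hypothetical).
def _example_list_class_files(directory, class_indices):
    formats = {'png', 'jpg', 'jpeg'}
    classes, filenames = _list_valid_filenames_in_directory(
        directory, formats, split=None,
        class_indices=class_indices, follow_links=False)
    return classes, filenames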
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: ', subset,
'; expected "training" or "validation"')
else:
split = None
self.subset = subset
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
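# --- Illustrative usage sketch (not part of the original source) ---
# Reading one batch from a directory laid out as `data/train/<class>/*.jpg`;
# the path, target size and class_mode are placeholder assumptions.
def _example_flow_from_directory():
    datagen = ImageDataGenerator(rescale=1. / 255)
    generator = datagen.flow_from_directory('data/train',
                                            target_size=(150, 150),
                                            batch_size=32,
                                            class_mode='binary')
    x_batch, y_batch = next(generator)
    return x_batch, y_batch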
| 41.678078
| 124
| 0.573119
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
theta = np.deg2rad(np.random.uniform(-rg, rg))
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
shear = np.deg2rad(np.random.uniform(-intensity, intensity))
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_channel_shift(x, intensity, channel_axis=0):
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + np.random.uniform(-intensity, intensity),
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def random_brightness(x, brightness_range):
if len(brightness_range) != 2:
raise ValueError(
            '`brightness_range` should be a tuple or list of two floats. '
'Received: %s' % brightness_range)
x = array_to_img(x)
    imgenhancer_Brightness = ImageEnhance.Brightness(x)
u = np.random.uniform(brightness_range[0], brightness_range[1])
x = imgenhancer_Brightness.enhance(u)
x = img_to_array(x)
return x
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def array_to_img(x, data_format=None, scale=True):
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True, **kwargs):
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
interpolation='nearest'):
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
class ImageDataGenerator(object):
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
' Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening` '
'which overrides setting of'
'`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None,
save_to_dir=None, save_prefix='', save_format='png', subset=None):
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
def standardize(self, x):
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def random_transform(self, x, seed=None):
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
if self.rotation_range:
theta = np.deg2rad(np.random.uniform(
-self.rotation_range,
self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try:
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError:
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try:
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError:
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform(
-self.shear_range,
self.shear_range))
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
def fit(self, x,
augment=False,
rounds=1,
seed=None):
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
class Iterator(Sequence):
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
raise NotImplementedError
class NumpyArrayIterator(Iterator):
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3 or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
with self.lock:
index_array = next(self.index_generator)
return self._get_batches_of_transformed_samples(index_array)
def _iter_valid_files(directory, white_list_formats, follow_links):
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
for fname in sorted(files):
for extension in white_list_formats:
if fname.lower().endswith('.tiff'):
warnings.warn('Using \'.tiff\' files with multiple bands '
'will cause distortion. '
'Please verify your output.')
if fname.lower().endswith('.' + extension):
yield root, fname
def _count_valid_files_in_directory(directory,
white_list_formats,
split,
follow_links):
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
start, stop = int(split[0] * num_files), int(split[1] * num_files)
else:
start, stop = 0, num_files
return stop - start
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
class DirectoryIterator(Iterator):
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: ', subset,
'; expected "training" or "validation"')
else:
split = None
self.subset = subset
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
with self.lock:
index_array = next(self.index_generator)
return self._get_batches_of_transformed_samples(index_array)
| true
| true
|
790761dda3807d309f2daf1ae9f05eede0666dc8
| 6,464
|
py
|
Python
|
behave_tests/steps/create_question.py
|
Sindhuja-SRL/back-end
|
d84dae8ed212913339dec646b46a67fcc0b77f52
|
[
"MIT"
] | null | null | null |
behave_tests/steps/create_question.py
|
Sindhuja-SRL/back-end
|
d84dae8ed212913339dec646b46a67fcc0b77f52
|
[
"MIT"
] | null | null | null |
behave_tests/steps/create_question.py
|
Sindhuja-SRL/back-end
|
d84dae8ed212913339dec646b46a67fcc0b77f52
|
[
"MIT"
] | 1
|
2022-03-11T01:45:39.000Z
|
2022-03-11T01:45:39.000Z
|
from behave import *
import requests
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
from host.models import Event
use_step_matcher("re")
# @given("that I am a registered host of privilege walk events and want to create questions and answer choices for the event")
# def step_impl(context):
# context.username = "12thMan"
# context.password = "SomePassword123"
# context.first_name = "12th"
# context.last_name = "Man"
# context.email = "twelve@testtamu.edu"
# usr = User.objects.create_user(
# context.username,
# context.email,
# context.password
# )
# usr.first_name = context.first_name
# usr.last_name = context.last_name
# usr.save()
# registered_user = User.objects.filter(username="12thMan")
# assert len(registered_user) == 1
# user_auth_token, _ = Token.objects.get_or_create(user=usr)
# context.key = user_auth_token.key
# data = {
# "name": "New year event"
# }
# headers = {
# 'Authorization':'Token '+ context.key
# }
# resp = requests.post(context.test.live_server_url + "/host/events/create/", data, headers=headers)
# context.event_api_response_data = resp.json()
# context.eventId = context.event_api_response_data["id"]
# @when("I make an API call to create questions API with my correct username, questions, answer choices and correct eventid")
# def step_impl(context):
# data = {
# "event_id": context.eventId,
# "title": "The question's title goes here",
# "choices": [
# {
# "description": "Pizza",
# "value": 1
# },
# {
# "description": "Ice Cream",
# "value": 2
# },
# {
# "description": "Salt Water",
# "value": -1
# }
# ]
# }
# headers = {
# 'Authorization':'Token '+ context.key
# }
# resp = requests.post(context.test.live_server_url + "/host/qa/create/", data, headers=headers)
# assert resp.status_code >= 200 and resp.status_code < 300
# context.api_response_data = resp.json()
# @then("I expect the response that gives the status and id of the created question")
# def step_impl(context):
# assert context.api_response_data["status"] == "created"
# assert context.api_response_data["id"] != ""
# @given("that I am a registered host of privilege walk and wants to create questions but with wrong eventid")
# def step_impl(context):
# context.username = "12thMan"
# context.password = "SomePassword123"
# context.first_name = "12th"
# context.last_name = "Man"
# context.email = "twelve@testtamu.edu"
# usr = User.objects.create_user(
# context.username,
# context.email,
# context.password
# )
# usr.first_name = context.first_name
# usr.last_name = context.last_name
# usr.save()
# registered_user = User.objects.filter(username="12thMan")
# assert len(registered_user) == 1
# user_auth_token, _ = Token.objects.get_or_create(user=usr)
# context.key = user_auth_token.key
# data = {
# "name": "New year event"
# }
# headers = {
# 'Authorization':'Token '+ context.key
# }
# resp = requests.post(context.test.live_server_url + "/host/events/create/", data, headers=headers)
# context.event_api_response_data = resp.json()
# context.eventId = context.event_api_response_data["id"]
# @when("I make an API call to create questions API with my username, questions, answer choices and wrong event id")
# def step_impl(context):
# data = {
# "event_id": 12,
# "title": "Are you under 20?",
# "choices": [
# {
# "description": "Yes",
# "value": "1"
# },
# {
# "description": "No",
# "value": "-1"
# }
# ]
# }
# headers = {
# 'Authorization':'Token '+ context.key
# }
# resp = requests.post(context.test.live_server_url + "/host/qa/create/", data, headers=headers)
# assert resp.status_code >= 500
# context.api_response_data = resp.json()
# @then("I expect the response that says questions cannot be created as event id doesn't exist")
# def step_impl(context):
# pass
# @given("that I am a registered host of privilege walk and wants to create questions but without giving eventid")
# def step_impl(context):
# context.username = "12thMan"
# @when("I make an API call to create questions API with my username, questions, answer choices and without event id")
# def step_impl(context):
# data = {
# "title": "Are you under 20?",
# "choices": [
# {
# "description": "Yes",
# "value": "1"
# },
# {
# "description": "No",
# "value": "-1"
# }
# ]
# }
# headers = {
# 'Authorization':'Token '+ context.key
# }
# resp = requests.post(context.test.live_server_url + "/host/qa/create/", data, headers=headers)
# assert resp.status_code >= 500
# context.api_response_data = resp.json()
# @then("I expect the response that says questions cannot be created as event id is missing")
# def step_impl(context):
# pass
@given("that I am a registered host of privilege walk events and want to create questions but forgets to give username")
def step_impl(context):
context.username = "11thMan"
@when("I make an API call to create questions API with missing username in request")
def step_impl(context):
data = {
"title": "Are you under 20?",
"choices": [
{
"description": "Yes",
"value": "1"
},
{
"description": "No",
"value": "-1"
}
]
}
resp = requests.post(context.test.live_server_url + "/host/events/create/", data)
assert resp.status_code >= 400 and resp.status_code < 500
context.api_response_data = resp.json()
@then("I expect the response that says questions cannot be created and username is required in request")
def step_impl(context):
assert context.api_response_data["detail"] == "Authentication credentials were not provided."
| 29.925926
| 126
| 0.591894
|
from behave import *
import requests
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
from host.models import Event
use_step_matcher("re")
# "choices": [
# {
# "description": "Pizza",
# "value": 1
# },
# {
# "description": "Ice Cream",
# "value": 2
# },
# {
# "description": "Salt Water",
# "value": -1
# }
# ]
# }
# headers = {
# 'Authorization':'Token '+ context.key
# }
# resp = requests.post(context.test.live_server_url + "/host/qa/create/", data, headers=headers)
# assert resp.status_code >= 200 and resp.status_code < 300
# context.api_response_data = resp.json()
# @then("I expect the response that gives the status and id of the created question")
# def step_impl(context):
# assert context.api_response_data["status"] == "created"
# assert context.api_response_data["id"] != ""
# @given("that I am a registered host of privilege walk and wants to create questions but with wrong eventid")
# def step_impl(context):
# context.username = "12thMan"
# context.password = "SomePassword123"
# context.first_name = "12th"
# context.last_name = "Man"
# context.email = "twelve@testtamu.edu"
# usr = User.objects.create_user(
# context.username,
# context.email,
# context.password
# )
# usr.first_name = context.first_name
# usr.last_name = context.last_name
# usr.save()
# registered_user = User.objects.filter(username="12thMan")
# assert len(registered_user) == 1
# user_auth_token, _ = Token.objects.get_or_create(user=usr)
# context.key = user_auth_token.key
# data = {
# "name": "New year event"
# }
# headers = {
# 'Authorization':'Token '+ context.key
# }
# resp = requests.post(context.test.live_server_url + "/host/events/create/", data, headers=headers)
# context.event_api_response_data = resp.json()
# context.eventId = context.event_api_response_data["id"]
# @when("I make an API call to create questions API with my username, questions, answer choices and wrong event id")
# def step_impl(context):
# data = {
# "event_id": 12,
# "title": "Are you under 20?",
# "choices": [
# {
# "description": "Yes",
# "value": "1"
# },
# {
# "description": "No",
# "value": "-1"
# }
# ]
# }
# headers = {
# 'Authorization':'Token '+ context.key
# }
# resp = requests.post(context.test.live_server_url + "/host/qa/create/", data, headers=headers)
# assert resp.status_code >= 500
# context.api_response_data = resp.json()
# @then("I expect the response that says questions cannot be created as event id doesn't exist")
@given("that I am a registered host of privilege walk events and want to create questions but forgets to give username")
def step_impl(context):
context.username = "11thMan"
@when("I make an API call to create questions API with missing username in request")
def step_impl(context):
data = {
"title": "Are you under 20?",
"choices": [
{
"description": "Yes",
"value": "1"
},
{
"description": "No",
"value": "-1"
}
]
}
resp = requests.post(context.test.live_server_url + "/host/events/create/", data)
assert resp.status_code >= 400 and resp.status_code < 500
context.api_response_data = resp.json()
@then("I expect the response that says questions cannot be created and username is required in request")
def step_impl(context):
assert context.api_response_data["detail"] == "Authentication credentials were not provided."
| true
| true
|
7907638945757bf6b7e2a1ee40a201af1c82cc5f
| 1,052
|
py
|
Python
|
juno/subscription.py
|
leogregianin/juno-python
|
0be2b70516b0dde713ff36cdb40888f06cc538f5
|
[
"MIT"
] | 2
|
2022-03-25T21:08:46.000Z
|
2022-03-31T21:10:17.000Z
|
juno/subscription.py
|
leogregianin/juno-python
|
0be2b70516b0dde713ff36cdb40888f06cc538f5
|
[
"MIT"
] | null | null | null |
juno/subscription.py
|
leogregianin/juno-python
|
0be2b70516b0dde713ff36cdb40888f06cc538f5
|
[
"MIT"
] | null | null | null |
from juno.resources import handler_request
from juno.resources.routes import subscription_routes
def create(dictionary):
return handler_request.post(subscription_routes.get_base_url(), dictionary)
def find_all():
return handler_request.get(subscription_routes.get_base_url())
def find_by_id(subscription_id):
return handler_request.get(
subscription_routes.get_specific_subscription_by_id_url(subscription_id)
)
def deactivation(subscription_id):
return handler_request.post(
subscription_routes.get_deactivation_subscription_url(subscription_id)
)
def activation(subscription_id):
return handler_request.post(
subscription_routes.get_activation_subscription_url(subscription_id)
)
def cancelation(subscription_id):
return handler_request.post(
subscription_routes.get_cancelation_subscription_url(subscription_id)
)
def completion(subscription_id):
return handler_request.post(
subscription_routes.get_completion_subscription_url(subscription_id)
)
| 25.658537
| 80
| 0.794677
|
from juno.resources import handler_request
from juno.resources.routes import subscription_routes
def create(dictionary):
return handler_request.post(subscription_routes.get_base_url(), dictionary)
def find_all():
return handler_request.get(subscription_routes.get_base_url())
def find_by_id(subscription_id):
return handler_request.get(
subscription_routes.get_specific_subscription_by_id_url(subscription_id)
)
def deactivation(subscription_id):
return handler_request.post(
subscription_routes.get_deactivation_subscription_url(subscription_id)
)
def activation(subscription_id):
return handler_request.post(
subscription_routes.get_activation_subscription_url(subscription_id)
)
def cancelation(subscription_id):
return handler_request.post(
subscription_routes.get_cancelation_subscription_url(subscription_id)
)
def completion(subscription_id):
return handler_request.post(
subscription_routes.get_completion_subscription_url(subscription_id)
)
| true
| true
|
790763e76e04babab4a4fb2c61b512d111fae7a9
| 525
|
py
|
Python
|
raritan/rpc/Time.py
|
daxm/raritan-pdu-json-rpc
|
9593d165290e93db5676c884aac138aebb983cbd
|
[
"BSD-3-Clause"
] | null | null | null |
raritan/rpc/Time.py
|
daxm/raritan-pdu-json-rpc
|
9593d165290e93db5676c884aac138aebb983cbd
|
[
"BSD-3-Clause"
] | null | null | null |
raritan/rpc/Time.py
|
daxm/raritan-pdu-json-rpc
|
9593d165290e93db5676c884aac138aebb983cbd
|
[
"BSD-3-Clause"
] | 2
|
2021-02-24T00:45:25.000Z
|
2021-11-29T17:27:19.000Z
|
import time, calendar
from datetime import datetime
#
# Decodes UNIX timestamp (UTC secs since epoch) to python datetime and vice versa.
#
class Time(datetime):
def __new__(cls, *x):
return datetime.__new__(cls, *x)
@staticmethod
def decode(json):
assert isinstance(json, int)
return Time.utcfromtimestamp(json)
def encode(self):
timestamp = calendar.timegm(self.utctimetuple())
return timestamp
def __str__(self):
return self.isoformat(" ") + " (UTC)"
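# Illustrative round-trip sketch (not part of the original module): a UNIX
# timestamp decoded into a Time instance and encoded back to the same integer.
def _example_roundtrip(timestamp=1600000000):
    t = Time.decode(timestamp)
    assert t.encode() == timestamp
    return t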
| 23.863636
| 82
| 0.659048
|
import time, calendar
from datetime import datetime
class Time(datetime):
def __new__(cls, *x):
return datetime.__new__(cls, *x)
@staticmethod
def decode(json):
assert isinstance(json, int)
return Time.utcfromtimestamp(json)
def encode(self):
timestamp = calendar.timegm(self.utctimetuple())
return timestamp
def __str__(self):
return self.isoformat(" ") + " (UTC)"
| true
| true
|
790764e5b70291140639d8ff500555e96996eba0
| 75,990
|
py
|
Python
|
src/pulp/pulp.py
|
ruxkor/pulp-or
|
94f3cbe182e8adbd52bf996623f1f5e0ceb8e5ad
|
[
"MIT"
] | 2
|
2016-01-12T15:56:56.000Z
|
2019-09-05T07:13:29.000Z
|
src/pulp/pulp.py
|
ruxkor/pulp-or
|
94f3cbe182e8adbd52bf996623f1f5e0ceb8e5ad
|
[
"MIT"
] | null | null | null |
src/pulp/pulp.py
|
ruxkor/pulp-or
|
94f3cbe182e8adbd52bf996623f1f5e0ceb8e5ad
|
[
"MIT"
] | 7
|
2015-02-09T04:36:15.000Z
|
2020-01-04T15:21:14.000Z
|
#! /usr/bin/env python
# PuLP : Python LP Modeler
# Version 1.5.1
# Copyright (c) 2002-2005, Jean-Sebastien Roy (js@jeannot.org)
# Modifications Copyright (c) 2007- Stuart Anthony Mitchell (s.mitchell@auckland.ac.nz)
# $Id: pulp.py 1791 2008-04-23 22:54:34Z smit023 $
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
PuLP is an LP modeler written in python. PuLP can generate MPS or LP files
and call GLPK[1], COIN CLP/CBC[2], CPLEX[3], and GUROBI[4] to solve linear
problems.
See the examples directory for examples.
PuLP requires Python >= 2.5.
The examples require at least a solver in your PATH or a shared library file.
Documentation is found on https://www.coin-or.org/PuLP/.
A comprehensive wiki can be found at https://www.coin-or.org/PuLP/
Use LpVariable() to create new variables. To create a variable 0 <= x <= 3
>>> x = LpVariable("x", 0, 3)
To create a variable 0 <= y <= 1
>>> y = LpVariable("y", 0, 1)
Use LpProblem() to create new problems. Create "myProblem"
>>> prob = LpProblem("myProblem", LpMinimize)
Combine variables to create expressions and constraints and add them to the
problem.
>>> prob += x + y <= 2
If you add an expression (not a constraint), it will
become the objective.
>>> prob += -4*x + y
Choose a solver and solve the problem. ex:
>>> status = prob.solve(GLPK(msg = 0))
Display the status of the solution
>>> LpStatus[status]
'Optimal'
You can get the value of the variables using value(). ex:
>>> value(x)
2.0
Exported Classes:
- LpProblem -- Container class for a Linear programming problem
- LpVariable -- Variables that are added to constraints in the LP
- LpConstraint -- A constraint of the general form
a1x1+a2x2 ...anxn (<=, =, >=) b
- LpConstraintVar -- Used to construct a column of the model in column-wise
modelling
Exported Functions:
- value() -- Finds the value of a variable or expression
- lpSum() -- given a list of the form [a1*x1, a2x2, ..., anxn] will construct
a linear expression to be used as a constraint or variable
- lpDot() -- given two lists of the form [a1, a2, ..., an] and
              [ x1, x2, ..., xn] will construct a linear expression to be used
as a constraint or variable
Comments, bug reports, patches and suggestions are welcome.
pulp-or-discuss@googlegroups.com
References:
[1] http://www.gnu.org/software/glpk/glpk.html
[2] http://www.coin-or.org/
[3] http://www.cplex.com/
[4] http://www.gurobi.com/
"""
import types
import string
import itertools
from constants import *
from solvers import *
from types import GeneratorType
_DICT_TYPE = dict
if sys.platform not in ['cli']:
# iron python does not like an OrderedDict
try:
from odict import OrderedDict
_DICT_TYPE = OrderedDict
except ImportError:
pass
try:
#python 2.7 or 3.1
from collections import OrderedDict
_DICT_TYPE = OrderedDict
except ImportError:
pass
def setConfigInformation(**keywords):
"""
set the data in the configuration file
at the moment will only edit things in [locations]
the keyword value pairs come from the keywords dictionary
"""
#TODO: extend if we ever add another section in the config file
#read the old configuration
config = ConfigParser.SafeConfigParser()
config.read(config_filename)
#set the new keys
for (key,val) in keywords.items():
config.set("locations",key,val)
#write the new configuration
fp = open(config_filename,"w")
config.write(fp)
fp.close()
# Default solver selection
if PULP_CBC_CMD().available():
LpSolverDefault = PULP_CBC_CMD()
elif GLPK_CMD().available():
LpSolverDefault = GLPK_CMD()
elif COIN_CMD().available():
LpSolverDefault = COIN_CMD()
else:
LpSolverDefault = None
class LpElement(object):
"""Base class for LpVariable and LpConstraintVar
"""
#to remove illegal characters from the names
trans = string.maketrans("-+[] ->/","________")
def setName(self,name):
if name:
self.__name = str(name).translate(self.trans)
else:
self.__name = None
def getName(self):
return self.__name
name = property(fget = getName,fset = setName)
def __init__(self, name):
self.name = name
# self.hash MUST be different for each variable
# else dict() will call the comparison operators that are overloaded
self.hash = id(self)
self.modified = True
def __hash__(self):
return self.hash
def __str__(self):
return self.name
def __repr__(self):
return self.name
def __neg__(self):
return - LpAffineExpression(self)
def __pos__(self):
return self
def __nonzero__(self):
return 1
def __add__(self, other):
return LpAffineExpression(self) + other
def __radd__(self, other):
return LpAffineExpression(self) + other
def __sub__(self, other):
return LpAffineExpression(self) - other
def __rsub__(self, other):
return other - LpAffineExpression(self)
def __mul__(self, other):
return LpAffineExpression(self) * other
def __rmul__(self, other):
return LpAffineExpression(self) * other
def __div__(self, other):
return LpAffineExpression(self)/other
def __rdiv__(self, other):
raise TypeError, "Expressions cannot be divided by a variable"
def __le__(self, other):
return LpAffineExpression(self) <= other
def __ge__(self, other):
return LpAffineExpression(self) >= other
def __eq__(self, other):
return LpAffineExpression(self) == other
def __ne__(self, other):
if isinstance(other, LpVariable):
return self.name is not other.name
elif isinstance(other, LpAffineExpression):
if other.isAtomic():
return self is not other.atom()
else:
return 1
else:
return 1
class LpVariable(LpElement):
"""
This class models an LP Variable with the specified associated parameters
:param name: The name of the variable used in the output .lp file
    :param lowBound: The lower bound on this variable's range.
Default is negative infinity
:param upBound: The upper bound on this variable's range.
Default is positive infinity
:param cat: The category this variable is in, Integer, Binary or
Continuous(default)
:param e: Used for column based modelling: relates to the variable's
existence in the objective function and constraints
"""
def __init__(self, name, lowBound = None, upBound = None,
cat = LpContinuous, e = None):
LpElement.__init__(self,name)
self.lowBound = lowBound
self.upBound = upBound
self.cat = cat
self.varValue = None
self.init = 0
#code to add a variable to constraints for column based
# modelling
if cat == LpBinary:
self.lowBound = 0
self.upBound = 1
self.cat = LpInteger
if e:
self.add_expression(e)
def add_expression(self,e):
self.expression = e
self.addVariableToConstraints(e)
@classmethod
def matrix(cls, name, indexs, lowBound = None, upBound = None, cat = LpContinuous,
indexStart = []):
if not isinstance(indexs, tuple): indexs = (indexs,)
if "%" not in name: name += "_%s" * len(indexs)
index = indexs[0]
indexs = indexs[1:]
if len(indexs) == 0:
return [
LpVariable(name % tuple(indexStart + [i]), lowBound, upBound, cat)
for i in index
]
else:
return [
LpVariable.matrix(name, indexs, lowBound, upBound, cat, indexStart + [i])
for i in index
]
@classmethod
def dicts(cls, name, indexs, lowBound = None, upBound = None, cat = LpContinuous,
indexStart = []):
"""
Creates a dictionary of LP variables
This function creates a dictionary of LP Variables with the specified
associated parameters.
:param name: The prefix to the name of each LP variable created
:param indexs: A list of strings of the keys to the dictionary of LP
variables, and the main part of the variable name itself
        :param lowBound: The lower bound on these variables' range. Default is
negative infinity
:param upBound: The upper bound on these variables' range. Default is
positive infinity
:param cat: The category these variables are in, Integer or
Continuous(default)
:return: A dictionary of LP Variables
"""
if not isinstance(indexs, tuple): indexs = (indexs,)
if "%" not in name: name += "_%s" * len(indexs)
index = indexs[0]
indexs = indexs[1:]
d = {}
if len(indexs) == 0:
for i in index:
d[i] = LpVariable(name % tuple(indexStart + [str(i)]), lowBound, upBound, cat)
else:
for i in index:
d[i] = LpVariable.dicts(name, indexs, lowBound, upBound, cat, indexStart + [i])
return d
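    # Illustrative sketch (not part of the original source): LpVariable.dicts with
    # a hypothetical key list builds one variable per key, named "<prefix>_<key>".
    #   >>> x = LpVariable.dicts("route", ["A", "B"], lowBound=0)
    #   >>> sorted(v.name for v in x.values())
    #   ['route_A', 'route_B']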
@classmethod
def dict(cls, name, indexs, lowBound = None, upBound = None, cat = LpContinuous):
if not isinstance(indexs, tuple): indexs = (indexs,)
if "%" not in name: name += "_%s" * len(indexs)
lists = indexs
if len(indexs)>1:
# Cartesian product
res = []
while len(lists):
first = lists[-1]
nres = []
if res:
if first:
for f in first:
nres.extend([[f]+r for r in res])
else:
nres = res
res = nres
else:
res = [[f] for f in first]
lists = lists[:-1]
index = [tuple(r) for r in res]
elif len(indexs) == 1:
index = indexs[0]
else:
return {}
d = dict((i, cls(name % i, lowBound, upBound, cat)) for i in index)
return d
def getLb(self):
return self.lowBound
def getUb(self):
return self.upBound
def bounds(self, low, up):
self.lowBound = low
self.upBound = up
def positive(self):
self.lowBound = 0
self.upBound = None
def value(self):
return self.varValue
def round(self, epsInt = 1e-5, eps = 1e-7):
if self.varValue is not None:
if self.upBound != None and self.varValue > self.upBound and self.varValue <= self.upBound + eps:
self.varValue = self.upBound
elif self.lowBound != None and self.varValue < self.lowBound and self.varValue >= self.lowBound - eps:
self.varValue = self.lowBound
if self.cat == LpInteger and abs(round(self.varValue) - self.varValue) <= epsInt:
self.varValue = round(self.varValue)
def roundedValue(self, eps = 1e-5):
if self.cat == LpInteger and self.varValue != None \
and abs(self.varValue - round(self.varValue)) <= eps:
return round(self.varValue)
else:
return self.varValue
def valueOrDefault(self):
if self.varValue != None:
return self.varValue
elif self.lowBound != None:
if self.upBound != None:
if 0 >= self.lowBound and 0 <= self.upBound:
return 0
else:
if self.lowBound >= 0:
return self.lowBound
else:
return self.upBound
else:
if 0 >= self.lowBound:
return 0
else:
return self.lowBound
elif self.upBound != None:
if 0 <= self.upBound:
return 0
else:
return self.upBound
else:
return 0
def valid(self, eps):
if self.varValue == None: return False
if self.upBound != None and self.varValue > self.upBound + eps:
return False
if self.lowBound != None and self.varValue < self.lowBound - eps:
return False
if self.cat == LpInteger and abs(round(self.varValue) - self.varValue) > eps:
return False
return True
def infeasibilityGap(self, mip = 1):
if self.varValue == None: raise ValueError, "variable value is None"
if self.upBound != None and self.varValue > self.upBound:
return self.varValue - self.upBound
if self.lowBound != None and self.varValue < self.lowBound:
return self.varValue - self.lowBound
if mip and self.cat == LpInteger and round(self.varValue) - self.varValue != 0:
return round(self.varValue) - self.varValue
return 0
def isBinary(self):
return self.cat == LpInteger and self.lowBound == 0 and self.upBound == 1
def isInteger(self):
return self.cat == LpInteger
def isFree(self):
return self.lowBound == None and self.upBound == None
def isConstant(self):
return self.lowBound != None and self.upBound == self.lowBound
def isPositive(self):
return self.lowBound == 0 and self.upBound == None
def asCplexLpVariable(self):
if self.isFree(): return self.name + " free"
if self.isConstant(): return self.name + " = %.12g" % self.lowBound
if self.lowBound == None:
s= "-inf <= "
# Note: XPRESS and CPLEX do not interpret integer variables without
# explicit bounds
elif (self.lowBound == 0 and self.cat == LpContinuous):
s = ""
else:
s= "%.12g <= " % self.lowBound
s += self.name
if self.upBound != None:
s+= " <= %.12g" % self.upBound
return s
def asCplexLpAffineExpression(self, name, constant = 1):
return LpAffineExpression(self).asCplexLpAffineExpression(name, constant)
def __ne__(self, other):
if isinstance(other, LpElement):
return self.name is not other.name
elif isinstance(other, LpAffineExpression):
if other.isAtomic():
return self is not other.atom()
else:
return 1
else:
return 1
def addVariableToConstraints(self,e):
"""adds a variable to the constraints indicated by
the LpConstraintVars in e
"""
for constraint, coeff in e.items():
constraint.addVariable(self,coeff)
def setInitialValue(self,val):
"""sets the initial value of the Variable to val
        may or may not be supported by the solver
"""
raise NotImplementedError
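# Illustrative sketch (not part of the original source): creating variables with
# the parameters described in the LpVariable docstring. Note that LpBinary is
# stored internally as an LpInteger with bounds [0, 1].
#   >>> x = LpVariable("x", lowBound=0, upBound=10, cat=LpInteger)
#   >>> b = LpVariable("b", cat=LpBinary)
#   >>> b.isBinary()
#   True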
class LpAffineExpression(_DICT_TYPE):
"""
A linear combination of :class:`LpVariables<LpVariable>`.
Can be initialised with the following:
#. e = None: an empty Expression
#. e = dict: gives an expression with the values being the coefficients of the keys (order of terms is undetermined)
#. e = list or generator of 2-tuples: equivalent to dict.items()
#. e = LpElement: an expression of length 1 with the coefficient 1
#. e = other: the constant is initialised as e
Examples:
>>> f=LpAffineExpression(LpElement('x'))
>>> f
1*x + 0
>>> x_name = ['x_0', 'x_1', 'x_2']
>>> x = [LpVariable(x_name[i], lowBound = 0, upBound = 10) for i in range(3) ]
>>> c = LpAffineExpression([ (x[0],1), (x[1],-3), (x[2],4)])
>>> c
1*x_0 + -3*x_1 + 4*x_2 + 0
"""
#to remove illegal characters from the names
trans = string.maketrans("-+[] ","_____")
def setName(self,name):
if name:
self.__name = str(name).translate(self.trans)
else:
self.__name = None
def getName(self):
return self.__name
name = property(fget=getName, fset=setName)
def __init__(self, e = None, constant = 0, name = None):
self.name = name
#TODO remove isinstance usage
if e is None:
e = {}
if isinstance(e, LpAffineExpression):
# Will not copy the name
self.constant = e.constant
super(LpAffineExpression, self).__init__(e.items())
elif isinstance(e, dict):
self.constant = constant
super(LpAffineExpression, self).__init__(e.items())
elif isinstance(e, list) or isinstance(e, GeneratorType):
self.constant = constant
super(LpAffineExpression, self).__init__(e)
elif isinstance(e,LpElement):
self.constant = 0
super(LpAffineExpression, self).__init__( [(e, 1)])
else:
self.constant = e
super(LpAffineExpression, self).__init__()
# Proxy functions for variables
def isAtomic(self):
return len(self) == 1 and self.constant == 0 and self.values()[0] == 1
def isNumericalConstant(self):
return len(self) == 0
def atom(self):
return self.keys()[0]
# Functions on expressions
def __nonzero__(self):
return float(self.constant) != 0 or len(self)
def value(self):
s = self.constant
for v,x in self.iteritems():
if v.varValue is None:
return None
s += v.varValue * x
return s
def valueOrDefault(self):
s = self.constant
for v,x in self.iteritems():
s += v.valueOrDefault() * x
return s
def addterm(self, key, value):
y = self.get(key, 0)
if y:
y += value
self[key] = y
else:
self[key] = value
def emptyCopy(self):
return LpAffineExpression()
def copy(self):
"""Make a copy of self except the name which is reset"""
# Will not copy the name
return LpAffineExpression(self)
def __str__(self, constant = 1):
s = ""
for v in self.sorted_keys():
val = self[v]
if val<0:
if s != "": s += " - "
else: s += "-"
val = -val
elif s != "": s += " + "
if val == 1: s += str(v)
else: s += str(val) + "*" + str(v)
if constant:
if s == "":
s = str(self.constant)
else:
if self.constant < 0: s += " - " + str(-self.constant)
elif self.constant > 0: s += " + " + str(self.constant)
elif s == "":
s = "0"
return s
def sorted_keys(self):
"""
returns the list of keys sorted by name
"""
result = [(v.name, v) for v in self.keys()]
result.sort()
result = [v for _, v in result]
return result
def __repr__(self):
l = [str(self[v]) + "*" + str(v)
for v in self.sorted_keys()]
l.append(str(self.constant))
s = " + ".join(l)
return s
@staticmethod
def _count_characters(line):
#counts the characters in a list of strings
return sum(len(t) for t in line)
def asCplexVariablesOnly(self, name):
"""
helper for asCplexLpAffineExpression
"""
result = []
line = ["%s:" % name]
notFirst = 0
variables = self.sorted_keys()
for v in variables:
val = self[v]
if val < 0:
sign = " -"
val = -val
elif notFirst:
sign = " +"
else:
sign = ""
notFirst = 1
if val == 1:
term = "%s %s" %(sign, v.name)
else:
term = "%s %.12g %s" % (sign, val, v.name)
if self._count_characters(line) + len(term) > LpCplexLPLineSize:
result += ["".join(line)]
line = [term]
else:
line += [term]
return result, line
def asCplexLpAffineExpression(self, name, constant = 1):
"""
returns a string that represents the Affine Expression in lp format
"""
#refactored to use a list for speed in iron python
result, line = self.asCplexVariablesOnly(name)
if not self:
term = " %s" % self.constant
else:
term = ""
if constant:
if self.constant < 0:
term = " - %s" % (-self.constant)
elif self.constant > 0:
term = " + %s" % self.constant
if self._count_characters(line) + len(term) > LpCplexLPLineSize:
result += ["".join(line)]
line += [term]
else:
line += [term]
result += ["".join(line)]
result = "%s\n" % "\n".join(result)
return result
def addInPlace(self, other):
if other is 0: return self
if other is None: return self
if isinstance(other,LpElement):
self.addterm(other, 1)
elif (isinstance(other,list)
or isinstance(other,types.GeneratorType)):
for e in other:
self.addInPlace(e)
elif isinstance(other,LpAffineExpression):
self.constant += other.constant
for v,x in other.iteritems():
self.addterm(v, x)
elif isinstance(other,dict):
for e in other.itervalues():
self.addInPlace(e)
else:
self.constant += other
return self
def subInPlace(self, other):
if other is 0: return self
if other is None: return self
if isinstance(other,LpElement):
self.addterm(other, -1)
elif (isinstance(other,list)
or isinstance(other,types.GeneratorType)):
for e in other:
self.subInPlace(e)
elif isinstance(other,LpAffineExpression):
self.constant -= other.constant
for v,x in other.iteritems():
self.addterm(v, -x)
elif isinstance(other,dict):
for e in other.itervalues():
self.subInPlace(e)
else:
self.constant -= other
return self
def __neg__(self):
e = self.emptyCopy()
e.constant = - self.constant
for v,x in self.iteritems():
e[v] = - x
return e
def __pos__(self):
return self
def __add__(self, other):
return self.copy().addInPlace(other)
def __radd__(self, other):
return self.copy().addInPlace(other)
def __sub__(self, other):
return self.copy().subInPlace(other)
def __rsub__(self, other):
return (-self).addInPlace(other)
def __mul__(self, other):
e = self.emptyCopy()
if isinstance(other,LpAffineExpression):
e.constant = self.constant * other.constant
if len(other):
if len(self):
raise TypeError, "Non-constant expressions cannot be multiplied"
else:
c = self.constant
if c != 0:
for v,x in other.iteritems():
e[v] = c * x
else:
c = other.constant
if c != 0:
for v,x in self.iteritems():
e[v] = c * x
elif isinstance(other,LpVariable):
return self * LpAffineExpression(other)
else:
if other != 0:
e.constant = self.constant * other
for v,x in self.iteritems():
e[v] = other * x
return e
def __rmul__(self, other):
return self * other
def __div__(self, other):
if isinstance(other,LpAffineExpression) or isinstance(other,LpVariable):
if len(other):
raise TypeError, "Expressions cannot be divided by a non-constant expression"
other = other.constant
e = self.emptyCopy()
e.constant = self.constant / other
for v,x in self.iteritems():
e[v] = x / other
return e
def __rdiv__(self, other):
e = self.emptyCopy()
if len(self):
raise TypeError, "Expressions cannot be divided by a non-constant expression"
c = self.constant
if isinstance(other,LpAffineExpression):
e.constant = other.constant / c
for v,x in other.iteritems():
e[v] = x / c
else:
e.constant = other / c
return e
def __le__(self, other):
return LpConstraint(self - other, LpConstraintLE)
def __ge__(self, other):
return LpConstraint(self - other, LpConstraintGE)
def __eq__(self, other):
return LpConstraint(self - other, LpConstraintEQ)
class LpConstraint(LpAffineExpression):
"""An LP constraint"""
def __init__(self, e = None, sense = LpConstraintEQ,
name = None, rhs = None):
"""
:param e: an instance of :class:`LpAffineExpression`
:param sense: one of :data:`~pulp.constants.LpConstraintEQ`, :data:`~pulp.constants.LpConstraintGE`, :data:`~pulp.constants.LpConstraintLE` (0, 1, -1 respectively)
:param name: identifying string
:param rhs: numerical value of constraint target
"""
LpAffineExpression.__init__(self, e, name = name)
if rhs is not None:
self.constant = - rhs
self.sense = sense
self.modified = True
def getLb(self):
if ( (self.sense == LpConstraintGE) or
(self.sense == LpConstraintEQ) ):
return -self.constant
else:
return None
def getUb(self):
if ( (self.sense == LpConstraintLE) or
(self.sense == LpConstraintEQ) ):
return -self.constant
else:
return None
def __str__(self):
s = LpAffineExpression.__str__(self, 0)
if self.sense:
s += " " + LpConstraintSenses[self.sense] + " " + str(-self.constant)
return s
def asCplexLpConstraint(self, name):
"""
Returns a constraint as a string
"""
result, line = self.asCplexVariablesOnly(name)
if not self.keys():
line += ["0"]
c = -self.constant
if c == 0:
            c = 0 # Suppress sign
term = " %s %.12g" % (LpConstraintSenses[self.sense], c)
if self._count_characters(line)+len(term) > LpCplexLPLineSize:
result += ["".join(line)]
line = [term]
else:
line += [term]
result += ["".join(line)]
result = "%s\n" % "\n".join(result)
return result
def changeRHS(self, RHS):
"""
alters the RHS of a constraint so that it can be modified in a resolve
"""
self.constant = -RHS
self.modified = True
def __repr__(self):
s = LpAffineExpression.__repr__(self)
if self.sense is not None:
s += " " + LpConstraintSenses[self.sense] + " 0"
return s
def copy(self):
"""Make a copy of self"""
return LpConstraint(self, self.sense)
def emptyCopy(self):
return LpConstraint(sense = self.sense)
def addInPlace(self, other):
if isinstance(other,LpConstraint):
if self.sense * other.sense >= 0:
LpAffineExpression.addInPlace(self, other)
self.sense |= other.sense
else:
LpAffineExpression.subInPlace(self, other)
self.sense |= - other.sense
elif isinstance(other,list):
for e in other:
self.addInPlace(e)
else:
LpAffineExpression.addInPlace(self, other)
#raise TypeError, "Constraints and Expressions cannot be added"
return self
def subInPlace(self, other):
if isinstance(other,LpConstraint):
if self.sense * other.sense <= 0:
LpAffineExpression.subInPlace(self, other)
self.sense |= - other.sense
else:
LpAffineExpression.addInPlace(self, other)
self.sense |= other.sense
elif isinstance(other,list):
for e in other:
self.subInPlace(e)
else:
LpAffineExpression.subInPlace(self, other)
#raise TypeError, "Constraints and Expressions cannot be added"
return self
def __neg__(self):
c = LpAffineExpression.__neg__(self)
c.sense = - c.sense
return c
def __add__(self, other):
return self.copy().addInPlace(other)
def __radd__(self, other):
return self.copy().addInPlace(other)
def __sub__(self, other):
return self.copy().subInPlace(other)
def __rsub__(self, other):
return (-self).addInPlace(other)
def __mul__(self, other):
if isinstance(other,LpConstraint):
c = LpAffineExpression.__mul__(self, other)
if c.sense == 0:
c.sense = other.sense
elif other.sense != 0:
c.sense *= other.sense
return c
else:
return LpAffineExpression.__mul__(self, other)
def __rmul__(self, other):
return self * other
def __div__(self, other):
if isinstance(other,LpConstraint):
c = LpAffineExpression.__div__(self, other)
if c.sense == 0:
c.sense = other.sense
elif other.sense != 0:
c.sense *= other.sense
return c
else:
return LpAffineExpression.__mul__(self, other)
def __rdiv__(self, other):
if isinstance(other,LpConstraint):
c = LpAffineExpression.__rdiv__(self, other)
if c.sense == 0:
c.sense = other.sense
elif other.sense != 0:
c.sense *= other.sense
return c
else:
return LpAffineExpression.__mul__(self, other)
def valid(self, eps = 0):
val = self.value()
if self.sense == LpConstraintEQ: return abs(val) <= eps
else: return val * self.sense >= - eps
def makeElasticSubProblem(self, *args, **kwargs):
"""
Builds an elastic subproblem by adding variables to a hard constraint
uses FixedElasticSubProblem
"""
return FixedElasticSubProblem(self, *args, **kwargs)
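# Illustrative sketch (not part of the original source): relaxing a hard
# constraint into an elastic subproblem; 'x', 'y' and 'prob' are hypothetical.
#   >>> c = LpConstraint(x + y, LpConstraintLE, name="capacity", rhs=10)
#   >>> elastic = c.makeElasticSubProblem(penalty=100, proportionFreeBound=0.05)
#   >>> prob.extend(elastic)    # add the penalised pieces to an existing problem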
class LpFractionConstraint(LpConstraint):
"""
Creates a constraint that enforces a fraction requirement a/b = c
"""
def __init__(self, numerator, denominator = None, sense = LpConstraintEQ,
RHS = 1.0, name = None,
complement = None):
"""
creates a fraction Constraint to model constraints of
the nature
numerator/denominator {==, >=, <=} RHS
numerator/(numerator + complement) {==, >=, <=} RHS
:param numerator: the top of the fraction
:param denominator: as described above
:param sense: the sense of the relation of the constraint
:param RHS: the target fraction value
:param complement: as described above
"""
self.numerator = numerator
if denominator is None and complement is not None:
self.complement = complement
self.denominator = numerator + complement
elif denominator is not None and complement is None:
self.denominator = denominator
self.complement = denominator - numerator
else:
self.denominator = denominator
self.complement = complement
lhs = self.numerator - RHS * self.denominator
LpConstraint.__init__(self, lhs,
sense = sense, rhs = 0, name = name)
self.RHS = RHS
def findLHSValue(self):
"""
Determines the value of the fraction in the constraint after solution
"""
if abs(value(self.denominator))>= EPS:
return value(self.numerator)/value(self.denominator)
else:
if abs(value(self.numerator))<= EPS:
#zero divided by zero will return 1
return 1.0
else:
raise ZeroDivisionError
def makeElasticSubProblem(self, *args, **kwargs):
"""
Builds an elastic subproblem by adding variables and splitting the
hard constraint
uses FractionElasticSubProblem
"""
return FractionElasticSubProblem(self, *args, **kwargs)
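# Illustrative sketch (not part of the original source): requiring that a
# hypothetical variable x make up at least 30% of x + y in a hypothetical prob.
#   >>> frac = LpFractionConstraint(x, complement=y, sense=LpConstraintGE, RHS=0.3)
#   >>> prob += frac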
class LpConstraintVar(LpElement):
"""A Constraint that can be treated as a variable when constructing
a LpProblem by columns
"""
def __init__(self, name = None ,sense = None,
rhs = None, e = None):
LpElement.__init__(self,name)
self.constraint = LpConstraint(name = self.name, sense = sense,
rhs = rhs , e = e)
def addVariable(self, var, coeff):
"""
Adds a variable to the constraint with the
activity coeff
"""
self.constraint.addterm(var, coeff)
def value(self):
return self.constraint.value()
class LpProblem(object):
"""An LP Problem"""
def __init__(self, name = "NoName", sense = LpMinimize):
"""
Creates an LP Problem
This function creates a new LP Problem with the specified associated parameters
:param name: name of the problem used in the output .lp file
:param sense: of the LP problem objective. \
Either :data:`~pulp.constants.LpMinimize` (default) \
or :data:`~pulp.constants.LpMaximize`.
:return: An LP Problem
"""
self.objective = None
self.constraints = _DICT_TYPE()
self.name = name
self.sense = sense
self.sos1 = {}
self.sos2 = {}
self.status = LpStatusNotSolved
self.noOverlap = 1
self.solver = None
self.initialValues = {}
self.resolveOK = False
self._variables = []
self._variable_ids = {} #old school using dict.keys() for a set
self.dummyVar = None
# locals
self.lastUnused = 0
def __repr__(self):
string = self.name+":\n"
if self.sense == 1:
string += "MINIMIZE\n"
else:
string += "MAXIMIZE\n"
string += repr(self.objective) +"\n"
if self.constraints:
string += "SUBJECT TO\n"
for n, c in self.constraints.iteritems():
string += c.asCplexLpConstraint(n) +"\n"
string += "VARIABLES\n"
for v in self.variables():
string += v.asCplexLpVariable() + " " + LpCategories[v.cat] + "\n"
return string
def copy(self):
"""Make a copy of self. Expressions are copied by reference"""
lpcopy = LpProblem(name = self.name, sense = self.sense)
lpcopy.objective = self.objective
lpcopy.constraints = self.constraints.copy()
lpcopy.sos1 = self.sos1.copy()
lpcopy.sos2 = self.sos2.copy()
return lpcopy
def deepcopy(self):
"""Make a copy of self. Expressions are copied by value"""
lpcopy = LpProblem(name = self.name, sense = self.sense)
if self.objective is not None:
lpcopy.objective = self.objective.copy()
lpcopy.constraints = {}
for k,v in self.constraints.iteritems():
lpcopy.constraints[k] = v.copy()
lpcopy.sos1 = self.sos1.copy()
lpcopy.sos2 = self.sos2.copy()
return lpcopy
def normalisedNames(self):
constraintsNames = {}
i = 0
for k in self.constraints:
constraintsNames[k] = "C%07d" % i
i += 1
variablesNames = {}
i = 0
for k in self.variables():
variablesNames[k.name] = "X%07d" % i
i += 1
return constraintsNames, variablesNames, "OBJ"
def isMIP(self):
for v in self.variables():
if v.cat == LpInteger: return 1
return 0
def roundSolution(self, epsInt = 1e-5, eps = 1e-7):
"""
Rounds the lp variables
Inputs:
- none
Side Effects:
- The lp variables are rounded
"""
for v in self.variables():
v.round(epsInt, eps)
def unusedConstraintName(self):
self.lastUnused += 1
while 1:
s = "_C%d" % self.lastUnused
if s not in self.constraints: break
self.lastUnused += 1
return s
def valid(self, eps = 0):
for v in self.variables():
if not v.valid(eps): return False
for c in self.constraints.itervalues():
if not c.valid(eps): return False
else:
return True
def infeasibilityGap(self, mip = 1):
gap = 0
for v in self.variables():
gap = max(abs(v.infeasibilityGap(mip)), gap)
for c in self.constraints.itervalues():
if not c.valid(0):
gap = max(abs(c.value()), gap)
return gap
def addVariable(self, variable):
"""
Adds a variable to the problem before a constraint is added
@param variable: the variable to be added
"""
if id(variable) not in self._variable_ids:
self._variables.append(variable)
self._variable_ids[id(variable)] = variable
def addVariables(self, variables):
"""
Adds variables to the problem before a constraint is added
@param variables: the variables to be added
"""
for v in variables:
self.addVariable(v)
def variables(self):
"""
Returns a list of the problem variables
Inputs:
- none
Returns:
- A list of the problem variables
"""
if self.objective:
self.addVariables(self.objective.keys())
for c in self.constraints.itervalues():
self.addVariables(c.keys())
variables = self._variables
        #sort the variables DSU
variables = [[v.name, v] for v in variables]
variables.sort()
variables = [v for _, v in variables]
return variables
def variablesDict(self):
variables = {}
if self.objective:
for v in self.objective:
variables[v.name] = v
for c in self.constraints.values():
for v in c:
variables[v.name] = v
return variables
def add(self, constraint, name = None):
self.addConstraint(constraint, name)
def addConstraint(self, constraint, name = None):
if not isinstance(constraint, LpConstraint):
raise TypeError, "Can only add LpConstraint objects"
if name:
constraint.name = name
try:
if constraint.name:
name = constraint.name
else:
name = self.unusedConstraintName()
except AttributeError:
raise TypeError, "Can only add LpConstraint objects"
#removed as this test fails for empty constraints
# if len(constraint) == 0:
# if not constraint.valid():
# raise ValueError, "Cannot add false constraints"
if name in self.constraints:
if self.noOverlap:
raise PulpError, "overlapping constraint names: " + name
else:
print "Warning: overlapping constraint names:", name
self.constraints[name] = constraint
self.addVariables(constraint.keys())
def setObjective(self,obj):
"""
Sets the input variable as the objective function. Used in Columnwise Modelling
:param obj: the objective function of type :class:`LpConstraintVar`
Side Effects:
- The objective function is set
"""
if isinstance(obj, LpVariable):
# allows the user to add a LpVariable as an objective
obj = obj + 0.0
try:
obj = obj.constraint
name = obj.name
except AttributeError:
name = None
self.objective = obj
self.objective.name = name
self.resolveOK = False
def __iadd__(self, other):
if isinstance(other, tuple):
other, name = other
else:
name = None
if other is True:
return self
if isinstance(other, LpConstraintVar):
self.addConstraint(other.constraint)
elif isinstance(other, LpConstraint):
self.addConstraint(other, name)
elif isinstance(other, LpAffineExpression):
self.objective = other
self.objective.name = name
elif isinstance(other, LpVariable) or type(other) in [int, float]:
self.objective = LpAffineExpression(other)
self.objective.name = name
else:
raise TypeError, "Can only add LpConstraintVar, LpConstraint, LpAffineExpression or True objects"
return self
def extend(self, other, use_objective = True):
"""
        extends an LpProblem by adding constraints either from a dictionary,
        a tuple or another LpProblem object.
@param use_objective: determines whether the objective is imported from
the other problem
For dictionaries the constraints will be named with the keys
        For tuples a unique name will be generated
For LpProblems the name of the problem will be added to the constraints
name
"""
if isinstance(other, dict):
for name in other:
self.constraints[name] = other[name]
elif isinstance(other, LpProblem):
for v in set(other.variables()).difference(self.variables()):
v.name = other.name + v.name
for name,c in other.constraints.iteritems():
c.name = other.name + name
self.addConstraint(c)
if use_objective:
self.objective += other.objective
else:
for c in other:
if isinstance(c,tuple):
name = c[0]
c = c[1]
else:
name = None
if not name: name = c.name
if not name: name = self.unusedConstraintName()
self.constraints[name] = c
def coefficients(self, translation = None):
coefs = []
if translation == None:
for c in self.constraints:
cst = self.constraints[c]
coefs.extend([(v.name, c, cst[v]) for v in cst])
else:
for c in self.constraints:
ctr = translation[c]
cst = self.constraints[c]
coefs.extend([(translation[v.name], ctr, cst[v]) for v in cst])
return coefs
def writeMPS(self, filename, mpsSense = 0, rename = 0, mip = 1):
wasNone, dummyVar = self.fixObjective()
f = file(filename, "w")
if mpsSense == 0: mpsSense = self.sense
cobj = self.objective
if mpsSense != self.sense:
n = cobj.name
cobj = - cobj
cobj.name = n
if rename:
constraintsNames, variablesNames, cobj.name = self.normalisedNames()
f.write("*SENSE:"+LpSenses[mpsSense]+"\n")
n = self.name
if rename: n = "MODEL"
f.write("NAME "+n+"\n")
vs = self.variables()
# constraints
f.write("ROWS\n")
objName = cobj.name
if not objName: objName = "OBJ"
f.write(" N %s\n" % objName)
mpsConstraintType = {LpConstraintLE:"L", LpConstraintEQ:"E", LpConstraintGE:"G"}
for k,c in self.constraints.iteritems():
if rename: k = constraintsNames[k]
f.write(" "+mpsConstraintType[c.sense]+" "+k+"\n")
# matrix
f.write("COLUMNS\n")
# Creation of a dict of dict:
        # coefs[variableName][constraintName] = coefficient
coefs = {}
for k,c in self.constraints.iteritems():
if rename: k = constraintsNames[k]
for v in c:
n = v.name
if rename: n = variablesNames[n]
if n in coefs:
coefs[n][k] = c[v]
else:
coefs[n] = {k:c[v]}
for v in vs:
if mip and v.cat == LpInteger:
f.write(" MARK 'MARKER' 'INTORG'\n")
n = v.name
if rename: n = variablesNames[n]
if n in coefs:
cv = coefs[n]
# Most of the work is done here
for k in cv: f.write(" %-8s %-8s % .5e\n" % (n,k,cv[k]))
# objective function
if v in cobj: f.write(" %-8s %-8s % .5e\n" % (n,objName,cobj[v]))
if mip and v.cat == LpInteger:
f.write(" MARK 'MARKER' 'INTEND'\n")
# right hand side
f.write("RHS\n")
for k,c in self.constraints.iteritems():
c = -c.constant
if rename: k = constraintsNames[k]
if c == 0: c = 0
f.write(" RHS %-8s % .5e\n" % (k,c))
# bounds
f.write("BOUNDS\n")
for v in vs:
n = v.name
if rename: n = variablesNames[n]
if v.lowBound != None and v.lowBound == v.upBound:
f.write(" FX BND %-8s % .5e\n" % (n, v.lowBound))
elif v.lowBound == 0 and v.upBound == 1 and mip and v.cat == LpInteger:
f.write(" BV BND %-8s\n" % n)
else:
if v.lowBound != None:
# In MPS files, variables with no bounds (i.e. >= 0)
# are assumed BV by COIN and CPLEX.
# So we explicitly write a 0 lower bound in this case.
if v.lowBound != 0 or (mip and v.cat == LpInteger and v.upBound == None):
f.write(" LO BND %-8s % .5e\n" % (n, v.lowBound))
else:
if v.upBound != None:
f.write(" MI BND %-8s\n" % n)
else:
f.write(" FR BND %-8s\n" % n)
if v.upBound != None:
f.write(" UP BND %-8s % .5e\n" % (n, v.upBound))
f.write("ENDATA\n")
f.close()
self.restoreObjective(wasNone, dummyVar)
# returns the variables, in writing order
if rename == 0:
return vs
else:
return vs, variablesNames, constraintsNames, cobj.name
def writeLP(self, filename, writeSOS = 1, mip = 1):
"""
Write the given Lp problem to a .lp file.
This function writes the specifications (objective function,
constraints, variables) of the defined Lp problem to a file.
:param filename: the name of the file to be created.
Side Effects:
- The file is created.
"""
f = file(filename, "w")
f.write("\\* "+self.name+" *\\\n")
if self.sense == 1:
f.write("Minimize\n")
else:
f.write("Maximize\n")
wasNone, dummyVar = self.fixObjective()
objName = self.objective.name
if not objName: objName = "OBJ"
f.write(self.objective.asCplexLpAffineExpression(objName, constant = 0))
f.write("Subject To\n")
ks = self.constraints.keys()
ks.sort()
for k in ks:
constraint = self.constraints[k]
if not constraint.keys():
#empty constraint add the dummyVar
constraint += self.get_dummyVar()
f.write(constraint.asCplexLpConstraint(k))
vs = self.variables()
# check if any names are longer than 100 characters
long_names = [v.name for v in vs if len(v.name) > 100]
if long_names:
raise PulpError('Variable names too long for Lp format\n'
+ str(long_names))
# check for repeated names
repeated_names = {}
for v in vs:
repeated_names[v.name] = repeated_names.get(v.name, 0) + 1
repeated_names = [(key, value) for key, value in repeated_names.items()
if value >= 2]
if repeated_names:
raise PulpError('Repeated variable names in Lp format\n'
+ str(repeated_names))
# Bounds on non-"positive" variables
# Note: XPRESS and CPLEX do not interpret integer variables without
# explicit bounds
if mip:
vg = [v for v in vs if not (v.isPositive() and v.cat == LpContinuous) \
and not v.isBinary()]
else:
vg = [v for v in vs if not v.isPositive()]
if vg:
f.write("Bounds\n")
for v in vg:
f.write("%s\n" % v.asCplexLpVariable())
# Integer non-binary variables
if mip:
vg = [v for v in vs if v.cat == LpInteger and not v.isBinary()]
if vg:
f.write("Generals\n")
for v in vg: f.write("%s\n" % v.name)
# Binary variables
vg = [v for v in vs if v.isBinary()]
if vg:
f.write("Binaries\n")
for v in vg: f.write("%s\n" % v.name)
# Special Ordered Sets
if writeSOS and (self.sos1 or self.sos2):
f.write("SOS\n")
if self.sos1:
for sos in self.sos1.itervalues():
f.write("S1:: \n")
for v,val in sos.iteritems():
f.write(" %s: %.12g\n" % (v.name, val))
if self.sos2:
for sos in self.sos2.itervalues():
f.write("S2:: \n")
for v,val in sos.iteritems():
f.write(" %s: %.12g\n" % (v.name, val))
f.write("End\n")
f.close()
self.restoreObjective(wasNone, dummyVar)
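    # Illustrative sketch (not part of the original source): persisting a model
    # with the two writers defined above; 'prob' is a hypothetical LpProblem.
    #   >>> prob.writeLP("model.lp")
    #   >>> prob.writeMPS("model.mps")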
def assignVarsVals(self, values):
variables = self.variablesDict()
for name in values:
if name != '__dummy':
variables[name].varValue = values[name]
def assignVarsDj(self,values):
variables = self.variablesDict()
for name in values:
if name != '__dummy':
variables[name].dj = values[name]
def assignConsPi(self, values):
for name in values:
self.constraints[name].pi = values[name]
def assignConsSlack(self, values, activity=False):
for name in values:
if activity:
                #reports the activity, not the slack
self.constraints[name].slack = -(self.constraints[name].constant + float(values[name]))
else:
self.constraints[name].slack = float(values[name])
def get_dummyVar(self):
if self.dummyVar is None:
self.dummyVar = LpVariable("__dummy", 0, 0)
return self.dummyVar
def fixObjective(self):
if self.objective is None:
self.objective = 0
wasNone = 1
else:
wasNone = 0
if not isinstance(self.objective, LpAffineExpression):
self.objective = LpAffineExpression(self.objective)
if self.objective.isNumericalConstant():
dummyVar = self.get_dummyVar()
self.objective += dummyVar
else:
dummyVar = None
return wasNone, dummyVar
def restoreObjective(self, wasNone, dummyVar):
if wasNone:
self.objective = None
elif not dummyVar is None:
self.objective -= dummyVar
def solve(self, solver = None, **kwargs):
"""
Solve the given Lp problem.
This function changes the problem to make it suitable for solving
then calls the solver.actualSolve() method to find the solution
:param solver: Optional: the specific solver to be used, defaults to the
default solver.
Side Effects:
- The attributes of the problem object are changed in
:meth:`~pulp.solver.LpSolver.actualSolve()` to reflect the Lp solution
"""
if not(solver): solver = self.solver
if not(solver): solver = LpSolverDefault
wasNone, dummyVar = self.fixObjective()
#time it
self.solutionTime = -clock()
status = solver.actualSolve(self, **kwargs)
self.solutionTime += clock()
self.restoreObjective(wasNone, dummyVar)
self.solver = solver
return status
def sequentialSolve(self, objectives, absoluteTols = None,
relativeTols = None, solver = None, debug = False):
"""
Solve the given Lp problem with several objective functions.
This function sequentially changes the objective of the problem
and then adds the objective function as a constraint
:param objectives: the list of objectives to be used to solve the problem
:param absoluteTols: the list of absolute tolerances to be applied to
            the constraints; should be +ve for a minimise objective
:param relativeTols: the list of relative tolerances applied to the constraints
:param solver: the specific solver to be used, defaults to the default solver.
"""
#TODO Add a penalty variable to make problems elastic
#TODO add the ability to accept different status values i.e. infeasible etc
if not(solver): solver = self.solver
if not(solver): solver = LpSolverDefault
if not(absoluteTols):
absoluteTols = [0] * len(objectives)
if not(relativeTols):
relativeTols = [1] * len(objectives)
#time it
self.solutionTime = -clock()
statuses = []
for i,(obj,absol,rel) in enumerate(zip(objectives, absoluteTols, relativeTols)):
self.setObjective(obj)
status = solver.actualSolve(self)
statuses.append(status)
if debug: self.writeLP("%sSequence.lp"%i)
if self.sense == LpMinimize:
self += obj <= value(obj)*rel + absol,"%s_Sequence_Objective"%i
elif self.sense == LpMaximize:
self += obj >= value(obj)*rel + absol,"%s_Sequence_Objective"%i
self.solutionTime += clock()
self.solver = solver
return statuses
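    # Illustrative sketch (not part of the original source): a two-stage solve
    # where the first objective is locked in (within tolerance) before the second
    # is optimised; 'prob', 'cost' and 'service' are hypothetical.
    #   >>> statuses = prob.sequentialSolve([cost, -service],
    #   ...                                 absoluteTols=[10, 0],
    #   ...                                 relativeTols=[1.05, 1])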
def resolve(self, solver = None, **kwargs):
"""
        resolves a Problem using the same solver as previously
"""
if not(solver): solver = self.solver
if self.resolveOK:
return self.solver.actualResolve(self, **kwargs)
else:
logging.warn('resolve not ok. solving instead')
return self.solve(solver=solver, **kwargs)
def setSolver(self,solver = LpSolverDefault):
"""Sets the Solver for this problem useful if you are using
resolve
"""
self.solver = solver
def setInitial(self,values):
self.initialValues = values
class FixedElasticSubProblem(LpProblem):
"""
Contains the subproblem generated by converting a fixed constraint
:math:`\sum_{i}a_i x_i = b` into an elastic constraint.
:param constraint: The LpConstraint that the elastic constraint is based on
:param penalty: penalty applied for violation (+ve or -ve) of the constraints
:param proportionFreeBound:
the proportional bound (+ve and -ve) on
constraint violation that is free from penalty
:param proportionFreeBoundList: the proportional bound on \
constraint violation that is free from penalty, expressed as a list\
where [-ve, +ve]
"""
def __init__(self, constraint, penalty = None,
proportionFreeBound = None,
proportionFreeBoundList = None):
subProblemName = "%s_elastic_SubProblem" % constraint.name
LpProblem.__init__(self, subProblemName, LpMinimize)
self.objective = LpAffineExpression()
self.constraint = constraint
self.constant = constraint.constant
self.RHS = - constraint.constant
self.objective = LpAffineExpression()
self += constraint, "_Constraint"
#create and add these variables but disabled
self.freeVar = LpVariable("_free_bound",
upBound = 0, lowBound = 0)
self.upVar = LpVariable("_pos_penalty_var",
upBound = 0, lowBound = 0)
self.lowVar = LpVariable("_neg_penalty_var",
upBound = 0, lowBound = 0)
constraint.addInPlace(self.freeVar + self.lowVar + self.upVar)
if proportionFreeBound:
proportionFreeBoundList = [proportionFreeBound, proportionFreeBound]
if proportionFreeBoundList:
#add a costless variable
self.freeVar.upBound = abs(constraint.constant *
proportionFreeBoundList[0])
self.freeVar.lowBound = -abs(constraint.constant *
proportionFreeBoundList[1])
# Note the reversal of the upbound and lowbound due to the nature of the
# variable
if penalty is not None:
#activate these variables
self.upVar.upBound = None
self.lowVar.lowBound = None
self.objective = penalty*self.upVar - penalty*self.lowVar
def _findValue(self, attrib):
"""
safe way to get the value of a variable that may not exist
"""
var = getattr(self, attrib, 0)
if var:
if value(var) is not None:
return value(var)
else:
return 0.0
else:
return 0.0
def isViolated(self):
"""
returns true if the penalty variables are non-zero
"""
upVar = self._findValue("upVar")
lowVar = self._findValue("lowVar")
freeVar = self._findValue("freeVar")
result = abs(upVar + lowVar) >= EPS
if result:
logging.debug("isViolated %s, upVar %s, lowVar %s, freeVar %s result %s"%(
self.name, upVar, lowVar, freeVar, result))
logging.debug("isViolated value lhs %s constant %s"%(
self.findLHSValue(), self.RHS))
return result
def findDifferenceFromRHS(self):
"""
The amount the actual value varies from the RHS (sense: LHS - RHS)
"""
return self.findLHSValue() - self.RHS
def findLHSValue(self):
"""
        for elastic constraints, finds the LHS value of the constraint without
        the free variable and/or penalty variable; assumes the constant is on the
        RHS
"""
upVar = self._findValue("upVar")
lowVar = self._findValue("lowVar")
freeVar = self._findValue("freeVar")
return self.constraint.value() - self.constant - \
upVar - lowVar - freeVar
def deElasticize(self):
""" de-elasticize constraint """
self.upVar.upBound = 0
self.lowVar.lowBound = 0
def reElasticize(self):
"""
Make the Subproblem elastic again after deElasticize
"""
self.upVar.lowBound = 0
self.upVar.upBound = None
self.lowVar.upBound = 0
self.lowVar.lowBound = None
def alterName(self, name):
"""
Alters the name of anonymous parts of the problem
"""
self.name = "%s_elastic_SubProblem" % name
if hasattr(self, 'freeVar'):
self.freeVar.name = self.name + "_free_bound"
if hasattr(self, 'upVar'):
self.upVar.name = self.name + "_pos_penalty_var"
if hasattr(self, 'lowVar'):
self.lowVar.name = self.name + "_neg_penalty_var"
class FractionElasticSubProblem(FixedElasticSubProblem):
"""
Contains the subproblem generated by converting a Fraction constraint
numerator/(numerator+complement) = b
into an elastic constraint
:param name: The name of the elastic subproblem
:param penalty: penalty applied for violation (+ve or -ve) of the constraints
:param proportionFreeBound: the proportional bound (+ve and -ve) on
constraint violation that is free from penalty
:param proportionFreeBoundList: the proportional bound on
constraint violation that is free from penalty, expressed as a list
where [-ve, +ve]
"""
def __init__(self, name, numerator, RHS, sense,
complement = None,
denominator = None,
penalty = None,
proportionFreeBound = None,
proportionFreeBoundList = None):
subProblemName = "%s_elastic_SubProblem" % name
self.numerator = numerator
if denominator is None and complement is not None:
self.complement = complement
self.denominator = numerator + complement
elif denominator is not None and complement is None:
self.denominator = denominator
self.complement = denominator - numerator
else:
raise PulpError, 'only one of denominator and complement must be specified'
self.RHS = RHS
self.lowTarget = self.upTarget = None
LpProblem.__init__(self, subProblemName, LpMinimize)
self.freeVar = LpVariable("_free_bound",
upBound = 0, lowBound = 0)
self.upVar = LpVariable("_pos_penalty_var",
upBound = 0, lowBound = 0)
self.lowVar = LpVariable("_neg_penalty_var",
upBound = 0, lowBound = 0)
if proportionFreeBound:
proportionFreeBoundList = [proportionFreeBound, proportionFreeBound]
if proportionFreeBoundList:
upProportionFreeBound, lowProportionFreeBound = \
proportionFreeBoundList
else:
upProportionFreeBound, lowProportionFreeBound = (0, 0)
#create an objective
self += LpAffineExpression()
#There are three cases if the constraint.sense is ==, <=, >=
if sense in [LpConstraintEQ, LpConstraintLE]:
#create a constraint the sets the upper bound of target
self.upTarget = RHS + upProportionFreeBound
self.upConstraint = LpFractionConstraint(self.numerator,
self.complement,
LpConstraintLE,
self.upTarget,
denominator = self.denominator)
if penalty is not None:
self.lowVar.lowBound = None
self.objective += -1* penalty * self.lowVar
self.upConstraint += self.lowVar
self += self.upConstraint, '_upper_constraint'
if sense in [LpConstraintEQ, LpConstraintGE]:
#create a constraint the sets the lower bound of target
self.lowTarget = RHS - lowProportionFreeBound
self.lowConstraint = LpFractionConstraint(self.numerator,
self.complement,
LpConstraintGE,
self.lowTarget,
denominator = self.denominator)
if penalty is not None:
self.upVar.upBound = None
self.objective += penalty * self.upVar
self.lowConstraint += self.upVar
self += self.lowConstraint, '_lower_constraint'
def findLHSValue(self):
"""
        for elastic constraints, finds the LHS value of the constraint without
        the free variable and/or penalty variable; assumes the constant is on the
        RHS
"""
# uses code from LpFractionConstraint
if abs(value(self.denominator))>= EPS:
return value(self.numerator)/value(self.denominator)
else:
if abs(value(self.numerator))<= EPS:
#zero divided by zero will return 1
return 1.0
else:
raise ZeroDivisionError
def isViolated(self):
"""
returns true if the penalty variables are non-zero
"""
if abs(value(self.denominator))>= EPS:
if self.lowTarget is not None:
if self.lowTarget > self.findLHSValue():
return True
if self.upTarget is not None:
if self.findLHSValue() > self.upTarget:
return True
else:
#if the denominator is zero the constraint is satisfied
return False
class LpVariableDict(dict):
"""An LP variable generator"""
def __init__(self, name, data = {}, lowBound = None, upBound = None, cat = LpContinuous):
self.name = name
dict.__init__(self, data)
def __getitem__(self, key):
if key in self:
return dict.__getitem__(self, key)
else:
self[key] = LpVariable(name % key, lowBound, upBound, cat)
return self[key]
# Utility functions
def lpSum(vector):
"""
Calculate the sum of a list of linear expressions
:param vector: A list of linear expressions
"""
return LpAffineExpression().addInPlace(vector)
def lpDot(v1, v2):
"""Calculate the dot product of two lists of linear expressions"""
if not isiterable(v1) and not isiterable(v2):
return v1 * v2
elif not isiterable(v1):
return lpDot([v1]*len(v2),v2)
elif not isiterable(v2):
return lpDot(v1,[v2]*len(v1))
else:
return lpSum([lpDot(e1,e2) for e1,e2 in zip(v1,v2)])
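# Illustrative sketch (not part of the original source): building an objective
# and a constraint with lpDot/lpSum over hypothetical data ('prob' assumed).
#   >>> costs = [2, 3, 5]
#   >>> xs = [LpVariable("x_%d" % i, lowBound=0) for i in range(3)]
#   >>> prob += lpDot(costs, xs)     # objective: 2*x_0 + 3*x_1 + 5*x_2
#   >>> prob += lpSum(xs) <= 10      # limit total usage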
def isNumber(x):
"""Returns true if x is an int of a float"""
return type(x) in [int, float]
def value(x):
"""Returns the value of the variable/expression x, or x if it is a number"""
if isNumber(x): return x
else: return x.value()
def valueOrDefault(x):
"""Returns the value of the variable/expression x, or x if it is a number
    Variables without a value (None) are assigned a possible value (within their
bounds)."""
if isNumber(x): return x
else: return x.valueOrDefault()
def combination(orgset, k = None):
"""
returns an iterator that lists the combinations of orgset of
length k
:param orgset: the list to be iterated
:param k: the cardinality of the subsets
:return: an iterator of the subsets
example:
>>> c = combination([1,2,3,4],2)
>>> for s in c:
... print s
(1, 2)
(1, 3)
(1, 4)
(2, 3)
(2, 4)
(3, 4)
"""
try:
import probstat
return probstat.Combination(orgset,k)
except(ImportError):
return __combination(orgset,k)
def __combination(orgset,k):
"""
    fallback if probstat is not installed; note it is GPL so cannot
    be included
"""
if k == 1:
for i in orgset:
yield (i,)
elif k>1:
for i,x in enumerate(orgset):
            #iterates through to near the end
for s in __combination(orgset[i+1:],k-1):
yield (x,) + s
def permutation(orgset, k = None):
"""
returns an iterator that lists the permutations of orgset of
length k
:param orgset: the list to be iterated
:param k: the cardinality of the subsets
:return: an iterator of the subsets
example:
>>> c = permutation([1,2,3,4],2)
>>> for s in c:
... print s
(1, 2)
(1, 3)
(1, 4)
(2, 1)
(2, 3)
(2, 4)
(3, 1)
(3, 2)
(3, 4)
(4, 1)
(4, 2)
(4, 3)
"""
try:
import probstat
return probstat.Permutation(orgset, k)
except(ImportError):
return __permutation(orgset, k)
def __permutation(orgset, k):
"""
    fallback if probstat is not installed; note it is GPL so cannot
    be included
"""
if k == 1:
for i in orgset:
yield (i,)
elif k>1:
for i,x in enumerate(orgset):
            #iterates through to near the end
for s in __permutation(orgset[:i] + orgset[i+1:],k-1):
yield (x,)+ s
def allpermutations(orgset,k):
"""
returns all permutations of orgset with up to k items
:param orgset: the list to be iterated
:param k: the maxcardinality of the subsets
:return: an iterator of the subsets
example:
>>> c = allpermutations([1,2,3,4],2)
>>> for s in c:
... print s
(1,)
(2,)
(3,)
(4,)
(1, 2)
(1, 3)
(1, 4)
(2, 1)
(2, 3)
(2, 4)
(3, 1)
(3, 2)
(3, 4)
(4, 1)
(4, 2)
(4, 3)
"""
return itertools.chain(*[permutation(orgset,i) for i in range(1,k+1)])
def allcombinations(orgset,k):
"""
    returns all combinations of orgset with up to k items
:param orgset: the list to be iterated
:param k: the maxcardinality of the subsets
:return: an iterator of the subsets
example:
>>> c = allcombinations([1,2,3,4],2)
>>> for s in c:
... print s
(1,)
(2,)
(3,)
(4,)
(1, 2)
(1, 3)
(1, 4)
(2, 3)
(2, 4)
(3, 4)
"""
return itertools.chain(*[combination(orgset,i) for i in range(1,k+1)])
def makeDict(headers, array, default = None):
"""
    makes a list into a dictionary with the headings given in headers
headers is a list of header lists
array is a list with the data
"""
result, defdict = __makeDict(headers, array, default)
return result
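# Illustrative sketch (not part of the original source): turning parallel header
# lists and a data array into a nested dictionary (hypothetical data).
#   >>> items = ["hammer", "screwdriver"]
#   >>> shops = ["north", "south"]
#   >>> costs = [[6, 7], [4, 5]]
#   >>> makeDict([items, shops], costs)["screwdriver"]["north"]
#   4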
def __makeDict(headers, array, default = None):
#this is a recursive function so end the recursion as follows
result ={}
returndefaultvalue = None
if len(headers) == 1:
result.update(dict(zip(headers[0],array)))
defaultvalue = default
else:
for i,h in enumerate(headers[0]):
result[h],defaultvalue = __makeDict(headers[1:],array[i],default)
if default != None:
f = lambda :defaultvalue
defresult = collections.defaultdict(f)
defresult.update(result)
result = defresult
returndefaultvalue = collections.defaultdict(f)
return result, returndefaultvalue
def splitDict(Data):
"""
Split a dictionary with lists as the data, into smaller dictionaries
:param Data: A dictionary with lists as the values
:return: A tuple of dictionaries each containing the data separately,
with the same dictionary keys
"""
# find the maximum number of items in the dictionary
maxitems = max([len(values) for values in Data.values()])
output =[dict() for i in range(maxitems)]
for key, values in Data.items():
for i, val in enumerate(values):
output[i][key] = val
return tuple(output)
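# Illustrative sketch (not part of the original source): splitting a dictionary
# of per-key lists into one dictionary per position (hypothetical data).
#   >>> data = {"A": [10, 0.5], "B": [20, 0.8]}
#   >>> capacity, efficiency = splitDict(data)
#   >>> capacity["B"], efficiency["A"]
#   (20, 0.5)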
def read_table(data, coerce_type, transpose=False):
'''
Reads in data from a simple table and forces it to be a particular type
    This is a helper function that allows data to be easily contained in a
simple script
    ::return: a dictionary with the keys being a tuple of the strings
        in the first row and column of the table
::param data: the multiline string containing the table data
::param coerce_type: the type that the table data is converted to
::param transpose: reverses the data if needed
Example:
>>> table_data = """
... L1 L2 L3 L4 L5 L6
... C1 6736 42658 70414 45170 184679 111569
... C2 217266 227190 249640 203029 153531 117487
... C3 35936 28768 126316 2498 130317 74034
... C4 73446 52077 108368 75011 49827 62850
... C5 174664 177461 151589 153300 59916 135162
... C6 186302 189099 147026 164938 149836 286307
... """
>>> table = read_table(table_data, int)
>>> table[("C1","L1")]
6736
>>> table[("C6","L5")]
149836
'''
lines = data.splitlines()
headings = lines[1].split()
result = {}
for row in lines[2:]:
items = row.split()
for i, item in enumerate(items[1:]):
if transpose:
key = (headings[i], items[0])
else:
key = (items[0], headings[i])
result[key] = coerce_type(item)
return result
def configSolvers():
"""
    Configure the path to the solvers on the command line
Designed to configure the file locations of the solvers from the
command line after installation
"""
configlist = [(cplex_dll_path,"cplexpath","CPLEX: "),
(coinMP_path, "coinmppath","CoinMP dll (windows only): ")]
print ("Please type the full path including filename and extension \n" +
"for each solver available")
configdict = {}
for (default, key, msg) in configlist:
value = raw_input(msg + "[" + str(default) +"]")
if value:
configdict[key] = value
setConfigInformation(**configdict)
def pulpTestAll():
from tests import pulpTestSolver
solvers = [PULP_CBC_CMD,
CPLEX_DLL,
CPLEX_CMD,
CPLEX_PY,
COIN_CMD,
COINMP_DLL,
GLPK_CMD,
XPRESS,
GUROBI,
GUROBI_CMD,
PYGLPK,
YAPOSIB
]
for s in solvers:
if s().available():
#~ try:
pulpTestSolver(s)
print "* Solver", s, "passed."
#~ except Exception, e:
#~ print e
#~ print "* Solver", s, "failed."
else:
print "Solver", s, "unavailable."
def pulpDoctest():
"""
runs all doctests
"""
import doctest
if __name__ != '__main__':
import pulp
doctest.testmod(pulp)
else:
doctest.testmod()
if __name__ == '__main__':
# Tests
pulpTestAll()
pulpDoctest()
| 33.75833
| 171
| 0.561311
|
"""
PuLP is an LP modeler written in python. PuLP can generate MPS or LP files
and call GLPK[1], COIN CLP/CBC[2], CPLEX[3], and GUROBI[4] to solve linear
problems.
See the examples directory for examples.
PuLP requires Python >= 2.5.
The examples require at least a solver in your PATH or a shared library file.
Documentation is found on https://www.coin-or.org/PuLP/.
A comprehensive wiki can be found at https://www.coin-or.org/PuLP/
Use LpVariable() to create new variables. To create a variable 0 <= x <= 3
>>> x = LpVariable("x", 0, 3)
To create a variable 0 <= y <= 1
>>> y = LpVariable("y", 0, 1)
Use LpProblem() to create new problems. Create "myProblem"
>>> prob = LpProblem("myProblem", LpMinimize)
Combine variables to create expressions and constraints and add them to the
problem.
>>> prob += x + y <= 2
If you add an expression (not a constraint), it will
become the objective.
>>> prob += -4*x + y
Choose a solver and solve the problem. ex:
>>> status = prob.solve(GLPK(msg = 0))
Display the status of the solution
>>> LpStatus[status]
'Optimal'
You can get the value of the variables using value(). ex:
>>> value(x)
2.0
Exported Classes:
- LpProblem -- Container class for a Linear programming problem
- LpVariable -- Variables that are added to constraints in the LP
- LpConstraint -- A constraint of the general form
a1x1+a2x2 ...anxn (<=, =, >=) b
- LpConstraintVar -- Used to construct a column of the model in column-wise
modelling
Exported Functions:
- value() -- Finds the value of a variable or expression
- lpSum() -- given a list of the form [a1*x1, a2x2, ..., anxn] will construct
a linear expression to be used as a constraint or variable
- lpDot() --given two lists of the form [a1, a2, ..., an] and
[x1, x2, ..., xn] will construct a linear expression to be used
as a constraint or variable
Comments, bug reports, patches and suggestions are welcome.
pulp-or-discuss@googlegroups.com
References:
[1] http://www.gnu.org/software/glpk/glpk.html
[2] http://www.coin-or.org/
[3] http://www.cplex.com/
[4] http://www.gurobi.com/
"""
import types
import string
import itertools
from constants import *
from solvers import *
from types import GeneratorType
_DICT_TYPE = dict
if sys.platform not in ['cli']:
try:
from odict import OrderedDict
_DICT_TYPE = OrderedDict
except ImportError:
pass
try:
from collections import OrderedDict
_DICT_TYPE = OrderedDict
except ImportError:
pass
def setConfigInformation(**keywords):
"""
set the data in the configuration file
at the moment will only edit things in [locations]
the keyword value pairs come from the keywords dictionary
"""
config = ConfigParser.SafeConfigParser()
config.read(config_filename)
for (key,val) in keywords.items():
config.set("locations",key,val)
fp = open(config_filename,"w")
config.write(fp)
fp.close()
if PULP_CBC_CMD().available():
LpSolverDefault = PULP_CBC_CMD()
elif GLPK_CMD().available():
LpSolverDefault = GLPK_CMD()
elif COIN_CMD().available():
LpSolverDefault = COIN_CMD()
else:
LpSolverDefault = None
class LpElement(object):
"""Base class for LpVariable and LpConstraintVar
"""
trans = string.maketrans("-+[] ->/","________")
def setName(self,name):
if name:
self.__name = str(name).translate(self.trans)
else:
self.__name = None
def getName(self):
return self.__name
name = property(fget = getName,fset = setName)
def __init__(self, name):
self.name = name
self.hash = id(self)
self.modified = True
def __hash__(self):
return self.hash
def __str__(self):
return self.name
def __repr__(self):
return self.name
def __neg__(self):
return - LpAffineExpression(self)
def __pos__(self):
return self
def __nonzero__(self):
return 1
def __add__(self, other):
return LpAffineExpression(self) + other
def __radd__(self, other):
return LpAffineExpression(self) + other
def __sub__(self, other):
return LpAffineExpression(self) - other
def __rsub__(self, other):
return other - LpAffineExpression(self)
def __mul__(self, other):
return LpAffineExpression(self) * other
def __rmul__(self, other):
return LpAffineExpression(self) * other
def __div__(self, other):
return LpAffineExpression(self)/other
def __rdiv__(self, other):
raise TypeError, "Expressions cannot be divided by a variable"
def __le__(self, other):
return LpAffineExpression(self) <= other
def __ge__(self, other):
return LpAffineExpression(self) >= other
def __eq__(self, other):
return LpAffineExpression(self) == other
def __ne__(self, other):
if isinstance(other, LpVariable):
return self.name is not other.name
elif isinstance(other, LpAffineExpression):
if other.isAtomic():
return self is not other.atom()
else:
return 1
else:
return 1
class LpVariable(LpElement):
"""
This class models an LP Variable with the specified associated parameters
:param name: The name of the variable used in the output .lp file
    :param lowBound: The lower bound on this variable's range.
Default is negative infinity
:param upBound: The upper bound on this variable's range.
Default is positive infinity
:param cat: The category this variable is in, Integer, Binary or
Continuous(default)
:param e: Used for column based modelling: relates to the variable's
existence in the objective function and constraints
"""
def __init__(self, name, lowBound = None, upBound = None,
cat = LpContinuous, e = None):
LpElement.__init__(self,name)
self.lowBound = lowBound
self.upBound = upBound
self.cat = cat
self.varValue = None
self.init = 0
#code to add a variable to constraints for column based
# modelling
if cat == LpBinary:
self.lowBound = 0
self.upBound = 1
self.cat = LpInteger
if e:
self.add_expression(e)
def add_expression(self,e):
self.expression = e
self.addVariableToConstraints(e)
@classmethod
def matrix(cls, name, indexs, lowBound = None, upBound = None, cat = LpContinuous,
indexStart = []):
if not isinstance(indexs, tuple): indexs = (indexs,)
if "%" not in name: name += "_%s" * len(indexs)
index = indexs[0]
indexs = indexs[1:]
if len(indexs) == 0:
return [
LpVariable(name % tuple(indexStart + [i]), lowBound, upBound, cat)
for i in index
]
else:
return [
LpVariable.matrix(name, indexs, lowBound, upBound, cat, indexStart + [i])
for i in index
]
@classmethod
def dicts(cls, name, indexs, lowBound = None, upBound = None, cat = LpContinuous,
indexStart = []):
"""
Creates a dictionary of LP variables
This function creates a dictionary of LP Variables with the specified
associated parameters.
:param name: The prefix to the name of each LP variable created
:param indexs: A list of strings of the keys to the dictionary of LP
variables, and the main part of the variable name itself
        :param lowBound: The lower bound on these variables' range. Default is
negative infinity
:param upBound: The upper bound on these variables' range. Default is
positive infinity
:param cat: The category these variables are in, Integer or
Continuous(default)
:return: A dictionary of LP Variables
"""
if not isinstance(indexs, tuple): indexs = (indexs,)
if "%" not in name: name += "_%s" * len(indexs)
index = indexs[0]
indexs = indexs[1:]
d = {}
if len(indexs) == 0:
for i in index:
d[i] = LpVariable(name % tuple(indexStart + [str(i)]), lowBound, upBound, cat)
else:
for i in index:
d[i] = LpVariable.dicts(name, indexs, lowBound, upBound, cat, indexStart + [i])
return d
@classmethod
def dict(cls, name, indexs, lowBound = None, upBound = None, cat = LpContinuous):
if not isinstance(indexs, tuple): indexs = (indexs,)
if "%" not in name: name += "_%s" * len(indexs)
lists = indexs
if len(indexs)>1:
# Cartesian product
res = []
while len(lists):
first = lists[-1]
nres = []
if res:
if first:
for f in first:
nres.extend([[f]+r for r in res])
else:
nres = res
res = nres
else:
res = [[f] for f in first]
lists = lists[:-1]
index = [tuple(r) for r in res]
elif len(indexs) == 1:
index = indexs[0]
else:
return {}
d = dict((i, cls(name % i, lowBound, upBound, cat)) for i in index)
return d
def getLb(self):
return self.lowBound
def getUb(self):
return self.upBound
def bounds(self, low, up):
self.lowBound = low
self.upBound = up
def positive(self):
self.lowBound = 0
self.upBound = None
def value(self):
return self.varValue
def round(self, epsInt = 1e-5, eps = 1e-7):
if self.varValue is not None:
if self.upBound != None and self.varValue > self.upBound and self.varValue <= self.upBound + eps:
self.varValue = self.upBound
elif self.lowBound != None and self.varValue < self.lowBound and self.varValue >= self.lowBound - eps:
self.varValue = self.lowBound
if self.cat == LpInteger and abs(round(self.varValue) - self.varValue) <= epsInt:
self.varValue = round(self.varValue)
def roundedValue(self, eps = 1e-5):
if self.cat == LpInteger and self.varValue != None \
and abs(self.varValue - round(self.varValue)) <= eps:
return round(self.varValue)
else:
return self.varValue
def valueOrDefault(self):
if self.varValue != None:
return self.varValue
elif self.lowBound != None:
if self.upBound != None:
if 0 >= self.lowBound and 0 <= self.upBound:
return 0
else:
if self.lowBound >= 0:
return self.lowBound
else:
return self.upBound
else:
if 0 >= self.lowBound:
return 0
else:
return self.lowBound
elif self.upBound != None:
if 0 <= self.upBound:
return 0
else:
return self.upBound
else:
return 0
def valid(self, eps):
if self.varValue == None: return False
if self.upBound != None and self.varValue > self.upBound + eps:
return False
if self.lowBound != None and self.varValue < self.lowBound - eps:
return False
if self.cat == LpInteger and abs(round(self.varValue) - self.varValue) > eps:
return False
return True
def infeasibilityGap(self, mip = 1):
if self.varValue == None: raise ValueError, "variable value is None"
if self.upBound != None and self.varValue > self.upBound:
return self.varValue - self.upBound
if self.lowBound != None and self.varValue < self.lowBound:
return self.varValue - self.lowBound
if mip and self.cat == LpInteger and round(self.varValue) - self.varValue != 0:
return round(self.varValue) - self.varValue
return 0
def isBinary(self):
return self.cat == LpInteger and self.lowBound == 0 and self.upBound == 1
def isInteger(self):
return self.cat == LpInteger
def isFree(self):
return self.lowBound == None and self.upBound == None
def isConstant(self):
return self.lowBound != None and self.upBound == self.lowBound
def isPositive(self):
return self.lowBound == 0 and self.upBound == None
def asCplexLpVariable(self):
if self.isFree(): return self.name + " free"
if self.isConstant(): return self.name + " = %.12g" % self.lowBound
if self.lowBound == None:
s= "-inf <= "
# Note: XPRESS and CPLEX do not interpret integer variables without
# explicit bounds
elif (self.lowBound == 0 and self.cat == LpContinuous):
s = ""
else:
s= "%.12g <= " % self.lowBound
s += self.name
if self.upBound != None:
s+= " <= %.12g" % self.upBound
return s
def asCplexLpAffineExpression(self, name, constant = 1):
return LpAffineExpression(self).asCplexLpAffineExpression(name, constant)
def __ne__(self, other):
if isinstance(other, LpElement):
return self.name is not other.name
elif isinstance(other, LpAffineExpression):
if other.isAtomic():
return self is not other.atom()
else:
return 1
else:
return 1
def addVariableToConstraints(self,e):
"""adds a variable to the constraints indicated by
the LpConstraintVars in e
"""
for constraint, coeff in e.items():
constraint.addVariable(self,coeff)
def setInitialValue(self,val):
"""sets the initial value of the Variable to val
        may or may not be supported by the solver
"""
raise NotImplementedError
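# Illustrative sketch (assumed example): the dicts() and matrix() helpers
# documented above build keyed or nested containers of LpVariable objects from
# a name pattern and index sets; the names "price" and "cell" are arbitrary.
def _example_variable_containers():
    prices = LpVariable.dicts("price", ["ham", "spam"], lowBound=0)
    grid = LpVariable.matrix("cell", (range(2), range(3)), 0, 1, LpInteger)
    return prices, grid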
class LpAffineExpression(_DICT_TYPE):
"""
A linear combination of :class:`LpVariables<LpVariable>`.
Can be initialised with the following:
#. e = None: an empty Expression
#. e = dict: gives an expression with the values being the coefficients of the keys (order of terms is undetermined)
#. e = list or generator of 2-tuples: equivalent to dict.items()
#. e = LpElement: an expression of length 1 with the coefficient 1
#. e = other: the constant is initialised as e
Examples:
>>> f=LpAffineExpression(LpElement('x'))
>>> f
1*x + 0
>>> x_name = ['x_0', 'x_1', 'x_2']
>>> x = [LpVariable(x_name[i], lowBound = 0, upBound = 10) for i in range(3) ]
>>> c = LpAffineExpression([ (x[0],1), (x[1],-3), (x[2],4)])
>>> c
1*x_0 + -3*x_1 + 4*x_2 + 0
"""
#to remove illegal characters from the names
trans = string.maketrans("-+[] ","_____")
def setName(self,name):
if name:
self.__name = str(name).translate(self.trans)
else:
self.__name = None
def getName(self):
return self.__name
name = property(fget=getName, fset=setName)
def __init__(self, e = None, constant = 0, name = None):
self.name = name
#TODO remove isinstance usage
if e is None:
e = {}
if isinstance(e, LpAffineExpression):
# Will not copy the name
self.constant = e.constant
super(LpAffineExpression, self).__init__(e.items())
elif isinstance(e, dict):
self.constant = constant
super(LpAffineExpression, self).__init__(e.items())
elif isinstance(e, list) or isinstance(e, GeneratorType):
self.constant = constant
super(LpAffineExpression, self).__init__(e)
elif isinstance(e,LpElement):
self.constant = 0
super(LpAffineExpression, self).__init__( [(e, 1)])
else:
self.constant = e
super(LpAffineExpression, self).__init__()
# Proxy functions for variables
def isAtomic(self):
return len(self) == 1 and self.constant == 0 and self.values()[0] == 1
def isNumericalConstant(self):
return len(self) == 0
def atom(self):
return self.keys()[0]
# Functions on expressions
def __nonzero__(self):
return float(self.constant) != 0 or len(self)
def value(self):
s = self.constant
for v,x in self.iteritems():
if v.varValue is None:
return None
s += v.varValue * x
return s
def valueOrDefault(self):
s = self.constant
for v,x in self.iteritems():
s += v.valueOrDefault() * x
return s
def addterm(self, key, value):
y = self.get(key, 0)
if y:
y += value
self[key] = y
else:
self[key] = value
def emptyCopy(self):
return LpAffineExpression()
def copy(self):
"""Make a copy of self except the name which is reset"""
# Will not copy the name
return LpAffineExpression(self)
def __str__(self, constant = 1):
s = ""
for v in self.sorted_keys():
val = self[v]
if val<0:
if s != "": s += " - "
else: s += "-"
val = -val
elif s != "": s += " + "
if val == 1: s += str(v)
else: s += str(val) + "*" + str(v)
if constant:
if s == "":
s = str(self.constant)
else:
if self.constant < 0: s += " - " + str(-self.constant)
elif self.constant > 0: s += " + " + str(self.constant)
elif s == "":
s = "0"
return s
def sorted_keys(self):
"""
returns the list of keys sorted by name
"""
result = [(v.name, v) for v in self.keys()]
result.sort()
result = [v for _, v in result]
return result
def __repr__(self):
l = [str(self[v]) + "*" + str(v)
for v in self.sorted_keys()]
l.append(str(self.constant))
s = " + ".join(l)
return s
@staticmethod
def _count_characters(line):
#counts the characters in a list of strings
return sum(len(t) for t in line)
def asCplexVariablesOnly(self, name):
"""
helper for asCplexLpAffineExpression
"""
result = []
line = ["%s:" % name]
notFirst = 0
variables = self.sorted_keys()
for v in variables:
val = self[v]
if val < 0:
sign = " -"
val = -val
elif notFirst:
sign = " +"
else:
sign = ""
notFirst = 1
if val == 1:
term = "%s %s" %(sign, v.name)
else:
term = "%s %.12g %s" % (sign, val, v.name)
if self._count_characters(line) + len(term) > LpCplexLPLineSize:
result += ["".join(line)]
line = [term]
else:
line += [term]
return result, line
def asCplexLpAffineExpression(self, name, constant = 1):
"""
returns a string that represents the Affine Expression in lp format
"""
#refactored to use a list for speed in iron python
result, line = self.asCplexVariablesOnly(name)
if not self:
term = " %s" % self.constant
else:
term = ""
if constant:
if self.constant < 0:
term = " - %s" % (-self.constant)
elif self.constant > 0:
term = " + %s" % self.constant
if self._count_characters(line) + len(term) > LpCplexLPLineSize:
result += ["".join(line)]
line += [term]
else:
line += [term]
result += ["".join(line)]
result = "%s\n" % "\n".join(result)
return result
def addInPlace(self, other):
if other is 0: return self
if other is None: return self
if isinstance(other,LpElement):
self.addterm(other, 1)
elif (isinstance(other,list)
or isinstance(other,types.GeneratorType)):
for e in other:
self.addInPlace(e)
elif isinstance(other,LpAffineExpression):
self.constant += other.constant
for v,x in other.iteritems():
self.addterm(v, x)
elif isinstance(other,dict):
for e in other.itervalues():
self.addInPlace(e)
else:
self.constant += other
return self
def subInPlace(self, other):
if other is 0: return self
if other is None: return self
if isinstance(other,LpElement):
self.addterm(other, -1)
elif (isinstance(other,list)
or isinstance(other,types.GeneratorType)):
for e in other:
self.subInPlace(e)
elif isinstance(other,LpAffineExpression):
self.constant -= other.constant
for v,x in other.iteritems():
self.addterm(v, -x)
elif isinstance(other,dict):
for e in other.itervalues():
self.subInPlace(e)
else:
self.constant -= other
return self
def __neg__(self):
e = self.emptyCopy()
e.constant = - self.constant
for v,x in self.iteritems():
e[v] = - x
return e
def __pos__(self):
return self
def __add__(self, other):
return self.copy().addInPlace(other)
def __radd__(self, other):
return self.copy().addInPlace(other)
def __sub__(self, other):
return self.copy().subInPlace(other)
def __rsub__(self, other):
return (-self).addInPlace(other)
def __mul__(self, other):
e = self.emptyCopy()
if isinstance(other,LpAffineExpression):
e.constant = self.constant * other.constant
if len(other):
if len(self):
raise TypeError, "Non-constant expressions cannot be multiplied"
else:
c = self.constant
if c != 0:
for v,x in other.iteritems():
e[v] = c * x
else:
c = other.constant
if c != 0:
for v,x in self.iteritems():
e[v] = c * x
elif isinstance(other,LpVariable):
return self * LpAffineExpression(other)
else:
if other != 0:
e.constant = self.constant * other
for v,x in self.iteritems():
e[v] = other * x
return e
def __rmul__(self, other):
return self * other
def __div__(self, other):
if isinstance(other,LpAffineExpression) or isinstance(other,LpVariable):
if len(other):
raise TypeError, "Expressions cannot be divided by a non-constant expression"
other = other.constant
e = self.emptyCopy()
e.constant = self.constant / other
for v,x in self.iteritems():
e[v] = x / other
return e
def __rdiv__(self, other):
e = self.emptyCopy()
if len(self):
raise TypeError, "Expressions cannot be divided by a non-constant expression"
c = self.constant
if isinstance(other,LpAffineExpression):
e.constant = other.constant / c
for v,x in other.iteritems():
e[v] = x / c
else:
e.constant = other / c
return e
def __le__(self, other):
return LpConstraint(self - other, LpConstraintLE)
def __ge__(self, other):
return LpConstraint(self - other, LpConstraintGE)
def __eq__(self, other):
return LpConstraint(self - other, LpConstraintEQ)
class LpConstraint(LpAffineExpression):
"""An LP constraint"""
def __init__(self, e = None, sense = LpConstraintEQ,
name = None, rhs = None):
"""
:param e: an instance of :class:`LpAffineExpression`
:param sense: one of :data:`~pulp.constants.LpConstraintEQ`, :data:`~pulp.constants.LpConstraintGE`, :data:`~pulp.constants.LpConstraintLE` (0, 1, -1 respectively)
:param name: identifying string
:param rhs: numerical value of constraint target
"""
LpAffineExpression.__init__(self, e, name = name)
if rhs is not None:
self.constant = - rhs
self.sense = sense
self.modified = True
def getLb(self):
if ( (self.sense == LpConstraintGE) or
(self.sense == LpConstraintEQ) ):
return -self.constant
else:
return None
def getUb(self):
if ( (self.sense == LpConstraintLE) or
(self.sense == LpConstraintEQ) ):
return -self.constant
else:
return None
def __str__(self):
s = LpAffineExpression.__str__(self, 0)
if self.sense:
s += " " + LpConstraintSenses[self.sense] + " " + str(-self.constant)
return s
def asCplexLpConstraint(self, name):
"""
Returns a constraint as a string
"""
result, line = self.asCplexVariablesOnly(name)
if not self.keys():
line += ["0"]
c = -self.constant
if c == 0:
            c = 0  # suppress the sign of negative zero
term = " %s %.12g" % (LpConstraintSenses[self.sense], c)
if self._count_characters(line)+len(term) > LpCplexLPLineSize:
result += ["".join(line)]
line = [term]
else:
line += [term]
result += ["".join(line)]
result = "%s\n" % "\n".join(result)
return result
def changeRHS(self, RHS):
"""
alters the RHS of a constraint so that it can be modified in a resolve
"""
self.constant = -RHS
self.modified = True
def __repr__(self):
s = LpAffineExpression.__repr__(self)
if self.sense is not None:
s += " " + LpConstraintSenses[self.sense] + " 0"
return s
def copy(self):
"""Make a copy of self"""
return LpConstraint(self, self.sense)
def emptyCopy(self):
return LpConstraint(sense = self.sense)
def addInPlace(self, other):
if isinstance(other,LpConstraint):
if self.sense * other.sense >= 0:
LpAffineExpression.addInPlace(self, other)
self.sense |= other.sense
else:
LpAffineExpression.subInPlace(self, other)
self.sense |= - other.sense
elif isinstance(other,list):
for e in other:
self.addInPlace(e)
else:
LpAffineExpression.addInPlace(self, other)
#raise TypeError, "Constraints and Expressions cannot be added"
return self
def subInPlace(self, other):
if isinstance(other,LpConstraint):
if self.sense * other.sense <= 0:
LpAffineExpression.subInPlace(self, other)
self.sense |= - other.sense
else:
LpAffineExpression.addInPlace(self, other)
self.sense |= other.sense
elif isinstance(other,list):
for e in other:
self.subInPlace(e)
else:
LpAffineExpression.subInPlace(self, other)
#raise TypeError, "Constraints and Expressions cannot be added"
return self
def __neg__(self):
c = LpAffineExpression.__neg__(self)
c.sense = - c.sense
return c
def __add__(self, other):
return self.copy().addInPlace(other)
def __radd__(self, other):
return self.copy().addInPlace(other)
def __sub__(self, other):
return self.copy().subInPlace(other)
def __rsub__(self, other):
return (-self).addInPlace(other)
def __mul__(self, other):
if isinstance(other,LpConstraint):
c = LpAffineExpression.__mul__(self, other)
if c.sense == 0:
c.sense = other.sense
elif other.sense != 0:
c.sense *= other.sense
return c
else:
return LpAffineExpression.__mul__(self, other)
def __rmul__(self, other):
return self * other
def __div__(self, other):
if isinstance(other,LpConstraint):
c = LpAffineExpression.__div__(self, other)
if c.sense == 0:
c.sense = other.sense
elif other.sense != 0:
c.sense *= other.sense
return c
else:
return LpAffineExpression.__mul__(self, other)
def __rdiv__(self, other):
if isinstance(other,LpConstraint):
c = LpAffineExpression.__rdiv__(self, other)
if c.sense == 0:
c.sense = other.sense
elif other.sense != 0:
c.sense *= other.sense
return c
else:
return LpAffineExpression.__mul__(self, other)
def valid(self, eps = 0):
val = self.value()
if self.sense == LpConstraintEQ: return abs(val) <= eps
else: return val * self.sense >= - eps
def makeElasticSubProblem(self, *args, **kwargs):
"""
Builds an elastic subproblem by adding variables to a hard constraint
uses FixedElasticSubProblem
"""
return FixedElasticSubProblem(self, *args, **kwargs)
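# Illustrative sketch (assumed example): comparing an expression with a number
# builds an LpConstraint, and changeRHS() retargets it for a later resolve.
def _example_constraint_rhs():
    cx = LpVariable("cx", 0)
    cy = LpVariable("cy", 0)
    con = cx + 2 * cy <= 10     # an LpConstraint with sense <=
    con.changeRHS(12)           # now represents cx + 2*cy <= 12
    return con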
class LpFractionConstraint(LpConstraint):
"""
Creates a constraint that enforces a fraction requirement a/b = c
"""
def __init__(self, numerator, denominator = None, sense = LpConstraintEQ,
RHS = 1.0, name = None,
complement = None):
"""
creates a fraction Constraint to model constraints of
the nature
numerator/denominator {==, >=, <=} RHS
numerator/(numerator + complement) {==, >=, <=} RHS
:param numerator: the top of the fraction
:param denominator: as described above
:param sense: the sense of the relation of the constraint
:param RHS: the target fraction value
:param complement: as described above
"""
self.numerator = numerator
if denominator is None and complement is not None:
self.complement = complement
self.denominator = numerator + complement
elif denominator is not None and complement is None:
self.denominator = denominator
self.complement = denominator - numerator
else:
self.denominator = denominator
self.complement = complement
lhs = self.numerator - RHS * self.denominator
LpConstraint.__init__(self, lhs,
sense = sense, rhs = 0, name = name)
self.RHS = RHS
def findLHSValue(self):
"""
Determines the value of the fraction in the constraint after solution
"""
if abs(value(self.denominator))>= EPS:
return value(self.numerator)/value(self.denominator)
else:
if abs(value(self.numerator))<= EPS:
#zero divided by zero will return 1
return 1.0
else:
raise ZeroDivisionError
def makeElasticSubProblem(self, *args, **kwargs):
"""
Builds an elastic subproblem by adding variables and splitting the
hard constraint
uses FractionElasticSubProblem
"""
return FractionElasticSubProblem(self, *args, **kwargs)
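# Illustrative sketch (assumed example): enforce fa / (fa + fb) <= 0.5 using
# the complement form of LpFractionConstraint described above.
def _example_fraction_constraint():
    fa = LpVariable("fa", 0, 10)
    fb = LpVariable("fb", 0, 10)
    return LpFractionConstraint(fa, sense=LpConstraintLE, RHS=0.5, complement=fb)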
class LpConstraintVar(LpElement):
"""A Constraint that can be treated as a variable when constructing
a LpProblem by columns
"""
def __init__(self, name = None ,sense = None,
rhs = None, e = None):
LpElement.__init__(self,name)
self.constraint = LpConstraint(name = self.name, sense = sense,
rhs = rhs , e = e)
def addVariable(self, var, coeff):
"""
Adds a variable to the constraint with the
activity coeff
"""
self.constraint.addterm(var, coeff)
def value(self):
return self.constraint.value()
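# Illustrative sketch (assumed example) of the column-wise modelling pattern:
# constraints are declared first as LpConstraintVar objects, then each
# LpVariable lists its coefficients in them through the ``e`` argument.
def _example_columnwise_model():
    obj = LpConstraintVar("obj")
    demand = LpConstraintVar("demand", LpConstraintGE, 5)
    prob = LpProblem("colwise", LpMinimize)
    prob.setObjective(obj)
    prob += demand
    x = LpVariable("col_x", 0, None, LpContinuous, obj + 2 * demand)
    return prob, x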
class LpProblem(object):
"""An LP Problem"""
def __init__(self, name = "NoName", sense = LpMinimize):
"""
Creates an LP Problem
This function creates a new LP Problem with the specified associated parameters
:param name: name of the problem used in the output .lp file
:param sense: of the LP problem objective. \
Either :data:`~pulp.constants.LpMinimize` (default) \
or :data:`~pulp.constants.LpMaximize`.
:return: An LP Problem
"""
self.objective = None
self.constraints = _DICT_TYPE()
self.name = name
self.sense = sense
self.sos1 = {}
self.sos2 = {}
self.status = LpStatusNotSolved
self.noOverlap = 1
self.solver = None
self.initialValues = {}
self.resolveOK = False
self._variables = []
self._variable_ids = {} #old school using dict.keys() for a set
self.dummyVar = None
# locals
self.lastUnused = 0
def __repr__(self):
string = self.name+":\n"
if self.sense == 1:
string += "MINIMIZE\n"
else:
string += "MAXIMIZE\n"
string += repr(self.objective) +"\n"
if self.constraints:
string += "SUBJECT TO\n"
for n, c in self.constraints.iteritems():
string += c.asCplexLpConstraint(n) +"\n"
string += "VARIABLES\n"
for v in self.variables():
string += v.asCplexLpVariable() + " " + LpCategories[v.cat] + "\n"
return string
def copy(self):
"""Make a copy of self. Expressions are copied by reference"""
lpcopy = LpProblem(name = self.name, sense = self.sense)
lpcopy.objective = self.objective
lpcopy.constraints = self.constraints.copy()
lpcopy.sos1 = self.sos1.copy()
lpcopy.sos2 = self.sos2.copy()
return lpcopy
def deepcopy(self):
"""Make a copy of self. Expressions are copied by value"""
lpcopy = LpProblem(name = self.name, sense = self.sense)
if self.objective is not None:
lpcopy.objective = self.objective.copy()
lpcopy.constraints = {}
for k,v in self.constraints.iteritems():
lpcopy.constraints[k] = v.copy()
lpcopy.sos1 = self.sos1.copy()
lpcopy.sos2 = self.sos2.copy()
return lpcopy
def normalisedNames(self):
constraintsNames = {}
i = 0
for k in self.constraints:
constraintsNames[k] = "C%07d" % i
i += 1
variablesNames = {}
i = 0
for k in self.variables():
variablesNames[k.name] = "X%07d" % i
i += 1
return constraintsNames, variablesNames, "OBJ"
def isMIP(self):
for v in self.variables():
if v.cat == LpInteger: return 1
return 0
def roundSolution(self, epsInt = 1e-5, eps = 1e-7):
"""
Rounds the lp variables
Inputs:
- none
Side Effects:
- The lp variables are rounded
"""
for v in self.variables():
v.round(epsInt, eps)
def unusedConstraintName(self):
self.lastUnused += 1
while 1:
s = "_C%d" % self.lastUnused
if s not in self.constraints: break
self.lastUnused += 1
return s
def valid(self, eps = 0):
for v in self.variables():
if not v.valid(eps): return False
for c in self.constraints.itervalues():
if not c.valid(eps): return False
else:
return True
def infeasibilityGap(self, mip = 1):
gap = 0
for v in self.variables():
gap = max(abs(v.infeasibilityGap(mip)), gap)
for c in self.constraints.itervalues():
if not c.valid(0):
gap = max(abs(c.value()), gap)
return gap
def addVariable(self, variable):
"""
Adds a variable to the problem before a constraint is added
@param variable: the variable to be added
"""
if id(variable) not in self._variable_ids:
self._variables.append(variable)
self._variable_ids[id(variable)] = variable
def addVariables(self, variables):
"""
Adds variables to the problem before a constraint is added
@param variables: the variables to be added
"""
for v in variables:
self.addVariable(v)
def variables(self):
"""
Returns a list of the problem variables
Inputs:
- none
Returns:
- A list of the problem variables
"""
if self.objective:
self.addVariables(self.objective.keys())
for c in self.constraints.itervalues():
self.addVariables(c.keys())
variables = self._variables
        #sort the variables using DSU (decorate-sort-undecorate)
variables = [[v.name, v] for v in variables]
variables.sort()
variables = [v for _, v in variables]
return variables
def variablesDict(self):
variables = {}
if self.objective:
for v in self.objective:
variables[v.name] = v
for c in self.constraints.values():
for v in c:
variables[v.name] = v
return variables
def add(self, constraint, name = None):
self.addConstraint(constraint, name)
def addConstraint(self, constraint, name = None):
if not isinstance(constraint, LpConstraint):
raise TypeError, "Can only add LpConstraint objects"
if name:
constraint.name = name
try:
if constraint.name:
name = constraint.name
else:
name = self.unusedConstraintName()
except AttributeError:
raise TypeError, "Can only add LpConstraint objects"
#removed as this test fails for empty constraints
# if len(constraint) == 0:
# if not constraint.valid():
# raise ValueError, "Cannot add false constraints"
if name in self.constraints:
if self.noOverlap:
raise PulpError, "overlapping constraint names: " + name
else:
print "Warning: overlapping constraint names:", name
self.constraints[name] = constraint
self.addVariables(constraint.keys())
def setObjective(self,obj):
"""
Sets the input variable as the objective function. Used in Columnwise Modelling
:param obj: the objective function of type :class:`LpConstraintVar`
Side Effects:
- The objective function is set
"""
if isinstance(obj, LpVariable):
# allows the user to add a LpVariable as an objective
obj = obj + 0.0
try:
obj = obj.constraint
name = obj.name
except AttributeError:
name = None
self.objective = obj
self.objective.name = name
self.resolveOK = False
def __iadd__(self, other):
if isinstance(other, tuple):
other, name = other
else:
name = None
if other is True:
return self
if isinstance(other, LpConstraintVar):
self.addConstraint(other.constraint)
elif isinstance(other, LpConstraint):
self.addConstraint(other, name)
elif isinstance(other, LpAffineExpression):
self.objective = other
self.objective.name = name
elif isinstance(other, LpVariable) or type(other) in [int, float]:
self.objective = LpAffineExpression(other)
self.objective.name = name
else:
raise TypeError, "Can only add LpConstraintVar, LpConstraint, LpAffineExpression or True objects"
return self
def extend(self, other, use_objective = True):
"""
        extends an LpProblem by adding constraints either from a dictionary,
        a tuple or another LpProblem object.
@param use_objective: determines whether the objective is imported from
the other problem
For dictionaries the constraints will be named with the keys
        For tuples a unique name will be generated
For LpProblems the name of the problem will be added to the constraints
name
"""
if isinstance(other, dict):
for name in other:
self.constraints[name] = other[name]
elif isinstance(other, LpProblem):
for v in set(other.variables()).difference(self.variables()):
v.name = other.name + v.name
for name,c in other.constraints.iteritems():
c.name = other.name + name
self.addConstraint(c)
if use_objective:
self.objective += other.objective
else:
for c in other:
if isinstance(c,tuple):
name = c[0]
c = c[1]
else:
name = None
if not name: name = c.name
if not name: name = self.unusedConstraintName()
self.constraints[name] = c
def coefficients(self, translation = None):
coefs = []
if translation == None:
for c in self.constraints:
cst = self.constraints[c]
coefs.extend([(v.name, c, cst[v]) for v in cst])
else:
for c in self.constraints:
ctr = translation[c]
cst = self.constraints[c]
coefs.extend([(translation[v.name], ctr, cst[v]) for v in cst])
return coefs
def writeMPS(self, filename, mpsSense = 0, rename = 0, mip = 1):
wasNone, dummyVar = self.fixObjective()
f = file(filename, "w")
if mpsSense == 0: mpsSense = self.sense
cobj = self.objective
if mpsSense != self.sense:
n = cobj.name
cobj = - cobj
cobj.name = n
if rename:
constraintsNames, variablesNames, cobj.name = self.normalisedNames()
f.write("*SENSE:"+LpSenses[mpsSense]+"\n")
n = self.name
if rename: n = "MODEL"
f.write("NAME "+n+"\n")
vs = self.variables()
# constraints
f.write("ROWS\n")
objName = cobj.name
if not objName: objName = "OBJ"
f.write(" N %s\n" % objName)
mpsConstraintType = {LpConstraintLE:"L", LpConstraintEQ:"E", LpConstraintGE:"G"}
for k,c in self.constraints.iteritems():
if rename: k = constraintsNames[k]
f.write(" "+mpsConstraintType[c.sense]+" "+k+"\n")
# matrix
f.write("COLUMNS\n")
        # Creation of a dict of dicts:
        # coefs[variableName][constraintName] = coefficient
coefs = {}
for k,c in self.constraints.iteritems():
if rename: k = constraintsNames[k]
for v in c:
n = v.name
if rename: n = variablesNames[n]
if n in coefs:
coefs[n][k] = c[v]
else:
coefs[n] = {k:c[v]}
for v in vs:
if mip and v.cat == LpInteger:
f.write(" MARK 'MARKER' 'INTORG'\n")
n = v.name
if rename: n = variablesNames[n]
if n in coefs:
cv = coefs[n]
# Most of the work is done here
for k in cv: f.write(" %-8s %-8s % .5e\n" % (n,k,cv[k]))
# objective function
if v in cobj: f.write(" %-8s %-8s % .5e\n" % (n,objName,cobj[v]))
if mip and v.cat == LpInteger:
f.write(" MARK 'MARKER' 'INTEND'\n")
# right hand side
f.write("RHS\n")
for k,c in self.constraints.iteritems():
c = -c.constant
if rename: k = constraintsNames[k]
if c == 0: c = 0
f.write(" RHS %-8s % .5e\n" % (k,c))
# bounds
f.write("BOUNDS\n")
for v in vs:
n = v.name
if rename: n = variablesNames[n]
if v.lowBound != None and v.lowBound == v.upBound:
f.write(" FX BND %-8s % .5e\n" % (n, v.lowBound))
elif v.lowBound == 0 and v.upBound == 1 and mip and v.cat == LpInteger:
f.write(" BV BND %-8s\n" % n)
else:
if v.lowBound != None:
# In MPS files, variables with no bounds (i.e. >= 0)
# are assumed BV by COIN and CPLEX.
# So we explicitly write a 0 lower bound in this case.
if v.lowBound != 0 or (mip and v.cat == LpInteger and v.upBound == None):
f.write(" LO BND %-8s % .5e\n" % (n, v.lowBound))
else:
if v.upBound != None:
f.write(" MI BND %-8s\n" % n)
else:
f.write(" FR BND %-8s\n" % n)
if v.upBound != None:
f.write(" UP BND %-8s % .5e\n" % (n, v.upBound))
f.write("ENDATA\n")
f.close()
self.restoreObjective(wasNone, dummyVar)
# returns the variables, in writing order
if rename == 0:
return vs
else:
return vs, variablesNames, constraintsNames, cobj.name
def writeLP(self, filename, writeSOS = 1, mip = 1):
"""
Write the given Lp problem to a .lp file.
This function writes the specifications (objective function,
constraints, variables) of the defined Lp problem to a file.
:param filename: the name of the file to be created.
Side Effects:
- The file is created.
"""
f = file(filename, "w")
f.write("\\* "+self.name+" *\\\n")
if self.sense == 1:
f.write("Minimize\n")
else:
f.write("Maximize\n")
wasNone, dummyVar = self.fixObjective()
objName = self.objective.name
if not objName: objName = "OBJ"
f.write(self.objective.asCplexLpAffineExpression(objName, constant = 0))
f.write("Subject To\n")
ks = self.constraints.keys()
ks.sort()
for k in ks:
constraint = self.constraints[k]
if not constraint.keys():
#empty constraint add the dummyVar
constraint += self.get_dummyVar()
f.write(constraint.asCplexLpConstraint(k))
vs = self.variables()
# check if any names are longer than 100 characters
long_names = [v.name for v in vs if len(v.name) > 100]
if long_names:
raise PulpError('Variable names too long for Lp format\n'
+ str(long_names))
# check for repeated names
repeated_names = {}
for v in vs:
repeated_names[v.name] = repeated_names.get(v.name, 0) + 1
repeated_names = [(key, value) for key, value in repeated_names.items()
if value >= 2]
if repeated_names:
raise PulpError('Repeated variable names in Lp format\n'
+ str(repeated_names))
# Bounds on non-"positive" variables
# Note: XPRESS and CPLEX do not interpret integer variables without
# explicit bounds
if mip:
vg = [v for v in vs if not (v.isPositive() and v.cat == LpContinuous) \
and not v.isBinary()]
else:
vg = [v for v in vs if not v.isPositive()]
if vg:
f.write("Bounds\n")
for v in vg:
f.write("%s\n" % v.asCplexLpVariable())
# Integer non-binary variables
if mip:
vg = [v for v in vs if v.cat == LpInteger and not v.isBinary()]
if vg:
f.write("Generals\n")
for v in vg: f.write("%s\n" % v.name)
# Binary variables
vg = [v for v in vs if v.isBinary()]
if vg:
f.write("Binaries\n")
for v in vg: f.write("%s\n" % v.name)
# Special Ordered Sets
if writeSOS and (self.sos1 or self.sos2):
f.write("SOS\n")
if self.sos1:
for sos in self.sos1.itervalues():
f.write("S1:: \n")
for v,val in sos.iteritems():
f.write(" %s: %.12g\n" % (v.name, val))
if self.sos2:
for sos in self.sos2.itervalues():
f.write("S2:: \n")
for v,val in sos.iteritems():
f.write(" %s: %.12g\n" % (v.name, val))
f.write("End\n")
f.close()
self.restoreObjective(wasNone, dummyVar)
def assignVarsVals(self, values):
variables = self.variablesDict()
for name in values:
if name != '__dummy':
variables[name].varValue = values[name]
def assignVarsDj(self,values):
variables = self.variablesDict()
for name in values:
if name != '__dummy':
variables[name].dj = values[name]
def assignConsPi(self, values):
for name in values:
self.constraints[name].pi = values[name]
def assignConsSlack(self, values, activity=False):
for name in values:
if activity:
                #reports the activity, not the slack
self.constraints[name].slack = -(self.constraints[name].constant + float(values[name]))
else:
self.constraints[name].slack = float(values[name])
def get_dummyVar(self):
if self.dummyVar is None:
self.dummyVar = LpVariable("__dummy", 0, 0)
return self.dummyVar
def fixObjective(self):
if self.objective is None:
self.objective = 0
wasNone = 1
else:
wasNone = 0
if not isinstance(self.objective, LpAffineExpression):
self.objective = LpAffineExpression(self.objective)
if self.objective.isNumericalConstant():
dummyVar = self.get_dummyVar()
self.objective += dummyVar
else:
dummyVar = None
return wasNone, dummyVar
def restoreObjective(self, wasNone, dummyVar):
if wasNone:
self.objective = None
elif not dummyVar is None:
self.objective -= dummyVar
def solve(self, solver = None, **kwargs):
"""
Solve the given Lp problem.
This function changes the problem to make it suitable for solving
then calls the solver.actualSolve() method to find the solution
:param solver: Optional: the specific solver to be used, defaults to the
default solver.
Side Effects:
- The attributes of the problem object are changed in
:meth:`~pulp.solver.LpSolver.actualSolve()` to reflect the Lp solution
"""
if not(solver): solver = self.solver
if not(solver): solver = LpSolverDefault
wasNone, dummyVar = self.fixObjective()
#time it
self.solutionTime = -clock()
status = solver.actualSolve(self, **kwargs)
self.solutionTime += clock()
self.restoreObjective(wasNone, dummyVar)
self.solver = solver
return status
def sequentialSolve(self, objectives, absoluteTols = None,
relativeTols = None, solver = None, debug = False):
"""
Solve the given Lp problem with several objective functions.
This function sequentially changes the objective of the problem
and then adds the objective function as a constraint
:param objectives: the list of objectives to be used to solve the problem
:param absoluteTols: the list of absolute tolerances to be applied to
            the constraints; should be +ve for a minimise objective
:param relativeTols: the list of relative tolerances applied to the constraints
:param solver: the specific solver to be used, defaults to the default solver.
"""
#TODO Add a penalty variable to make problems elastic
#TODO add the ability to accept different status values i.e. infeasible etc
if not(solver): solver = self.solver
if not(solver): solver = LpSolverDefault
if not(absoluteTols):
absoluteTols = [0] * len(objectives)
if not(relativeTols):
relativeTols = [1] * len(objectives)
#time it
self.solutionTime = -clock()
statuses = []
for i,(obj,absol,rel) in enumerate(zip(objectives, absoluteTols, relativeTols)):
self.setObjective(obj)
status = solver.actualSolve(self)
statuses.append(status)
if debug: self.writeLP("%sSequence.lp"%i)
if self.sense == LpMinimize:
self += obj <= value(obj)*rel + absol,"%s_Sequence_Objective"%i
elif self.sense == LpMaximize:
self += obj >= value(obj)*rel + absol,"%s_Sequence_Objective"%i
self.solutionTime += clock()
self.solver = solver
return statuses
def resolve(self, solver = None, **kwargs):
"""
resolves an Problem using the same solver as previously
"""
if not(solver): solver = self.solver
if self.resolveOK:
return self.solver.actualResolve(self, **kwargs)
else:
logging.warn('resolve not ok. solving instead')
return self.solve(solver=solver, **kwargs)
def setSolver(self,solver = LpSolverDefault):
"""Sets the Solver for this problem useful if you are using
resolve
"""
self.solver = solver
def setInitial(self,values):
self.initialValues = values
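# Illustrative sketch (assumed example): build a tiny problem row-wise and
# write it out in CPLEX LP format with writeLP(); the filename is arbitrary.
def _example_write_lp():
    prob = LpProblem("tiny", LpMinimize)
    wa = LpVariable("wa", 0, 4)
    wb = LpVariable("wb", 0, 4)
    prob += wa + 2 * wb                 # objective
    prob += wa + wb >= 3, "cover"       # named constraint
    prob.writeLP("tiny.lp")
    return prob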
class FixedElasticSubProblem(LpProblem):
"""
Contains the subproblem generated by converting a fixed constraint
:math:`\sum_{i}a_i x_i = b` into an elastic constraint.
:param constraint: The LpConstraint that the elastic constraint is based on
:param penalty: penalty applied for violation (+ve or -ve) of the constraints
:param proportionFreeBound:
the proportional bound (+ve and -ve) on
constraint violation that is free from penalty
:param proportionFreeBoundList: the proportional bound on \
constraint violation that is free from penalty, expressed as a list\
where [-ve, +ve]
"""
def __init__(self, constraint, penalty = None,
proportionFreeBound = None,
proportionFreeBoundList = None):
subProblemName = "%s_elastic_SubProblem" % constraint.name
LpProblem.__init__(self, subProblemName, LpMinimize)
self.objective = LpAffineExpression()
self.constraint = constraint
self.constant = constraint.constant
self.RHS = - constraint.constant
self.objective = LpAffineExpression()
self += constraint, "_Constraint"
#create and add these variables but disabled
self.freeVar = LpVariable("_free_bound",
upBound = 0, lowBound = 0)
self.upVar = LpVariable("_pos_penalty_var",
upBound = 0, lowBound = 0)
self.lowVar = LpVariable("_neg_penalty_var",
upBound = 0, lowBound = 0)
constraint.addInPlace(self.freeVar + self.lowVar + self.upVar)
if proportionFreeBound:
proportionFreeBoundList = [proportionFreeBound, proportionFreeBound]
if proportionFreeBoundList:
#add a costless variable
self.freeVar.upBound = abs(constraint.constant *
proportionFreeBoundList[0])
self.freeVar.lowBound = -abs(constraint.constant *
proportionFreeBoundList[1])
# Note the reversal of the upbound and lowbound due to the nature of the
# variable
if penalty is not None:
#activate these variables
self.upVar.upBound = None
self.lowVar.lowBound = None
self.objective = penalty*self.upVar - penalty*self.lowVar
def _findValue(self, attrib):
"""
safe way to get the value of a variable that may not exist
"""
var = getattr(self, attrib, 0)
if var:
if value(var) is not None:
return value(var)
else:
return 0.0
else:
return 0.0
def isViolated(self):
"""
returns true if the penalty variables are non-zero
"""
upVar = self._findValue("upVar")
lowVar = self._findValue("lowVar")
freeVar = self._findValue("freeVar")
result = abs(upVar + lowVar) >= EPS
if result:
logging.debug("isViolated %s, upVar %s, lowVar %s, freeVar %s result %s"%(
self.name, upVar, lowVar, freeVar, result))
logging.debug("isViolated value lhs %s constant %s"%(
self.findLHSValue(), self.RHS))
return result
def findDifferenceFromRHS(self):
"""
The amount the actual value varies from the RHS (sense: LHS - RHS)
"""
return self.findLHSValue() - self.RHS
def findLHSValue(self):
"""
        for elastic constraints, finds the LHS value of the constraint without
        the free variable and/or penalty variables; assumes the constant is on
        the RHS
"""
upVar = self._findValue("upVar")
lowVar = self._findValue("lowVar")
freeVar = self._findValue("freeVar")
return self.constraint.value() - self.constant - \
upVar - lowVar - freeVar
def deElasticize(self):
""" de-elasticize constraint """
self.upVar.upBound = 0
self.lowVar.lowBound = 0
def reElasticize(self):
"""
Make the Subproblem elastic again after deElasticize
"""
self.upVar.lowBound = 0
self.upVar.upBound = None
self.lowVar.upBound = 0
self.lowVar.lowBound = None
def alterName(self, name):
"""
Alters the name of anonymous parts of the problem
"""
self.name = "%s_elastic_SubProblem" % name
if hasattr(self, 'freeVar'):
self.freeVar.name = self.name + "_free_bound"
if hasattr(self, 'upVar'):
self.upVar.name = self.name + "_pos_penalty_var"
if hasattr(self, 'lowVar'):
self.lowVar.name = self.name + "_neg_penalty_var"
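# Illustrative sketch (assumed example): makeElasticSubProblem() turns a hard
# equality into the FixedElasticSubProblem defined above, with a penalty on
# violation and a small penalty-free band around the target.
def _example_elastic_constraint():
    ex = LpVariable("ex_x", 0, 10)
    ey = LpVariable("ex_y", 0, 10)
    hard = LpConstraint(ex + ey, LpConstraintEQ, name="ex_total", rhs=8)
    return hard.makeElasticSubProblem(penalty=100, proportionFreeBound=0.05)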
class FractionElasticSubProblem(FixedElasticSubProblem):
"""
Contains the subproblem generated by converting a Fraction constraint
numerator/(numerator+complement) = b
into an elastic constraint
:param name: The name of the elastic subproblem
:param penalty: penalty applied for violation (+ve or -ve) of the constraints
:param proportionFreeBound: the proportional bound (+ve and -ve) on
constraint violation that is free from penalty
:param proportionFreeBoundList: the proportional bound on
constraint violation that is free from penalty, expressed as a list
where [-ve, +ve]
"""
def __init__(self, name, numerator, RHS, sense,
complement = None,
denominator = None,
penalty = None,
proportionFreeBound = None,
proportionFreeBoundList = None):
subProblemName = "%s_elastic_SubProblem" % name
self.numerator = numerator
if denominator is None and complement is not None:
self.complement = complement
self.denominator = numerator + complement
elif denominator is not None and complement is None:
self.denominator = denominator
self.complement = denominator - numerator
else:
            raise PulpError, 'exactly one of denominator and complement must be specified'
self.RHS = RHS
self.lowTarget = self.upTarget = None
LpProblem.__init__(self, subProblemName, LpMinimize)
self.freeVar = LpVariable("_free_bound",
upBound = 0, lowBound = 0)
self.upVar = LpVariable("_pos_penalty_var",
upBound = 0, lowBound = 0)
self.lowVar = LpVariable("_neg_penalty_var",
upBound = 0, lowBound = 0)
if proportionFreeBound:
proportionFreeBoundList = [proportionFreeBound, proportionFreeBound]
if proportionFreeBoundList:
upProportionFreeBound, lowProportionFreeBound = \
proportionFreeBoundList
else:
upProportionFreeBound, lowProportionFreeBound = (0, 0)
#create an objective
self += LpAffineExpression()
#There are three cases if the constraint.sense is ==, <=, >=
if sense in [LpConstraintEQ, LpConstraintLE]:
            #create a constraint that sets the upper bound of the target
self.upTarget = RHS + upProportionFreeBound
self.upConstraint = LpFractionConstraint(self.numerator,
self.complement,
LpConstraintLE,
self.upTarget,
denominator = self.denominator)
if penalty is not None:
self.lowVar.lowBound = None
self.objective += -1* penalty * self.lowVar
self.upConstraint += self.lowVar
self += self.upConstraint, '_upper_constraint'
if sense in [LpConstraintEQ, LpConstraintGE]:
            #create a constraint that sets the lower bound of the target
self.lowTarget = RHS - lowProportionFreeBound
self.lowConstraint = LpFractionConstraint(self.numerator,
self.complement,
LpConstraintGE,
self.lowTarget,
denominator = self.denominator)
if penalty is not None:
self.upVar.upBound = None
self.objective += penalty * self.upVar
self.lowConstraint += self.upVar
self += self.lowConstraint, '_lower_constraint'
def findLHSValue(self):
"""
        for elastic constraints, finds the LHS value of the constraint without
        the free variable and/or penalty variables; assumes the constant is on
        the RHS
"""
# uses code from LpFractionConstraint
if abs(value(self.denominator))>= EPS:
return value(self.numerator)/value(self.denominator)
else:
if abs(value(self.numerator))<= EPS:
#zero divided by zero will return 1
return 1.0
else:
raise ZeroDivisionError
def isViolated(self):
"""
returns true if the penalty variables are non-zero
"""
if abs(value(self.denominator))>= EPS:
if self.lowTarget is not None:
if self.lowTarget > self.findLHSValue():
return True
if self.upTarget is not None:
if self.findLHSValue() > self.upTarget:
return True
        else:
            #if the denominator is zero the constraint is satisfied
            return False
        #within the targets: not violated
        return False
class LpVariableDict(dict):
    """An LP variable generator"""
    def __init__(self, name, data = {}, lowBound = None, upBound = None, cat = LpContinuous):
        self.name = name
        # keep the construction parameters so that __getitem__ can create
        # missing variables on demand
        self.lowBound = lowBound
        self.upBound = upBound
        self.cat = cat
        dict.__init__(self, data)
    def __getitem__(self, key):
        if key in self:
            return dict.__getitem__(self, key)
        else:
            self[key] = LpVariable(self.name % key, self.lowBound, self.upBound, self.cat)
            return self[key]
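# Illustrative sketch (assumed example): LpVariableDict creates variables
# lazily on first lookup, using the stored name pattern and bounds.
def _example_variable_dict():
    routes = LpVariableDict("route_%s", lowBound=0)
    v = routes["a"]          # creates LpVariable("route_a", lowBound=0)
    return routes, v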
# Utility functions
def lpSum(vector):
"""
Calculate the sum of a list of linear expressions
:param vector: A list of linear expressions
"""
return LpAffineExpression().addInPlace(vector)
def lpDot(v1, v2):
"""Calculate the dot product of two lists of linear expressions"""
if not isiterable(v1) and not isiterable(v2):
return v1 * v2
elif not isiterable(v1):
return lpDot([v1]*len(v2),v2)
elif not isiterable(v2):
return lpDot(v1,[v2]*len(v1))
else:
return lpSum([lpDot(e1,e2) for e1,e2 in zip(v1,v2)])
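# Illustrative sketch (assumed example): lpSum() adds a list of expressions,
# lpDot() pairs coefficients with variables element by element.
def _example_lpSum_lpDot():
    xs = [LpVariable("s_%d" % i, 0, 1) for i in range(3)]
    costs = [2, 3, 5]
    total = lpSum(xs)               # s_0 + s_1 + s_2
    weighted = lpDot(costs, xs)     # 2*s_0 + 3*s_1 + 5*s_2
    return total, weighted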
def isNumber(x):
"""Returns true if x is an int of a float"""
return type(x) in [int, float]
def value(x):
"""Returns the value of the variable/expression x, or x if it is a number"""
if isNumber(x): return x
else: return x.value()
def valueOrDefault(x):
"""Returns the value of the variable/expression x, or x if it is a number
    Variables without a value (None) are assigned a possible value (within
    their bounds)."""
if isNumber(x): return x
else: return x.valueOrDefault()
def combination(orgset, k = None):
"""
returns an iterator that lists the combinations of orgset of
length k
:param orgset: the list to be iterated
:param k: the cardinality of the subsets
:return: an iterator of the subsets
example:
>>> c = combination([1,2,3,4],2)
>>> for s in c:
... print s
(1, 2)
(1, 3)
(1, 4)
(2, 3)
(2, 4)
(3, 4)
"""
try:
import probstat
return probstat.Combination(orgset,k)
except(ImportError):
return __combination(orgset,k)
def __combination(orgset,k):
"""
    fall-back used if probstat is not installed; note that probstat is GPL
    so it cannot be included here
"""
if k == 1:
for i in orgset:
yield (i,)
elif k>1:
for i,x in enumerate(orgset):
            #iterates through to near the end
for s in __combination(orgset[i+1:],k-1):
yield (x,) + s
def permutation(orgset, k = None):
"""
returns an iterator that lists the permutations of orgset of
length k
:param orgset: the list to be iterated
:param k: the cardinality of the subsets
:return: an iterator of the subsets
example:
>>> c = permutation([1,2,3,4],2)
>>> for s in c:
... print s
(1, 2)
(1, 3)
(1, 4)
(2, 1)
(2, 3)
(2, 4)
(3, 1)
(3, 2)
(3, 4)
(4, 1)
(4, 2)
(4, 3)
"""
try:
import probstat
return probstat.Permutation(orgset, k)
except(ImportError):
return __permutation(orgset, k)
def __permutation(orgset, k):
"""
    fall-back used if probstat is not installed; note that probstat is GPL
    so it cannot be included here
"""
if k == 1:
for i in orgset:
yield (i,)
elif k>1:
for i,x in enumerate(orgset):
            #iterates through to near the end
for s in __permutation(orgset[:i] + orgset[i+1:],k-1):
yield (x,)+ s
def allpermutations(orgset,k):
"""
returns all permutations of orgset with up to k items
:param orgset: the list to be iterated
:param k: the maxcardinality of the subsets
:return: an iterator of the subsets
example:
>>> c = allpermutations([1,2,3,4],2)
>>> for s in c:
... print s
(1,)
(2,)
(3,)
(4,)
(1, 2)
(1, 3)
(1, 4)
(2, 1)
(2, 3)
(2, 4)
(3, 1)
(3, 2)
(3, 4)
(4, 1)
(4, 2)
(4, 3)
"""
return itertools.chain(*[permutation(orgset,i) for i in range(1,k+1)])
def allcombinations(orgset,k):
"""
    returns all combinations of orgset with up to k items
:param orgset: the list to be iterated
:param k: the maxcardinality of the subsets
:return: an iterator of the subsets
example:
>>> c = allcombinations([1,2,3,4],2)
>>> for s in c:
... print s
(1,)
(2,)
(3,)
(4,)
(1, 2)
(1, 3)
(1, 4)
(2, 3)
(2, 4)
(3, 4)
"""
return itertools.chain(*[combination(orgset,i) for i in range(1,k+1)])
def makeDict(headers, array, default = None):
"""
    makes a nested list into a nested dictionary keyed by the given headers
    headers is a list of header lists
    array is a nested list holding the data
"""
result, defdict = __makeDict(headers, array, default)
return result
def __makeDict(headers, array, default = None):
#this is a recursive function so end the recursion as follows
result ={}
returndefaultvalue = None
if len(headers) == 1:
result.update(dict(zip(headers[0],array)))
defaultvalue = default
else:
for i,h in enumerate(headers[0]):
result[h],defaultvalue = __makeDict(headers[1:],array[i],default)
if default != None:
f = lambda :defaultvalue
defresult = collections.defaultdict(f)
defresult.update(result)
result = defresult
returndefaultvalue = collections.defaultdict(f)
return result, returndefaultvalue
def splitDict(Data):
"""
Split a dictionary with lists as the data, into smaller dictionaries
:param Data: A dictionary with lists as the values
:return: A tuple of dictionaries each containing the data separately,
with the same dictionary keys
"""
# find the maximum number of items in the dictionary
maxitems = max([len(values) for values in Data.values()])
output =[dict() for i in range(maxitems)]
for key, values in Data.items():
for i, val in enumerate(values):
output[i][key] = val
return tuple(output)
def read_table(data, coerce_type, transpose=False):
'''
Reads in data from a simple table and forces it to be a particular type
    This is a helper function that allows data to be easily contained in a
    simple script
    ::return: a dictionary with keys being a tuple of the strings
    in the first row and column of the table
::param data: the multiline string containing the table data
::param coerce_type: the type that the table data is converted to
::param transpose: reverses the data if needed
Example:
>>> table_data = """
... L1 L2 L3 L4 L5 L6
... C1 6736 42658 70414 45170 184679 111569
... C2 217266 227190 249640 203029 153531 117487
... C3 35936 28768 126316 2498 130317 74034
... C4 73446 52077 108368 75011 49827 62850
... C5 174664 177461 151589 153300 59916 135162
... C6 186302 189099 147026 164938 149836 286307
... """
>>> table = read_table(table_data, int)
>>> table[("C1","L1")]
6736
>>> table[("C6","L5")]
149836
'''
lines = data.splitlines()
headings = lines[1].split()
result = {}
for row in lines[2:]:
items = row.split()
for i, item in enumerate(items[1:]):
if transpose:
key = (headings[i], items[0])
else:
key = (items[0], headings[i])
result[key] = coerce_type(item)
return result
def configSolvers():
"""
    Configure the path to the solvers on the command line
Designed to configure the file locations of the solvers from the
command line after installation
"""
configlist = [(cplex_dll_path,"cplexpath","CPLEX: "),
(coinMP_path, "coinmppath","CoinMP dll (windows only): ")]
print ("Please type the full path including filename and extension \n" +
"for each solver available")
configdict = {}
for (default, key, msg) in configlist:
value = raw_input(msg + "[" + str(default) +"]")
if value:
configdict[key] = value
setConfigInformation(**configdict)
def pulpTestAll():
from tests import pulpTestSolver
solvers = [PULP_CBC_CMD,
CPLEX_DLL,
CPLEX_CMD,
CPLEX_PY,
COIN_CMD,
COINMP_DLL,
GLPK_CMD,
XPRESS,
GUROBI,
GUROBI_CMD,
PYGLPK,
YAPOSIB
]
for s in solvers:
if s().available():
#~ try:
pulpTestSolver(s)
print "* Solver", s, "passed."
#~ except Exception, e:
#~ print e
#~ print "* Solver", s, "failed."
else:
print "Solver", s, "unavailable."
def pulpDoctest():
"""
runs all doctests
"""
import doctest
if __name__ != '__main__':
import pulp
doctest.testmod(pulp)
else:
doctest.testmod()
if __name__ == '__main__':
# Tests
pulpTestAll()
pulpDoctest()
| false
| true
|
79076564e8ffe360136a266074ef0aa351938b76
| 3,361
|
py
|
Python
|
lnbits/extensions/lnurlp/lnurl.py
|
stepansnigirev/lnbits
|
82731dc901780b959d6ebecc4f61be137c8d2884
|
[
"MIT"
] | null | null | null |
lnbits/extensions/lnurlp/lnurl.py
|
stepansnigirev/lnbits
|
82731dc901780b959d6ebecc4f61be137c8d2884
|
[
"MIT"
] | null | null | null |
lnbits/extensions/lnurlp/lnurl.py
|
stepansnigirev/lnbits
|
82731dc901780b959d6ebecc4f61be137c8d2884
|
[
"MIT"
] | null | null | null |
import hashlib
import math
from http import HTTPStatus
from quart import jsonify, url_for, request
from lnurl import LnurlPayResponse, LnurlPayActionResponse, LnurlErrorResponse # type: ignore
from lnbits.core.services import create_invoice
from lnbits.utils.exchange_rates import get_fiat_rate_satoshis
from . import lnurlp_ext
from .crud import increment_pay_link
@lnurlp_ext.route("/api/v1/lnurl/<link_id>", methods=["GET"])
async def api_lnurl_response(link_id):
link = await increment_pay_link(link_id, served_meta=1)
if not link:
return (
jsonify({"status": "ERROR", "reason": "LNURL-pay not found."}),
HTTPStatus.OK,
)
rate = await get_fiat_rate_satoshis(link.currency) if link.currency else 1
resp = LnurlPayResponse(
callback=url_for("lnurlp.api_lnurl_callback", link_id=link.id, _external=True),
min_sendable=math.ceil(link.min * rate) * 1000,
max_sendable=round(link.max * rate) * 1000,
metadata=link.lnurlpay_metadata,
)
params = resp.dict()
if link.comment_chars > 0:
params["commentAllowed"] = link.comment_chars
return jsonify(params), HTTPStatus.OK
@lnurlp_ext.route("/api/v1/lnurl/cb/<link_id>", methods=["GET"])
async def api_lnurl_callback(link_id):
link = await increment_pay_link(link_id, served_pr=1)
if not link:
return (
jsonify({"status": "ERROR", "reason": "LNURL-pay not found."}),
HTTPStatus.OK,
)
min, max = link.min, link.max
rate = await get_fiat_rate_satoshis(link.currency) if link.currency else 1
if link.currency:
# allow some fluctuation (as the fiat price may have changed between the calls)
min = rate * 995 * link.min
max = rate * 1010 * link.max
else:
min = link.min * 1000
max = link.max * 1000
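    # Worked example of the conversion above (illustrative numbers, and assuming
    # get_fiat_rate_satoshis returns satoshis per fiat unit, as its usage here
    # implies): with link.min = 1 and rate = 2500 sat/unit the exact minimum
    # would be 2500 * 1000 * 1 = 2_500_000 msat; using 995 instead of 1000
    # yields 2_487_500 msat (0.5% below), and 1010 for the maximum gives 1%
    # above, absorbing rate movement between the two LNURL calls.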
amount_received = int(request.args.get("amount"))
if amount_received < min:
return (
jsonify(
LnurlErrorResponse(
reason=f"Amount {amount_received} is smaller than minimum {min}."
).dict()
),
HTTPStatus.OK,
)
elif amount_received > max:
return (
jsonify(
LnurlErrorResponse(
reason=f"Amount {amount_received} is greater than maximum {max}."
).dict()
),
HTTPStatus.OK,
)
comment = request.args.get("comment")
if len(comment or "") > link.comment_chars:
return (
jsonify(
LnurlErrorResponse(
reason=f"Got a comment with {len(comment)} characters, but can only accept {link.comment_chars}"
).dict()
),
HTTPStatus.OK,
)
payment_hash, payment_request = await create_invoice(
wallet_id=link.wallet,
amount=int(amount_received / 1000),
memo=link.description,
description_hash=hashlib.sha256(
link.lnurlpay_metadata.encode("utf-8")
).digest(),
extra={"tag": "lnurlp", "link": link.id, "comment": comment},
)
resp = LnurlPayActionResponse(
pr=payment_request,
success_action=link.success_action(payment_hash),
routes=[],
)
return jsonify(resp.dict()), HTTPStatus.OK
| 32.009524
| 116
| 0.61321
|
import hashlib
import math
from http import HTTPStatus
from quart import jsonify, url_for, request
from lnurl import LnurlPayResponse, LnurlPayActionResponse, LnurlErrorResponse
from lnbits.core.services import create_invoice
from lnbits.utils.exchange_rates import get_fiat_rate_satoshis
from . import lnurlp_ext
from .crud import increment_pay_link
@lnurlp_ext.route("/api/v1/lnurl/<link_id>", methods=["GET"])
async def api_lnurl_response(link_id):
link = await increment_pay_link(link_id, served_meta=1)
if not link:
return (
jsonify({"status": "ERROR", "reason": "LNURL-pay not found."}),
HTTPStatus.OK,
)
rate = await get_fiat_rate_satoshis(link.currency) if link.currency else 1
resp = LnurlPayResponse(
callback=url_for("lnurlp.api_lnurl_callback", link_id=link.id, _external=True),
min_sendable=math.ceil(link.min * rate) * 1000,
max_sendable=round(link.max * rate) * 1000,
metadata=link.lnurlpay_metadata,
)
params = resp.dict()
if link.comment_chars > 0:
params["commentAllowed"] = link.comment_chars
return jsonify(params), HTTPStatus.OK
@lnurlp_ext.route("/api/v1/lnurl/cb/<link_id>", methods=["GET"])
async def api_lnurl_callback(link_id):
link = await increment_pay_link(link_id, served_pr=1)
if not link:
return (
jsonify({"status": "ERROR", "reason": "LNURL-pay not found."}),
HTTPStatus.OK,
)
min, max = link.min, link.max
rate = await get_fiat_rate_satoshis(link.currency) if link.currency else 1
if link.currency:
min = rate * 995 * link.min
max = rate * 1010 * link.max
else:
min = link.min * 1000
max = link.max * 1000
amount_received = int(request.args.get("amount"))
if amount_received < min:
return (
jsonify(
LnurlErrorResponse(
reason=f"Amount {amount_received} is smaller than minimum {min}."
).dict()
),
HTTPStatus.OK,
)
elif amount_received > max:
return (
jsonify(
LnurlErrorResponse(
reason=f"Amount {amount_received} is greater than maximum {max}."
).dict()
),
HTTPStatus.OK,
)
comment = request.args.get("comment")
if len(comment or "") > link.comment_chars:
return (
jsonify(
LnurlErrorResponse(
reason=f"Got a comment with {len(comment)} characters, but can only accept {link.comment_chars}"
).dict()
),
HTTPStatus.OK,
)
payment_hash, payment_request = await create_invoice(
wallet_id=link.wallet,
amount=int(amount_received / 1000),
memo=link.description,
description_hash=hashlib.sha256(
link.lnurlpay_metadata.encode("utf-8")
).digest(),
extra={"tag": "lnurlp", "link": link.id, "comment": comment},
)
resp = LnurlPayActionResponse(
pr=payment_request,
success_action=link.success_action(payment_hash),
routes=[],
)
return jsonify(resp.dict()), HTTPStatus.OK
| true
| true
|
790765f7d394af7b8ea9033521908c3ce8929ca0
| 2,852
|
py
|
Python
|
defoe/alto/queries/keyword_concordance_by_word.py
|
kallewesterling/defoe
|
d72af2f748fd4363a4718c93bb0b0284b8cb1f3e
|
[
"MIT"
] | 2
|
2022-02-14T12:10:54.000Z
|
2022-02-14T12:35:44.000Z
|
defoe/alto/queries/keyword_concordance_by_word.py
|
kallewesterling/defoe
|
d72af2f748fd4363a4718c93bb0b0284b8cb1f3e
|
[
"MIT"
] | 17
|
2022-02-09T21:46:14.000Z
|
2022-02-25T14:55:09.000Z
|
defoe/alto/queries/keyword_concordance_by_word.py
|
kallewesterling/defoe
|
d72af2f748fd4363a4718c93bb0b0284b8cb1f3e
|
[
"MIT"
] | 1
|
2022-02-14T13:19:08.000Z
|
2022-02-14T13:19:08.000Z
|
"""
Gets concordance for keywords and groups by word.
"""
from defoe import query_utils
from defoe.alto.query_utils import get_page_matches
def do_query(archives, config_file=None, logger=None, context=None):
"""
Gets concordance for keywords and groups by word.
config_file must be the path to a configuration file with a list
of the keywords to search for, one per line.
Both keywords and words in documents are normalized, by removing
all non-'a-z|A-Z' characters.
Returns result of form:
{
<WORD>:
[
{
"title": <TITLE>,
"place": <PLACE>,
"publisher": <PUBLISHER>,
"page_number": <PAGE_NUMBER>,
"content": <PAGE_CONTENT>,
"year": <YEAR>,
"document_id": <DOCUMENT_ID>,
"filename": <FILENAME>
},
...
],
<WORD>:
...
}
:param archives: RDD of defoe.alto.archive.Archive
:type archives: pyspark.rdd.PipelinedRDD
:param config_file: query configuration file
:type config_file: str or unicode
:param logger: logger (unused)
:type logger: py4j.java_gateway.JavaObject
:return: information on documents in which keywords occur grouped
by word
:rtype: dict
"""
keywords = query_utils.get_normalized_keywords(config_file)
# [document, ...]
documents = archives.flatMap(
lambda archive: [document for document in list(archive)]
)
# [(year, document, page, word), ...]
filtered_words = documents.flatMap(
lambda document: get_page_matches(document, keywords)
)
# [(year, document, page, word), ...]
# =>
# [(word, {"title": title, ...}), ...]
matching_docs = filtered_words.map(
lambda year_document_page_word: (
year_document_page_word[3],
{
"title": year_document_page_word[1].title,
"place": year_document_page_word[1].place,
"publisher": year_document_page_word[1].publisher,
"page_number": year_document_page_word[2].code,
"content": year_document_page_word[2].content,
"year": year_document_page_word[0],
"document_id": year_document_page_word[1].code,
"filename": year_document_page_word[1].archive.filename,
},
)
)
# [(word, {"title": title, ...}), ...]
# =>
# [(word, [{"title": title, ...], {...}), ...)]
result = (
matching_docs.groupByKey()
.map(lambda year_context: (year_context[0], list(year_context[1])))
.collect()
)
return result
| 31
| 75
| 0.547686
|
from defoe import query_utils
from defoe.alto.query_utils import get_page_matches
def do_query(archives, config_file=None, logger=None, context=None):
keywords = query_utils.get_normalized_keywords(config_file)
documents = archives.flatMap(
lambda archive: [document for document in list(archive)]
)
filtered_words = documents.flatMap(
lambda document: get_page_matches(document, keywords)
)
matching_docs = filtered_words.map(
lambda year_document_page_word: (
year_document_page_word[3],
{
"title": year_document_page_word[1].title,
"place": year_document_page_word[1].place,
"publisher": year_document_page_word[1].publisher,
"page_number": year_document_page_word[2].code,
"content": year_document_page_word[2].content,
"year": year_document_page_word[0],
"document_id": year_document_page_word[1].code,
"filename": year_document_page_word[1].archive.filename,
},
)
)
result = (
matching_docs.groupByKey()
.map(lambda year_context: (year_context[0], list(year_context[1])))
.collect()
)
return result
| true
| true
|
790766a08780825704c1782b1118000d8a145bac
| 2,668
|
py
|
Python
|
chocs/http_request.py
|
gezpage/chocs
|
cf64a792989e3f23dc7f400045898761511a229a
|
[
"MIT"
] | null | null | null |
chocs/http_request.py
|
gezpage/chocs
|
cf64a792989e3f23dc7f400045898761511a229a
|
[
"MIT"
] | null | null | null |
chocs/http_request.py
|
gezpage/chocs
|
cf64a792989e3f23dc7f400045898761511a229a
|
[
"MIT"
] | null | null | null |
from cgi import parse_header
from io import BytesIO
from typing import Dict
from typing import Optional
from typing import Tuple
from typing import Union
from .headers import Headers
from .http_method import HttpMethod
from .message.body import RequestBody
from .message.form_body import FormBody
from .message.json_body import JsonBody
from .message.multipart_body import MultipartBody
from .query_string import QueryString
class HttpRequest:
def __init__(
self,
method: HttpMethod,
uri: str = "/",
body: Optional[BytesIO] = None,
query_string: Optional[QueryString] = None,
headers: Optional[Headers] = None,
):
self.headers = headers if headers else Headers()
self.body = body if body else BytesIO(b"")
self.method = method
self.uri = uri
self.query_string = query_string
self._parsed_body: Union[RequestBody, str] = ""
self.attributes: Dict[str, str] = {}
@property
def parsed_body(self) -> Union[RequestBody, str]:
if self._parsed_body:
return self._parsed_body
content_type: Tuple[str, Dict[str, str]] = parse_header(
self.headers.get("Content-Type") # type: ignore
)
if content_type[0] == "multipart/form-data":
body: Union[RequestBody, str] = MultipartBody.from_wsgi(
self.body,
content_type[1].get("charset", ""),
content_type[1].get("boundary", ""),
)
elif content_type[0] == "application/x-www-form-urlencoded":
body = FormBody.from_wsgi(self.body, content_type[1].get("charset", ""))
elif content_type[0] == "application/json":
body = JsonBody.from_wsgi(self.body, content_type[1].get("charset", ""))
else:
self.body.seek(0)
body = self.body.read().decode(content_type[1].get("charset", ""))
self._parsed_body = body
return self._parsed_body
@classmethod
def from_wsgi(cls, environ: dict) -> "HttpRequest":
headers = Headers()
for key, value in environ.items():
if not key.startswith("HTTP"):
continue
headers.set(key, value)
headers.set("Content-Type", environ.get("CONTENT_TYPE", "text/plain"))
return cls(
method=HttpMethod(environ.get("REQUEST_METHOD", "GET").upper()),
uri=environ.get("PATH_INFO", "/"),
body=environ.get("wsgi.input", BytesIO(b"")),
query_string=QueryString(environ.get("QUERY_STRING", "")),
headers=headers,
)
__all__ = ["HttpRequest"]
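# A hedged usage sketch, not part of the original module: building a request
# from a minimal WSGI environ dict. The environ values below are invented for
# illustration only.
# >>> from io import BytesIO
# >>> environ = {
# ...     "REQUEST_METHOD": "POST",
# ...     "PATH_INFO": "/users",
# ...     "QUERY_STRING": "page=1",
# ...     "CONTENT_TYPE": "application/json",
# ...     "wsgi.input": BytesIO(b'{"name": "alice"}'),
# ... }
# >>> request = HttpRequest.from_wsgi(environ)
# >>> request.uri
# '/users'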
| 33.35
| 84
| 0.612444
|
from cgi import parse_header
from io import BytesIO
from typing import Dict
from typing import Optional
from typing import Tuple
from typing import Union
from .headers import Headers
from .http_method import HttpMethod
from .message.body import RequestBody
from .message.form_body import FormBody
from .message.json_body import JsonBody
from .message.multipart_body import MultipartBody
from .query_string import QueryString
class HttpRequest:
def __init__(
self,
method: HttpMethod,
uri: str = "/",
body: Optional[BytesIO] = None,
query_string: Optional[QueryString] = None,
headers: Optional[Headers] = None,
):
self.headers = headers if headers else Headers()
self.body = body if body else BytesIO(b"")
self.method = method
self.uri = uri
self.query_string = query_string
self._parsed_body: Union[RequestBody, str] = ""
self.attributes: Dict[str, str] = {}
@property
def parsed_body(self) -> Union[RequestBody, str]:
if self._parsed_body:
return self._parsed_body
content_type: Tuple[str, Dict[str, str]] = parse_header(
self.headers.get("Content-Type")
)
if content_type[0] == "multipart/form-data":
body: Union[RequestBody, str] = MultipartBody.from_wsgi(
self.body,
content_type[1].get("charset", ""),
content_type[1].get("boundary", ""),
)
elif content_type[0] == "application/x-www-form-urlencoded":
body = FormBody.from_wsgi(self.body, content_type[1].get("charset", ""))
elif content_type[0] == "application/json":
body = JsonBody.from_wsgi(self.body, content_type[1].get("charset", ""))
else:
self.body.seek(0)
body = self.body.read().decode(content_type[1].get("charset", ""))
self._parsed_body = body
return self._parsed_body
@classmethod
def from_wsgi(cls, environ: dict) -> "HttpRequest":
headers = Headers()
for key, value in environ.items():
if not key.startswith("HTTP"):
continue
headers.set(key, value)
headers.set("Content-Type", environ.get("CONTENT_TYPE", "text/plain"))
return cls(
method=HttpMethod(environ.get("REQUEST_METHOD", "GET").upper()),
uri=environ.get("PATH_INFO", "/"),
body=environ.get("wsgi.input", BytesIO(b"")),
query_string=QueryString(environ.get("QUERY_STRING", "")),
headers=headers,
)
__all__ = ["HttpRequest"]
| true
| true
|
79076703837acc7a26fa35bcc457666440e95048
| 5,389
|
py
|
Python
|
selfdrive/locationd/models/car_kf.py
|
qiubit/openpilot
|
013e49bf907539d119fbebcf02f4ce3749849065
|
[
"MIT"
] | null | null | null |
selfdrive/locationd/models/car_kf.py
|
qiubit/openpilot
|
013e49bf907539d119fbebcf02f4ce3749849065
|
[
"MIT"
] | null | null | null |
selfdrive/locationd/models/car_kf.py
|
qiubit/openpilot
|
013e49bf907539d119fbebcf02f4ce3749849065
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
import math
import numpy as np
import sympy as sp
from selfdrive.locationd.models.constants import ObservationKind
from rednose.helpers.ekf_sym import EKF_sym, gen_code
i = 0
def _slice(n):
global i
s = slice(i, i + n)
i += n
return s
class States():
# Vehicle model params
STIFFNESS = _slice(1) # [-]
STEER_RATIO = _slice(1) # [-]
ANGLE_OFFSET = _slice(1) # [rad]
ANGLE_OFFSET_FAST = _slice(1) # [rad]
VELOCITY = _slice(2) # (x, y) [m/s]
YAW_RATE = _slice(1) # [rad/s]
STEER_ANGLE = _slice(1) # [rad]
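# With the _slice helper above, the state vector is laid out as:
#   STIFFNESS -> [0], STEER_RATIO -> [1], ANGLE_OFFSET -> [2],
#   ANGLE_OFFSET_FAST -> [3], VELOCITY -> [4:6], YAW_RATE -> [6],
#   STEER_ANGLE -> [7]
# i.e. 8 states in total, matching the length of CarKalman.x_initial below.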
class CarKalman():
name = 'car'
x_initial = np.array([
1.0,
15.0,
0.0,
0.0,
10.0, 0.0,
0.0,
0.0,
])
# process noise
Q = np.diag([
(.05/100)**2,
.01**2,
math.radians(0.002)**2,
math.radians(0.1)**2,
.1**2, .01**2,
math.radians(0.1)**2,
math.radians(0.1)**2,
])
P_initial = Q.copy()
obs_noise = {
ObservationKind.STEER_ANGLE: np.atleast_2d(math.radians(0.01)**2),
ObservationKind.ANGLE_OFFSET_FAST: np.atleast_2d(math.radians(5.0)**2),
ObservationKind.STEER_RATIO: np.atleast_2d(5.0**2),
ObservationKind.STIFFNESS: np.atleast_2d(5.0**2),
ObservationKind.ROAD_FRAME_X_SPEED: np.atleast_2d(0.1**2),
}
maha_test_kinds = [] # [ObservationKind.ROAD_FRAME_YAW_RATE, ObservationKind.ROAD_FRAME_XY_SPEED]
global_vars = [
sp.Symbol('mass'),
sp.Symbol('rotational_inertia'),
sp.Symbol('center_to_front'),
sp.Symbol('center_to_rear'),
sp.Symbol('stiffness_front'),
sp.Symbol('stiffness_rear'),
]
@staticmethod
def generate_code(generated_dir):
dim_state = CarKalman.x_initial.shape[0]
name = CarKalman.name
maha_test_kinds = CarKalman.maha_test_kinds
# globals
m, j, aF, aR, cF_orig, cR_orig = CarKalman.global_vars
# make functions and jacobians with sympy
# state variables
state_sym = sp.MatrixSymbol('state', dim_state, 1)
state = sp.Matrix(state_sym)
# Vehicle model constants
x = state[States.STIFFNESS, :][0, 0]
cF, cR = x * cF_orig, x * cR_orig
angle_offset = state[States.ANGLE_OFFSET, :][0, 0]
angle_offset_fast = state[States.ANGLE_OFFSET_FAST, :][0, 0]
sa = state[States.STEER_ANGLE, :][0, 0]
sR = state[States.STEER_RATIO, :][0, 0]
u, v = state[States.VELOCITY, :]
r = state[States.YAW_RATE, :][0, 0]
A = sp.Matrix(np.zeros((2, 2)))
A[0, 0] = -(cF + cR) / (m * u)
A[0, 1] = -(cF * aF - cR * aR) / (m * u) - u
A[1, 0] = -(cF * aF - cR * aR) / (j * u)
A[1, 1] = -(cF * aF**2 + cR * aR**2) / (j * u)
B = sp.Matrix(np.zeros((2, 1)))
B[0, 0] = cF / m / sR
B[1, 0] = (cF * aF) / j / sR
x = sp.Matrix([v, r]) # lateral velocity, yaw rate
x_dot = A * x + B * (sa - angle_offset - angle_offset_fast)
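        # The two lines above are the standard linear single-track ("bicycle")
        # model: x = [v, r] holds lateral velocity and yaw rate, A carries the
        # tyre-stiffness and inertia terms, and the input is the steering-wheel
        # angle minus both offsets (B already divides by the steer ratio sR to
        # convert it to a road-wheel angle).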
dt = sp.Symbol('dt')
state_dot = sp.Matrix(np.zeros((dim_state, 1)))
state_dot[States.VELOCITY.start + 1, 0] = x_dot[0]
state_dot[States.YAW_RATE.start, 0] = x_dot[1]
        # Basic discretization, 1st order integrator
# Can be pretty bad if dt is big
f_sym = state + dt * state_dot
#
# Observation functions
#
obs_eqs = [
[sp.Matrix([r]), ObservationKind.ROAD_FRAME_YAW_RATE, None],
[sp.Matrix([u, v]), ObservationKind.ROAD_FRAME_XY_SPEED, None],
[sp.Matrix([u]), ObservationKind.ROAD_FRAME_X_SPEED, None],
[sp.Matrix([sa]), ObservationKind.STEER_ANGLE, None],
[sp.Matrix([angle_offset_fast]), ObservationKind.ANGLE_OFFSET_FAST, None],
[sp.Matrix([sR]), ObservationKind.STEER_RATIO, None],
[sp.Matrix([x]), ObservationKind.STIFFNESS, None],
]
gen_code(generated_dir, name, f_sym, dt, state_sym, obs_eqs, dim_state, dim_state, maha_test_kinds=maha_test_kinds, global_vars=CarKalman.global_vars)
def __init__(self, generated_dir, steer_ratio=15, stiffness_factor=1, angle_offset=0):
self.dim_state = self.x_initial.shape[0]
x_init = self.x_initial
x_init[States.STEER_RATIO] = steer_ratio
x_init[States.STIFFNESS] = stiffness_factor
x_init[States.ANGLE_OFFSET] = angle_offset
# init filter
self.filter = EKF_sym(generated_dir, self.name, self.Q, self.x_initial, self.P_initial, self.dim_state, self.dim_state, maha_test_kinds=self.maha_test_kinds, global_vars=self.global_vars)
@property
def x(self):
return self.filter.state()
@property
def P(self):
return self.filter.covs()
def predict(self, t):
return self.filter.predict(t)
def rts_smooth(self, estimates):
return self.filter.rts_smooth(estimates, norm_quats=False)
def get_R(self, kind, n):
obs_noise = self.obs_noise[kind]
dim = obs_noise.shape[0]
R = np.zeros((n, dim, dim))
for i in range(n):
R[i, :, :] = obs_noise
return R
def init_state(self, state, covs_diag=None, covs=None, filter_time=None):
if covs_diag is not None:
P = np.diag(covs_diag)
elif covs is not None:
P = covs
else:
P = self.filter.covs()
self.filter.init_state(state, P, filter_time)
def predict_and_observe(self, t, kind, data, R=None):
if len(data) > 0:
data = np.atleast_2d(data)
if R is None:
R = self.get_R(kind, len(data))
self.filter.predict_and_update_batch(t, kind, data, R)
if __name__ == "__main__":
generated_dir = sys.argv[2]
CarKalman.generate_code(generated_dir)
| 27.494898
| 191
| 0.642234
|
import sys
import math
import numpy as np
import sympy as sp
from selfdrive.locationd.models.constants import ObservationKind
from rednose.helpers.ekf_sym import EKF_sym, gen_code
i = 0
def _slice(n):
global i
s = slice(i, i + n)
i += n
return s
class States():
STIFFNESS = _slice(1)
STEER_RATIO = _slice(1)
ANGLE_OFFSET = _slice(1)
ANGLE_OFFSET_FAST = _slice(1)
VELOCITY = _slice(2)
YAW_RATE = _slice(1)
STEER_ANGLE = _slice(1)
class CarKalman():
name = 'car'
x_initial = np.array([
1.0,
15.0,
0.0,
0.0,
10.0, 0.0,
0.0,
0.0,
])
Q = np.diag([
(.05/100)**2,
.01**2,
math.radians(0.002)**2,
math.radians(0.1)**2,
.1**2, .01**2,
math.radians(0.1)**2,
math.radians(0.1)**2,
])
P_initial = Q.copy()
obs_noise = {
ObservationKind.STEER_ANGLE: np.atleast_2d(math.radians(0.01)**2),
ObservationKind.ANGLE_OFFSET_FAST: np.atleast_2d(math.radians(5.0)**2),
ObservationKind.STEER_RATIO: np.atleast_2d(5.0**2),
ObservationKind.STIFFNESS: np.atleast_2d(5.0**2),
ObservationKind.ROAD_FRAME_X_SPEED: np.atleast_2d(0.1**2),
}
maha_test_kinds = []
global_vars = [
sp.Symbol('mass'),
sp.Symbol('rotational_inertia'),
sp.Symbol('center_to_front'),
sp.Symbol('center_to_rear'),
sp.Symbol('stiffness_front'),
sp.Symbol('stiffness_rear'),
]
@staticmethod
def generate_code(generated_dir):
dim_state = CarKalman.x_initial.shape[0]
name = CarKalman.name
maha_test_kinds = CarKalman.maha_test_kinds
m, j, aF, aR, cF_orig, cR_orig = CarKalman.global_vars
state_sym = sp.MatrixSymbol('state', dim_state, 1)
state = sp.Matrix(state_sym)
x = state[States.STIFFNESS, :][0, 0]
cF, cR = x * cF_orig, x * cR_orig
angle_offset = state[States.ANGLE_OFFSET, :][0, 0]
angle_offset_fast = state[States.ANGLE_OFFSET_FAST, :][0, 0]
sa = state[States.STEER_ANGLE, :][0, 0]
sR = state[States.STEER_RATIO, :][0, 0]
u, v = state[States.VELOCITY, :]
r = state[States.YAW_RATE, :][0, 0]
A = sp.Matrix(np.zeros((2, 2)))
A[0, 0] = -(cF + cR) / (m * u)
A[0, 1] = -(cF * aF - cR * aR) / (m * u) - u
A[1, 0] = -(cF * aF - cR * aR) / (j * u)
A[1, 1] = -(cF * aF**2 + cR * aR**2) / (j * u)
B = sp.Matrix(np.zeros((2, 1)))
B[0, 0] = cF / m / sR
B[1, 0] = (cF * aF) / j / sR
x = sp.Matrix([v, r])
x_dot = A * x + B * (sa - angle_offset - angle_offset_fast)
dt = sp.Symbol('dt')
state_dot = sp.Matrix(np.zeros((dim_state, 1)))
state_dot[States.VELOCITY.start + 1, 0] = x_dot[0]
state_dot[States.YAW_RATE.start, 0] = x_dot[1]
f_sym = state + dt * state_dot
obs_eqs = [
[sp.Matrix([r]), ObservationKind.ROAD_FRAME_YAW_RATE, None],
[sp.Matrix([u, v]), ObservationKind.ROAD_FRAME_XY_SPEED, None],
[sp.Matrix([u]), ObservationKind.ROAD_FRAME_X_SPEED, None],
[sp.Matrix([sa]), ObservationKind.STEER_ANGLE, None],
[sp.Matrix([angle_offset_fast]), ObservationKind.ANGLE_OFFSET_FAST, None],
[sp.Matrix([sR]), ObservationKind.STEER_RATIO, None],
[sp.Matrix([x]), ObservationKind.STIFFNESS, None],
]
gen_code(generated_dir, name, f_sym, dt, state_sym, obs_eqs, dim_state, dim_state, maha_test_kinds=maha_test_kinds, global_vars=CarKalman.global_vars)
def __init__(self, generated_dir, steer_ratio=15, stiffness_factor=1, angle_offset=0):
self.dim_state = self.x_initial.shape[0]
x_init = self.x_initial
x_init[States.STEER_RATIO] = steer_ratio
x_init[States.STIFFNESS] = stiffness_factor
x_init[States.ANGLE_OFFSET] = angle_offset
self.filter = EKF_sym(generated_dir, self.name, self.Q, self.x_initial, self.P_initial, self.dim_state, self.dim_state, maha_test_kinds=self.maha_test_kinds, global_vars=self.global_vars)
@property
def x(self):
return self.filter.state()
@property
def P(self):
return self.filter.covs()
def predict(self, t):
return self.filter.predict(t)
def rts_smooth(self, estimates):
return self.filter.rts_smooth(estimates, norm_quats=False)
def get_R(self, kind, n):
obs_noise = self.obs_noise[kind]
dim = obs_noise.shape[0]
R = np.zeros((n, dim, dim))
for i in range(n):
R[i, :, :] = obs_noise
return R
def init_state(self, state, covs_diag=None, covs=None, filter_time=None):
if covs_diag is not None:
P = np.diag(covs_diag)
elif covs is not None:
P = covs
else:
P = self.filter.covs()
self.filter.init_state(state, P, filter_time)
def predict_and_observe(self, t, kind, data, R=None):
if len(data) > 0:
data = np.atleast_2d(data)
if R is None:
R = self.get_R(kind, len(data))
self.filter.predict_and_update_batch(t, kind, data, R)
if __name__ == "__main__":
generated_dir = sys.argv[2]
CarKalman.generate_code(generated_dir)
| true
| true
|
79076923568fabee83427836533b36fca23e5f6c
| 795
|
py
|
Python
|
set1/detectsb.py
|
elevenchars/cryptopals
|
d0cbe6dfcf7caf7f630b8867c38d1af2f42deb33
|
[
"MIT"
] | null | null | null |
set1/detectsb.py
|
elevenchars/cryptopals
|
d0cbe6dfcf7caf7f630b8867c38d1af2f42deb33
|
[
"MIT"
] | null | null | null |
set1/detectsb.py
|
elevenchars/cryptopals
|
d0cbe6dfcf7caf7f630b8867c38d1af2f42deb33
|
[
"MIT"
] | null | null | null |
import string
import sbxor
"""
Detect single-character XOR
One of the 60-character strings in this file (4.txt) has been encrypted by single-character XOR.
Find it.
"""
if __name__ == "__main__":
with open("data/4.txt", "r") as data_file:
data = data_file.read().split("\n")
candidates = []
for line in data[:]:
line_byte = bytearray.fromhex(line)
sb = sbxor.solve(line_byte)
if len(sb) != 0:
candidates.append([line_byte, sb])
print(f"{len(candidates)} candidate(s) found for single-byte xor\n")
for candidate in candidates:
print(f"Ciphertext: {candidate[0]}")
print("Possible solution(s):")
for b in candidate[1]:
print(f"Key: {b[0]}")
print(f"Plaintext: {repr(b[1])}")
| 26.5
| 96
| 0.6
|
import string
import sbxor
if __name__ == "__main__":
with open("data/4.txt", "r") as data_file:
data = data_file.read().split("\n")
candidates = []
for line in data[:]:
line_byte = bytearray.fromhex(line)
sb = sbxor.solve(line_byte)
if len(sb) != 0:
candidates.append([line_byte, sb])
print(f"{len(candidates)} candidate(s) found for single-byte xor\n")
for candidate in candidates:
print(f"Ciphertext: {candidate[0]}")
print("Possible solution(s):")
for b in candidate[1]:
print(f"Key: {b[0]}")
print(f"Plaintext: {repr(b[1])}")
| true
| true
|
79076b9574986ce6cb14727d4a45b47fda3bc52a
| 3,434
|
py
|
Python
|
huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/key_status_info.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 1
|
2021-04-16T07:59:28.000Z
|
2021-04-16T07:59:28.000Z
|
huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/key_status_info.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/key_status_info.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 1
|
2022-01-17T02:24:18.000Z
|
2022-01-17T02:24:18.000Z
|
# coding: utf-8
import pprint
import re
import six
class KeyStatusInfo:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'key_id': 'str',
'key_state': 'str'
}
attribute_map = {
'key_id': 'key_id',
'key_state': 'key_state'
}
def __init__(self, key_id=None, key_state=None):
"""KeyStatusInfo - a model defined in huaweicloud sdk"""
self._key_id = None
self._key_state = None
self.discriminator = None
if key_id is not None:
self.key_id = key_id
if key_state is not None:
self.key_state = key_state
@property
def key_id(self):
"""Gets the key_id of this KeyStatusInfo.
        Key ID
:return: The key_id of this KeyStatusInfo.
:rtype: str
"""
return self._key_id
@key_id.setter
def key_id(self, key_id):
"""Sets the key_id of this KeyStatusInfo.
        Key ID
:param key_id: The key_id of this KeyStatusInfo.
:type: str
"""
self._key_id = key_id
@property
def key_state(self):
"""Gets the key_state of this KeyStatusInfo.
        Key state: - 2: enabled - 3: disabled - 4: pending deletion - 5: pending import - 7: frozen
:return: The key_state of this KeyStatusInfo.
:rtype: str
"""
return self._key_state
@key_state.setter
def key_state(self, key_state):
"""Sets the key_state of this KeyStatusInfo.
        Key state: - 2: enabled - 3: disabled - 4: pending deletion - 5: pending import - 7: frozen
:param key_state: The key_state of this KeyStatusInfo.
:type: str
"""
self._key_state = key_state
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, KeyStatusInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
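# Illustrative usage of this generated model (the values below are made up):
# >>> info = KeyStatusInfo(key_id="0d0466b0-e727-4d9c-b35d-f84bb474a37f",
# ...                      key_state="2")
# >>> info.to_dict()
# {'key_id': '0d0466b0-e727-4d9c-b35d-f84bb474a37f', 'key_state': '2'}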
| 24.884058
| 74
| 0.534945
|
import pprint
import re
import six
class KeyStatusInfo:
sensitive_list = []
openapi_types = {
'key_id': 'str',
'key_state': 'str'
}
attribute_map = {
'key_id': 'key_id',
'key_state': 'key_state'
}
def __init__(self, key_id=None, key_state=None):
self._key_id = None
self._key_state = None
self.discriminator = None
if key_id is not None:
self.key_id = key_id
if key_state is not None:
self.key_state = key_state
@property
def key_id(self):
return self._key_id
@key_id.setter
def key_id(self, key_id):
self._key_id = key_id
@property
def key_state(self):
return self._key_state
@key_state.setter
def key_state(self, key_state):
self._key_state = key_state
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, KeyStatusInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
79076b9ac6bcbac561556c30cc236bce2191838a
| 7,627
|
py
|
Python
|
Sea/adapter/connections/Connection.py
|
FRidh/Sea
|
b474e93a449570a9ba3b915c4d80f814feee2545
|
[
"BSD-3-Clause"
] | 2
|
2015-07-02T13:34:09.000Z
|
2015-09-28T09:07:52.000Z
|
Sea/adapter/connections/Connection.py
|
FRidh/Sea
|
b474e93a449570a9ba3b915c4d80f814feee2545
|
[
"BSD-3-Clause"
] | null | null | null |
Sea/adapter/connections/Connection.py
|
FRidh/Sea
|
b474e93a449570a9ba3b915c4d80f814feee2545
|
[
"BSD-3-Clause"
] | 1
|
2022-01-22T03:01:54.000Z
|
2022-01-22T03:01:54.000Z
|
import abc
import logging
import Sea
import numpy as np
import itertools
from ..base import Base
class Connection(Base, Sea.model.connections.Connection):
"""
Abstract base class for all :mod:`Sea.adapter.connections` classes.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, obj, system, components):
Base.__init__(self, obj)
obj.addProperty("App::PropertyLink", "System", "Component", "System this connection belongs to.")
obj.System = system
obj.couplings = self.couplings
obj.makeCoupling = self.makeCoupling
obj.updateCouplings = self.updateCouplings
obj.addCouplings = self.addCouplings
#obj.addProperty("App::PropertyLinkList", "Couplings", "Connection", "List of all couplings.")
obj.addProperty("App::PropertyLinkList", "Components", "Connection", "Components that are connected via this connection.")
obj.Frequency = system.Frequency
#obj.addProperty("App::PropertyLink", "CouplingsGroup", "Groups", "Couplings that are part of System.")
#obj.CouplingsGroup = group.newObject("App::DocumentObjectGroup", "GroupCouplings")
#obj.CouplingsGroup.Label = "Couplings"
#obj.addProperty("Part::PropertyPartShape", "Shape", "Connection", "Shape of the connection.")
#obj.addProperty("App::PropertyBool", "UpdateCouplings", "Connection", "Update couplings when the connection changes.").UpdateCouplings = True
#obj.addProperty("App::PropertyString", "Sort", "Connection", "Is the connection described by a point, line or area.")
obj.addProperty("App::PropertyFloatList", "ImpedanceJunction", "Connection", "Total impedance at the junction.")
obj.setEditorMode("ImpedanceJunction", 1)
obj.Components = components
#obj.Shape = component_a.Shape.common(component_b.Shape)
obj.updateCouplings()
def onChanged(self, obj, prop):
Base.onChanged(self, obj, prop)
if prop == 'Components':
pass
#elif prop == 'Shape':
#self.updateCouplings(obj)
#if prop == 'Frequency':
#for coupling in obj.couplings():
#coupling.Frequency = obj.Frequency
def execute(self, obj):
Base.execute(self, obj)
@staticmethod
def couplings(obj):
return filter(Sea.actions.document.isCoupling, obj.InList)
@abc.abstractmethod
def updateComponents(self, obj):
pass
#@staticmethod
#def updateShape(obj):
#"""
#Update the common shape between the components.
#"""
#connection = Sea.adapter.connection.ShapeConnection([item.Shape for item in self.Components])
#shape = connection.shape()
#obj.Shape = shape
@staticmethod
def updateCouplings(connection):
"""
The shape has changed, which means couplings might have to change, be added or removed.
        To be safe, all couplings in this connection are deleted and then rebuilt from scratch.
"""
"""Remove all old couplings."""
for coupling in connection.couplings():
connection.Document.removeObject(coupling.Name)
"""Add couplings for every shape."""
connection.addCouplings()
@staticmethod
def addCouplings(connection):
"""
Add couplings to the :attr:`connection`.
:param connection: an instance of :class:`Sea.adapter.baseclasses.Connection`
"""
for comp_from, comp_to in itertools.permutations(connection.Components, 2):
coupling_sort = Connection.determineCouplingType(connection.ClassName, comp_from, comp_to)
if not coupling_sort:
App.Console.PrintWarning("Cannot add coupling.\n")
return
for sub_from, sub_to in itertools.product(comp_from.subsystems(), comp_to.subsystems()):
#print connection
#print 'From: ' + comp_from.ClassName + sub_from
#print 'To: ' + comp_to.ClassName + sub_to
connection.makeCoupling(sub_from, sub_to, coupling_sort)
coupling_options = {
('ConnectionPoint', 'Component1DBeam', 'Component1DBeam') : 'Coupling1DStructural',
('ConnectionLine', 'Component1DBeam', 'Component1DBeam') : 'Coupling1DStructural',
('ConnectionSurface', 'Component1DBeam', 'Component1DBeam') : 'Coupling1DStructural',
('ConnectionPoint', 'Component2DPlate', 'Component2DPlate') : 'Coupling1DStructural',
('ConnectionLine', 'Component2DPlate', 'Component2DPlate') : 'Coupling2DStructural',
('ConnectionSurface', 'Component2DPlate', 'Component2DPlate') : 'Coupling2DStructural',
('ConnectionSurface', 'Component2DPlate', 'Component3DCavity') : 'Coupling3DPlateCavity',
('ConnectionSurface', 'Component3DCavity', 'Component2DPlate') : 'Coupling3DCavityPlate',
}
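    # For example, a 'ConnectionLine' between two 'Component2DPlate' components
    # maps to 'Coupling2DStructural', while plate-to-cavity couplings are only
    # defined for 'ConnectionSurface' connections.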
@staticmethod
def determineCouplingType(connection_type, component_from, component_to):
"""
        Determine the type of coupling. Detects what type of connection the components have.
        Based on the type of connection and on the types of the components, a coupling type is returned.
:param component_from: an instance of a child of :class:`Sea.adapter.baseclasses.Component`
:param component_to: an instance of a child of :class:`Sea.adapter.baseclasses.Component`
"""
if connection_type:
item = (connection_type, component_from.ClassName, component_to.ClassName)
try:
return Connection.coupling_options[item]
except KeyError:
txt = 'Could not determine the type of coupling for ' + component_from.ClassName + ' to ' + component_to.ClassName + ' with ' + connection_type + '.\n'
App.Console.PrintWarning(txt)
return None
@staticmethod
def makeCoupling(connection, subsystem_from, subsystem_to, sort):
"""
Add a coupling to system.
:param connection: an instance of :class:`Sea.adapter.baseclasses.Connection`
:param component_from: an instance of a child of :class:`Sea.adapter.baseclasses.Component`
:param subsystem_from: string representing the type of subsystem
:param component_to: an instance of a child of :class:`Sea.adapter.baseclasses.Component`
:param subsystem_to: string representing the type of subsystem
:param sort: sort of coupling as specified in :class:`Sea.adapter.couplings.couplings_map`
"""
#if connection.System == component_from.System == component_to.System:
from Sea.adapter.object_maps import couplings_map
obj = connection.Document.addObject("App::FeaturePython", 'Coupling')
couplings_map[sort](obj, connection, subsystem_from, subsystem_to)
try:
Sea.adapter.couplings.ViewProviderCoupling(obj.ViewObject)
except AttributeError:
pass
obj.Label = obj.ClassName + '_' + subsystem_from.ClassName.replace('Subsystem', '') + '_to_' + subsystem_to.ClassName.replace('Subsystem', '')
logging.info("Sea: Created %s.", obj.Name)
obj.Document.recompute()
return obj
| 42.848315
| 168
| 0.629343
|
import abc
import logging
import Sea
import numpy as np
import itertools
from ..base import Base
class Connection(Base, Sea.model.connections.Connection):
__metaclass__ = abc.ABCMeta
def __init__(self, obj, system, components):
Base.__init__(self, obj)
obj.addProperty("App::PropertyLink", "System", "Component", "System this connection belongs to.")
obj.System = system
obj.couplings = self.couplings
obj.makeCoupling = self.makeCoupling
obj.updateCouplings = self.updateCouplings
obj.addCouplings = self.addCouplings
obj.addProperty("App::PropertyLinkList", "Components", "Connection", "Components that are connected via this connection.")
obj.Frequency = system.Frequency
obj.addProperty("App::PropertyFloatList", "ImpedanceJunction", "Connection", "Total impedance at the junction.")
obj.setEditorMode("ImpedanceJunction", 1)
obj.Components = components
obj.updateCouplings()
def onChanged(self, obj, prop):
Base.onChanged(self, obj, prop)
if prop == 'Components':
pass
def execute(self, obj):
Base.execute(self, obj)
@staticmethod
def couplings(obj):
return filter(Sea.actions.document.isCoupling, obj.InList)
@abc.abstractmethod
def updateComponents(self, obj):
pass
#Update the common shape between the components.
#"""
@staticmethod
def updateCouplings(connection):
for coupling in connection.couplings():
connection.Document.removeObject(coupling.Name)
connection.addCouplings()
@staticmethod
def addCouplings(connection):
for comp_from, comp_to in itertools.permutations(connection.Components, 2):
coupling_sort = Connection.determineCouplingType(connection.ClassName, comp_from, comp_to)
if not coupling_sort:
App.Console.PrintWarning("Cannot add coupling.\n")
return
for sub_from, sub_to in itertools.product(comp_from.subsystems(), comp_to.subsystems()):
connection.makeCoupling(sub_from, sub_to, coupling_sort)
coupling_options = {
('ConnectionPoint', 'Component1DBeam', 'Component1DBeam') : 'Coupling1DStructural',
('ConnectionLine', 'Component1DBeam', 'Component1DBeam') : 'Coupling1DStructural',
('ConnectionSurface', 'Component1DBeam', 'Component1DBeam') : 'Coupling1DStructural',
('ConnectionPoint', 'Component2DPlate', 'Component2DPlate') : 'Coupling1DStructural',
('ConnectionLine', 'Component2DPlate', 'Component2DPlate') : 'Coupling2DStructural',
('ConnectionSurface', 'Component2DPlate', 'Component2DPlate') : 'Coupling2DStructural',
('ConnectionSurface', 'Component2DPlate', 'Component3DCavity') : 'Coupling3DPlateCavity',
('ConnectionSurface', 'Component3DCavity', 'Component2DPlate') : 'Coupling3DCavityPlate',
}
@staticmethod
def determineCouplingType(connection_type, component_from, component_to):
if connection_type:
item = (connection_type, component_from.ClassName, component_to.ClassName)
try:
return Connection.coupling_options[item]
except KeyError:
txt = 'Could not determine the type of coupling for ' + component_from.ClassName + ' to ' + component_to.ClassName + ' with ' + connection_type + '.\n'
App.Console.PrintWarning(txt)
return None
@staticmethod
def makeCoupling(connection, subsystem_from, subsystem_to, sort):
from Sea.adapter.object_maps import couplings_map
obj = connection.Document.addObject("App::FeaturePython", 'Coupling')
couplings_map[sort](obj, connection, subsystem_from, subsystem_to)
try:
Sea.adapter.couplings.ViewProviderCoupling(obj.ViewObject)
except AttributeError:
pass
obj.Label = obj.ClassName + '_' + subsystem_from.ClassName.replace('Subsystem', '') + '_to_' + subsystem_to.ClassName.replace('Subsystem', '')
logging.info("Sea: Created %s.", obj.Name)
obj.Document.recompute()
return obj
| true
| true
|
79076c669e78b6357ffc874149409ad05a4d6fd5
| 1,188
|
py
|
Python
|
jetbot/motor.py
|
vstoneofficial/jetbot-mecanum
|
cc161b888b3e6cccfde4ff9b653c97af66adb5c8
|
[
"MIT"
] | null | null | null |
jetbot/motor.py
|
vstoneofficial/jetbot-mecanum
|
cc161b888b3e6cccfde4ff9b653c97af66adb5c8
|
[
"MIT"
] | null | null | null |
jetbot/motor.py
|
vstoneofficial/jetbot-mecanum
|
cc161b888b3e6cccfde4ff9b653c97af66adb5c8
|
[
"MIT"
] | null | null | null |
import atexit
from .MecanumRover_MotorDriver import MecanumRover_MotorDriver
import traitlets
from traitlets.config.configurable import Configurable
class Motor(Configurable):
value = traitlets.Float()
# config
alpha = traitlets.Float(default_value=1.0).tag(config=True)
beta = traitlets.Float(default_value=0.0).tag(config=True)
def __init__(self, driver, channel, *args, **kwargs):
super(Motor, self).__init__(*args, **kwargs) # initializes traitlets
self._driver = driver
self._motor = self._driver.getMotor(channel)
atexit.register(self._release)
@traitlets.observe('value')
def _observe_value(self, change):
self._write_value(change['new'])
def _write_value(self, value):
"""Sets motor value between [-1, 1]"""
# ジョイスティック等の値ブレ対策
if abs(value) <= 0.05:
value = 0.0
#モータの目標速度(mm/s)に変換。※最高1300mm/s
mapped_value = int(1300.0 * (self.alpha * value + self.beta))
speed = min(max(mapped_value, -1300), 1300)
self._motor.setSpeed(speed)
def _release(self):
"""Stops motor by releasing control"""
self._motor.setSpeed(0)
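# Worked example of the value -> speed mapping above (with the default
# alpha=1.0, beta=0.0): setting motor.value = 0.5 maps to
# int(1300.0 * 0.5) = 650 mm/s, clamped to the [-1300, 1300] range.
# Constructing a Motor still requires a MecanumRover_MotorDriver instance and
# a channel, which are outside the scope of this file.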
| 29.7
| 77
| 0.655724
|
import atexit
from .MecanumRover_MotorDriver import MecanumRover_MotorDriver
import traitlets
from traitlets.config.configurable import Configurable
class Motor(Configurable):
value = traitlets.Float()
alpha = traitlets.Float(default_value=1.0).tag(config=True)
beta = traitlets.Float(default_value=0.0).tag(config=True)
def __init__(self, driver, channel, *args, **kwargs):
super(Motor, self).__init__(*args, **kwargs)
self._driver = driver
self._motor = self._driver.getMotor(channel)
atexit.register(self._release)
@traitlets.observe('value')
def _observe_value(self, change):
self._write_value(change['new'])
def _write_value(self, value):
if abs(value) <= 0.05:
value = 0.0
mapped_value = int(1300.0 * (self.alpha * value + self.beta))
speed = min(max(mapped_value, -1300), 1300)
self._motor.setSpeed(speed)
def _release(self):
self._motor.setSpeed(0)
| true
| true
|
79076cafd810b7f65fffd21ff597d388ea804694
| 13,227
|
py
|
Python
|
tools/third_party/pytest/testing/test_parseopt.py
|
ziransun/wpt
|
ab8f451eb39eb198584d547f5d965ef54df2a86a
|
[
"BSD-3-Clause"
] | 9
|
2019-04-01T10:57:10.000Z
|
2021-12-02T11:12:06.000Z
|
tools/third_party/pytest/testing/test_parseopt.py
|
ziransun/wpt
|
ab8f451eb39eb198584d547f5d965ef54df2a86a
|
[
"BSD-3-Clause"
] | 33
|
2019-03-21T10:18:37.000Z
|
2022-03-23T13:21:40.000Z
|
tools/third_party/pytest/testing/test_parseopt.py
|
ziransun/wpt
|
ab8f451eb39eb198584d547f5d965ef54df2a86a
|
[
"BSD-3-Clause"
] | 11
|
2019-04-12T01:20:16.000Z
|
2021-11-23T17:25:02.000Z
|
from __future__ import absolute_import, division, print_function
import argparse
import sys
import os
import py
import pytest
from _pytest.config import argparsing as parseopt
@pytest.fixture
def parser():
return parseopt.Parser()
class TestParser(object):
def test_no_help_by_default(self, capsys):
parser = parseopt.Parser(usage="xyz")
pytest.raises(SystemExit, lambda: parser.parse(["-h"]))
out, err = capsys.readouterr()
assert err.find("error: unrecognized arguments") != -1
def test_argument(self):
with pytest.raises(parseopt.ArgumentError):
# need a short or long option
argument = parseopt.Argument()
argument = parseopt.Argument("-t")
assert argument._short_opts == ["-t"]
assert argument._long_opts == []
assert argument.dest == "t"
argument = parseopt.Argument("-t", "--test")
assert argument._short_opts == ["-t"]
assert argument._long_opts == ["--test"]
assert argument.dest == "test"
argument = parseopt.Argument("-t", "--test", dest="abc")
assert argument.dest == "abc"
assert (
str(argument)
== ("Argument(_short_opts: ['-t'], _long_opts: ['--test'], dest: 'abc')")
)
def test_argument_type(self):
argument = parseopt.Argument("-t", dest="abc", type=int)
assert argument.type is int
argument = parseopt.Argument("-t", dest="abc", type=str)
assert argument.type is str
argument = parseopt.Argument("-t", dest="abc", type=float)
assert argument.type is float
with pytest.warns(DeprecationWarning):
with pytest.raises(KeyError):
argument = parseopt.Argument("-t", dest="abc", type="choice")
argument = parseopt.Argument(
"-t", dest="abc", type=str, choices=["red", "blue"]
)
assert argument.type is str
def test_argument_processopt(self):
argument = parseopt.Argument("-t", type=int)
argument.default = 42
argument.dest = "abc"
res = argument.attrs()
assert res["default"] == 42
assert res["dest"] == "abc"
def test_group_add_and_get(self, parser):
group = parser.getgroup("hello", description="desc")
assert group.name == "hello"
assert group.description == "desc"
def test_getgroup_simple(self, parser):
group = parser.getgroup("hello", description="desc")
assert group.name == "hello"
assert group.description == "desc"
group2 = parser.getgroup("hello")
assert group2 is group
def test_group_ordering(self, parser):
parser.getgroup("1")
parser.getgroup("2")
parser.getgroup("3", after="1")
groups = parser._groups
groups_names = [x.name for x in groups]
assert groups_names == list("132")
def test_group_addoption(self):
group = parseopt.OptionGroup("hello")
group.addoption("--option1", action="store_true")
assert len(group.options) == 1
assert isinstance(group.options[0], parseopt.Argument)
def test_group_addoption_conflict(self):
group = parseopt.OptionGroup("hello again")
group.addoption("--option1", "--option-1", action="store_true")
with pytest.raises(ValueError) as err:
group.addoption("--option1", "--option-one", action="store_true")
assert str({"--option1"}) in str(err.value)
def test_group_shortopt_lowercase(self, parser):
group = parser.getgroup("hello")
pytest.raises(
ValueError,
"""
group.addoption("-x", action="store_true")
""",
)
assert len(group.options) == 0
group._addoption("-x", action="store_true")
assert len(group.options) == 1
def test_parser_addoption(self, parser):
group = parser.getgroup("custom options")
assert len(group.options) == 0
group.addoption("--option1", action="store_true")
assert len(group.options) == 1
def test_parse(self, parser):
parser.addoption("--hello", dest="hello", action="store")
args = parser.parse(["--hello", "world"])
assert args.hello == "world"
assert not getattr(args, parseopt.FILE_OR_DIR)
def test_parse2(self, parser):
args = parser.parse([py.path.local()])
assert getattr(args, parseopt.FILE_OR_DIR)[0] == py.path.local()
def test_parse_known_args(self, parser):
parser.parse_known_args([py.path.local()])
parser.addoption("--hello", action="store_true")
ns = parser.parse_known_args(["x", "--y", "--hello", "this"])
assert ns.hello
assert ns.file_or_dir == ["x"]
def test_parse_known_and_unknown_args(self, parser):
parser.addoption("--hello", action="store_true")
ns, unknown = parser.parse_known_and_unknown_args(
["x", "--y", "--hello", "this"]
)
assert ns.hello
assert ns.file_or_dir == ["x"]
assert unknown == ["--y", "this"]
def test_parse_will_set_default(self, parser):
parser.addoption("--hello", dest="hello", default="x", action="store")
option = parser.parse([])
assert option.hello == "x"
del option.hello
parser.parse_setoption([], option)
assert option.hello == "x"
def test_parse_setoption(self, parser):
parser.addoption("--hello", dest="hello", action="store")
parser.addoption("--world", dest="world", default=42)
class A(object):
pass
option = A()
args = parser.parse_setoption(["--hello", "world"], option)
assert option.hello == "world"
assert option.world == 42
assert not args
def test_parse_special_destination(self, parser):
parser.addoption("--ultimate-answer", type=int)
args = parser.parse(["--ultimate-answer", "42"])
assert args.ultimate_answer == 42
def test_parse_split_positional_arguments(self, parser):
parser.addoption("-R", action="store_true")
parser.addoption("-S", action="store_false")
args = parser.parse(["-R", "4", "2", "-S"])
assert getattr(args, parseopt.FILE_OR_DIR) == ["4", "2"]
args = parser.parse(["-R", "-S", "4", "2", "-R"])
assert getattr(args, parseopt.FILE_OR_DIR) == ["4", "2"]
assert args.R is True
assert args.S is False
args = parser.parse(["-R", "4", "-S", "2"])
assert getattr(args, parseopt.FILE_OR_DIR) == ["4", "2"]
assert args.R is True
assert args.S is False
def test_parse_defaultgetter(self):
def defaultget(option):
if not hasattr(option, "type"):
return
if option.type is int:
option.default = 42
elif option.type is str:
option.default = "world"
parser = parseopt.Parser(processopt=defaultget)
parser.addoption("--this", dest="this", type=int, action="store")
parser.addoption("--hello", dest="hello", type=str, action="store")
parser.addoption("--no", dest="no", action="store_true")
option = parser.parse([])
assert option.hello == "world"
assert option.this == 42
assert option.no is False
def test_drop_short_helper(self):
parser = argparse.ArgumentParser(
formatter_class=parseopt.DropShorterLongHelpFormatter
)
parser.add_argument(
"-t", "--twoword", "--duo", "--two-word", "--two", help="foo"
).map_long_option = {
"two": "two-word"
}
# throws error on --deux only!
parser.add_argument(
"-d", "--deuxmots", "--deux-mots", action="store_true", help="foo"
).map_long_option = {
"deux": "deux-mots"
}
parser.add_argument("-s", action="store_true", help="single short")
parser.add_argument("--abc", "-a", action="store_true", help="bar")
parser.add_argument("--klm", "-k", "--kl-m", action="store_true", help="bar")
parser.add_argument(
"-P", "--pq-r", "-p", "--pqr", action="store_true", help="bar"
)
parser.add_argument(
"--zwei-wort", "--zweiwort", "--zweiwort", action="store_true", help="bar"
)
parser.add_argument(
"-x", "--exit-on-first", "--exitfirst", action="store_true", help="spam"
).map_long_option = {
"exitfirst": "exit-on-first"
}
parser.add_argument("files_and_dirs", nargs="*")
args = parser.parse_args(["-k", "--duo", "hallo", "--exitfirst"])
assert args.twoword == "hallo"
assert args.klm is True
assert args.zwei_wort is False
assert args.exit_on_first is True
assert args.s is False
args = parser.parse_args(["--deux-mots"])
with pytest.raises(AttributeError):
assert args.deux_mots is True
assert args.deuxmots is True
args = parser.parse_args(["file", "dir"])
assert "|".join(args.files_and_dirs) == "file|dir"
def test_drop_short_0(self, parser):
parser.addoption("--funcarg", "--func-arg", action="store_true")
parser.addoption("--abc-def", "--abc-def", action="store_true")
parser.addoption("--klm-hij", action="store_true")
args = parser.parse(["--funcarg", "--k"])
assert args.funcarg is True
assert args.abc_def is False
assert args.klm_hij is True
def test_drop_short_2(self, parser):
parser.addoption("--func-arg", "--doit", action="store_true")
args = parser.parse(["--doit"])
assert args.func_arg is True
def test_drop_short_3(self, parser):
parser.addoption("--func-arg", "--funcarg", "--doit", action="store_true")
args = parser.parse(["abcd"])
assert args.func_arg is False
assert args.file_or_dir == ["abcd"]
def test_drop_short_help0(self, parser, capsys):
parser.addoption("--func-args", "--doit", help="foo", action="store_true")
parser.parse([])
help = parser.optparser.format_help()
assert "--func-args, --doit foo" in help
# testing would be more helpful with all help generated
def test_drop_short_help1(self, parser, capsys):
group = parser.getgroup("general")
group.addoption("--doit", "--func-args", action="store_true", help="foo")
group._addoption(
"-h",
"--help",
action="store_true",
dest="help",
help="show help message and configuration info",
)
parser.parse(["-h"])
help = parser.optparser.format_help()
assert "-doit, --func-args foo" in help
def test_multiple_metavar_help(self, parser):
"""
Help text for options with a metavar tuple should display help
in the form "--preferences=value1 value2 value3" (#2004).
"""
group = parser.getgroup("general")
group.addoption(
"--preferences", metavar=("value1", "value2", "value3"), nargs=3
)
group._addoption("-h", "--help", action="store_true", dest="help")
parser.parse(["-h"])
help = parser.optparser.format_help()
assert "--preferences=value1 value2 value3" in help
def test_argcomplete(testdir, monkeypatch):
if not py.path.local.sysfind("bash"):
pytest.skip("bash not available")
script = str(testdir.tmpdir.join("test_argcomplete"))
pytest_bin = sys.argv[0]
if "pytest" not in os.path.basename(pytest_bin):
pytest.skip("need to be run with pytest executable, not %s" % (pytest_bin,))
with open(str(script), "w") as fp:
# redirect output from argcomplete to stdin and stderr is not trivial
# http://stackoverflow.com/q/12589419/1307905
# so we use bash
fp.write('COMP_WORDBREAKS="$COMP_WORDBREAKS" %s 8>&1 9>&2' % pytest_bin)
    # alternative would be extended Testdir.{run(),_run(),popen()} to be able
# to handle a keyword argument env that replaces os.environ in popen or
# extends the copy, advantage: could not forget to restore
monkeypatch.setenv("_ARGCOMPLETE", "1")
monkeypatch.setenv("_ARGCOMPLETE_IFS", "\x0b")
monkeypatch.setenv("COMP_WORDBREAKS", " \\t\\n\"\\'><=;|&(:")
arg = "--fu"
monkeypatch.setenv("COMP_LINE", "pytest " + arg)
monkeypatch.setenv("COMP_POINT", str(len("pytest " + arg)))
result = testdir.run("bash", str(script), arg)
if result.ret == 255:
# argcomplete not found
pytest.skip("argcomplete not available")
elif not result.stdout.str():
pytest.skip("bash provided no output, argcomplete not available?")
else:
result.stdout.fnmatch_lines(["--funcargs", "--fulltrace"])
os.mkdir("test_argcomplete.d")
arg = "test_argc"
monkeypatch.setenv("COMP_LINE", "pytest " + arg)
monkeypatch.setenv("COMP_POINT", str(len("pytest " + arg)))
result = testdir.run("bash", str(script), arg)
result.stdout.fnmatch_lines(["test_argcomplete", "test_argcomplete.d/"])
| 39.249258
| 86
| 0.597112
|
from __future__ import absolute_import, division, print_function
import argparse
import sys
import os
import py
import pytest
from _pytest.config import argparsing as parseopt
@pytest.fixture
def parser():
return parseopt.Parser()
class TestParser(object):
def test_no_help_by_default(self, capsys):
parser = parseopt.Parser(usage="xyz")
pytest.raises(SystemExit, lambda: parser.parse(["-h"]))
out, err = capsys.readouterr()
assert err.find("error: unrecognized arguments") != -1
def test_argument(self):
with pytest.raises(parseopt.ArgumentError):
argument = parseopt.Argument()
argument = parseopt.Argument("-t")
assert argument._short_opts == ["-t"]
assert argument._long_opts == []
assert argument.dest == "t"
argument = parseopt.Argument("-t", "--test")
assert argument._short_opts == ["-t"]
assert argument._long_opts == ["--test"]
assert argument.dest == "test"
argument = parseopt.Argument("-t", "--test", dest="abc")
assert argument.dest == "abc"
assert (
str(argument)
== ("Argument(_short_opts: ['-t'], _long_opts: ['--test'], dest: 'abc')")
)
def test_argument_type(self):
argument = parseopt.Argument("-t", dest="abc", type=int)
assert argument.type is int
argument = parseopt.Argument("-t", dest="abc", type=str)
assert argument.type is str
argument = parseopt.Argument("-t", dest="abc", type=float)
assert argument.type is float
with pytest.warns(DeprecationWarning):
with pytest.raises(KeyError):
argument = parseopt.Argument("-t", dest="abc", type="choice")
argument = parseopt.Argument(
"-t", dest="abc", type=str, choices=["red", "blue"]
)
assert argument.type is str
def test_argument_processopt(self):
argument = parseopt.Argument("-t", type=int)
argument.default = 42
argument.dest = "abc"
res = argument.attrs()
assert res["default"] == 42
assert res["dest"] == "abc"
def test_group_add_and_get(self, parser):
group = parser.getgroup("hello", description="desc")
assert group.name == "hello"
assert group.description == "desc"
def test_getgroup_simple(self, parser):
group = parser.getgroup("hello", description="desc")
assert group.name == "hello"
assert group.description == "desc"
group2 = parser.getgroup("hello")
assert group2 is group
def test_group_ordering(self, parser):
parser.getgroup("1")
parser.getgroup("2")
parser.getgroup("3", after="1")
groups = parser._groups
groups_names = [x.name for x in groups]
assert groups_names == list("132")
def test_group_addoption(self):
group = parseopt.OptionGroup("hello")
group.addoption("--option1", action="store_true")
assert len(group.options) == 1
assert isinstance(group.options[0], parseopt.Argument)
def test_group_addoption_conflict(self):
group = parseopt.OptionGroup("hello again")
group.addoption("--option1", "--option-1", action="store_true")
with pytest.raises(ValueError) as err:
group.addoption("--option1", "--option-one", action="store_true")
assert str({"--option1"}) in str(err.value)
def test_group_shortopt_lowercase(self, parser):
group = parser.getgroup("hello")
pytest.raises(
ValueError,
"""
group.addoption("-x", action="store_true")
""",
)
assert len(group.options) == 0
group._addoption("-x", action="store_true")
assert len(group.options) == 1
def test_parser_addoption(self, parser):
group = parser.getgroup("custom options")
assert len(group.options) == 0
group.addoption("--option1", action="store_true")
assert len(group.options) == 1
def test_parse(self, parser):
parser.addoption("--hello", dest="hello", action="store")
args = parser.parse(["--hello", "world"])
assert args.hello == "world"
assert not getattr(args, parseopt.FILE_OR_DIR)
def test_parse2(self, parser):
args = parser.parse([py.path.local()])
assert getattr(args, parseopt.FILE_OR_DIR)[0] == py.path.local()
def test_parse_known_args(self, parser):
parser.parse_known_args([py.path.local()])
parser.addoption("--hello", action="store_true")
ns = parser.parse_known_args(["x", "--y", "--hello", "this"])
assert ns.hello
assert ns.file_or_dir == ["x"]
def test_parse_known_and_unknown_args(self, parser):
parser.addoption("--hello", action="store_true")
ns, unknown = parser.parse_known_and_unknown_args(
["x", "--y", "--hello", "this"]
)
assert ns.hello
assert ns.file_or_dir == ["x"]
assert unknown == ["--y", "this"]
def test_parse_will_set_default(self, parser):
parser.addoption("--hello", dest="hello", default="x", action="store")
option = parser.parse([])
assert option.hello == "x"
del option.hello
parser.parse_setoption([], option)
assert option.hello == "x"
def test_parse_setoption(self, parser):
parser.addoption("--hello", dest="hello", action="store")
parser.addoption("--world", dest="world", default=42)
class A(object):
pass
option = A()
args = parser.parse_setoption(["--hello", "world"], option)
assert option.hello == "world"
assert option.world == 42
assert not args
def test_parse_special_destination(self, parser):
parser.addoption("--ultimate-answer", type=int)
args = parser.parse(["--ultimate-answer", "42"])
assert args.ultimate_answer == 42
def test_parse_split_positional_arguments(self, parser):
parser.addoption("-R", action="store_true")
parser.addoption("-S", action="store_false")
args = parser.parse(["-R", "4", "2", "-S"])
assert getattr(args, parseopt.FILE_OR_DIR) == ["4", "2"]
args = parser.parse(["-R", "-S", "4", "2", "-R"])
assert getattr(args, parseopt.FILE_OR_DIR) == ["4", "2"]
assert args.R is True
assert args.S is False
args = parser.parse(["-R", "4", "-S", "2"])
assert getattr(args, parseopt.FILE_OR_DIR) == ["4", "2"]
assert args.R is True
assert args.S is False
def test_parse_defaultgetter(self):
def defaultget(option):
if not hasattr(option, "type"):
return
if option.type is int:
option.default = 42
elif option.type is str:
option.default = "world"
parser = parseopt.Parser(processopt=defaultget)
parser.addoption("--this", dest="this", type=int, action="store")
parser.addoption("--hello", dest="hello", type=str, action="store")
parser.addoption("--no", dest="no", action="store_true")
option = parser.parse([])
assert option.hello == "world"
assert option.this == 42
assert option.no is False
def test_drop_short_helper(self):
parser = argparse.ArgumentParser(
formatter_class=parseopt.DropShorterLongHelpFormatter
)
parser.add_argument(
"-t", "--twoword", "--duo", "--two-word", "--two", help="foo"
).map_long_option = {
"two": "two-word"
}
parser.add_argument(
"-d", "--deuxmots", "--deux-mots", action="store_true", help="foo"
).map_long_option = {
"deux": "deux-mots"
}
parser.add_argument("-s", action="store_true", help="single short")
parser.add_argument("--abc", "-a", action="store_true", help="bar")
parser.add_argument("--klm", "-k", "--kl-m", action="store_true", help="bar")
parser.add_argument(
"-P", "--pq-r", "-p", "--pqr", action="store_true", help="bar"
)
parser.add_argument(
"--zwei-wort", "--zweiwort", "--zweiwort", action="store_true", help="bar"
)
parser.add_argument(
"-x", "--exit-on-first", "--exitfirst", action="store_true", help="spam"
).map_long_option = {
"exitfirst": "exit-on-first"
}
parser.add_argument("files_and_dirs", nargs="*")
args = parser.parse_args(["-k", "--duo", "hallo", "--exitfirst"])
assert args.twoword == "hallo"
assert args.klm is True
assert args.zwei_wort is False
assert args.exit_on_first is True
assert args.s is False
args = parser.parse_args(["--deux-mots"])
with pytest.raises(AttributeError):
assert args.deux_mots is True
assert args.deuxmots is True
args = parser.parse_args(["file", "dir"])
assert "|".join(args.files_and_dirs) == "file|dir"
def test_drop_short_0(self, parser):
parser.addoption("--funcarg", "--func-arg", action="store_true")
parser.addoption("--abc-def", "--abc-def", action="store_true")
parser.addoption("--klm-hij", action="store_true")
args = parser.parse(["--funcarg", "--k"])
assert args.funcarg is True
assert args.abc_def is False
assert args.klm_hij is True
def test_drop_short_2(self, parser):
parser.addoption("--func-arg", "--doit", action="store_true")
args = parser.parse(["--doit"])
assert args.func_arg is True
def test_drop_short_3(self, parser):
parser.addoption("--func-arg", "--funcarg", "--doit", action="store_true")
args = parser.parse(["abcd"])
assert args.func_arg is False
assert args.file_or_dir == ["abcd"]
def test_drop_short_help0(self, parser, capsys):
parser.addoption("--func-args", "--doit", help="foo", action="store_true")
parser.parse([])
help = parser.optparser.format_help()
assert "--func-args, --doit foo" in help
def test_drop_short_help1(self, parser, capsys):
group = parser.getgroup("general")
group.addoption("--doit", "--func-args", action="store_true", help="foo")
group._addoption(
"-h",
"--help",
action="store_true",
dest="help",
help="show help message and configuration info",
)
parser.parse(["-h"])
help = parser.optparser.format_help()
assert "-doit, --func-args foo" in help
def test_multiple_metavar_help(self, parser):
group = parser.getgroup("general")
group.addoption(
"--preferences", metavar=("value1", "value2", "value3"), nargs=3
)
group._addoption("-h", "--help", action="store_true", dest="help")
parser.parse(["-h"])
help = parser.optparser.format_help()
assert "--preferences=value1 value2 value3" in help
def test_argcomplete(testdir, monkeypatch):
if not py.path.local.sysfind("bash"):
pytest.skip("bash not available")
script = str(testdir.tmpdir.join("test_argcomplete"))
pytest_bin = sys.argv[0]
if "pytest" not in os.path.basename(pytest_bin):
pytest.skip("need to be run with pytest executable, not %s" % (pytest_bin,))
with open(str(script), "w") as fp:
fp.write('COMP_WORDBREAKS="$COMP_WORDBREAKS" %s 8>&1 9>&2' % pytest_bin)
monkeypatch.setenv("_ARGCOMPLETE", "1")
monkeypatch.setenv("_ARGCOMPLETE_IFS", "\x0b")
monkeypatch.setenv("COMP_WORDBREAKS", " \\t\\n\"\\'><=;|&(:")
arg = "--fu"
monkeypatch.setenv("COMP_LINE", "pytest " + arg)
monkeypatch.setenv("COMP_POINT", str(len("pytest " + arg)))
result = testdir.run("bash", str(script), arg)
if result.ret == 255:
# argcomplete not found
pytest.skip("argcomplete not available")
elif not result.stdout.str():
pytest.skip("bash provided no output, argcomplete not available?")
else:
result.stdout.fnmatch_lines(["--funcargs", "--fulltrace"])
os.mkdir("test_argcomplete.d")
arg = "test_argc"
monkeypatch.setenv("COMP_LINE", "pytest " + arg)
monkeypatch.setenv("COMP_POINT", str(len("pytest " + arg)))
result = testdir.run("bash", str(script), arg)
result.stdout.fnmatch_lines(["test_argcomplete", "test_argcomplete.d/"])
| true
| true
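The parseopt suite above leans on argparse's long-option aliasing (several option strings sharing one destination) before layering pytest's DropShorterLongHelpFormatter on top. A minimal, self-contained sketch of the plain-argparse behaviour being exercised — illustrative only, not part of the dataset record:
import argparse

# One destination with several long aliases: argparse derives dest from the
# first long option string ("--twoword"), and any alias stores to that dest.
demo = argparse.ArgumentParser()
demo.add_argument("-t", "--twoword", "--two-word", action="store", help="demo option")
ns = demo.parse_args(["--two-word", "hallo"])
assert ns.twoword == "hallo"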
|
79076d00240283dc304d55e6d32db1b60327fbb9
| 734
|
py
|
Python
|
icarus_nmr/scripts/digital_controller_terminal_client.py
|
vstadnytskyi/icarus-nmr
|
9f86ebb66256482023df4c15e552a5dd5573d3ba
|
[
"BSD-3-Clause"
] | 1
|
2022-02-07T22:05:17.000Z
|
2022-02-07T22:05:17.000Z
|
icarus_nmr/scripts/digital_controller_terminal_client.py
|
vstadnytskyi/icarus-nmr
|
9f86ebb66256482023df4c15e552a5dd5573d3ba
|
[
"BSD-3-Clause"
] | 25
|
2019-10-16T13:58:09.000Z
|
2022-02-08T21:34:24.000Z
|
icarus_nmr/scripts/digital_controller_terminal_client.py
|
vstadnytskyi/icarus-nmr
|
9f86ebb66256482023df4c15e552a5dd5573d3ba
|
[
"BSD-3-Clause"
] | 1
|
2022-02-07T22:05:27.000Z
|
2022-02-07T22:05:27.000Z
|
#!/usr/bin/env python3
"""
"""
import socket
device_ca_server_prefix = f'{socket.gethostname()}_dio_controller:'
from caproto.threading.client import Context
ctx = Context()
ca_name = device_ca_server_prefix
pv_names = ['dio',
'bit0_indicator',
'bit0',
'bit0_enable',
'bit1_indicator',
'bit1',
'bit1_enable',
'bit2_indicator',
'bit2',
'bit2_enable',
'bit3_indicator',
'bit3',
'bit3_enable']
pvs = {}
for item in pv_names:
pvs[item], = ctx.get_pvs(f'{ca_name}{item}',)
if __name__ == '__main__':
pass
| 23.677419
| 68
| 0.491826
|
import socket
device_ca_server_prefix = f'{socket.gethostname()}_dio_controller:'
from caproto.threading.client import Context
ctx = Context()
ca_name = device_ca_server_prefix
pv_names = ['dio',
'bit0_indicator',
'bit0',
'bit0_enable',
'bit1_indicator',
'bit1',
'bit1_enable',
'bit2_indicator',
'bit2',
'bit2_enable',
'bit3_indicator',
'bit3',
'bit3_enable']
pvs = {}
for item in pv_names:
pvs[item], = ctx.get_pvs(f'{ca_name}{item}',)
if __name__ == '__main__':
pass
| true
| true
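The terminal-client script above only creates the PV handles; a hedged usage sketch of reading and writing one of those PVs with the same caproto threading client (this assumes the matching dio_controller CA server is actually running on the local host):
import socket
from caproto.threading.client import Context

ctx = Context()
prefix = f'{socket.gethostname()}_dio_controller:'
bit0, = ctx.get_pvs(f'{prefix}bit0')
print(bit0.read().data)   # current value reported by the server
bit0.write([1])           # request the output bit to be set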
|
79076f0dbe5eb29198394088ef9ac12dce4860fa
| 7,565
|
py
|
Python
|
rbtools/testing/testcase.py
|
pombredanne/rbtools
|
b4838a640b458641ffd233093ae65971d0b4d529
|
[
"MIT"
] | 113
|
2015-01-01T15:26:22.000Z
|
2022-01-02T11:37:46.000Z
|
rbtools/testing/testcase.py
|
pombredanne/rbtools
|
b4838a640b458641ffd233093ae65971d0b4d529
|
[
"MIT"
] | 29
|
2015-01-06T21:31:50.000Z
|
2022-01-12T05:37:26.000Z
|
rbtools/testing/testcase.py
|
pombredanne/rbtools
|
b4838a640b458641ffd233093ae65971d0b4d529
|
[
"MIT"
] | 109
|
2015-01-03T20:55:05.000Z
|
2022-02-15T13:18:44.000Z
|
"""Base test cases for RBTools unit tests."""
from __future__ import unicode_literals
import os
import re
import shutil
import sys
import tempfile
import unittest
from contextlib import contextmanager
import six
from rbtools.utils.filesystem import cleanup_tempfiles, make_tempdir
import kgb
from rbtools.utils.filesystem import make_tempfile
class TestCase(unittest.TestCase):
"""The base class for RBTools test cases.
This provides helpful utility functions, environment management, and
better docstrings to help craft unit tests for RBTools functionality.
    All RBTools unit tests should use this class or a subclass of it
as the base class.
"""
ws_re = re.compile(r'\s+')
default_text_editor = '%s %s' % (
sys.executable,
os.path.abspath(os.path.join(os.path.dirname(__file__),
'scripts', 'editor.py'))
)
maxDiff = 10000
#: Whether individual unit tests need a new temporary HOME directory.
#:
#: If set, a directory will be created at test startup, and will be
#: set as the home directory.
#:
#: Version Added:
#: 3.0
needs_temp_home = False
@classmethod
def setUpClass(cls):
super(TestCase, cls).setUpClass()
cls._cls_old_cwd = os.getcwd()
@classmethod
def tearDownClass(cls):
os.chdir(cls._cls_old_cwd)
super(TestCase, cls).tearDownClass()
def setUp(self):
super(TestCase, self).setUp()
self._old_cwd = os.getcwd()
self.old_home = self.get_user_home()
if self.needs_temp_home:
self.set_user_home(make_tempdir())
os.environ[str('RBTOOLS_EDITOR')] = str(self.default_text_editor)
def tearDown(self):
super(TestCase, self).tearDown()
os.chdir(self._old_cwd)
cleanup_tempfiles()
if self.old_home:
self.set_user_home(self.old_home)
def shortDescription(self):
"""Returns the description of the current test.
This changes the default behavior to replace all newlines with spaces,
allowing a test description to span lines. It should still be kept
short, though.
Returns:
unicode:
The descriptive text for the current unit test.
"""
doc = self._testMethodDoc
if doc is not None:
doc = doc.split('\n\n', 1)[0]
doc = self.ws_re.sub(' ', doc).strip()
return doc
def get_user_home(self):
"""Return the user's current home directory.
Version Added:
3.0
Returns:
unicode:
The current home directory.
"""
return os.environ['HOME']
def set_user_home(self, path):
"""Set the user's current home directory.
This will be unset when the unit test has finished.
Version Added:
3.0
Args:
path (unicode):
The new home directory.
"""
os.environ['HOME'] = path
def chdir_tmp(self):
"""Create a temporary directory and set it as the working directory.
The directory will be deleted after the test has finished.
Version Added:
3.0
Returns:
unicode:
The path to the temp directory.
"""
dirname = make_tempdir()
os.chdir(dirname)
return dirname
def precreate_tempfiles(self, count):
"""Pre-create a specific number of temporary files.
This will call :py:func:`~rbtools.utils.filesystem.make_tempfile`
the specified number of times, returning the list of generated temp
file paths, and will then spy that function to return those temp
files.
Once each pre-created temp file is used up, any further calls to
:py:func:`~rbtools.utils.filesystem.make_tempfile` will result in
an error, failing the test.
This is useful in unit tests that need to script a series of
expected calls using :py:mod:`kgb` (such as through
:py:class:`kgb.ops.SpyOpMatchInOrder`) that need to know the names
of temporary filenames up-front.
Unit test suites that use this must mix in :py:class:`kgb.SpyAgency`.
Args:
count (int):
The number of temporary filenames to pre-create.
Raises:
AssertionError:
The test suite class did not mix in :py:class:`kgb.SpyAgency`.
"""
assert hasattr(self, 'spy_on'), (
'%r must mix in kgb.SpyAgency in order to call this method.'
% self.__class__)
tmpfiles = [
make_tempfile()
for i in range(count)
]
self.spy_on(make_tempfile, op=kgb.SpyOpReturnInOrder(tmpfiles))
return tmpfiles
def assertDiffEqual(self, diff, expected_diff):
"""Assert that two diffs are equal.
Args:
diff (bytes):
The generated diff.
expected_diff (bytes):
The expected diff.
Raises:
AssertionError:
The diffs aren't equal or of the right type.
"""
self.assertIsInstance(diff, bytes)
self.assertIsInstance(expected_diff, bytes)
self.assertEqual(diff.splitlines(), expected_diff.splitlines())
def assertRaisesMessage(self, expected_exception, expected_message):
"""Assert that a call raises an exception with the given message.
Args:
expected_exception (type):
The type of exception that's expected to be raised.
expected_message (unicode):
The expected exception message.
Raises:
AssertionError:
The assertion failure, if the exception and message isn't
raised.
"""
return self.assertRaisesRegexp(expected_exception,
re.escape(expected_message))
@contextmanager
def reviewboardrc(self, config, use_temp_dir=False):
"""Populate a temporary .reviewboardrc file.
This will create a :file:`.reviewboardrc` file, either in the current
directory or in a new temporary directory (if ``use_temp_dir`` is set).
The file will contain the provided configuration.
Version Added:
3.0
Args:
config (dict):
A dictionary of key-value pairs to write into the
:file:`.reviewboardrc` file.
A best effort attempt will be made to write each configuration
to the file.
use_temp_dir (bool, optional):
Whether a temporary directory should be created and set as
the current directory. If set, the file will be written there,
and the directory will be removed after the context manager
finishes.
Context:
The code being run will have a :file:`.reviewboardrc` in the
current directory.
"""
if use_temp_dir:
temp_dir = tempfile.mkdtemp()
cwd = os.getcwd()
os.chdir(temp_dir)
with open('.reviewboardrc', 'w') as fp:
for key, value in six.iteritems(config):
fp.write('%s = %r\n' % (key, value))
try:
yield
finally:
if use_temp_dir:
os.chdir(cwd)
shutil.rmtree(temp_dir)
| 28.655303
| 79
| 0.59458
|
from __future__ import unicode_literals
import os
import re
import shutil
import sys
import tempfile
import unittest
from contextlib import contextmanager
import six
from rbtools.utils.filesystem import cleanup_tempfiles, make_tempdir
import kgb
from rbtools.utils.filesystem import make_tempfile
class TestCase(unittest.TestCase):
ws_re = re.compile(r'\s+')
default_text_editor = '%s %s' % (
sys.executable,
os.path.abspath(os.path.join(os.path.dirname(__file__),
'scripts', 'editor.py'))
)
maxDiff = 10000
needs_temp_home = False
@classmethod
def setUpClass(cls):
super(TestCase, cls).setUpClass()
cls._cls_old_cwd = os.getcwd()
@classmethod
def tearDownClass(cls):
os.chdir(cls._cls_old_cwd)
super(TestCase, cls).tearDownClass()
def setUp(self):
super(TestCase, self).setUp()
self._old_cwd = os.getcwd()
self.old_home = self.get_user_home()
if self.needs_temp_home:
self.set_user_home(make_tempdir())
os.environ[str('RBTOOLS_EDITOR')] = str(self.default_text_editor)
def tearDown(self):
super(TestCase, self).tearDown()
os.chdir(self._old_cwd)
cleanup_tempfiles()
if self.old_home:
self.set_user_home(self.old_home)
def shortDescription(self):
doc = self._testMethodDoc
if doc is not None:
doc = doc.split('\n\n', 1)[0]
doc = self.ws_re.sub(' ', doc).strip()
return doc
def get_user_home(self):
return os.environ['HOME']
def set_user_home(self, path):
os.environ['HOME'] = path
def chdir_tmp(self):
dirname = make_tempdir()
os.chdir(dirname)
return dirname
def precreate_tempfiles(self, count):
assert hasattr(self, 'spy_on'), (
'%r must mix in kgb.SpyAgency in order to call this method.'
% self.__class__)
tmpfiles = [
make_tempfile()
for i in range(count)
]
self.spy_on(make_tempfile, op=kgb.SpyOpReturnInOrder(tmpfiles))
return tmpfiles
def assertDiffEqual(self, diff, expected_diff):
self.assertIsInstance(diff, bytes)
self.assertIsInstance(expected_diff, bytes)
self.assertEqual(diff.splitlines(), expected_diff.splitlines())
def assertRaisesMessage(self, expected_exception, expected_message):
return self.assertRaisesRegexp(expected_exception,
re.escape(expected_message))
@contextmanager
def reviewboardrc(self, config, use_temp_dir=False):
if use_temp_dir:
temp_dir = tempfile.mkdtemp()
cwd = os.getcwd()
os.chdir(temp_dir)
with open('.reviewboardrc', 'w') as fp:
for key, value in six.iteritems(config):
fp.write('%s = %r\n' % (key, value))
try:
yield
finally:
if use_temp_dir:
os.chdir(cwd)
shutil.rmtree(temp_dir)
| true
| true
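Most of the docstrings above describe hooks meant to be used from subclasses; here is a short, hedged sketch of a concrete test case built on those helpers (hypothetical test class, assumes rbtools is installed):
import os

from rbtools.testing.testcase import TestCase


class ExampleConfigTests(TestCase):
    needs_temp_home = True  # each test runs against a throwaway $HOME

    def test_reviewboardrc_is_written(self):
        # reviewboardrc() drops a .reviewboardrc into a temporary working
        # directory and cleans it up again when the context manager exits.
        with self.reviewboardrc({'REVIEWBOARD_URL': 'https://rb.example.com/'},
                                use_temp_dir=True):
            self.assertTrue(os.path.exists('.reviewboardrc'))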
|
79076fc838d5a53d885fce2b85738d7a10c638c3
| 1,340
|
py
|
Python
|
RRDGraphs/rrd_1month.py
|
SelectLOL1/BeagleBoneBlack_PRU_PowerMeter
|
ee027e2713e1649ca7c4b68a737cd611695a6855
|
[
"MIT"
] | null | null | null |
RRDGraphs/rrd_1month.py
|
SelectLOL1/BeagleBoneBlack_PRU_PowerMeter
|
ee027e2713e1649ca7c4b68a737cd611695a6855
|
[
"MIT"
] | null | null | null |
RRDGraphs/rrd_1month.py
|
SelectLOL1/BeagleBoneBlack_PRU_PowerMeter
|
ee027e2713e1649ca7c4b68a737cd611695a6855
|
[
"MIT"
] | null | null | null |
import time
import matplotlib.pyplot as plt
import matplotlib.dates as mdate
import numpy as np
import rrdtool
start = 2628000
end = 0
if int(end) <= 0:
end = 2
if int(start) <= 0:
start = 600
epochTimeNow = int(time.time()-1)
data = rrdtool.fetch('/home/bca/rrdtoolfilesave/powerCapturenew.rrd', 'AVERAGE',
'--start', f'-{start}',
'--end', f'-{end}')
values = np.array(data[2])
values[values == None] = 0
epochEndTime = epochTimeNow - int(end)
epochStartTime = epochTimeNow - int(start)
timeseries = np.zeros(shape=((epochEndTime-epochStartTime + 1), 1))
for i in range (epochEndTime - epochStartTime + 1):
timeseries[i] = epochStartTime + 7200 + i
fig, ax = plt.subplots()
timeseries = mdate.epoch2num(timeseries)
ax.plot_date(timeseries, values, linestyle = '-', marker = '', label=f'AllThePower')
timeseriesFormat = '%d-%m-%y %H:%M:%S'
timeseriesFormatted = mdate.DateFormatter(timeseriesFormat)
ax.xaxis.set_major_formatter(timeseriesFormatted)
fig.autofmt_xdate()
plt.ylim(bottom = 0)
StartTime = time.strftime('%Y-%m-%d [%H:%M:%S]', time.localtime(epochStartTime))
EndTime = time.strftime('%Y-%m-%d [%H:%M:%S]', time.localtime(epochEndTime))
plt.ylabel('Watt')
plt.title(f'Time range: {StartTime} - {EndTime}')
plt.tight_layout()
plt.legend()
plt.show()
plt.close()
| 29.130435
| 84
| 0.680597
|
import time
import matplotlib.pyplot as plt
import matplotlib.dates as mdate
import numpy as np
import rrdtool
start = 2628000
end = 0
if int(end) <= 0:
end = 2
if int(start) <= 0:
start = 600
epochTimeNow = int(time.time()-1)
data = rrdtool.fetch('/home/bca/rrdtoolfilesave/powerCapturenew.rrd', 'AVERAGE',
'--start', f'-{start}',
'--end', f'-{end}')
values = np.array(data[2])
values[values == None] = 0
epochEndTime = epochTimeNow - int(end)
epochStartTime = epochTimeNow - int(start)
timeseries = np.zeros(shape=((epochEndTime-epochStartTime + 1), 1))
for i in range (epochEndTime - epochStartTime + 1):
timeseries[i] = epochStartTime + 7200 + i
fig, ax = plt.subplots()
timeseries = mdate.epoch2num(timeseries)
ax.plot_date(timeseries, values, linestyle = '-', marker = '', label=f'AllThePower')
timeseriesFormat = '%d-%m-%y %H:%M:%S'
timeseriesFormatted = mdate.DateFormatter(timeseriesFormat)
ax.xaxis.set_major_formatter(timeseriesFormatted)
fig.autofmt_xdate()
plt.ylim(bottom = 0)
StartTime = time.strftime('%Y-%m-%d [%H:%M:%S]', time.localtime(epochStartTime))
EndTime = time.strftime('%Y-%m-%d [%H:%M:%S]', time.localtime(epochEndTime))
plt.ylabel('Watt')
plt.title(f'Time range: {StartTime} - {EndTime}')
plt.tight_layout()
plt.legend()
plt.show()
plt.close()
| true
| true
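The RRDGraphs scripts in this repository appear to differ only in the fetch window (`start`); a hedged sketch of factoring the fetch step into a reusable helper (hypothetical function name, same rrdtool/numpy calls as above):
import numpy as np
import rrdtool


def fetch_rrd_window(rrd_path, start_seconds, end_seconds=2):
    # Fetch AVERAGE consolidated samples covering the last `start_seconds` seconds.
    data = rrdtool.fetch(rrd_path, 'AVERAGE',
                         '--start', f'-{start_seconds}',
                         '--end', f'-{end_seconds}')
    values = np.array(data[2])
    values[values == None] = 0  # rrdtool reports missing samples as None  # noqa: E711
    return values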
|
79076fc8ab25045625ad856ab2bf8bfbdff1e38d
| 28,127
|
py
|
Python
|
gsfpy3_09/gsfSensorSpecific.py
|
irewolepeter/gsfpy_USM_Implementation
|
c4614ac3f7d833eb86ea38c7708108b130f96612
|
[
"MIT"
] | 7
|
2020-07-01T07:12:19.000Z
|
2022-01-20T20:39:57.000Z
|
gsfpy3_09/gsfSensorSpecific.py
|
irewolepeter/gsfpy_USM_Implementation
|
c4614ac3f7d833eb86ea38c7708108b130f96612
|
[
"MIT"
] | 36
|
2020-06-23T09:10:15.000Z
|
2022-03-22T10:27:58.000Z
|
gsfpy3_09/gsfSensorSpecific.py
|
irewolepeter/gsfpy_USM_Implementation
|
c4614ac3f7d833eb86ea38c7708108b130f96612
|
[
"MIT"
] | 2
|
2021-02-07T13:21:52.000Z
|
2021-06-24T19:16:16.000Z
|
from ctypes import (
Structure,
Union,
c_char,
c_double,
c_int,
c_long,
c_short,
c_ubyte,
c_uint,
c_ulong,
c_ushort,
)
from . import timespec
class c_gsfSeaBeamSpecific(Structure):
_fields_ = [("EclipseTime", c_ushort)]
class c_gsfEM100Specific(Structure):
_fields_ = [
("ship_pitch", c_double),
("transducer_pitch", c_double),
("mode", c_int),
("power", c_int),
("attenuation", c_int),
("tvg", c_int),
("pulse_length", c_int),
("counter", c_int),
]
class c_gsfEM121ASpecific(Structure):
_fields_ = [
("ping_number", c_int),
("mode", c_int),
("valid_beams", c_int),
("pulse_length", c_int),
("beam_width", c_int),
("tx_power", c_int),
("tx_status", c_int),
("rx_status", c_int),
("surface_velocity", c_double),
]
class c_gsfSeaBatSpecific(Structure):
_fields_ = [
("ping_number", c_int),
("surface_velocity", c_double),
("mode", c_int),
("sonar_range", c_int),
("transmit_power", c_int),
("receive_gain", c_int),
]
class c_gsfEM950Specific(Structure):
_fields_ = [
("ping_number", c_int),
("mode", c_int),
("ping_quality", c_int),
("ship_pitch", c_double),
("transducer_pitch", c_double),
("surface_velocity", c_double),
]
SEAMAP_DOUBLE_ARRAY_OF_2 = c_double * 2
class c_gsfSeamapSpecific(Structure):
_fields_ = [
("portTransmitter", SEAMAP_DOUBLE_ARRAY_OF_2),
("stbdTransmitter", SEAMAP_DOUBLE_ARRAY_OF_2),
("portGain", c_double),
("stbdGain", c_double),
("portPulseLength", c_double),
("stbdPulseLength", c_double),
("pressureDepth", c_double),
("altitude", c_double),
("temperature", c_double),
]
class c_gsfTypeIIISpecific(Structure):
_fields_ = [
("leftmost_beam", c_ushort),
("rightmost_beam", c_ushort),
("total_beams", c_ushort),
("nav_mode", c_ushort),
("ping_number", c_ushort),
("mission_number", c_ushort),
]
class c_gsfCmpSassSpecific(Structure):
_fields_ = [("lfreq", c_double), ("lntens", c_double)]
class c_gsfSBAmpSpecific(Structure):
_fields_ = [
("hour", c_ushort),
("minute", c_ushort),
("second", c_ushort),
("hundredths", c_ushort),
("block_number", c_uint),
("avg_gate_depth", c_short),
]
SEA_BAT_CHAR_ARRAY_OF_4 = c_char * 4
class c_gsfSeaBatIISpecific(Structure):
_fields_ = [
("ping_number", c_int),
("surface_velocity", c_double),
("mode", c_int),
("sonar_range", c_int),
("transmit_power", c_int),
("receive_gain", c_int),
("fore_aft_bw", c_double),
("athwart_bw", c_double),
("spare", SEA_BAT_CHAR_ARRAY_OF_4),
]
class c_gsfSeaBat8101Specific(Structure):
_fields_ = [
("ping_number", c_int),
("surface_velocity", c_double),
("mode", c_int),
("range", c_int),
("power", c_int),
("gain", c_int),
("pulse_width", c_int),
("tvg_spreading", c_int),
("tvg_absorption", c_int),
("fore_aft_bw", c_double),
("athwart_bw", c_double),
("range_filt_min", c_double),
("range_filt_max", c_double),
("depth_filt_min", c_double),
("depth_filt_max", c_double),
("projector", c_int),
("spare", SEA_BAT_CHAR_ARRAY_OF_4),
]
SEA_BEAM_ALGORITHM_ORDER = c_char * 5
SEA_BEAM_SPARE = c_char * 2
class c_gsfSeaBeam2112Specific(Structure):
_fields_ = [
("mode", c_int),
("surface_velocity", c_double),
("ssv_source", c_char),
("ping_gain", c_int),
("pulse_width", c_int),
("transmitter_attenuation", c_int),
("number_algorithms", c_int),
("algorithm_order", SEA_BEAM_ALGORITHM_ORDER),
("spare", SEA_BEAM_SPARE),
]
class c_gsfElacMkIISpecific(Structure):
_fields_ = [
("mode", c_int),
("ping_num", c_int),
("sound_vel", c_int),
("pulse_length", c_int),
("receiver_gain_stbd", c_int),
("receiver_gain_port", c_int),
("reserved", c_int),
]
class c_gsfEM3RunTime(Structure):
_fields_ = [
("model_number", c_int),
("dg_time", timespec.c_timespec),
("ping_number", c_int),
("serial_number", c_int),
("system_status", c_int),
("filter_id", c_int),
("min_depth", c_double),
("max_depth", c_double),
("absorption", c_double),
("pulse_length", c_double),
("transmit_beam_width", c_double),
("power_reduction", c_int),
("receive_beam_width", c_double),
("receive_bandwidth", c_int),
("receive_gain", c_int),
("cross_over_angle", c_int),
("ssv_source", c_int),
("swath_width", c_int),
("beam_spacing", c_int),
("coverage_sector", c_int),
("stabilization", c_int),
("port_swath_width", c_int),
("stbd_swath_width", c_int),
("port_coverage_sector", c_int),
("stbd_coverage_sector", c_int),
("hilo_freq_absorp_ratio", c_int),
("spare1", c_int),
]
EM3_RUN_TIME_2_ARRAY = c_gsfEM3RunTime * 2
class c_gsfEM3Specific(Structure):
_fields_ = [
("model_number", c_int),
("ping_number", c_int),
("serial_number", c_int),
("surface_velocity", c_double),
("transducer_depth", c_double),
("valid_beams", c_int),
("sample_rate", c_int),
("depth_difference", c_double),
("offset_multiplier", c_int),
("run_time", EM3_RUN_TIME_2_ARRAY),
]
EM3_RAW_SPARE_BYTES = c_ubyte * 16
class c_gsfEMRunTime(Structure): # 168 bytes
_fields_ = [
("model_number", c_int),
("dg_time", timespec.c_timespec),
("ping_counter", c_int),
("serial_number", c_int),
("operator_station_status", c_ubyte),
("processing_unit_status", c_ubyte),
("bsp_status", c_ubyte),
("head_transceiver_status", c_ubyte),
("mode", c_ubyte),
("filter_id", c_ubyte),
("min_depth", c_double),
("max_depth", c_double),
("absorption", c_double),
("tx_pulse_length", c_double),
("tx_beam_width", c_double),
("tx_power_re_max", c_double),
("rx_beam_width", c_double),
("rx_bandwidth", c_double),
("rx_fixed_gain", c_double),
("tvg_cross_over_angle", c_double),
("ssv_source", c_ubyte),
("max_port_swath_width", c_int),
("beam_spacing", c_ubyte),
("max_port_coverage", c_int),
("stabilization", c_ubyte),
("max_stbd_coverage", c_int),
("max_stbd_swath_width", c_int),
("durotong_speed", c_double),
("hi_low_absorption_ratio", c_double),
("tx_along_tilt", c_double),
("filter_id_2", c_ubyte),
("spare", EM3_RAW_SPARE_BYTES),
]
class c_gsfEMPUStatus(Structure): # 42 bytes
_fields_ = [
("pu_cpu_load", c_double),
("sensor_status", c_ushort),
("achieved_port_coverage", c_int),
("achieved_stbd_coverage", c_int),
("yaw_stabilization", c_double),
("spare", EM3_RAW_SPARE_BYTES),
]
class c_gsfEM3RawTxSector(Structure): # 72 bytes
_fields_ = [
("tilt_angle", c_double),
("focus_range", c_double),
("signal_length", c_double),
("transmit_delay", c_double),
("center_frequency", c_double),
("waveform_id", c_int),
("sector_number", c_int),
("signal_bandwidth", c_double),
("spare", EM3_RAW_SPARE_BYTES),
]
GSF_MAX_EM3_SECTORS = 20
EM3_RAW_SECTORS = c_gsfEM3RawTxSector * GSF_MAX_EM3_SECTORS # 1440 bytes
class c_gsfEM3RawSpecific(Structure): # 1792 bytes (1746 + 23 * 2)
_fields_ = [
("model_number", c_int),
("ping_counter", c_int),
("serial_number", c_int),
("surface_velocity", c_double),
("transducer_depth", c_double),
("valid_detections", c_int),
("sampling_frequency", c_double),
("vehicle_depth", c_double),
("depth_difference", c_double),
("offset_multiplier", c_int),
("spare_1", EM3_RAW_SPARE_BYTES),
("transmit_sectors", c_int), # 80 bytes
("sector", EM3_RAW_SECTORS), # 1520 bytes
("spare_2", EM3_RAW_SPARE_BYTES), # 1536 bytes
("run_time", c_gsfEMRunTime), # 1704 bytes
("pu_status", c_gsfEMPUStatus), # 1746 bytes
]
RESON8100_SPARE_BYTES = c_char * 2
class c_gsfReson8100Specific(Structure):
_fields_ = [
("latency", c_int),
("ping_number", c_int),
("sonar_id", c_int),
("sonar_model", c_int),
("frequency", c_int),
("surface_velocity", c_double),
("sample_rate", c_int),
("ping_rate", c_int),
("mode", c_int),
("range", c_int),
("power", c_int),
("gain", c_int),
("tvg_spreading", c_int),
("tvg_absorption", c_int),
("fore_aft_bw", c_double),
("athwart_bw", c_double),
("projector_type", c_int),
("projector_angle", c_int),
("range_filt_min", c_double),
("range_filt_max", c_double),
("depth_filt_min", c_double),
("depth_filt_max", c_double),
("filters_active", c_int),
("temperature", c_int),
("beam_spacing", c_double),
("spare", RESON8100_SPARE_BYTES),
]
RESON7100_RESERVED_1 = c_ubyte * 16
RESON7100_RESERVED_2 = c_char * 15
RESON7100_RESERVED_3 = c_char * 8
class c_gsfReson7100Specific(Structure):
_fields_ = [
("protocol_version", c_uint),
("device_id", c_uint),
("reserved_1", RESON7100_RESERVED_1),
("major_serial_number", c_uint),
("minor_serial_number", c_uint),
("ping_number", c_uint),
("multi_ping_seq", c_uint),
("frequency", c_double),
("sample_rate", c_double),
("receiver_bandwdth", c_double),
("tx_pulse_width", c_double),
("tx_pulse_type_id", c_uint),
("tx_pulse_envlp_id", c_uint),
("tx_pulse_envlp_param", c_double),
("tx_pulse_reserved", c_uint),
("max_ping_rate", c_double),
("ping_period", c_double),
("range", c_double),
("power", c_double),
("gain", c_double),
("control_flags", c_uint),
("projector_id", c_uint),
("projector_steer_angl_vert", c_double),
("projector_steer_angl_horz", c_double),
("projector_beam_wdth_vert", c_double),
("projector_beam_wdth_horz", c_double),
("projector_beam_focal_pt", c_double),
("projector_beam_weighting_window_type", c_uint),
("projector_beam_weighting_window_param", c_uint),
("transmit_flags", c_uint),
("hydrophone_id", c_uint),
("receiving_beam_weighting_window_type", c_uint),
("receiving_beam_weighting_window_param", c_uint),
("receive_flags", c_uint),
("receive_beam_width", c_double),
("range_filt_min", c_double),
("range_filt_max", c_double),
("depth_filt_min", c_double),
("depth_filt_max", c_double),
("absorption", c_double),
("sound_velocity", c_double),
("spreading", c_double),
("raw_data_from_7027", c_ubyte),
("reserved_2", RESON7100_RESERVED_2),
("sv_source", c_ubyte),
("layer_comp_flag", c_ubyte),
("reserved_3", RESON7100_RESERVED_3),
]
RESONTSERIES_RESERVED_1 = c_ubyte * 10
RESONTSERIES_RESERVED_2 = c_ubyte * 3
RESONTSERIES_RESERVED_3 = c_ubyte * 32
RESONTSERIES_RESERVED_7027 = c_ubyte * 420
RESONTSERIES_DEVICE_DESCRIPTION = c_char * 60
class c_gsfResonTSeriesSpecific(Structure):
_fields_ = [
("protocol_version", c_uint),
("device_id", c_uint),
("number_devices", c_uint),
("system_enumerator", c_ushort),
("reserved_1", RESONTSERIES_RESERVED_1),
("major_serial_number", c_uint),
("minor_serial_number", c_uint),
("ping_number", c_uint),
("multi_ping_seq", c_uint),
("frequency", c_double),
("sample_rate", c_double),
("receiver_bandwdth", c_double),
("tx_pulse_width", c_double),
("tx_pulse_type_id", c_uint),
("tx_pulse_envlp_id", c_uint),
("tx_pulse_envlp_param", c_double),
("tx_pulse_mode", c_ushort),
("tx_pulse_reserved", c_ushort),
("max_ping_rate", c_double),
("ping_period", c_double),
("range", c_double),
("power", c_double),
("gain", c_double),
("control_flags", c_uint),
("projector_id", c_uint),
("projector_steer_angl_vert", c_double),
("projector_steer_angl_horz", c_double),
("projector_beam_wdth_vert", c_double),
("projector_beam_wdth_horz", c_double),
("projector_beam_focal_pt", c_double),
("projector_beam_weighting_window_type", c_uint),
("projector_beam_weighting_window_param", c_double),
("transmit_flags", c_uint),
("hydrophone_id", c_uint),
("receiving_beam_weighting_window_type", c_uint),
("receiving_beam_weighting_window_param", c_double),
("receive_flags", c_uint),
("receive_beam_width", c_double),
("range_filt_min", c_double),
("range_filt_max", c_double),
("depth_filt_min", c_double),
("depth_filt_max", c_double),
("absorption", c_double),
("sound_velocity", c_double),
("sv_source", c_ubyte),
("spreading", c_double),
("beam_spacing_mode", c_ushort),
("sonar_source_mode", c_ushort),
("coverage_mode", c_ubyte),
("coverage_angle", c_double),
("horizontal_receiver_steering_angle", c_double),
("reserved_2", RESONTSERIES_RESERVED_2),
("uncertainty_type", c_uint),
("transmitter_steering_angle", c_double),
("applied_roll", c_double),
("detection_algorithm", c_ushort),
("detection_flags", c_uint),
("device_description", RESONTSERIES_DEVICE_DESCRIPTION),
("reserved_7027", RESONTSERIES_RESERVED_7027),
("reserved_3", RESONTSERIES_RESERVED_3),
]
EM4_SPARE_BYTES = c_ubyte * 16
class c_gsfEM4TxSector(Structure):
_fields_ = [
("tilt_angle", c_double),
("focus_range", c_double),
("signal_length", c_double),
("transmit_delay", c_double),
("center_frequency", c_double),
("mean_absorption", c_double),
("waveform_id", c_int),
("sector_number", c_int),
("signal_bandwidth", c_double),
("spare", EM4_SPARE_BYTES),
]
EM4_SECTORS = c_gsfEM4TxSector * 9
class c_gsfEM4Specific(Structure):
_fields_ = [
("model_number", c_int),
("ping_counter", c_int),
("serial_number", c_int),
("surface_velocity", c_double),
("transducer_depth", c_double),
("valid_detections", c_int),
("sampling_frequency", c_double),
("doppler_corr_scale", c_uint),
("vehicle_depth", c_double),
("spare_1", EM4_SPARE_BYTES),
("transmit_sectors", c_int),
("sector", EM4_SECTORS),
("spare_2", EM4_SPARE_BYTES),
("run_time", c_gsfEMRunTime),
("pu_status", c_gsfEMPUStatus),
]
GEOSWATH_SPARE_BYTES = c_char * 32
class c_gsfGeoSwathPlusSpecific(Structure):
_fields_ = [
("data_source", c_int),
("side", c_int),
("model_number", c_int),
("frequency", c_double),
("echosounder_type", c_int),
("ping_number", c_long),
("num_nav_samples", c_int),
("num_attitude_samples", c_int),
("num_heading_samples", c_int),
("num_miniSVS_samples", c_int),
("num_echosounder_samples", c_int),
("num_raa_samples", c_int),
("mean_sv", c_double),
("surface_velocity", c_double),
("valid_beams", c_int),
("sample_rate", c_double),
("pulse_length", c_double),
("ping_length", c_int),
("transmit_power", c_int),
("sidescan_gain_channel", c_int),
("stabilization", c_int),
("gps_quality", c_int),
("range_uncertainty", c_double),
("angle_uncertainty", c_double),
("spare", GEOSWATH_SPARE_BYTES),
]
KLEIN5410_SPARE_BYTES = c_char * 32
class c_gsfKlein5410BssSpecific(Structure):
_fields_ = [
("data_source", c_int),
("side", c_int),
("model_number", c_int),
("acoustic_frequency", c_double),
("sampling_frequency", c_double),
("ping_number", c_uint),
("num_samples", c_uint),
("num_raa_samples", c_uint),
("error_flags", c_uint),
("range", c_uint),
("fish_depth", c_double),
("fish_altitude", c_double),
("sound_speed", c_double),
("tx_waveform", c_int),
("altimeter", c_int),
("raw_data_config", c_uint),
("spare", KLEIN5410_SPARE_BYTES),
]
DELTAT_FILE_TYPE = c_char * 4
DELTAT_SPARE = c_char * 32
class c_gsfDeltaTSpecific(Structure):
_fields_ = [
("decode_file_type", DELTAT_FILE_TYPE),
("version", c_char),
("ping_byte_size", c_int),
("interrogation_time", timespec.c_timespec),
("samples_per_beam", c_int),
("sector_size", c_double),
("start_angle", c_double),
("angle_increment", c_double),
("acoustic_range", c_int),
("acoustic_frequency", c_int),
("sound_velocity", c_double),
("range_resolution", c_double),
("profile_tilt_angle", c_double),
("repetition_rate", c_double),
("ping_number", c_ulong),
("intensity_flag", c_ubyte),
("ping_latency", c_double),
("data_latency", c_double),
("sample_rate_flag", c_ubyte),
("option_flags", c_ubyte),
("num_pings_avg", c_int),
("center_ping_time_offset", c_double),
("user_defined_byte", c_ubyte),
("altitude", c_double),
("external_sensor_flags", c_char),
("pulse_length", c_double),
("fore_aft_beamwidth", c_double),
("athwartships_beamwidth", c_double),
("spare", DELTAT_SPARE),
]
EM12_SPARE = c_char * 32
class c_gsfEM12Specific(Structure):
_fields_ = [
("ping_number", c_int),
("resolution", c_int),
("ping_quality", c_int),
("sound_velocity", c_double),
("mode", c_int),
("spare", EM12_SPARE),
]
R2SONIC_MODELNO = c_ubyte * 12
R2SONIC_SERIALNO = c_ubyte * 12
R2SONIC_INFO = c_double * 12
R2SONIC_SPARE = c_ubyte * 32
class c_gsfR2SonicSpecific(Structure):
_fields_ = [
("model_number", R2SONIC_MODELNO),
("serial_number", R2SONIC_SERIALNO),
("dg_time", timespec.c_timespec),
("ping_number", c_uint),
("ping_period", c_double),
("sound_speed", c_double),
("frequency", c_double),
("tx_power", c_double),
("tx_pulse_width", c_double),
("tx_beamwidth_vert", c_double),
("tx_beamwidth_horiz", c_double),
("tx_steering_vert", c_double),
("tx_steering_horiz", c_double),
("tx_misc_info", c_uint),
("rx_bandwidth", c_double),
("rx_sample_rate", c_double),
("rx_range", c_double),
("rx_gain", c_double),
("rx_spreading", c_double),
("rx_absorption", c_double),
("rx_mount_tilt", c_double),
("rx_misc_info", c_uint),
("reserved", c_ushort),
("num_beams", c_ushort),
("A0_more_info", R2SONIC_INFO),
("A2_more_info", R2SONIC_INFO),
("G0_depth_gate_min", c_double),
("G0_depth_gate_max", c_double),
("G0_depth_gate_slope", c_double),
("spare", R2SONIC_SPARE),
]
SBECHOTRAC_SPARE = c_char * 4
class c_gsfSBEchotracSpecific(Structure):
_fields_ = [
("navigation_error", c_int),
("mpp_source", c_ushort),
("tide_source", c_ushort),
("dynamic_draft", c_double),
("spare", SBECHOTRAC_SPARE),
]
SBMGD77_SPARE = c_char * 4
class c_gsfSBMGD77Specific(Structure):
_fields_ = [
("time_zone_corr", c_ushort),
("position_type_code", c_ushort),
("correction_code", c_ushort),
("bathy_type_code", c_ushort),
("quality_code", c_ushort),
("travel_time", c_double),
("spare", SBMGD77_SPARE),
]
SBBDB_SPARE = c_char * 4
class c_gsfSBBDBSpecific(Structure):
_fields_ = [
("doc_no", c_int),
("eval", c_char),
("classification", c_char),
("track_adj_flag", c_char),
("source_flag", c_char),
("pt_or_track_ln", c_char),
("datum_flag", c_char),
("spare", c_char),
]
SBNOSHDB_SPARE = c_char * 4
class c_gsfSBNOSHDBSpecific(Structure):
_fields_ = [
("type_code", c_ushort),
("carto_code", c_ushort),
("spare", SBNOSHDB_SPARE),
]
SBNAVISOUND_SPARE = c_char * 8
class c_gsfSBNavisoundSpecific(Structure):
_fields_ = [
("pulse_length", c_double),
("spare", SBNAVISOUND_SPARE),
]
KMALL_TX_SECTOR_SPARE_BYTES = c_ubyte * 20
class c_gsfKMALLTxSector(Structure):
_fields_ = [
("txSectorNumb", c_int),
("txArrNumber", c_int),
("txSubArray", c_int),
("sectorTransmitDelay_sec", c_double),
("tiltAngleReTx_deg", c_double),
("txNominalSourceLevel_dB", c_double),
("txFocusRange_m", c_double),
("centreFreq_Hz", c_double),
("signalBandWidth_Hz", c_double),
("totalSignalLength_sec", c_double),
("pulseShading", c_int),
("signalWaveForm", c_int),
("spare1", KMALL_TX_SECTOR_SPARE_BYTES)
]
KMALL_EXTRA_DET_SPARE_BYTES = c_ubyte * 32
class c_gsfKMALLExtraDetClass(Structure):
_fields_ = [
("numExtraDetInClass", c_int),
("alarmFlag", c_int),
("spare", KMALL_EXTRA_DET_SPARE_BYTES)
]
# Sensor specific data structures for the Kongsberg 2040 / SIS 5.0
KMALL_SPARE_BYTES_1 = c_ubyte * 8
KMALL_SPARE_BYTES_2 = c_ubyte * 16
KMALL_SPARE_BYTES_3 = c_ubyte * 32
KMALL_SPARE_BYTES_4 = c_ubyte * 32
KMALL_SPARE_BYTES_5 = c_ubyte * 32
KMALL_SECTOR = c_gsfKMALLTxSector * 9
KMALL_EXTRA_DET_CLASS_INFO = c_gsfKMALLExtraDetClass * 11
class c_gsfKMALLSpecific(Structure):
_fields_ = [
("gsfKMALLVersion", c_int),
("dgmType", c_int),
("dgmVersion", c_int),
("systemID", c_int),
("echoSounderID", c_int),
("spare1", KMALL_SPARE_BYTES_1),
("numBytesCmnPart", c_int),
("pingCnt", c_int),
("rxFansPerPing", c_int),
("rxFanIndex", c_int),
("swathsPerPing", c_int),
("swathAlongPosition", c_int),
("txTransducerInd", c_int),
("rxTransducerInd", c_int),
("numRxTransducers", c_int),
("algorithmType", c_int),
("spare2", KMALL_SPARE_BYTES_2),
("numBytesInfoData", c_int),
("pingRate_Hz", c_double),
("beamSpacing", c_int),
("depthMode", c_int),
("subDepthMode", c_int),
("distanceBtwSwath", c_int),
("detectionMode", c_int),
("pulseForm", c_int),
("frequencyMode_Hz", c_double),
("freqRangeLowLim_Hz", c_double),
("freqRangeHighLim_Hz", c_double),
("maxTotalTxPulseLength_sec", c_double),
("maxEffTxPulseLength_sec", c_double),
("maxEffTxBandWidth_Hz", c_double),
("absCoeff_dBPerkm", c_double),
("portSectorEdge_deg", c_double),
("starbSectorEdge_deg", c_double),
("portMeanCov_deg", c_double),
("starbMeanCov_deg", c_double),
("portMeanCov_m", c_double),
("starbMeanCov_m", c_double),
("modeAndStabilisation", c_int),
("runtimeFilter1", c_int),
("runtimeFilter2", c_int),
("pipeTrackingStatus", c_int),
("transmitArraySizeUsed_deg", c_double),
("receiveArraySizeUsed_deg", c_double),
("transmitPower_dB", c_double),
("SLrampUpTimeRemaining", c_int),
("yawAngle_deg", c_double),
("numTxSectors", c_int),
("numBytesPerTxSector", c_int),
("headingVessel_deg", c_double),
("soundSpeedAtTxDepth_mPerSec", c_double),
("txTransducerDepth_m", c_double),
("z_waterLevelReRefPoint_m", c_double),
("x_kmallToall_m", c_double),
("y_kmallToall_m", c_double),
("latLongInfo", c_int),
("posSensorStatus", c_int),
("attitudeSensorStatus", c_int),
("latitude_deg", c_double),
("longitude_deg", c_double),
("ellipsoidHeightReRefPoint_m", c_double),
("spare3", KMALL_SPARE_BYTES_3),
("sector", KMALL_SECTOR),
("numBytesRxInfo", c_int),
("numSoundingsMaxMain", c_int),
("numSoundingsValidMain", c_int),
("numBytesPerSounding", c_int),
("WCSampleRate", c_double),
("seabedImageSampleRate", c_double),
("BSnormal_dB", c_double),
("BSoblique_dB", c_double),
("extraDetectionAlarmFlag", c_int),
("numExtraDetections", c_int),
("numExtraDetectionClasses", c_int),
("numBytesPerClass", c_int),
("spare4", KMALL_SPARE_BYTES_4),
("extraDetClassInfo", KMALL_EXTRA_DET_CLASS_INFO),
("spare5", KMALL_SPARE_BYTES_5)
]
class c_gsfSensorSpecific(Union):
_fields_ = [
("gsfSeaBeamSpecific", c_gsfSeaBeamSpecific),
("gsfEM100Specific", c_gsfEM100Specific),
("gsfEM121ASpecific", c_gsfEM121ASpecific),
("gsfEM121Specific", c_gsfEM121ASpecific),
("gsfSeaBatSpecific", c_gsfSeaBatSpecific),
("gsfEM950Specific", c_gsfEM950Specific),
("gsfEM1000Specific", c_gsfEM950Specific),
("gsfSeamapSpecific", c_gsfSeamapSpecific),
("gsfTypeIIISeaBeamSpecific", c_gsfTypeIIISpecific),
("gsfSASSSpecific", c_gsfTypeIIISpecific),
("gsfCmpSassSpecific", c_gsfCmpSassSpecific),
("gsfSBAmpSpecific", c_gsfSBAmpSpecific),
("gsfSeaBatIISpecific", c_gsfSeaBatIISpecific),
("gsfSeaBat8101Specific", c_gsfSeaBat8101Specific),
("gsfSeaBeam2112Specific", c_gsfSeaBeam2112Specific),
("gsfElacMkIISpecific", c_gsfElacMkIISpecific),
# used for EM120, EM300, EM1002, EM3000, EM3002, and EM121A_SIS
("gsfEM3Specific", c_gsfEM3Specific),
# used for EM120, EM300, EM1002, EM3000, EM3002, and EM121A_SIS
# with raw range and beam angle
("gsfEM3RawSpecific", c_gsfEM3RawSpecific),
("gsfReson8100Specific", c_gsfReson8100Specific),
("gsfReson7100Specific", c_gsfReson7100Specific),
# used for T50 and T20
("gsfResonTSeriesSpecific", c_gsfResonTSeriesSpecific),
# used for EM710, EM302, EM122, and EM2040
("gsfEM4Specific", c_gsfEM4Specific),
# DHG 2006/09/27 Use for GeoSwath+ interferometer
("gsfGeoSwathPlusSpecific", c_gsfGeoSwathPlusSpecific),
# Use for Klein 5410 Bathy Sidescan
("gsfKlein5410BssSpecific", c_gsfKlein5410BssSpecific),
("gsfDeltaTSpecific", c_gsfDeltaTSpecific),
("gsfEM12Specific", c_gsfEM12Specific),
("gsfR2SonicSpecific", c_gsfR2SonicSpecific),
("gsfKMallSpecific", c_gsfKMALLSpecific),
("gsfSBEchotracSpecific", c_gsfSBEchotracSpecific),
("gsfSBBathy2000Specific", c_gsfSBEchotracSpecific),
("gsfSBMGD77Specific", c_gsfSBMGD77Specific),
("gsfSBBDBSpecific", c_gsfSBBDBSpecific),
("gsfSBNOSHDBSpecific", c_gsfSBNOSHDBSpecific),
("gsfSBPDDSpecific", c_gsfSBEchotracSpecific),
("gsfSBNavisoundSpecific", c_gsfSBNavisoundSpecific),
]
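A brief, hedged illustration (not part of the module) of how these ctypes definitions are meant to be consumed — instantiating one sensor-specific structure and checking the size of the union that wraps them all; the import path is taken from the repository layout shown above:
from ctypes import sizeof
from gsfpy3_09.gsfSensorSpecific import c_gsfEM3Specific, c_gsfSensorSpecific

ping = c_gsfEM3Specific()
ping.model_number = 3000
ping.surface_velocity = 1500.0
print(ping.model_number, ping.surface_velocity)
# A ctypes Union is as large as its largest member.
print(sizeof(c_gsfSensorSpecific))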
| 30.908791
| 76
| 0.598606
|
from ctypes import (
Structure,
Union,
c_char,
c_double,
c_int,
c_long,
c_short,
c_ubyte,
c_uint,
c_ulong,
c_ushort,
)
from . import timespec
class c_gsfSeaBeamSpecific(Structure):
_fields_ = [("EclipseTime", c_ushort)]
class c_gsfEM100Specific(Structure):
_fields_ = [
("ship_pitch", c_double),
("transducer_pitch", c_double),
("mode", c_int),
("power", c_int),
("attenuation", c_int),
("tvg", c_int),
("pulse_length", c_int),
("counter", c_int),
]
class c_gsfEM121ASpecific(Structure):
_fields_ = [
("ping_number", c_int),
("mode", c_int),
("valid_beams", c_int),
("pulse_length", c_int),
("beam_width", c_int),
("tx_power", c_int),
("tx_status", c_int),
("rx_status", c_int),
("surface_velocity", c_double),
]
class c_gsfSeaBatSpecific(Structure):
_fields_ = [
("ping_number", c_int),
("surface_velocity", c_double),
("mode", c_int),
("sonar_range", c_int),
("transmit_power", c_int),
("receive_gain", c_int),
]
class c_gsfEM950Specific(Structure):
_fields_ = [
("ping_number", c_int),
("mode", c_int),
("ping_quality", c_int),
("ship_pitch", c_double),
("transducer_pitch", c_double),
("surface_velocity", c_double),
]
SEAMAP_DOUBLE_ARRAY_OF_2 = c_double * 2
class c_gsfSeamapSpecific(Structure):
_fields_ = [
("portTransmitter", SEAMAP_DOUBLE_ARRAY_OF_2),
("stbdTransmitter", SEAMAP_DOUBLE_ARRAY_OF_2),
("portGain", c_double),
("stbdGain", c_double),
("portPulseLength", c_double),
("stbdPulseLength", c_double),
("pressureDepth", c_double),
("altitude", c_double),
("temperature", c_double),
]
class c_gsfTypeIIISpecific(Structure):
_fields_ = [
("leftmost_beam", c_ushort),
("rightmost_beam", c_ushort),
("total_beams", c_ushort),
("nav_mode", c_ushort),
("ping_number", c_ushort),
("mission_number", c_ushort),
]
class c_gsfCmpSassSpecific(Structure):
_fields_ = [("lfreq", c_double), ("lntens", c_double)]
class c_gsfSBAmpSpecific(Structure):
_fields_ = [
("hour", c_ushort),
("minute", c_ushort),
("second", c_ushort),
("hundredths", c_ushort),
("block_number", c_uint),
("avg_gate_depth", c_short),
]
SEA_BAT_CHAR_ARRAY_OF_4 = c_char * 4
class c_gsfSeaBatIISpecific(Structure):
_fields_ = [
("ping_number", c_int),
("surface_velocity", c_double),
("mode", c_int),
("sonar_range", c_int),
("transmit_power", c_int),
("receive_gain", c_int),
("fore_aft_bw", c_double),
("athwart_bw", c_double),
("spare", SEA_BAT_CHAR_ARRAY_OF_4),
]
class c_gsfSeaBat8101Specific(Structure):
_fields_ = [
("ping_number", c_int),
("surface_velocity", c_double),
("mode", c_int),
("range", c_int),
("power", c_int),
("gain", c_int),
("pulse_width", c_int),
("tvg_spreading", c_int),
("tvg_absorption", c_int),
("fore_aft_bw", c_double),
("athwart_bw", c_double),
("range_filt_min", c_double),
("range_filt_max", c_double),
("depth_filt_min", c_double),
("depth_filt_max", c_double),
("projector", c_int),
("spare", SEA_BAT_CHAR_ARRAY_OF_4),
]
SEA_BEAM_ALGORITHM_ORDER = c_char * 5
SEA_BEAM_SPARE = c_char * 2
class c_gsfSeaBeam2112Specific(Structure):
_fields_ = [
("mode", c_int),
("surface_velocity", c_double),
("ssv_source", c_char),
("ping_gain", c_int),
("pulse_width", c_int),
("transmitter_attenuation", c_int),
("number_algorithms", c_int),
("algorithm_order", SEA_BEAM_ALGORITHM_ORDER),
("spare", SEA_BEAM_SPARE),
]
class c_gsfElacMkIISpecific(Structure):
_fields_ = [
("mode", c_int),
("ping_num", c_int),
("sound_vel", c_int),
("pulse_length", c_int),
("receiver_gain_stbd", c_int),
("receiver_gain_port", c_int),
("reserved", c_int),
]
class c_gsfEM3RunTime(Structure):
_fields_ = [
("model_number", c_int),
("dg_time", timespec.c_timespec),
("ping_number", c_int),
("serial_number", c_int),
("system_status", c_int),
("filter_id", c_int),
("min_depth", c_double),
("max_depth", c_double),
("absorption", c_double),
("pulse_length", c_double),
("transmit_beam_width", c_double),
("power_reduction", c_int),
("receive_beam_width", c_double),
("receive_bandwidth", c_int),
("receive_gain", c_int),
("cross_over_angle", c_int),
("ssv_source", c_int),
("swath_width", c_int),
("beam_spacing", c_int),
("coverage_sector", c_int),
("stabilization", c_int),
("port_swath_width", c_int),
("stbd_swath_width", c_int),
("port_coverage_sector", c_int),
("stbd_coverage_sector", c_int),
("hilo_freq_absorp_ratio", c_int),
("spare1", c_int),
]
EM3_RUN_TIME_2_ARRAY = c_gsfEM3RunTime * 2
class c_gsfEM3Specific(Structure):
_fields_ = [
("model_number", c_int),
("ping_number", c_int),
("serial_number", c_int),
("surface_velocity", c_double),
("transducer_depth", c_double),
("valid_beams", c_int),
("sample_rate", c_int),
("depth_difference", c_double),
("offset_multiplier", c_int),
("run_time", EM3_RUN_TIME_2_ARRAY),
]
EM3_RAW_SPARE_BYTES = c_ubyte * 16
class c_gsfEMRunTime(Structure):
_fields_ = [
("model_number", c_int),
("dg_time", timespec.c_timespec),
("ping_counter", c_int),
("serial_number", c_int),
("operator_station_status", c_ubyte),
("processing_unit_status", c_ubyte),
("bsp_status", c_ubyte),
("head_transceiver_status", c_ubyte),
("mode", c_ubyte),
("filter_id", c_ubyte),
("min_depth", c_double),
("max_depth", c_double),
("absorption", c_double),
("tx_pulse_length", c_double),
("tx_beam_width", c_double),
("tx_power_re_max", c_double),
("rx_beam_width", c_double),
("rx_bandwidth", c_double),
("rx_fixed_gain", c_double),
("tvg_cross_over_angle", c_double),
("ssv_source", c_ubyte),
("max_port_swath_width", c_int),
("beam_spacing", c_ubyte),
("max_port_coverage", c_int),
("stabilization", c_ubyte),
("max_stbd_coverage", c_int),
("max_stbd_swath_width", c_int),
("durotong_speed", c_double),
("hi_low_absorption_ratio", c_double),
("tx_along_tilt", c_double),
("filter_id_2", c_ubyte),
("spare", EM3_RAW_SPARE_BYTES),
]
class c_gsfEMPUStatus(Structure):
_fields_ = [
("pu_cpu_load", c_double),
("sensor_status", c_ushort),
("achieved_port_coverage", c_int),
("achieved_stbd_coverage", c_int),
("yaw_stabilization", c_double),
("spare", EM3_RAW_SPARE_BYTES),
]
class c_gsfEM3RawTxSector(Structure):
_fields_ = [
("tilt_angle", c_double),
("focus_range", c_double),
("signal_length", c_double),
("transmit_delay", c_double),
("center_frequency", c_double),
("waveform_id", c_int),
("sector_number", c_int),
("signal_bandwidth", c_double),
("spare", EM3_RAW_SPARE_BYTES),
]
GSF_MAX_EM3_SECTORS = 20
EM3_RAW_SECTORS = c_gsfEM3RawTxSector * GSF_MAX_EM3_SECTORS
class c_gsfEM3RawSpecific(Structure):
_fields_ = [
("model_number", c_int),
("ping_counter", c_int),
("serial_number", c_int),
("surface_velocity", c_double),
("transducer_depth", c_double),
("valid_detections", c_int),
("sampling_frequency", c_double),
("vehicle_depth", c_double),
("depth_difference", c_double),
("offset_multiplier", c_int),
("spare_1", EM3_RAW_SPARE_BYTES),
("transmit_sectors", c_int),
("sector", EM3_RAW_SECTORS),
("spare_2", EM3_RAW_SPARE_BYTES),
("run_time", c_gsfEMRunTime),
("pu_status", c_gsfEMPUStatus),
]
RESON8100_SPARE_BYTES = c_char * 2
class c_gsfReson8100Specific(Structure):
_fields_ = [
("latency", c_int),
("ping_number", c_int),
("sonar_id", c_int),
("sonar_model", c_int),
("frequency", c_int),
("surface_velocity", c_double),
("sample_rate", c_int),
("ping_rate", c_int),
("mode", c_int),
("range", c_int),
("power", c_int),
("gain", c_int),
("tvg_spreading", c_int),
("tvg_absorption", c_int),
("fore_aft_bw", c_double),
("athwart_bw", c_double),
("projector_type", c_int),
("projector_angle", c_int),
("range_filt_min", c_double),
("range_filt_max", c_double),
("depth_filt_min", c_double),
("depth_filt_max", c_double),
("filters_active", c_int),
("temperature", c_int),
("beam_spacing", c_double),
("spare", RESON8100_SPARE_BYTES),
]
RESON7100_RESERVED_1 = c_ubyte * 16
RESON7100_RESERVED_2 = c_char * 15
RESON7100_RESERVED_3 = c_char * 8
class c_gsfReson7100Specific(Structure):
_fields_ = [
("protocol_version", c_uint),
("device_id", c_uint),
("reserved_1", RESON7100_RESERVED_1),
("major_serial_number", c_uint),
("minor_serial_number", c_uint),
("ping_number", c_uint),
("multi_ping_seq", c_uint),
("frequency", c_double),
("sample_rate", c_double),
("receiver_bandwdth", c_double),
("tx_pulse_width", c_double),
("tx_pulse_type_id", c_uint),
("tx_pulse_envlp_id", c_uint),
("tx_pulse_envlp_param", c_double),
("tx_pulse_reserved", c_uint),
("max_ping_rate", c_double),
("ping_period", c_double),
("range", c_double),
("power", c_double),
("gain", c_double),
("control_flags", c_uint),
("projector_id", c_uint),
("projector_steer_angl_vert", c_double),
("projector_steer_angl_horz", c_double),
("projector_beam_wdth_vert", c_double),
("projector_beam_wdth_horz", c_double),
("projector_beam_focal_pt", c_double),
("projector_beam_weighting_window_type", c_uint),
("projector_beam_weighting_window_param", c_uint),
("transmit_flags", c_uint),
("hydrophone_id", c_uint),
("receiving_beam_weighting_window_type", c_uint),
("receiving_beam_weighting_window_param", c_uint),
("receive_flags", c_uint),
("receive_beam_width", c_double),
("range_filt_min", c_double),
("range_filt_max", c_double),
("depth_filt_min", c_double),
("depth_filt_max", c_double),
("absorption", c_double),
("sound_velocity", c_double),
("spreading", c_double),
("raw_data_from_7027", c_ubyte),
("reserved_2", RESON7100_RESERVED_2),
("sv_source", c_ubyte),
("layer_comp_flag", c_ubyte),
("reserved_3", RESON7100_RESERVED_3),
]
RESONTSERIES_RESERVED_1 = c_ubyte * 10
RESONTSERIES_RESERVED_2 = c_ubyte * 3
RESONTSERIES_RESERVED_3 = c_ubyte * 32
RESONTSERIES_RESERVED_7027 = c_ubyte * 420
RESONTSERIES_DEVICE_DESCRIPTION = c_char * 60
class c_gsfResonTSeriesSpecific(Structure):
_fields_ = [
("protocol_version", c_uint),
("device_id", c_uint),
("number_devices", c_uint),
("system_enumerator", c_ushort),
("reserved_1", RESONTSERIES_RESERVED_1),
("major_serial_number", c_uint),
("minor_serial_number", c_uint),
("ping_number", c_uint),
("multi_ping_seq", c_uint),
("frequency", c_double),
("sample_rate", c_double),
("receiver_bandwdth", c_double),
("tx_pulse_width", c_double),
("tx_pulse_type_id", c_uint),
("tx_pulse_envlp_id", c_uint),
("tx_pulse_envlp_param", c_double),
("tx_pulse_mode", c_ushort),
("tx_pulse_reserved", c_ushort),
("max_ping_rate", c_double),
("ping_period", c_double),
("range", c_double),
("power", c_double),
("gain", c_double),
("control_flags", c_uint),
("projector_id", c_uint),
("projector_steer_angl_vert", c_double),
("projector_steer_angl_horz", c_double),
("projector_beam_wdth_vert", c_double),
("projector_beam_wdth_horz", c_double),
("projector_beam_focal_pt", c_double),
("projector_beam_weighting_window_type", c_uint),
("projector_beam_weighting_window_param", c_double),
("transmit_flags", c_uint),
("hydrophone_id", c_uint),
("receiving_beam_weighting_window_type", c_uint),
("receiving_beam_weighting_window_param", c_double),
("receive_flags", c_uint),
("receive_beam_width", c_double),
("range_filt_min", c_double),
("range_filt_max", c_double),
("depth_filt_min", c_double),
("depth_filt_max", c_double),
("absorption", c_double),
("sound_velocity", c_double),
("sv_source", c_ubyte),
("spreading", c_double),
("beam_spacing_mode", c_ushort),
("sonar_source_mode", c_ushort),
("coverage_mode", c_ubyte),
("coverage_angle", c_double),
("horizontal_receiver_steering_angle", c_double),
("reserved_2", RESONTSERIES_RESERVED_2),
("uncertainty_type", c_uint),
("transmitter_steering_angle", c_double),
("applied_roll", c_double),
("detection_algorithm", c_ushort),
("detection_flags", c_uint),
("device_description", RESONTSERIES_DEVICE_DESCRIPTION),
("reserved_7027", RESONTSERIES_RESERVED_7027),
("reserved_3", RESONTSERIES_RESERVED_3),
]
EM4_SPARE_BYTES = c_ubyte * 16
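# Kongsberg "EM4" datagram family: the per-sector transmit parameters below are
# embedded in c_gsfEM4Specific together with run-time and PU-status blocks.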
class c_gsfEM4TxSector(Structure):
_fields_ = [
("tilt_angle", c_double),
("focus_range", c_double),
("signal_length", c_double),
("transmit_delay", c_double),
("center_frequency", c_double),
("mean_absorption", c_double),
("waveform_id", c_int),
("sector_number", c_int),
("signal_bandwidth", c_double),
("spare", EM4_SPARE_BYTES),
]
EM4_SECTORS = c_gsfEM4TxSector * 9
class c_gsfEM4Specific(Structure):
_fields_ = [
("model_number", c_int),
("ping_counter", c_int),
("serial_number", c_int),
("surface_velocity", c_double),
("transducer_depth", c_double),
("valid_detections", c_int),
("sampling_frequency", c_double),
("doppler_corr_scale", c_uint),
("vehicle_depth", c_double),
("spare_1", EM4_SPARE_BYTES),
("transmit_sectors", c_int),
("sector", EM4_SECTORS),
("spare_2", EM4_SPARE_BYTES),
("run_time", c_gsfEMRunTime),
("pu_status", c_gsfEMPUStatus),
]
GEOSWATH_SPARE_BYTES = c_char * 32
class c_gsfGeoSwathPlusSpecific(Structure):
_fields_ = [
("data_source", c_int),
("side", c_int),
("model_number", c_int),
("frequency", c_double),
("echosounder_type", c_int),
("ping_number", c_long),
("num_nav_samples", c_int),
("num_attitude_samples", c_int),
("num_heading_samples", c_int),
("num_miniSVS_samples", c_int),
("num_echosounder_samples", c_int),
("num_raa_samples", c_int),
("mean_sv", c_double),
("surface_velocity", c_double),
("valid_beams", c_int),
("sample_rate", c_double),
("pulse_length", c_double),
("ping_length", c_int),
("transmit_power", c_int),
("sidescan_gain_channel", c_int),
("stabilization", c_int),
("gps_quality", c_int),
("range_uncertainty", c_double),
("angle_uncertainty", c_double),
("spare", GEOSWATH_SPARE_BYTES),
]
KLEIN5410_SPARE_BYTES = c_char * 32
class c_gsfKlein5410BssSpecific(Structure):
_fields_ = [
("data_source", c_int),
("side", c_int),
("model_number", c_int),
("acoustic_frequency", c_double),
("sampling_frequency", c_double),
("ping_number", c_uint),
("num_samples", c_uint),
("num_raa_samples", c_uint),
("error_flags", c_uint),
("range", c_uint),
("fish_depth", c_double),
("fish_altitude", c_double),
("sound_speed", c_double),
("tx_waveform", c_int),
("altimeter", c_int),
("raw_data_config", c_uint),
("spare", KLEIN5410_SPARE_BYTES),
]
DELTAT_FILE_TYPE = c_char * 4
DELTAT_SPARE = c_char * 32
class c_gsfDeltaTSpecific(Structure):
_fields_ = [
("decode_file_type", DELTAT_FILE_TYPE),
("version", c_char),
("ping_byte_size", c_int),
("interrogation_time", timespec.c_timespec),
("samples_per_beam", c_int),
("sector_size", c_double),
("start_angle", c_double),
("angle_increment", c_double),
("acoustic_range", c_int),
("acoustic_frequency", c_int),
("sound_velocity", c_double),
("range_resolution", c_double),
("profile_tilt_angle", c_double),
("repetition_rate", c_double),
("ping_number", c_ulong),
("intensity_flag", c_ubyte),
("ping_latency", c_double),
("data_latency", c_double),
("sample_rate_flag", c_ubyte),
("option_flags", c_ubyte),
("num_pings_avg", c_int),
("center_ping_time_offset", c_double),
("user_defined_byte", c_ubyte),
("altitude", c_double),
("external_sensor_flags", c_char),
("pulse_length", c_double),
("fore_aft_beamwidth", c_double),
("athwartships_beamwidth", c_double),
("spare", DELTAT_SPARE),
]
EM12_SPARE = c_char * 32
class c_gsfEM12Specific(Structure):
_fields_ = [
("ping_number", c_int),
("resolution", c_int),
("ping_quality", c_int),
("sound_velocity", c_double),
("mode", c_int),
("spare", EM12_SPARE),
]
R2SONIC_MODELNO = c_ubyte * 12
R2SONIC_SERIALNO = c_ubyte * 12
R2SONIC_INFO = c_double * 12
R2SONIC_SPARE = c_ubyte * 32
class c_gsfR2SonicSpecific(Structure):
_fields_ = [
("model_number", R2SONIC_MODELNO),
("serial_number", R2SONIC_SERIALNO),
("dg_time", timespec.c_timespec),
("ping_number", c_uint),
("ping_period", c_double),
("sound_speed", c_double),
("frequency", c_double),
("tx_power", c_double),
("tx_pulse_width", c_double),
("tx_beamwidth_vert", c_double),
("tx_beamwidth_horiz", c_double),
("tx_steering_vert", c_double),
("tx_steering_horiz", c_double),
("tx_misc_info", c_uint),
("rx_bandwidth", c_double),
("rx_sample_rate", c_double),
("rx_range", c_double),
("rx_gain", c_double),
("rx_spreading", c_double),
("rx_absorption", c_double),
("rx_mount_tilt", c_double),
("rx_misc_info", c_uint),
("reserved", c_ushort),
("num_beams", c_ushort),
("A0_more_info", R2SONIC_INFO),
("A2_more_info", R2SONIC_INFO),
("G0_depth_gate_min", c_double),
("G0_depth_gate_max", c_double),
("G0_depth_gate_slope", c_double),
("spare", R2SONIC_SPARE),
]
SBECHOTRAC_SPARE = c_char * 4
class c_gsfSBEchotracSpecific(Structure):
_fields_ = [
("navigation_error", c_int),
("mpp_source", c_ushort),
("tide_source", c_ushort),
("dynamic_draft", c_double),
("spare", SBECHOTRAC_SPARE),
]
SBMGD77_SPARE = c_char * 4
class c_gsfSBMGD77Specific(Structure):
_fields_ = [
("time_zone_corr", c_ushort),
("position_type_code", c_ushort),
("correction_code", c_ushort),
("bathy_type_code", c_ushort),
("quality_code", c_ushort),
("travel_time", c_double),
("spare", SBMGD77_SPARE),
]
SBBDB_SPARE = c_char * 4
class c_gsfSBBDBSpecific(Structure):
_fields_ = [
("doc_no", c_int),
("eval", c_char),
("classification", c_char),
("track_adj_flag", c_char),
("source_flag", c_char),
("pt_or_track_ln", c_char),
("datum_flag", c_char),
("spare", c_char),
]
SBNOSHDB_SPARE = c_char * 4
class c_gsfSBNOSHDBSpecific(Structure):
_fields_ = [
("type_code", c_ushort),
("carto_code", c_ushort),
("spare", SBNOSHDB_SPARE),
]
SBNAVISOUND_SPARE = c_char * 8
class c_gsfSBNavisoundSpecific(Structure):
_fields_ = [
("pulse_length", c_double),
("spare", SBNAVISOUND_SPARE),
]
KMALL_TX_SECTOR_SPARE_BYTES = c_ubyte * 20
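# Kongsberg KMALL format: transmit-sector and extra-detection-class subrecords,
# followed by the large per-ping c_gsfKMALLSpecific structure.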
class c_gsfKMALLTxSector(Structure):
_fields_ = [
("txSectorNumb", c_int),
("txArrNumber", c_int),
("txSubArray", c_int),
("sectorTransmitDelay_sec", c_double),
("tiltAngleReTx_deg", c_double),
("txNominalSourceLevel_dB", c_double),
("txFocusRange_m", c_double),
("centreFreq_Hz", c_double),
("signalBandWidth_Hz", c_double),
("totalSignalLength_sec", c_double),
("pulseShading", c_int),
("signalWaveForm", c_int),
("spare1", KMALL_TX_SECTOR_SPARE_BYTES)
]
KMALL_EXTRA_DET_SPARE_BYTES = c_ubyte * 32
class c_gsfKMALLExtraDetClass(Structure):
_fields_ = [
("numExtraDetInClass", c_int),
("alarmFlag", c_int),
("spare", KMALL_EXTRA_DET_SPARE_BYTES)
]
KMALL_SPARE_BYTES_1 = c_ubyte * 8
KMALL_SPARE_BYTES_2 = c_ubyte * 16
KMALL_SPARE_BYTES_3 = c_ubyte * 32
KMALL_SPARE_BYTES_4 = c_ubyte * 32
KMALL_SPARE_BYTES_5 = c_ubyte * 32
KMALL_SECTOR = c_gsfKMALLTxSector * 9
KMALL_EXTRA_DET_CLASS_INFO = c_gsfKMALLExtraDetClass * 11
class c_gsfKMALLSpecific(Structure):
_fields_ = [
("gsfKMALLVersion", c_int),
("dgmType", c_int),
("dgmVersion", c_int),
("systemID", c_int),
("echoSounderID", c_int),
("spare1", KMALL_SPARE_BYTES_1),
("numBytesCmnPart", c_int),
("pingCnt", c_int),
("rxFansPerPing", c_int),
("rxFanIndex", c_int),
("swathsPerPing", c_int),
("swathAlongPosition", c_int),
("txTransducerInd", c_int),
("rxTransducerInd", c_int),
("numRxTransducers", c_int),
("algorithmType", c_int),
("spare2", KMALL_SPARE_BYTES_2),
("numBytesInfoData", c_int),
("pingRate_Hz", c_double),
("beamSpacing", c_int),
("depthMode", c_int),
("subDepthMode", c_int),
("distanceBtwSwath", c_int),
("detectionMode", c_int),
("pulseForm", c_int),
("frequencyMode_Hz", c_double),
("freqRangeLowLim_Hz", c_double),
("freqRangeHighLim_Hz", c_double),
("maxTotalTxPulseLength_sec", c_double),
("maxEffTxPulseLength_sec", c_double),
("maxEffTxBandWidth_Hz", c_double),
("absCoeff_dBPerkm", c_double),
("portSectorEdge_deg", c_double),
("starbSectorEdge_deg", c_double),
("portMeanCov_deg", c_double),
("starbMeanCov_deg", c_double),
("portMeanCov_m", c_double),
("starbMeanCov_m", c_double),
("modeAndStabilisation", c_int),
("runtimeFilter1", c_int),
("runtimeFilter2", c_int),
("pipeTrackingStatus", c_int),
("transmitArraySizeUsed_deg", c_double),
("receiveArraySizeUsed_deg", c_double),
("transmitPower_dB", c_double),
("SLrampUpTimeRemaining", c_int),
("yawAngle_deg", c_double),
("numTxSectors", c_int),
("numBytesPerTxSector", c_int),
("headingVessel_deg", c_double),
("soundSpeedAtTxDepth_mPerSec", c_double),
("txTransducerDepth_m", c_double),
("z_waterLevelReRefPoint_m", c_double),
("x_kmallToall_m", c_double),
("y_kmallToall_m", c_double),
("latLongInfo", c_int),
("posSensorStatus", c_int),
("attitudeSensorStatus", c_int),
("latitude_deg", c_double),
("longitude_deg", c_double),
("ellipsoidHeightReRefPoint_m", c_double),
("spare3", KMALL_SPARE_BYTES_3),
("sector", KMALL_SECTOR),
("numBytesRxInfo", c_int),
("numSoundingsMaxMain", c_int),
("numSoundingsValidMain", c_int),
("numBytesPerSounding", c_int),
("WCSampleRate", c_double),
("seabedImageSampleRate", c_double),
("BSnormal_dB", c_double),
("BSoblique_dB", c_double),
("extraDetectionAlarmFlag", c_int),
("numExtraDetections", c_int),
("numExtraDetectionClasses", c_int),
("numBytesPerClass", c_int),
("spare4", KMALL_SPARE_BYTES_4),
("extraDetClassInfo", KMALL_EXTRA_DET_CLASS_INFO),
("spare5", KMALL_SPARE_BYTES_5)
]
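# Union of all sensor-specific subrecords. Every member shares the same memory,
# and only the member matching the sonar that produced the ping is meaningful.
# Several names intentionally alias one layout (e.g. gsfEM121Specific and
# gsfEM121ASpecific both map to c_gsfEM121ASpecific).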
class c_gsfSensorSpecific(Union):
_fields_ = [
("gsfSeaBeamSpecific", c_gsfSeaBeamSpecific),
("gsfEM100Specific", c_gsfEM100Specific),
("gsfEM121ASpecific", c_gsfEM121ASpecific),
("gsfEM121Specific", c_gsfEM121ASpecific),
("gsfSeaBatSpecific", c_gsfSeaBatSpecific),
("gsfEM950Specific", c_gsfEM950Specific),
("gsfEM1000Specific", c_gsfEM950Specific),
("gsfSeamapSpecific", c_gsfSeamapSpecific),
("gsfTypeIIISeaBeamSpecific", c_gsfTypeIIISpecific),
("gsfSASSSpecific", c_gsfTypeIIISpecific),
("gsfCmpSassSpecific", c_gsfCmpSassSpecific),
("gsfSBAmpSpecific", c_gsfSBAmpSpecific),
("gsfSeaBatIISpecific", c_gsfSeaBatIISpecific),
("gsfSeaBat8101Specific", c_gsfSeaBat8101Specific),
("gsfSeaBeam2112Specific", c_gsfSeaBeam2112Specific),
("gsfElacMkIISpecific", c_gsfElacMkIISpecific),
("gsfEM3Specific", c_gsfEM3Specific),
("gsfEM3RawSpecific", c_gsfEM3RawSpecific),
("gsfReson8100Specific", c_gsfReson8100Specific),
("gsfReson7100Specific", c_gsfReson7100Specific),
("gsfResonTSeriesSpecific", c_gsfResonTSeriesSpecific),
("gsfEM4Specific", c_gsfEM4Specific),
("gsfGeoSwathPlusSpecific", c_gsfGeoSwathPlusSpecific),
("gsfKlein5410BssSpecific", c_gsfKlein5410BssSpecific),
("gsfDeltaTSpecific", c_gsfDeltaTSpecific),
("gsfEM12Specific", c_gsfEM12Specific),
("gsfR2SonicSpecific", c_gsfR2SonicSpecific),
("gsfKMallSpecific", c_gsfKMALLSpecific),
("gsfSBEchotracSpecific", c_gsfSBEchotracSpecific),
("gsfSBBathy2000Specific", c_gsfSBEchotracSpecific),
("gsfSBMGD77Specific", c_gsfSBMGD77Specific),
("gsfSBBDBSpecific", c_gsfSBBDBSpecific),
("gsfSBNOSHDBSpecific", c_gsfSBNOSHDBSpecific),
("gsfSBPDDSpecific", c_gsfSBEchotracSpecific),
("gsfSBNavisoundSpecific", c_gsfSBNavisoundSpecific),
]
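# --- Illustrative sketch (not part of the original bindings) ----------------
# A minimal example of how the shared-memory union above can be populated and
# read back with plain ctypes; the field values below are invented purely for
# demonstration.
if __name__ == "__main__":
    _specific = c_gsfSensorSpecific()
    _specific.gsfEM4Specific.model_number = 2040            # hypothetical value
    _specific.gsfEM4Specific.sampling_frequency = 60000.0   # hypothetical value
    # All members alias the same bytes, so in real use only the member matching
    # the ping's sensor id should be read.
    print(_specific.gsfEM4Specific.model_number,
          _specific.gsfEM4Specific.sampling_frequency)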
is_comment_constant_removed: true
is_sharp_comment_removed: true

hexsha: 7907702807ec663bdaf71f3b4d3f16e5b1ba333b
size: 846
ext: py
lang: Python
max_stars_repo_path: 3rdparty/meshlab-master/src/external/openkinect/wrappers/python/setup.py
max_stars_repo_name: HoEmpire/slambook2
max_stars_repo_head_hexsha: 96d360f32aa5d8b5c5dcbbf9ee7ba865e84409f4
max_stars_repo_licenses: ["MIT"]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2020-02-02T13:40:22.000Z
max_stars_repo_stars_event_max_datetime: 2020-02-02T13:40:22.000Z
max_issues_repo_path: ext/libfreenect/wrappers/python/setup.py
max_issues_repo_name: captdeaf/CarBoundary
max_issues_repo_head_hexsha: 35e5cb9a87ac4e693f8abdb79de4e97973a3329a
max_issues_repo_licenses: ["BSD-3-Clause"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: ext/libfreenect/wrappers/python/setup.py
max_forks_repo_name: captdeaf/CarBoundary
max_forks_repo_head_hexsha: 35e5cb9a87ac4e693f8abdb79de4e97973a3329a
max_forks_repo_licenses: ["BSD-3-Clause"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
#!/usr/bin/env python
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
ext_modules = [Extension("freenect", ["freenect.pyx"],
libraries=['usb-1.0', 'freenect', 'freenect_sync'],
runtime_library_dirs=['/usr/local/lib', '/usr/local/lib64', '/usr/lib/'],
extra_compile_args=['-fPIC', '-I', '../../include/',
'-I', '/usr/include/libusb-1.0/',
'-I', '/usr/local/include/libusb-1.0',
'-I', '/usr/local/include',
'-I', '../c_sync/'])]
setup(
name = 'freenect',
cmdclass = {'build_ext': build_ext},
ext_modules = ext_modules
)
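# A typical build, assuming Cython, libusb-1.0 and libfreenect are already
# installed (the include/library paths above may need adjusting locally):
#     python setup.py build_ext --inplace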
avg_line_length: 44.526316
max_line_length: 98
alphanum_fraction: 0.471631
content_no_comment:
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
ext_modules = [Extension("freenect", ["freenect.pyx"],
libraries=['usb-1.0', 'freenect', 'freenect_sync'],
runtime_library_dirs=['/usr/local/lib', '/usr/local/lib64', '/usr/lib/'],
extra_compile_args=['-fPIC', '-I', '../../include/',
'-I', '/usr/include/libusb-1.0/',
'-I', '/usr/local/include/libusb-1.0',
'-I', '/usr/local/include',
'-I', '../c_sync/'])]
setup(
name = 'freenect',
cmdclass = {'build_ext': build_ext},
ext_modules = ext_modules
)
is_comment_constant_removed: true
is_sharp_comment_removed: true
hexsha: 790770ce1fb2b1ddb9ad4ee1cd21bca3681fc4cf
size: 182295
ext: py
lang: Python
max_stars_repo_path: core/domain/wipeout_service_test.py
max_stars_repo_name: luccasparoni/oppia
max_stars_repo_head_hexsha: 988f7c1e818faf774ec424e33b5dd0267c40237b
max_stars_repo_licenses: ["Apache-2.0"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: core/domain/wipeout_service_test.py
max_issues_repo_name: luccasparoni/oppia
max_issues_repo_head_hexsha: 988f7c1e818faf774ec424e33b5dd0267c40237b
max_issues_repo_licenses: ["Apache-2.0"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: core/domain/wipeout_service_test.py
max_forks_repo_name: luccasparoni/oppia
max_forks_repo_head_hexsha: 988f7c1e818faf774ec424e33b5dd0267c40237b
max_forks_repo_licenses: ["Apache-2.0"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for wipeout service."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import datetime
import logging
from constants import constants
from core.domain import auth_services
from core.domain import collection_services
from core.domain import email_manager
from core.domain import exp_services
from core.domain import question_domain
from core.domain import question_services
from core.domain import rights_domain
from core.domain import rights_manager
from core.domain import skill_domain
from core.domain import skill_services
from core.domain import story_domain
from core.domain import story_services
from core.domain import subtopic_page_domain
from core.domain import subtopic_page_services
from core.domain import topic_domain
from core.domain import topic_services
from core.domain import user_domain
from core.domain import user_services
from core.domain import wipeout_domain
from core.domain import wipeout_service
from core.platform import models
from core.tests import test_utils
import feconf
import python_utils
(
auth_models, base_models, collection_models,
config_models, email_models, exp_models,
feedback_models, improvements_models, question_models,
skill_models, story_models, subtopic_models,
suggestion_models, topic_models, user_models
) = models.Registry.import_models([
models.NAMES.auth, models.NAMES.base_model, models.NAMES.collection,
models.NAMES.config, models.NAMES.email, models.NAMES.exploration,
models.NAMES.feedback, models.NAMES.improvements, models.NAMES.question,
models.NAMES.skill, models.NAMES.story, models.NAMES.subtopic,
models.NAMES.suggestion, models.NAMES.topic, models.NAMES.user
])
datastore_services = models.Registry.import_datastore_services()
class WipeoutServiceHelpersTests(test_utils.GenericTestBase):
"""Provides testing of the pre-deletion part of wipeout service."""
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
def setUp(self):
super(WipeoutServiceHelpersTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_1_role = user_services.get_user_settings(self.user_1_id).role
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
self.user_2_role = user_services.get_user_settings(self.user_2_id).role
def test_gets_pending_deletion_request(self):
wipeout_service.save_pending_deletion_requests(
[
wipeout_domain.PendingDeletionRequest.create_default(
self.user_1_id, self.USER_1_EMAIL, self.user_1_role)
]
)
pending_deletion_request = (
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertEqual(pending_deletion_request.user_id, self.user_1_id)
self.assertEqual(pending_deletion_request.email, self.USER_1_EMAIL)
self.assertEqual(pending_deletion_request.deletion_complete, False)
self.assertEqual(
pending_deletion_request.pseudonymizable_entity_mappings, {})
def test_get_number_of_pending_deletion_requests_returns_correct_number(
self):
number_of_pending_deletion_requests = (
wipeout_service.get_number_of_pending_deletion_requests())
self.assertEqual(number_of_pending_deletion_requests, 0)
wipeout_service.save_pending_deletion_requests(
[
wipeout_domain.PendingDeletionRequest.create_default(
self.user_1_id, self.USER_1_EMAIL, self.user_1_role),
wipeout_domain.PendingDeletionRequest.create_default(
self.user_2_id, self.USER_2_EMAIL, self.user_2_role)
]
)
number_of_pending_deletion_requests = (
wipeout_service.get_number_of_pending_deletion_requests())
self.assertEqual(number_of_pending_deletion_requests, 2)
def test_saves_pending_deletion_request_when_new(self):
pending_deletion_request = (
wipeout_domain.PendingDeletionRequest.create_default(
self.user_1_id, self.USER_1_EMAIL, self.user_1_role))
wipeout_service.save_pending_deletion_requests(
[pending_deletion_request])
pending_deletion_request_model = (
user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id))
self.assertEqual(pending_deletion_request_model.id, self.user_1_id)
self.assertEqual(
pending_deletion_request_model.email, self.USER_1_EMAIL)
self.assertEqual(
pending_deletion_request_model.deletion_complete, False)
self.assertEqual(
pending_deletion_request_model.pseudonymizable_entity_mappings, {})
def test_saves_pending_deletion_request_when_already_existing(self):
pending_deletion_request_model_old = (
user_models.PendingDeletionRequestModel(
id=self.user_1_id,
email=self.USER_1_EMAIL,
role=self.user_1_role,
deletion_complete=False,
pseudonymizable_entity_mappings={}
)
)
pending_deletion_request_model_old.put()
pending_deletion_request = (
wipeout_domain.PendingDeletionRequest.create_default(
self.user_1_id, self.USER_1_EMAIL, self.user_1_role)
)
pending_deletion_request.deletion_complete = True
pending_deletion_request.pseudonymizable_entity_mappings = {
'story': {'story_id': 'user_id'}
}
wipeout_service.save_pending_deletion_requests(
[pending_deletion_request])
pending_deletion_request_model_new = (
user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id))
self.assertEqual(pending_deletion_request_model_new.id, self.user_1_id)
self.assertEqual(
pending_deletion_request_model_new.email, self.USER_1_EMAIL)
self.assertEqual(
pending_deletion_request_model_new.deletion_complete, True)
self.assertEqual(
pending_deletion_request_model_new.pseudonymizable_entity_mappings,
{'story': {'story_id': 'user_id'}})
self.assertEqual(
pending_deletion_request_model_old.created_on,
pending_deletion_request_model_new.created_on)
class WipeoutServicePreDeleteTests(test_utils.GenericTestBase):
"""Provides testing of the pre-deletion part of wipeout service."""
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
USER_3_EMAIL = 'other@email.com'
USER_3_USERNAME = 'username3'
def setUp(self):
super(WipeoutServicePreDeleteTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.set_user_role(self.USER_1_USERNAME, feconf.ROLE_ID_TOPIC_MANAGER)
self.user_1_auth_id = self.get_auth_id_from_email(self.USER_1_EMAIL)
self.user_1_actions = user_services.UserActionsInfo(self.user_1_id)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
self.user_1_auth_id = self.get_auth_id_from_email(self.USER_1_EMAIL)
user_data_dict = {
'schema_version': 1,
'display_alias': 'display_alias',
'pin': '12345',
'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE],
'preferred_site_language_code': None,
'preferred_audio_language_code': None,
'user_id': self.user_1_id,
}
new_user_data_dict = {
'schema_version': 1,
'display_alias': 'display_alias3',
'pin': '12345',
'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE],
'preferred_site_language_code': None,
'preferred_audio_language_code': None,
'user_id': None,
}
self.modifiable_user_data = (
user_domain.ModifiableUserData.from_raw_dict(user_data_dict))
self.modifiable_new_user_data = (
user_domain.ModifiableUserData.from_raw_dict(new_user_data_dict))
user_services.update_multiple_users_data(
[self.modifiable_user_data])
self.modifiable_user_data.display_alias = 'name'
self.modifiable_user_data.pin = '123'
self.profile_user_id = user_services.create_new_profiles(
self.user_1_auth_id, self.USER_1_EMAIL,
[self.modifiable_new_user_data]
)[0].user_id
def tearDown(self):
pending_deletion_request_models = (
user_models.PendingDeletionRequestModel.get_all())
for pending_deletion_request_model in pending_deletion_request_models:
pending_deletion_request = (
wipeout_service.get_pending_deletion_request(
pending_deletion_request_model.id))
self.assertEqual(
wipeout_service.run_user_deletion(pending_deletion_request),
wipeout_domain.USER_DELETION_SUCCESS)
self.assertEqual(
wipeout_service.run_user_deletion_completion(
pending_deletion_request),
wipeout_domain.USER_VERIFICATION_SUCCESS)
def test_pre_delete_user_email_subscriptions(self):
email_preferences = user_services.get_email_preferences(self.user_1_id)
self.assertEqual(
email_preferences.can_receive_email_updates,
feconf.DEFAULT_EMAIL_UPDATES_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_editor_role_email,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_feedback_message_email,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_subscription_email,
feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
email_preferences = user_services.get_email_preferences(self.user_1_id)
self.assertFalse(email_preferences.can_receive_email_updates)
self.assertFalse(email_preferences.can_receive_editor_role_email)
self.assertFalse(email_preferences.can_receive_feedback_message_email)
self.assertFalse(email_preferences.can_receive_subscription_email)
def test_pre_delete_profile_users_works_correctly(self):
user_settings = user_services.get_user_settings(self.profile_user_id)
self.assertFalse(user_settings.deleted)
self.assertFalse(user_settings.deleted)
wipeout_service.pre_delete_user(self.profile_user_id)
self.process_and_flush_pending_tasks()
user_settings = user_models.UserSettingsModel.get_by_id(
self.profile_user_id)
self.assertTrue(user_settings.deleted)
user_auth_details = (
auth_models.UserAuthDetailsModel.get_by_id(self.profile_user_id))
self.assertTrue(user_auth_details.deleted)
def test_pre_delete_user_for_full_user_also_deletes_all_profiles(self):
user_settings = user_services.get_user_settings(self.user_1_id)
self.assertFalse(user_settings.deleted)
profile_user_settings = user_services.get_user_settings(
self.profile_user_id)
self.assertFalse(profile_user_settings.deleted)
profile_auth_details = user_services.get_user_settings(
self.profile_user_id)
self.assertFalse(profile_auth_details.deleted)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
user_settings = user_models.UserSettingsModel.get_by_id(self.user_1_id)
self.assertTrue(user_settings.deleted)
user_auth_details = (
auth_models.UserAuthDetailsModel.get_by_id(self.profile_user_id))
self.assertTrue(user_auth_details.deleted)
profile_user_settings = user_models.UserSettingsModel.get_by_id(
self.profile_user_id)
self.assertTrue(profile_user_settings.deleted)
profile_auth_details = (
auth_models.UserAuthDetailsModel.get_by_id(self.profile_user_id))
self.assertTrue(profile_auth_details.deleted)
def test_pre_delete_user_without_activities_works_correctly(self):
user_models.UserSubscriptionsModel(
id=self.user_1_id, exploration_ids=[], collection_ids=[]
).put()
user_settings = user_services.get_user_settings(self.user_1_id)
self.assertFalse(user_settings.deleted)
user_auth_details = auth_models.UserAuthDetailsModel.get(self.user_1_id)
self.assertFalse(user_auth_details.deleted)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
user_settings = user_models.UserSettingsModel.get_by_id(self.user_1_id)
self.assertTrue(user_settings.deleted)
self.assertIsNone(
auth_services.get_auth_id_from_user_id(self.user_1_id))
pending_deletion_model = (
user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id))
self.assertIsNotNone(pending_deletion_model)
def test_pre_delete_username_is_not_saved_for_user_younger_than_week(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
pending_deletion_request = (
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertIsNone(
pending_deletion_request.normalized_long_term_username)
def test_pre_delete_username_is_saved_for_user_older_than_week(self):
date_10_days_ago = (
datetime.datetime.utcnow() - datetime.timedelta(days=10))
with self.mock_datetime_utcnow(date_10_days_ago):
self.signup(self.USER_3_EMAIL, self.USER_3_USERNAME)
user_3_id = self.get_user_id_from_email(self.USER_3_EMAIL)
wipeout_service.pre_delete_user(user_3_id)
self.process_and_flush_pending_tasks()
pending_deletion_request = (
wipeout_service.get_pending_deletion_request(user_3_id))
self.assertEqual(
pending_deletion_request.normalized_long_term_username,
self.USER_3_USERNAME)
def test_pre_delete_user_with_activities_multiple_owners(self):
user_services.update_user_role(
self.user_1_id, feconf.ROLE_ID_COLLECTION_EDITOR)
self.save_new_valid_exploration('exp_id', self.user_1_id)
rights_manager.assign_role_for_exploration(
self.user_1_actions,
'exp_id',
self.user_2_id,
rights_domain.ROLE_OWNER)
self.save_new_valid_collection(
'col_id', self.user_1_id, exploration_id='exp_id')
rights_manager.assign_role_for_collection(
self.user_1_actions,
'col_id',
self.user_2_id,
rights_domain.ROLE_OWNER)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
pending_deletion_model = (
user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id))
self.assertIsNotNone(pending_deletion_model)
def test_pre_delete_user_collection_is_marked_deleted(self):
self.save_new_valid_collection('col_id', self.user_1_id)
collection_model = collection_models.CollectionModel.get_by_id('col_id')
self.assertFalse(collection_model.deleted)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
self.assertIsNone(collection_models.CollectionModel.get_by_id('col_id'))
def test_pre_delete_user_exploration_is_marked_deleted(self):
self.save_new_valid_exploration('exp_id', self.user_1_id)
exp_model = exp_models.ExplorationModel.get_by_id('exp_id')
self.assertFalse(exp_model.deleted)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
self.assertIsNone(exp_models.ExplorationModel.get_by_id('exp_id'))
def test_pre_delete_user_collection_ownership_is_released(self):
self.save_new_valid_collection('col_id', self.user_1_id)
self.publish_collection(self.user_1_id, 'col_id')
rights_manager.assign_role_for_collection(
user_services.get_system_user(),
'col_id',
self.user_2_id,
feconf.ROLE_EDITOR)
collection_summary_model = (
collection_models.CollectionSummaryModel.get_by_id('col_id'))
self.assertFalse(collection_summary_model.community_owned)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
collection_summary_model = (
collection_models.CollectionSummaryModel.get_by_id('col_id'))
self.assertTrue(collection_summary_model.community_owned)
def test_pre_delete_user_exploration_ownership_is_released(self):
self.save_new_valid_exploration('exp_id', self.user_1_id)
self.publish_exploration(self.user_1_id, 'exp_id')
rights_manager.assign_role_for_exploration(
user_services.get_system_user(),
'exp_id',
self.user_2_id,
feconf.ROLE_EDITOR)
exp_summary_model = exp_models.ExpSummaryModel.get_by_id('exp_id')
self.assertFalse(exp_summary_model.community_owned)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
exp_summary_model = exp_models.ExpSummaryModel.get_by_id('exp_id')
self.assertTrue(exp_summary_model.community_owned)
def test_pre_delete_user_collection_user_is_deassigned(self):
self.save_new_valid_collection('col_id', self.user_1_id)
rights_manager.assign_role_for_collection(
user_services.get_system_user(),
'col_id',
self.user_2_id,
feconf.ROLE_EDITOR)
collection_summary_model = (
collection_models.CollectionSummaryModel.get_by_id('col_id'))
self.assertEqual(collection_summary_model.editor_ids, [self.user_2_id])
wipeout_service.pre_delete_user(self.user_2_id)
self.process_and_flush_pending_tasks()
collection_summary_model = (
collection_models.CollectionSummaryModel.get_by_id('col_id'))
self.assertEqual(collection_summary_model.editor_ids, [])
def test_pre_delete_user_exploration_user_is_deassigned(self):
self.save_new_valid_exploration('exp_id', self.user_1_id)
rights_manager.assign_role_for_exploration(
user_services.get_system_user(),
'exp_id',
self.user_2_id,
feconf.ROLE_EDITOR)
exp_summary_model = exp_models.ExpSummaryModel.get_by_id('exp_id')
self.assertEqual(exp_summary_model.editor_ids, [self.user_2_id])
wipeout_service.pre_delete_user(self.user_2_id)
self.process_and_flush_pending_tasks()
exp_summary_model = exp_models.ExpSummaryModel.get_by_id('exp_id')
self.assertEqual(exp_summary_model.editor_ids, [])
def test_pre_delete_user_user_is_deassigned_from_topics(self):
self.save_new_topic('top_id', self.user_1_id)
topic_services.assign_role(
user_services.get_system_user(),
self.user_1_actions,
feconf.ROLE_MANAGER,
'top_id')
top_rights_model = topic_models.TopicRightsModel.get_by_id('top_id')
self.assertEqual(top_rights_model.manager_ids, [self.user_1_id])
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
top_rights_model = topic_models.TopicRightsModel.get_by_id('top_id')
self.assertEqual(top_rights_model.manager_ids, [])
class WipeoutServiceRunFunctionsTests(test_utils.GenericTestBase):
"""Provides testing of the pre-deletion part of wipeout service."""
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
def setUp(self):
super(WipeoutServiceRunFunctionsTests, self).setUp()
date_10_days_ago = (
datetime.datetime.utcnow() - datetime.timedelta(days=10))
with self.mock_datetime_utcnow(date_10_days_ago):
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.set_user_role(self.USER_1_USERNAME, feconf.ROLE_ID_TOPIC_MANAGER)
self.user_1_actions = user_services.UserActionsInfo(self.user_1_id)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
self.pending_deletion_request = (
wipeout_service.get_pending_deletion_request(self.user_1_id))
def test_run_user_deletion_with_user_not_deleted(self):
self.assertEqual(
wipeout_service.run_user_deletion(self.pending_deletion_request),
wipeout_domain.USER_DELETION_SUCCESS
)
def test_run_user_deletion_with_user_already_deleted(self):
wipeout_service.run_user_deletion(self.pending_deletion_request)
self.assertEqual(
wipeout_service.run_user_deletion(self.pending_deletion_request),
wipeout_domain.USER_DELETION_ALREADY_DONE
)
def test_run_user_deletion_completion_with_user_not_yet_deleted(self):
self.assertEqual(
wipeout_service.run_user_deletion_completion(
self.pending_deletion_request),
wipeout_domain.USER_VERIFICATION_NOT_DELETED)
self.assertIsNotNone(
user_models.UserSettingsModel.get_by_id(self.user_1_id))
self.assertIsNotNone(
user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id))
def test_run_user_deletion_completion_with_user_properly_deleted(self):
wipeout_service.run_user_deletion(self.pending_deletion_request)
self.assertEqual(
wipeout_service.run_user_deletion_completion(
self.pending_deletion_request),
wipeout_domain.USER_VERIFICATION_SUCCESS
)
self.assertIsNotNone(
user_models.DeletedUserModel.get_by_id(self.user_1_id))
self.assertTrue(user_services.is_username_taken(self.USER_1_USERNAME))
self.assertIsNone(
user_models.UserSettingsModel.get_by_id(self.user_1_id))
self.assertIsNone(
user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id))
# Pre-deleted auth associations will return None.
self.assertIsNone(
auth_services.get_auth_id_from_user_id(self.user_1_id))
self.assertTrue(
auth_services.verify_external_auth_associations_are_deleted(
self.user_1_id))
def test_run_user_deletion_completion_with_user_wrongly_deleted(self):
wipeout_service.run_user_deletion(self.pending_deletion_request)
user_models.CompletedActivitiesModel(
id=self.user_1_id, exploration_ids=[], collection_ids=[]
).put()
email_content = (
'The Wipeout process failed for the user with ID \'%s\' '
'and email \'%s\'.' % (self.user_1_id, self.USER_1_EMAIL)
)
send_email_swap = self.swap_with_checks(
email_manager,
'send_mail_to_admin',
lambda x, y: None,
expected_args=[('WIPEOUT: Account deletion failed', email_content)]
)
with send_email_swap:
self.assertEqual(
wipeout_service.run_user_deletion_completion(
self.pending_deletion_request),
wipeout_domain.USER_VERIFICATION_FAILURE)
self.assertIsNotNone(
user_models.UserSettingsModel.get_by_id(self.user_1_id))
self.assertIsNotNone(
auth_models.UserAuthDetailsModel.get_by_id(self.user_1_id))
self.assertIsNotNone(
user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id))
class WipeoutServiceDeleteConfigModelsTests(test_utils.GenericTestBase):
"""Provides testing of the deletion part of wipeout service."""
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
CONFIG_1_ID = 'config_1_id'
CONFIG_2_ID = 'config_2_id'
def setUp(self):
super(WipeoutServiceDeleteConfigModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
config_models.ConfigPropertyModel(
id=self.CONFIG_1_ID, value='a'
).commit(self.user_1_id, [{'cmd': 'command'}])
wipeout_service.pre_delete_user(self.user_1_id)
wipeout_service.pre_delete_user(self.user_2_id)
self.process_and_flush_pending_tasks()
def test_one_config_property_is_pseudonymized(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify user is deleted.
config_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.config]
)
metadata_model = (
config_models.ConfigPropertySnapshotMetadataModel.get_by_id(
'%s-1' % self.CONFIG_1_ID)
)
self.assertEqual(
metadata_model.committer_id, config_mappings[self.CONFIG_1_ID])
def test_one_config_property_when_the_deletion_is_repeated_is_pseudonymized(
self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Return metadata model to the original user ID.
metadata_model = (
config_models.ConfigPropertySnapshotMetadataModel.get_by_id(
'%s-1' % self.CONFIG_1_ID)
)
metadata_model.committer_id = self.user_1_id
metadata_model.put_for_human()
# Run the user deletion again.
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify that both the commit and the metadata have the same
# pseudonymous user ID.
config_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.config]
)
self.assertEqual(
metadata_model.committer_id, config_mappings[self.CONFIG_1_ID])
def test_multiple_config_properties_are_pseudonymized(self):
config_models.ConfigPropertyModel(
id=self.CONFIG_2_ID, value='b'
).commit(self.user_1_id, [{'cmd': 'command'}])
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
config_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.config]
)
metadata_model_1 = (
config_models.ConfigPropertySnapshotMetadataModel.get_by_id(
'%s-1' % self.CONFIG_1_ID)
)
self.assertEqual(
metadata_model_1.committer_id, config_mappings[self.CONFIG_1_ID])
metadata_model_2 = (
config_models.ConfigPropertySnapshotMetadataModel.get_by_id(
'%s-1' % self.CONFIG_2_ID)
)
self.assertEqual(
metadata_model_2.committer_id, config_mappings[self.CONFIG_2_ID])
def test_multiple_config_properties_with_multiple_users_are_pseudonymized(
self):
config_models.ConfigPropertyModel(
id=self.CONFIG_2_ID, value='b'
).commit(self.user_2_id, [{'cmd': 'command'}])
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify first user is deleted.
config_mappings_1 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.config]
)
metadata_model_1 = (
config_models.ConfigPropertySnapshotMetadataModel.get_by_id(
'%s-1' % self.CONFIG_1_ID)
)
self.assertEqual(
metadata_model_1.committer_id, config_mappings_1[self.CONFIG_1_ID])
# Verify second user is not yet deleted.
metadata_model_2 = (
config_models.ConfigPropertySnapshotMetadataModel.get_by_id(
'%s-1' % self.CONFIG_2_ID)
)
self.assertEqual(
metadata_model_2.committer_id, self.user_2_id)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
# Verify second user is deleted.
config_mappings_2 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_2_id
).pseudonymizable_entity_mappings[models.NAMES.config]
)
metadata_model_3 = (
config_models.ConfigPropertySnapshotMetadataModel.get_by_id(
'%s-1' % self.CONFIG_2_ID)
)
self.assertEqual(
metadata_model_3.committer_id, config_mappings_2[self.CONFIG_2_ID])
def test_one_config_property_with_multiple_users_is_pseudonymized(self):
config_models.ConfigPropertyModel.get_by_id(
self.CONFIG_1_ID
).commit(self.user_2_id, [{'cmd': 'command'}])
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify first user is deleted.
config_mappings_1 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.config]
)
metadata_model_1 = (
config_models.ConfigPropertySnapshotMetadataModel.get_by_id(
'%s-1' % self.CONFIG_1_ID)
)
self.assertEqual(
metadata_model_1.committer_id, config_mappings_1[self.CONFIG_1_ID])
# Verify second user is not yet deleted.
metadata_model_2 = (
config_models.ConfigPropertySnapshotMetadataModel.get_by_id(
'%s-2' % self.CONFIG_1_ID)
)
self.assertEqual(metadata_model_2.committer_id, self.user_2_id)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
# Verify second user is deleted.
config_mappings_2 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_2_id
).pseudonymizable_entity_mappings[models.NAMES.config]
)
metadata_model_3 = (
config_models.ConfigPropertySnapshotMetadataModel.get_by_id(
'%s-2' % self.CONFIG_1_ID)
)
self.assertEqual(
metadata_model_3.committer_id, config_mappings_2[self.CONFIG_1_ID])
class WipeoutServiceVerifyDeleteConfigModelsTests(test_utils.GenericTestBase):
"""Provides testing of the verification part of wipeout service."""
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
CONFIG_1_ID = 'config_1_id'
CONFIG_2_ID = 'config_2_id'
def setUp(self):
super(WipeoutServiceVerifyDeleteConfigModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
config_model = config_models.ConfigPropertyModel(
id=self.CONFIG_2_ID, value='a'
)
config_model.commit(self.user_1_id, [{'cmd': 'command'}])
config_model.commit(self.user_1_id, [{'cmd': 'command_2'}])
config_models.ConfigPropertyModel(
id=self.CONFIG_2_ID, value='a'
).commit(self.user_1_id, [{'cmd': 'command'}])
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
def test_verify_user_delete_when_user_is_deleted_returns_true(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
def test_verify_user_delete_when_user_is_not_deleted_returns_false(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
config_models.ConfigPropertyModel(
id=self.CONFIG_2_ID, value='a'
).commit(self.user_1_id, [{'cmd': 'command'}])
self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
class WipeoutServiceDeleteCollectionModelsTests(test_utils.GenericTestBase):
"""Provides testing of the deletion part of wipeout service."""
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
COL_1_ID = 'col_1_id'
COL_2_ID = 'col_2_id'
def setUp(self):
super(WipeoutServiceDeleteCollectionModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
self.save_new_valid_collection(self.COL_1_ID, self.user_1_id)
self.publish_collection(self.user_1_id, self.COL_1_ID)
rights_manager.assign_role_for_collection(
user_services.UserActionsInfo(self.user_1_id),
self.COL_1_ID,
self.user_2_id,
feconf.ROLE_OWNER)
def test_one_collection_snapshot_metadata_is_pseudonymized(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify user is deleted.
collection_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.collection]
)
metadata_model = (
collection_models.CollectionSnapshotMetadataModel.get_by_id(
'%s-1' % self.COL_1_ID)
)
self.assertEqual(
metadata_model.committer_id,
collection_mappings[self.COL_1_ID])
rights_metadata_model_1 = (
collection_models.CollectionRightsSnapshotMetadataModel.get_by_id(
'%s-1' % self.COL_1_ID)
)
self.assertEqual(
rights_metadata_model_1.committer_id,
collection_mappings[self.COL_1_ID])
self.assertEqual(
rights_metadata_model_1.content_user_ids,
[collection_mappings[self.COL_1_ID]])
self.assertEqual(rights_metadata_model_1.commit_cmds_user_ids, [])
rights_metadata_model_2 = (
collection_models.CollectionRightsSnapshotMetadataModel.get_by_id(
'%s-2' % self.COL_1_ID)
)
self.assertEqual(
rights_metadata_model_2.committer_id,
collection_mappings[self.COL_1_ID])
self.assertEqual(
rights_metadata_model_2.content_user_ids,
[collection_mappings[self.COL_1_ID]])
self.assertEqual(rights_metadata_model_2.commit_cmds_user_ids, [])
def test_one_collection_snapshot_content_is_pseudonymized(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify user is deleted.
collection_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.collection]
)
rights_content_model_1 = (
collection_models.CollectionRightsSnapshotContentModel.get_by_id(
'%s-1' % self.COL_1_ID)
)
self.assertEqual(
rights_content_model_1.content['owner_ids'],
[collection_mappings[self.COL_1_ID]])
rights_content_model_2 = (
collection_models.CollectionRightsSnapshotContentModel.get_by_id(
'%s-3' % self.COL_1_ID)
)
self.assertItemsEqual(
rights_content_model_2.content['owner_ids'],
[
collection_mappings[self.COL_1_ID],
self.user_2_id
])
def test_one_collection_commit_log_is_pseudonymized(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify user is deleted.
collection_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.collection]
)
commit_log_model_1 = (
collection_models.CollectionCommitLogEntryModel.get_by_id(
'rights-%s-2' % self.COL_1_ID)
)
self.assertEqual(
commit_log_model_1.user_id,
collection_mappings[self.COL_1_ID])
commit_log_model_2 = (
collection_models.CollectionCommitLogEntryModel.get_by_id(
'rights-%s-3' % self.COL_1_ID)
)
self.assertEqual(
commit_log_model_2.user_id,
collection_mappings[self.COL_1_ID])
def test_one_collection_with_missing_snapshot_is_pseudonymized(self):
collection_models.CollectionCommitLogEntryModel(
id='collection-%s-1' % self.COL_2_ID,
collection_id=self.COL_2_ID,
user_id=self.user_1_id,
commit_type='create_new',
commit_cmds=[{}],
post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
version=1
).put_for_human()
with self.capture_logging(min_level=logging.ERROR) as log_messages:
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertItemsEqual(
log_messages,
[
'[WIPEOUT] The commit log model '
'\'CollectionCommitLogEntryModel\' and '
'snapshot models [\'CollectionSnapshotMetadataModel\', '
'\'CollectionRightsSnapshotMetadataModel\'] IDs differ. '
'Snapshots without commit logs: [], '
'commit logs without snapshots: [u\'%s\'].' % self.COL_2_ID,
'[WIPEOUT] The commit log model '
'\'ExplorationCommitLogEntryModel\' and '
'snapshot models [\'ExplorationSnapshotMetadataModel\', '
'\'ExplorationRightsSnapshotMetadataModel\'] IDs differ. '
'Snapshots without commit logs: [], '
'commit logs without snapshots: [u\'an_exploration_id\'].'
]
)
# Verify user is deleted.
collection_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.collection]
)
metadata_model = (
collection_models.CollectionSnapshotMetadataModel.get_by_id(
'%s-1' % self.COL_1_ID
)
)
self.assertEqual(
metadata_model.committer_id,
collection_mappings[self.COL_1_ID])
commit_log_model_1 = (
collection_models.CollectionCommitLogEntryModel.get_by_id(
'collection-%s-1' % self.COL_1_ID
)
)
self.assertEqual(
commit_log_model_1.user_id,
collection_mappings[self.COL_1_ID])
commit_log_model_2 = (
collection_models.CollectionCommitLogEntryModel.get_by_id(
'collection-%s-1' % self.COL_2_ID
)
)
self.assertEqual(
commit_log_model_2.user_id,
collection_mappings[self.COL_2_ID])
def test_one_collection_when_the_deletion_is_repeated_is_pseudonymized(
self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Return metadata model to the original user ID.
metadata_model = (
collection_models.CollectionSnapshotMetadataModel.get_by_id(
'%s-1' % self.COL_1_ID
)
)
metadata_model.committer_id = self.user_1_id
metadata_model.put_for_human()
# Run the user deletion again.
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify that both the commit and the metadata have the same
# pseudonymous user ID.
collection_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.collection]
)
metadata_model = (
collection_models.CollectionSnapshotMetadataModel.get_by_id(
'%s-1' % self.COL_1_ID
)
)
self.assertEqual(
metadata_model.committer_id,
collection_mappings[self.COL_1_ID])
commit_log_model = (
collection_models.CollectionCommitLogEntryModel.get_by_id(
'collection-%s-1' % self.COL_1_ID)
)
self.assertEqual(
commit_log_model.user_id,
collection_mappings[self.COL_1_ID])
def test_collection_user_is_removed_from_contributors(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
old_summary_model = (
collection_models.CollectionSummaryModel.get_by_id(self.COL_1_ID))
self.assertNotIn(self.user_1_id, old_summary_model.contributor_ids)
self.assertNotIn(self.user_1_id, old_summary_model.contributors_summary)
old_summary_model.contributor_ids = [self.user_1_id]
old_summary_model.contributors_summary = {self.user_1_id: 2}
old_summary_model.put()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
new_summary_model = (
collection_models.CollectionSummaryModel.get_by_id(self.COL_1_ID))
self.assertNotIn(self.user_1_id, new_summary_model.contributor_ids)
self.assertNotIn(self.user_1_id, new_summary_model.contributors_summary)
def test_col_user_is_removed_from_contributor_ids_when_missing_from_summary(
self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
old_summary_model = (
collection_models.CollectionSummaryModel.get_by_id(self.COL_1_ID))
self.assertNotIn(self.user_1_id, old_summary_model.contributor_ids)
self.assertNotIn(self.user_1_id, old_summary_model.contributors_summary)
old_summary_model.contributor_ids = [self.user_1_id]
old_summary_model.contributors_summary = {}
old_summary_model.put()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
new_summary_model = (
collection_models.CollectionSummaryModel.get_by_id(self.COL_1_ID))
self.assertNotIn(self.user_1_id, new_summary_model.contributor_ids)
self.assertNotIn(self.user_1_id, new_summary_model.contributors_summary)
def test_delete_exp_where_user_has_role_when_rights_model_marked_as_deleted(
self):
self.save_new_valid_collection(self.COL_2_ID, self.user_1_id)
collection_services.delete_collection(self.user_1_id, self.COL_2_ID)
collection_rights_model = (
collection_models.CollectionRightsModel.get_by_id(self.COL_2_ID))
self.assertTrue(collection_rights_model.deleted)
collection_model = (
collection_models.CollectionModel.get_by_id(self.COL_2_ID))
self.assertTrue(collection_model.deleted)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertIsNone(
collection_models.CollectionRightsModel.get_by_id(self.COL_2_ID))
self.assertIsNone(
collection_models.CollectionModel.get_by_id(self.COL_2_ID))
def test_multiple_collections_are_pseudonymized(self):
self.save_new_valid_collection(self.COL_2_ID, self.user_1_id)
self.publish_collection(self.user_1_id, self.COL_2_ID)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
collection_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.collection]
)
metadata_model = (
collection_models.CollectionSnapshotMetadataModel.get_by_id(
'%s-1' % self.COL_1_ID
)
)
self.assertEqual(
metadata_model.committer_id,
collection_mappings[self.COL_1_ID])
commit_log_model = (
collection_models.CollectionCommitLogEntryModel.get_by_id(
'collection-%s-1' % self.COL_1_ID
)
)
self.assertEqual(
commit_log_model.user_id,
collection_mappings[self.COL_1_ID])
metadata_model = (
collection_models.CollectionSnapshotMetadataModel.get_by_id(
'%s-1' % self.COL_2_ID
)
)
self.assertEqual(
metadata_model.committer_id,
collection_mappings[self.COL_2_ID])
commit_log_model = (
collection_models.CollectionCommitLogEntryModel.get_by_id(
'collection-%s-1' % self.COL_2_ID
)
)
self.assertEqual(
commit_log_model.user_id,
collection_mappings[self.COL_2_ID])
class WipeoutServiceVerifyDeleteCollectionModelsTests(
test_utils.GenericTestBase):
"""Provides testing of the verification part of wipeout service."""
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
COL_1_ID = 'col_1_id'
COL_2_ID = 'col_2_id'
def setUp(self):
super(WipeoutServiceVerifyDeleteCollectionModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.save_new_valid_collection(self.COL_1_ID, self.user_1_id)
self.save_new_valid_collection(self.COL_2_ID, self.user_1_id)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
def test_verify_user_delete_when_user_is_deleted_returns_true(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
def test_verify_user_delete_when_user_is_not_deleted_returns_false(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
collection_models.CollectionSnapshotMetadataModel(
id='%s-1' % self.COL_1_ID,
committer_id=self.user_1_id,
commit_message='123',
commit_type='create',
commit_cmds={}
).put_for_human()
self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
class WipeoutServiceDeleteExplorationModelsTests(test_utils.GenericTestBase):
"""Provides testing of the deletion part of wipeout service."""
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
EXP_1_ID = 'exp_1_id'
EXP_2_ID = 'exp_2_id'
def setUp(self):
super(WipeoutServiceDeleteExplorationModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
self.save_new_valid_exploration(self.EXP_1_ID, self.user_1_id)
self.publish_exploration(self.user_1_id, self.EXP_1_ID)
rights_manager.assign_role_for_exploration(
user_services.UserActionsInfo(self.user_1_id),
self.EXP_1_ID,
self.user_2_id,
feconf.ROLE_OWNER)
def test_one_exploration_snapshot_metadata_is_pseudonymized(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify user is deleted.
exploration_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.exploration]
)
metadata_model = (
exp_models.ExplorationSnapshotMetadataModel.get_by_id(
'%s-1' % self.EXP_1_ID)
)
self.assertEqual(
metadata_model.committer_id,
exploration_mappings[self.EXP_1_ID])
rights_metadata_model_1 = (
exp_models.ExplorationRightsSnapshotMetadataModel.get_by_id(
'%s-1' % self.EXP_1_ID)
)
self.assertEqual(
rights_metadata_model_1.committer_id,
exploration_mappings[self.EXP_1_ID])
self.assertEqual(
rights_metadata_model_1.content_user_ids,
[exploration_mappings[self.EXP_1_ID]])
self.assertEqual(rights_metadata_model_1.commit_cmds_user_ids, [])
rights_metadata_model_2 = (
exp_models.ExplorationRightsSnapshotMetadataModel.get_by_id(
'%s-2' % self.EXP_1_ID)
)
self.assertEqual(
rights_metadata_model_2.committer_id,
exploration_mappings[self.EXP_1_ID])
self.assertEqual(
rights_metadata_model_2.content_user_ids,
[exploration_mappings[self.EXP_1_ID]])
self.assertEqual(rights_metadata_model_2.commit_cmds_user_ids, [])
def test_one_exploration_snapshot_content_is_pseudonymized(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify user is deleted.
exploration_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.exploration]
)
rights_content_model_1 = (
exp_models.ExplorationRightsSnapshotContentModel.get_by_id(
'%s-1' % self.EXP_1_ID)
)
self.assertEqual(
rights_content_model_1.content['owner_ids'],
[exploration_mappings[self.EXP_1_ID]])
rights_content_model_2 = (
exp_models.ExplorationRightsSnapshotContentModel.get_by_id(
'%s-3' % self.EXP_1_ID)
)
self.assertItemsEqual(
rights_content_model_2.content['owner_ids'],
[
exploration_mappings[self.EXP_1_ID],
self.user_2_id
])
def test_one_exploration_commit_log_is_pseudonymized(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify user is deleted.
exploration_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.exploration]
)
commit_log_model_1 = (
exp_models.ExplorationCommitLogEntryModel.get_by_id(
'rights-%s-2' % self.EXP_1_ID)
)
self.assertEqual(
commit_log_model_1.user_id, exploration_mappings[self.EXP_1_ID])
commit_log_model_2 = (
exp_models.ExplorationCommitLogEntryModel.get_by_id(
'rights-%s-3' % self.EXP_1_ID)
)
self.assertEqual(
commit_log_model_2.user_id, exploration_mappings[self.EXP_1_ID])
def test_one_exploration_with_missing_snapshot_is_pseudonymized(self):
exp_models.ExplorationCommitLogEntryModel(
id='exploration-%s-1' % self.EXP_2_ID,
exploration_id=self.EXP_2_ID,
user_id=self.user_1_id,
commit_type='create_new',
commit_cmds=[{}],
post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
version=1
).put_for_human()
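        # The commit log entry above has no matching snapshot model, so the
        # deletion run is expected to log an error about the mismatch while
        # still pseudonymizing the orphaned entry.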
with self.capture_logging(min_level=logging.ERROR) as log_messages:
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertItemsEqual(
log_messages,
[
'[WIPEOUT] The commit log model '
'\'ExplorationCommitLogEntryModel\' and '
'snapshot models [\'ExplorationSnapshotMetadataModel\', '
'\'ExplorationRightsSnapshotMetadataModel\'] IDs differ. '
'Snapshots without commit logs: [], '
'commit logs without snapshots: [u\'%s\'].' % self.EXP_2_ID
]
)
# Verify user is deleted.
exploration_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.exploration]
)
metadata_model = (
exp_models.ExplorationSnapshotMetadataModel.get_by_id(
'%s-1' % self.EXP_1_ID
)
)
self.assertEqual(
metadata_model.committer_id, exploration_mappings[self.EXP_1_ID])
commit_log_model_1 = (
exp_models.ExplorationCommitLogEntryModel.get_by_id(
'exploration-%s-1' % self.EXP_1_ID
)
)
self.assertEqual(
commit_log_model_1.user_id, exploration_mappings[self.EXP_1_ID])
commit_log_model_2 = (
exp_models.ExplorationCommitLogEntryModel.get_by_id(
'exploration-%s-1' % self.EXP_2_ID
)
)
self.assertEqual(
commit_log_model_2.user_id, exploration_mappings[self.EXP_2_ID])
def test_one_exploration_when_the_deletion_is_repeated_is_pseudonymized(
self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Return metadata model to the original user ID.
metadata_model = (
exp_models.ExplorationSnapshotMetadataModel.get_by_id(
'%s-1' % self.EXP_1_ID
)
)
metadata_model.committer_id = self.user_1_id
metadata_model.put_for_human()
# Run the user deletion again.
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify that both the commit and the metadata have the same
# pseudonymous user ID.
exploration_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.exploration]
)
metadata_model = (
exp_models.ExplorationSnapshotMetadataModel.get_by_id(
'%s-1' % self.EXP_1_ID
)
)
self.assertEqual(
metadata_model.committer_id, exploration_mappings[self.EXP_1_ID])
commit_log_model = (
exp_models.ExplorationCommitLogEntryModel.get_by_id(
'exploration-%s-1' % self.EXP_1_ID)
)
self.assertEqual(
commit_log_model.user_id, exploration_mappings[self.EXP_1_ID])
def test_exploration_user_is_removed_from_contributors(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
old_summary_model = exp_models.ExpSummaryModel.get_by_id(self.EXP_1_ID)
self.assertNotIn(self.user_1_id, old_summary_model.contributor_ids)
self.assertNotIn(self.user_1_id, old_summary_model.contributors_summary)
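        # Simulate the user reappearing in the contributor lists so that the
        # repeated deletion run has to remove them again.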
old_summary_model.contributor_ids = [self.user_1_id]
old_summary_model.contributors_summary = {self.user_1_id: 2}
old_summary_model.put()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
new_summary_model = exp_models.ExpSummaryModel.get_by_id(self.EXP_1_ID)
self.assertNotIn(self.user_1_id, new_summary_model.contributor_ids)
self.assertNotIn(self.user_1_id, new_summary_model.contributors_summary)
def test_exp_user_is_removed_from_contributor_ids_when_missing_from_summary(
self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
old_summary_model = exp_models.ExpSummaryModel.get_by_id(self.EXP_1_ID)
self.assertNotIn(self.user_1_id, old_summary_model.contributor_ids)
self.assertNotIn(self.user_1_id, old_summary_model.contributors_summary)
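        # Re-add the user to contributor_ids only, leaving contributors_summary
        # empty, to cover the case where the summary entry is missing.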
old_summary_model.contributor_ids = [self.user_1_id]
old_summary_model.contributors_summary = {}
old_summary_model.put()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
new_summary_model = exp_models.ExpSummaryModel.get_by_id(self.EXP_1_ID)
self.assertNotIn(self.user_1_id, new_summary_model.contributor_ids)
self.assertNotIn(self.user_1_id, new_summary_model.contributors_summary)
def test_delete_exp_where_user_has_role_when_rights_model_marked_as_deleted(
self):
self.save_new_valid_exploration(self.EXP_2_ID, self.user_1_id)
exp_services.delete_exploration(self.user_1_id, self.EXP_2_ID)
exp_rights_model = (
exp_models.ExplorationRightsModel.get_by_id(self.EXP_2_ID))
self.assertTrue(exp_rights_model.deleted)
        exp_model = (
            exp_models.ExplorationModel.get_by_id(self.EXP_2_ID))
self.assertTrue(exp_model.deleted)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertIsNone(
exp_models.ExplorationRightsModel.get_by_id(self.EXP_2_ID))
self.assertIsNone(
exp_models.ExplorationModel.get_by_id(self.EXP_2_ID))
def test_multiple_explorations_are_pseudonymized(self):
self.save_new_valid_exploration(self.EXP_2_ID, self.user_1_id)
self.publish_exploration(self.user_1_id, self.EXP_2_ID)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
exploration_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.exploration]
)
metadata_model = (
exp_models.ExplorationSnapshotMetadataModel.get_by_id(
'%s-1' % self.EXP_1_ID
)
)
self.assertEqual(
metadata_model.committer_id, exploration_mappings[self.EXP_1_ID])
commit_log_model = (
exp_models.ExplorationCommitLogEntryModel.get_by_id(
'exploration-%s-1' % self.EXP_1_ID
)
)
self.assertEqual(
commit_log_model.user_id, exploration_mappings[self.EXP_1_ID])
metadata_model = (
exp_models.ExplorationSnapshotMetadataModel.get_by_id(
'%s-1' % self.EXP_2_ID
)
)
self.assertEqual(
metadata_model.committer_id, exploration_mappings[self.EXP_2_ID])
commit_log_model = (
exp_models.ExplorationCommitLogEntryModel.get_by_id(
'exploration-%s-1' % self.EXP_2_ID
)
)
self.assertEqual(
commit_log_model.user_id, exploration_mappings[self.EXP_2_ID])
class WipeoutServiceVerifyDeleteExplorationModelsTests(
test_utils.GenericTestBase):
"""Provides testing of the verification part of wipeout service."""
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
EXP_1_ID = 'exp_1_id'
EXP_2_ID = 'exp_2_id'
def setUp(self):
super(WipeoutServiceVerifyDeleteExplorationModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.save_new_valid_exploration(self.EXP_1_ID, self.user_1_id)
self.save_new_valid_exploration(self.EXP_2_ID, self.user_1_id)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
def test_verify_user_delete_when_user_is_deleted_returns_true(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
def test_verify_user_delete_when_user_is_not_deleted_returns_false(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
exp_models.ExplorationSnapshotMetadataModel(
id='%s-1' % self.EXP_1_ID,
committer_id=self.user_1_id,
commit_message='123',
commit_type='create',
commit_cmds={}
).put_for_human()
self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
class WipeoutServiceDeleteEmailModelsTests(test_utils.GenericTestBase):
"""Provides testing of the deletion part of wipeout service."""
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
THREAD_1_ID = 'thread_1_id'
THREAD_2_ID = 'thread_2_id'
REPLY_1_ID = 'reply_1_id'
REPLY_2_ID = 'reply_2_id'
def setUp(self):
super(WipeoutServiceDeleteEmailModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
email_models.GeneralFeedbackEmailReplyToIdModel(
id='%s.%s' % (self.user_1_id, self.THREAD_1_ID),
user_id=self.user_1_id,
thread_id=self.THREAD_1_ID,
reply_to_id=self.REPLY_1_ID
).put()
wipeout_service.pre_delete_user(self.user_1_id)
wipeout_service.pre_delete_user(self.user_2_id)
self.process_and_flush_pending_tasks()
def test_one_email_is_deleted(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertIsNone(
email_models.GeneralFeedbackEmailReplyToIdModel.get_by_id(
'%s.%s' % (self.user_1_id, self.THREAD_1_ID)))
def test_multiple_emails_are_deleted(self):
email_models.GeneralFeedbackEmailReplyToIdModel(
id='%s.%s' % (self.user_1_id, self.THREAD_2_ID),
user_id=self.user_1_id,
thread_id=self.THREAD_2_ID,
reply_to_id=self.REPLY_2_ID
).put()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertIsNone(
email_models.GeneralFeedbackEmailReplyToIdModel.get_by_id(
'%s.%s' % (self.user_1_id, self.THREAD_1_ID)))
self.assertIsNone(
email_models.GeneralFeedbackEmailReplyToIdModel.get_by_id(
'%s.%s' % (self.user_1_id, self.THREAD_2_ID)))
def test_multiple_emails_from_multiple_users_are_deleted(self):
email_models.GeneralFeedbackEmailReplyToIdModel(
id='%s.%s' % (self.user_2_id, self.THREAD_2_ID),
user_id=self.user_2_id,
thread_id=self.THREAD_2_ID,
reply_to_id=self.REPLY_2_ID
).put()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertIsNone(
email_models.GeneralFeedbackEmailReplyToIdModel.get_by_id(
'%s.%s' % (self.user_1_id, self.THREAD_1_ID)))
self.assertIsNotNone(
email_models.GeneralFeedbackEmailReplyToIdModel.get_by_id(
'%s.%s' % (self.user_2_id, self.THREAD_2_ID)))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
self.assertIsNone(
email_models.GeneralFeedbackEmailReplyToIdModel.get_by_id(
'%s.%s' % (self.user_2_id, self.THREAD_2_ID)))
class WipeoutServiceVerifyDeleteEmailModelsTests(test_utils.GenericTestBase):
"""Provides testing of the verification part of wipeout service."""
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
THREAD_1_ID = 'thread_1_id'
THREAD_2_ID = 'thread_2_id'
REPLY_1_ID = 'reply_1_id'
REPLY_2_ID = 'reply_2_id'
def setUp(self):
super(WipeoutServiceVerifyDeleteEmailModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
email_models.GeneralFeedbackEmailReplyToIdModel(
id='%s.%s' % (self.user_1_id, self.THREAD_1_ID),
user_id=self.user_1_id,
thread_id=self.THREAD_1_ID,
reply_to_id=self.REPLY_1_ID
).put()
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
def test_verify_user_delete_when_user_is_deleted_returns_true(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
def test_verify_user_delete_when_user_is_not_deleted_returns_false(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
email_models.GeneralFeedbackEmailReplyToIdModel(
id='%s.%s' % (self.user_1_id, self.THREAD_1_ID),
user_id=self.user_1_id,
thread_id=self.THREAD_1_ID,
reply_to_id=self.REPLY_1_ID
).put()
self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
class WipeoutServiceDeleteFeedbackModelsTests(test_utils.GenericTestBase):
"""Provides testing of the deletion part of wipeout service."""
FEEDBACK_1_ID = 'feedback_1_id'
FEEDBACK_2_ID = 'feedback_2_id'
MESSAGE_1_ID = 'message_1_id'
MESSAGE_2_ID = 'message_2_id'
EXP_1_ID = 'exp_1_id'
EXP_2_ID = 'exp_2_id'
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
NUMBER_OF_MODELS = 150
def setUp(self):
super(WipeoutServiceDeleteFeedbackModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
feedback_models.GeneralFeedbackThreadModel(
id=self.FEEDBACK_1_ID,
entity_type=feconf.ENTITY_TYPE_EXPLORATION,
entity_id=self.EXP_1_ID,
original_author_id=self.user_1_id,
subject='Wrong state name',
has_suggestion=True,
last_nonempty_message_text='Some text',
last_nonempty_message_author_id=self.user_2_id
).put_for_human()
feedback_models.GeneralFeedbackMessageModel(
id=self.MESSAGE_1_ID,
thread_id=self.FEEDBACK_1_ID,
message_id=0,
author_id=self.user_2_id,
text='Some text'
).put_for_human()
suggestion_models.GeneralSuggestionModel(
id=self.FEEDBACK_1_ID,
suggestion_type=(
feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT),
target_type=feconf.ENTITY_TYPE_EXPLORATION,
target_id=self.EXP_1_ID,
target_version_at_submission=1,
status=suggestion_models.STATUS_IN_REVIEW,
author_id=self.user_1_id,
final_reviewer_id=self.user_2_id,
change_cmd={},
score_category=suggestion_models.SCORE_TYPE_CONTENT
).put_for_human()
wipeout_service.pre_delete_user(self.user_1_id)
wipeout_service.pre_delete_user(self.user_2_id)
self.process_and_flush_pending_tasks()
def test_one_feedback_is_pseudonymized(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify user is pseudonymized.
feedback_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.feedback]
)
feedback_thread_model = (
feedback_models.GeneralFeedbackThreadModel.get_by_id(
self.FEEDBACK_1_ID)
)
self.assertEqual(
feedback_thread_model.original_author_id,
feedback_mappings[self.FEEDBACK_1_ID]
)
        suggestion_model = (
            suggestion_models.GeneralSuggestionModel.get_by_id(
                self.FEEDBACK_1_ID)
        )
        self.assertEqual(
            suggestion_model.author_id,
            feedback_mappings[self.FEEDBACK_1_ID]
        )
def test_one_feedback_when_the_deletion_is_repeated_is_pseudonymized(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Return feedback thread model to the original user ID.
feedback_thread_model = (
feedback_models.GeneralFeedbackThreadModel.get_by_id(
self.FEEDBACK_1_ID)
)
feedback_thread_model.original_author_id = self.user_1_id
feedback_thread_model.put_for_human()
# Run the user deletion again.
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify that both the feedback thread and the suggestion have the same
# pseudonymous user ID.
feedback_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.feedback]
)
new_feedback_thread_model = (
feedback_models.GeneralFeedbackThreadModel.get_by_id(
self.FEEDBACK_1_ID)
)
self.assertEqual(
new_feedback_thread_model.original_author_id,
feedback_mappings[self.FEEDBACK_1_ID]
)
def test_multiple_feedbacks_are_pseudonymized(self):
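        # Create NUMBER_OF_MODELS feedback threads and matching messages, all
        # attributed to the first user, so that pseudonymization runs over a
        # large batch of models.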
feedback_thread_models = []
for i in python_utils.RANGE(self.NUMBER_OF_MODELS):
feedback_thread_models.append(
feedback_models.GeneralFeedbackThreadModel(
id='feedback-%s' % i,
entity_type=feconf.ENTITY_TYPE_EXPLORATION,
entity_id=self.EXP_1_ID,
original_author_id=self.user_1_id,
subject='Too short exploration',
last_nonempty_message_text='Some text',
last_nonempty_message_author_id=self.user_2_id
)
)
feedback_message_models = []
for i in python_utils.RANGE(self.NUMBER_OF_MODELS):
feedback_message_models.append(
feedback_models.GeneralFeedbackMessageModel(
id='message-%s' % i,
thread_id='feedback-%s' % i,
message_id=i,
author_id=self.user_1_id,
text='Some text'
)
)
base_models.BaseHumanMaintainedModel.put_multi_for_human(
feedback_thread_models + feedback_message_models)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
feedback_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.feedback]
)
pseudonymized_feedback_thread_models = (
feedback_models.GeneralFeedbackThreadModel.get_multi(
[model.id for model in feedback_thread_models]
)
)
for feedback_thread_model in pseudonymized_feedback_thread_models:
self.assertEqual(
feedback_thread_model.original_author_id,
feedback_mappings[feedback_thread_model.id]
)
pseudonymized_feedback_message_models = (
feedback_models.GeneralFeedbackMessageModel.get_multi(
[model.id for model in feedback_message_models]
)
)
for feedback_message_model in pseudonymized_feedback_message_models:
self.assertEqual(
feedback_message_model.author_id,
feedback_mappings[feedback_message_model.thread_id]
)
def test_one_feedback_with_multiple_users_is_pseudonymized(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
feedback_mappings_1 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.feedback]
)
# Verify first user is pseudonymized.
feedback_thread_model = (
feedback_models.GeneralFeedbackThreadModel.get_by_id(
self.FEEDBACK_1_ID)
)
self.assertEqual(
feedback_thread_model.original_author_id,
feedback_mappings_1[self.FEEDBACK_1_ID]
)
# Verify second user is not yet pseudonymized.
self.assertEqual(
feedback_thread_model.last_nonempty_message_author_id,
self.user_2_id
)
# Delete second user.
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
feedback_mappings_2 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_2_id
).pseudonymizable_entity_mappings[models.NAMES.feedback]
)
# Verify second user is pseudonymized.
self.assertEqual(
feedback_thread_model.last_nonempty_message_author_id,
feedback_mappings_2[self.FEEDBACK_1_ID]
)
class WipeoutServiceVerifyDeleteFeedbackModelsTests(test_utils.GenericTestBase):
"""Provides testing of the verification part of wipeout service."""
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
FEEDBACK_1_ID = 'feedback_1_id'
MESSAGE_1_ID = 'message_1_id'
EXP_1_ID = 'exp_1_id'
def setUp(self):
super(WipeoutServiceVerifyDeleteFeedbackModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
feedback_models.GeneralFeedbackThreadModel(
id=self.FEEDBACK_1_ID,
entity_type=feconf.ENTITY_TYPE_EXPLORATION,
entity_id=self.EXP_1_ID,
original_author_id=self.user_1_id,
subject='Wrong state name',
has_suggestion=True,
last_nonempty_message_text='Some text',
last_nonempty_message_author_id=self.user_1_id
).put_for_human()
feedback_models.GeneralFeedbackMessageModel(
id=self.MESSAGE_1_ID,
thread_id=self.FEEDBACK_1_ID,
message_id=0,
author_id=self.user_1_id,
text='Some text'
).put_for_human()
suggestion_models.GeneralSuggestionModel(
id=self.FEEDBACK_1_ID,
suggestion_type=(
feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT),
target_type=feconf.ENTITY_TYPE_EXPLORATION,
target_id=self.EXP_1_ID,
target_version_at_submission=1,
status=suggestion_models.STATUS_IN_REVIEW,
author_id=self.user_1_id,
final_reviewer_id=self.user_1_id,
change_cmd={},
score_category=suggestion_models.SCORE_TYPE_CONTENT
).put_for_human()
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
def test_verify_user_delete_when_user_is_deleted_returns_true(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
def test_verify_user_delete_when_user_is_not_deleted_returns_false(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
feedback_models.GeneralFeedbackThreadModel(
id=self.FEEDBACK_1_ID,
entity_type=feconf.ENTITY_TYPE_EXPLORATION,
entity_id=self.EXP_1_ID,
original_author_id=self.user_1_id,
subject='Wrong state name',
has_suggestion=True,
last_nonempty_message_text='Some text',
last_nonempty_message_author_id=self.user_1_id
).put_for_human()
self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
class WipeoutServiceDeleteImprovementsModelsTests(test_utils.GenericTestBase):
"""Provides testing of the deletion part of wipeout service."""
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
EXP_1_ID = 'exp_1_id'
EXP_2_ID = 'exp_2_id'
def setUp(self):
super(WipeoutServiceDeleteImprovementsModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
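        # Create two resolved improvement tasks attributed to the user; wipeout
        # is expected to delete these TaskEntryModels outright.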
self.improvements_model_1_id = (
improvements_models.TaskEntryModel.create(
entity_type=constants.TASK_ENTITY_TYPE_EXPLORATION,
entity_id=self.EXP_1_ID,
entity_version=1,
task_type=constants.TASK_TYPE_HIGH_BOUNCE_RATE,
target_type=constants.TASK_TARGET_TYPE_STATE,
target_id='State',
issue_description=None,
status=constants.TASK_STATUS_RESOLVED,
resolver_id=self.user_1_id
)
)
self.improvements_model_2_id = (
improvements_models.TaskEntryModel.create(
entity_type=constants.TASK_ENTITY_TYPE_EXPLORATION,
entity_id=self.EXP_2_ID,
entity_version=1,
task_type=constants.TASK_TYPE_HIGH_BOUNCE_RATE,
target_type=constants.TASK_TARGET_TYPE_STATE,
target_id='State',
issue_description=None,
status=constants.TASK_STATUS_RESOLVED,
resolver_id=self.user_1_id
)
)
def test_delete_user_is_successful(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
self.assertIsNotNone(
improvements_models.TaskEntryModel.get_by_id(
self.improvements_model_1_id))
self.assertIsNotNone(
improvements_models.TaskEntryModel.get_by_id(
self.improvements_model_2_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertIsNone(
improvements_models.TaskEntryModel.get_by_id(
self.improvements_model_1_id))
self.assertIsNone(
improvements_models.TaskEntryModel.get_by_id(
self.improvements_model_2_id))
class WipeoutServiceVerifyDeleteImprovementsModelsTests(
test_utils.GenericTestBase):
"""Provides testing of the verification part of wipeout service."""
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
EXP_1_ID = 'exp_1_id'
EXP_2_ID = 'exp_2_id'
EXP_3_ID = 'exp_3_id'
def setUp(self):
super(WipeoutServiceVerifyDeleteImprovementsModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
improvements_models.TaskEntryModel.create(
entity_type=constants.TASK_ENTITY_TYPE_EXPLORATION,
entity_id=self.EXP_1_ID,
entity_version=1,
task_type=constants.TASK_TYPE_HIGH_BOUNCE_RATE,
target_type=constants.TASK_TARGET_TYPE_STATE,
target_id='State',
issue_description=None,
status=constants.TASK_STATUS_RESOLVED,
resolver_id=self.user_1_id
)
improvements_models.TaskEntryModel.create(
entity_type=constants.TASK_ENTITY_TYPE_EXPLORATION,
entity_id=self.EXP_2_ID,
entity_version=1,
task_type=constants.TASK_TYPE_HIGH_BOUNCE_RATE,
target_type=constants.TASK_TARGET_TYPE_STATE,
target_id='State',
issue_description=None,
status=constants.TASK_STATUS_RESOLVED,
resolver_id=self.user_1_id
)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
def test_verify_user_delete_when_user_is_deleted_returns_true(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
def test_verify_user_delete_when_user_is_not_deleted_returns_false(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
improvements_models.TaskEntryModel.create(
entity_type=constants.TASK_ENTITY_TYPE_EXPLORATION,
entity_id=self.EXP_3_ID,
entity_version=1,
task_type=constants.TASK_TYPE_HIGH_BOUNCE_RATE,
target_type=constants.TASK_TARGET_TYPE_STATE,
target_id='State',
issue_description=None,
status=constants.TASK_STATUS_RESOLVED,
resolver_id=self.user_1_id
)
self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
class WipeoutServiceDeleteQuestionModelsTests(test_utils.GenericTestBase):
"""Provides testing of the deletion part of wipeout service."""
SKILL_1_ID = 'skill_1_id'
QUESTION_1_ID = 'question_1_id'
QUESTION_2_ID = 'question_2_id'
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
def setUp(self):
super(WipeoutServiceDeleteQuestionModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.set_admins((self.USER_1_USERNAME, self.USER_2_USERNAME))
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
self.save_new_skill(self.SKILL_1_ID, self.user_1_id)
self.save_new_question(
self.QUESTION_1_ID,
self.user_1_id,
self._create_valid_question_data('ABC'),
[self.SKILL_1_ID]
)
wipeout_service.pre_delete_user(self.user_1_id)
wipeout_service.pre_delete_user(self.user_2_id)
self.process_and_flush_pending_tasks()
def test_one_question_is_pseudonymized(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify user is deleted.
question_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.question]
)
metadata_model = (
question_models.QuestionSnapshotMetadataModel.get_by_id(
'%s-1' % self.QUESTION_1_ID)
)
self.assertEqual(
metadata_model.committer_id, question_mappings[self.QUESTION_1_ID])
commit_log_model = (
question_models.QuestionCommitLogEntryModel.get_by_id(
'question-%s-1' % self.QUESTION_1_ID)
)
self.assertEqual(
commit_log_model.user_id, question_mappings[self.QUESTION_1_ID])
def test_one_question_with_missing_snapshot_is_pseudonymized(self):
question_models.QuestionCommitLogEntryModel(
id='question-%s-1' % self.QUESTION_2_ID,
question_id=self.QUESTION_2_ID,
user_id=self.user_1_id,
commit_type='create_new',
commit_cmds=[{}],
post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
version=1
).put_for_human()
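        # The commit log entry above has no matching snapshot model; the
        # deletion run should log an error but still pseudonymize the entry.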
with self.capture_logging(min_level=logging.ERROR) as log_messages:
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertEqual(
log_messages,
['[WIPEOUT] The commit log model \'QuestionCommitLogEntryModel\' '
'and snapshot models [\'QuestionSnapshotMetadataModel\'] IDs '
'differ. Snapshots without commit logs: [], '
'commit logs without snapshots: [u\'%s\'].' % self.QUESTION_2_ID])
# Verify user is deleted.
question_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.question]
)
metadata_model = (
question_models.QuestionSnapshotMetadataModel.get_by_id(
'%s-1' % self.QUESTION_1_ID
)
)
self.assertEqual(
metadata_model.committer_id, question_mappings[self.QUESTION_1_ID])
commit_log_model_1 = (
question_models.QuestionCommitLogEntryModel.get_by_id(
'question-%s-1' % self.QUESTION_1_ID
)
)
self.assertEqual(
commit_log_model_1.user_id, question_mappings[self.QUESTION_1_ID])
commit_log_model_2 = (
question_models.QuestionCommitLogEntryModel.get_by_id(
'question-%s-1' % self.QUESTION_2_ID
)
)
self.assertEqual(
commit_log_model_2.user_id, question_mappings[self.QUESTION_2_ID])
def test_one_question_when_the_deletion_is_repeated_is_pseudonymized(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Return metadata model to the original user ID.
metadata_model = (
question_models.QuestionSnapshotMetadataModel.get_by_id(
'%s-1' % self.QUESTION_1_ID
)
)
metadata_model.committer_id = self.user_1_id
metadata_model.put_for_human()
# Run the user deletion again.
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify that both the commit and the metadata have the same
# pseudonymous user ID.
question_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.question]
)
metadata_model = (
question_models.QuestionSnapshotMetadataModel.get_by_id(
'%s-1' % self.QUESTION_1_ID
)
)
self.assertEqual(
metadata_model.committer_id, question_mappings[self.QUESTION_1_ID])
commit_log_model = (
question_models.QuestionCommitLogEntryModel.get_by_id(
'question-%s-1' % self.QUESTION_1_ID
)
)
self.assertEqual(
commit_log_model.user_id, question_mappings[self.QUESTION_1_ID])
def test_multiple_questions_are_pseudonymized(self):
self.save_new_question(
self.QUESTION_2_ID,
self.user_1_id,
self._create_valid_question_data('ABC'),
[self.SKILL_1_ID]
)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
question_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.question]
)
metadata_model = (
question_models.QuestionSnapshotMetadataModel.get_by_id(
'%s-1' % self.QUESTION_1_ID
)
)
self.assertEqual(
metadata_model.committer_id, question_mappings[self.QUESTION_1_ID])
commit_log_model = (
question_models.QuestionCommitLogEntryModel.get_by_id(
'question-%s-1' % self.QUESTION_1_ID
)
)
self.assertEqual(
commit_log_model.user_id, question_mappings[self.QUESTION_1_ID])
metadata_model = (
question_models.QuestionSnapshotMetadataModel.get_by_id(
'%s-1' % self.QUESTION_2_ID
)
)
self.assertEqual(
metadata_model.committer_id, question_mappings[self.QUESTION_2_ID])
commit_log_model = (
question_models.QuestionCommitLogEntryModel.get_by_id(
'question-%s-1' % self.QUESTION_2_ID
)
)
self.assertEqual(
commit_log_model.user_id, question_mappings[self.QUESTION_2_ID])
def test_multiple_questions_with_multiple_users_are_pseudonymized(self):
self.save_new_question(
self.QUESTION_2_ID,
self.user_2_id,
self._create_valid_question_data('ABC'),
[self.SKILL_1_ID]
)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify first user is deleted.
question_mappings_1 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.question]
)
metadata_model = (
question_models.QuestionSnapshotMetadataModel.get_by_id(
'%s-1' % self.QUESTION_1_ID
)
)
self.assertEqual(
metadata_model.committer_id,
question_mappings_1[self.QUESTION_1_ID]
)
commit_log_model = (
question_models.QuestionCommitLogEntryModel.get_by_id(
'question-%s-1' % self.QUESTION_1_ID
)
)
self.assertEqual(
commit_log_model.user_id, question_mappings_1[self.QUESTION_1_ID])
# Verify second user is not yet deleted.
metadata_model = (
question_models.QuestionSnapshotMetadataModel.get_by_id(
'%s-1' % self.QUESTION_2_ID
)
)
self.assertEqual(metadata_model.committer_id, self.user_2_id)
commit_log_model = (
question_models.QuestionCommitLogEntryModel.get_by_id(
'question-%s-1' % self.QUESTION_2_ID
)
)
self.assertEqual(commit_log_model.user_id, self.user_2_id)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
# Verify second user is deleted.
question_mappings_2 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_2_id
).pseudonymizable_entity_mappings[models.NAMES.question]
)
metadata_model = (
question_models.QuestionSnapshotMetadataModel.get_by_id(
'%s-1' % self.QUESTION_2_ID
)
)
self.assertEqual(
metadata_model.committer_id,
question_mappings_2[self.QUESTION_2_ID]
)
commit_log_model = (
question_models.QuestionCommitLogEntryModel.get_by_id(
'question-%s-1' % self.QUESTION_2_ID
)
)
self.assertEqual(
commit_log_model.user_id, question_mappings_2[self.QUESTION_2_ID])
def test_one_question_with_multiple_users_is_pseudonymized(self):
question_services.update_question(
self.user_2_id,
self.QUESTION_1_ID,
[question_domain.QuestionChange({
'cmd': question_domain.CMD_UPDATE_QUESTION_PROPERTY,
'property_name': (
question_domain.QUESTION_PROPERTY_LANGUAGE_CODE),
'new_value': 'cs',
'old_value': 'en'
})],
'Change language.'
)
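        # The second user edits the question, so the version-2 snapshot and
        # commit log are attributed to them and must be pseudonymized
        # separately.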
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify first user is deleted.
question_mappings_1 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.question]
)
metadata_model = (
question_models.QuestionSnapshotMetadataModel.get_by_id(
'%s-1' % self.QUESTION_1_ID
)
)
self.assertEqual(
metadata_model.committer_id,
question_mappings_1[self.QUESTION_1_ID]
)
commit_log_model = (
question_models.QuestionCommitLogEntryModel.get_by_id(
'question-%s-1' % self.QUESTION_1_ID
)
)
self.assertEqual(
commit_log_model.user_id, question_mappings_1[self.QUESTION_1_ID])
# Verify second user is not yet deleted.
metadata_model = (
question_models.QuestionSnapshotMetadataModel.get_by_id(
'%s-2' % self.QUESTION_1_ID
)
)
self.assertEqual(metadata_model.committer_id, self.user_2_id)
commit_log_model = (
question_models.QuestionCommitLogEntryModel.get_by_id(
'question-%s-2' % self.QUESTION_1_ID
)
)
self.assertEqual(commit_log_model.user_id, self.user_2_id)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
# Verify second user is deleted.
question_mappings_2 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_2_id
).pseudonymizable_entity_mappings[models.NAMES.question]
)
metadata_model = (
question_models.QuestionSnapshotMetadataModel.get_by_id(
'%s-2' % self.QUESTION_1_ID
)
)
self.assertEqual(
metadata_model.committer_id,
question_mappings_2[self.QUESTION_1_ID]
)
commit_log_model = (
question_models.QuestionCommitLogEntryModel.get_by_id(
'question-%s-2' % self.QUESTION_1_ID
)
)
self.assertEqual(
commit_log_model.user_id, question_mappings_2[self.QUESTION_1_ID])
class WipeoutServiceVerifyDeleteQuestionModelsTests(test_utils.GenericTestBase):
"""Provides testing of the verification part of wipeout service."""
SKILL_1_ID = 'SKILL_1_ID'
QUESTION_1_ID = 'QUESTION_1_ID'
QUESTION_2_ID = 'QUESTION_2_ID'
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
def setUp(self):
super(WipeoutServiceVerifyDeleteQuestionModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.set_admins((self.USER_1_USERNAME, self.USER_2_USERNAME))
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
self.save_new_skill(self.SKILL_1_ID, self.user_1_id)
self.save_new_question(
self.QUESTION_1_ID,
self.user_1_id,
self._create_valid_question_data('ABC'),
[self.SKILL_1_ID]
)
self.save_new_question(
self.QUESTION_2_ID,
self.user_2_id,
self._create_valid_question_data('ABC'),
[self.SKILL_1_ID]
)
wipeout_service.pre_delete_user(self.user_1_id)
wipeout_service.pre_delete_user(self.user_2_id)
self.process_and_flush_pending_tasks()
def test_verification_is_successful(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
def test_verification_when_deletion_failed_is_unsuccessful(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_2_id))
question_services.update_question(
self.user_2_id,
self.QUESTION_2_ID,
[question_domain.QuestionChange({
'cmd': question_domain.CMD_UPDATE_QUESTION_PROPERTY,
'property_name': (
question_domain.QUESTION_PROPERTY_LANGUAGE_CODE),
'new_value': 'cs',
'old_value': 'en'
})],
'Change language.'
        )
        self.assertFalse(wipeout_service.verify_user_deleted(self.user_2_id))
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_2_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_2_id))
class WipeoutServiceDeleteSkillModelsTests(test_utils.GenericTestBase):
"""Provides testing of the deletion part of wipeout service."""
SKILL_1_ID = 'skill_1_id'
SKILL_2_ID = 'skill_2_id'
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
def setUp(self):
super(WipeoutServiceDeleteSkillModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.set_admins((self.USER_1_USERNAME, self.USER_2_USERNAME))
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
self.save_new_skill(self.SKILL_1_ID, self.user_1_id)
wipeout_service.pre_delete_user(self.user_1_id)
wipeout_service.pre_delete_user(self.user_2_id)
self.process_and_flush_pending_tasks()
def test_one_skill_is_pseudonymized(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify user is deleted.
skill_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.skill]
)
metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
'%s-1' % self.SKILL_1_ID)
self.assertEqual(
metadata_model.committer_id, skill_mappings[self.SKILL_1_ID])
commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
'skill-%s-1' % self.SKILL_1_ID)
self.assertEqual(
commit_log_model.user_id, skill_mappings[self.SKILL_1_ID])
def test_one_skill_with_missing_snapshot_is_pseudonymized(self):
skill_models.SkillCommitLogEntryModel(
id='skill-%s-1' % self.SKILL_2_ID,
skill_id=self.SKILL_2_ID,
user_id=self.user_1_id,
commit_type='create_new',
commit_cmds=[{}],
post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
version=1
).put_for_human()
with self.capture_logging(min_level=logging.ERROR) as log_messages:
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertEqual(
log_messages,
['[WIPEOUT] The commit log model \'SkillCommitLogEntryModel\' and '
'snapshot models [\'SkillSnapshotMetadataModel\'] IDs differ. '
'Snapshots without commit logs: [], '
'commit logs without snapshots: [u\'%s\'].' % self.SKILL_2_ID])
# Verify user is deleted.
skill_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.skill]
)
metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
'%s-1' % self.SKILL_1_ID)
self.assertEqual(
metadata_model.committer_id, skill_mappings[self.SKILL_1_ID])
commit_log_model_1 = skill_models.SkillCommitLogEntryModel.get_by_id(
'skill-%s-1' % self.SKILL_1_ID)
self.assertEqual(
commit_log_model_1.user_id, skill_mappings[self.SKILL_1_ID])
commit_log_model_2 = skill_models.SkillCommitLogEntryModel.get_by_id(
'skill-%s-1' % self.SKILL_2_ID)
self.assertEqual(
commit_log_model_2.user_id, skill_mappings[self.SKILL_2_ID])
def test_one_skill_when_the_deletion_is_repeated_is_pseudonymized(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Return metadata model to the original user ID.
metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
'%s-1' % self.SKILL_1_ID)
metadata_model.committer_id = self.user_1_id
metadata_model.put_for_human()
# Run the user deletion again.
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify that both the commit and the metadata have the same
# pseudonymous user ID.
skill_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.skill]
)
metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
'%s-1' % self.SKILL_1_ID)
self.assertEqual(
metadata_model.committer_id, skill_mappings[self.SKILL_1_ID])
commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
'skill-%s-1' % self.SKILL_1_ID)
self.assertEqual(
commit_log_model.user_id, skill_mappings[self.SKILL_1_ID])
def test_multiple_skills_are_pseudonymized(self):
self.save_new_skill(self.SKILL_2_ID, self.user_1_id)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
skill_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.skill]
)
metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
'%s-1' % self.SKILL_1_ID)
self.assertEqual(
metadata_model.committer_id, skill_mappings[self.SKILL_1_ID])
commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
'skill-%s-1' % self.SKILL_1_ID)
self.assertEqual(
commit_log_model.user_id, skill_mappings[self.SKILL_1_ID])
metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
'%s-1' % self.SKILL_2_ID)
self.assertEqual(
metadata_model.committer_id, skill_mappings[self.SKILL_2_ID])
commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
'skill-%s-1' % self.SKILL_2_ID)
self.assertEqual(
commit_log_model.user_id, skill_mappings[self.SKILL_2_ID])
def test_multiple_skills_with_multiple_users_are_pseudonymized(self):
self.save_new_skill(self.SKILL_2_ID, self.user_2_id)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify first user is deleted.
skill_mappings_1 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.skill]
)
metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
'%s-1' % self.SKILL_1_ID)
self.assertEqual(
metadata_model.committer_id, skill_mappings_1[self.SKILL_1_ID])
commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
'skill-%s-1' % self.SKILL_1_ID)
self.assertEqual(
commit_log_model.user_id, skill_mappings_1[self.SKILL_1_ID])
# Verify second user is not yet deleted.
metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
'%s-1' % self.SKILL_2_ID)
self.assertEqual(metadata_model.committer_id, self.user_2_id)
commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
'skill-%s-1' % self.SKILL_2_ID)
self.assertEqual(commit_log_model.user_id, self.user_2_id)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
# Verify second user is deleted.
skill_mappings_2 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_2_id
).pseudonymizable_entity_mappings[models.NAMES.skill]
)
metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
'%s-1' % self.SKILL_2_ID)
self.assertEqual(
metadata_model.committer_id, skill_mappings_2[self.SKILL_2_ID])
commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
'skill-%s-1' % self.SKILL_2_ID)
self.assertEqual(
commit_log_model.user_id, skill_mappings_2[self.SKILL_2_ID])
def test_one_skill_with_multiple_users_is_pseudonymized(self):
skill_services.update_skill(
self.user_2_id,
self.SKILL_1_ID,
[skill_domain.SkillChange({
'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY,
'property_name': skill_domain.SKILL_PROPERTY_LANGUAGE_CODE,
'new_value': 'cs',
'old_value': 'en'
})],
'Change language.'
)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify first user is deleted.
skill_mappings_1 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.skill]
)
metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
'%s-1' % self.SKILL_1_ID)
self.assertEqual(
metadata_model.committer_id, skill_mappings_1[self.SKILL_1_ID])
commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
'skill-%s-1' % self.SKILL_1_ID)
self.assertEqual(
commit_log_model.user_id, skill_mappings_1[self.SKILL_1_ID])
# Verify second user is not yet deleted.
metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
'%s-2' % self.SKILL_1_ID)
self.assertEqual(metadata_model.committer_id, self.user_2_id)
commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
'skill-%s-2' % self.SKILL_1_ID)
self.assertEqual(commit_log_model.user_id, self.user_2_id)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
# Verify second user is deleted.
skill_mappings_2 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_2_id
).pseudonymizable_entity_mappings[models.NAMES.skill]
)
metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
'%s-2' % self.SKILL_1_ID)
self.assertEqual(
metadata_model.committer_id, skill_mappings_2[self.SKILL_1_ID])
commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
'skill-%s-2' % self.SKILL_1_ID)
self.assertEqual(
commit_log_model.user_id, skill_mappings_2[self.SKILL_1_ID])
class WipeoutServiceVerifyDeleteSkillModelsTests(test_utils.GenericTestBase):
"""Provides testing of the verification part of wipeout service."""
SKILL_1_ID = 'skill_1_id'
SKILL_2_ID = 'skill_2_id'
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
def setUp(self):
super(WipeoutServiceVerifyDeleteSkillModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.set_admins((self.USER_1_USERNAME, self.USER_2_USERNAME))
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
self.save_new_skill(self.SKILL_1_ID, self.user_1_id)
self.save_new_skill(self.SKILL_2_ID, self.user_2_id)
wipeout_service.pre_delete_user(self.user_1_id)
wipeout_service.pre_delete_user(self.user_2_id)
self.process_and_flush_pending_tasks()
def test_verification_is_successful(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
def test_verification_when_deletion_failed_is_unsuccessful(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_2_id))
skill_services.update_skill(
self.user_2_id,
self.SKILL_2_ID,
[skill_domain.SkillChange({
'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY,
'property_name': skill_domain.SKILL_PROPERTY_LANGUAGE_CODE,
'new_value': 'cs',
'old_value': 'en'
})],
'Change language.'
)
self.assertFalse(wipeout_service.verify_user_deleted(self.user_2_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_2_id))
class WipeoutServiceDeleteStoryModelsTests(test_utils.GenericTestBase):
"""Provides testing of the deletion part of wipeout service."""
TOPIC_1_ID = 'topic_1_id'
STORY_1_ID = 'story_1_id'
STORY_2_ID = 'story_2_id'
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
def setUp(self):
super(WipeoutServiceDeleteStoryModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
self.save_new_topic(
self.TOPIC_1_ID,
self.user_1_id,
abbreviated_name='abbrev-one',
url_fragment='frag-one',
canonical_story_ids=[self.STORY_1_ID])
self.save_new_story(self.STORY_1_ID, self.user_1_id, self.TOPIC_1_ID)
wipeout_service.pre_delete_user(self.user_1_id)
wipeout_service.pre_delete_user(self.user_2_id)
self.process_and_flush_pending_tasks()
def test_one_story_is_pseudonymized(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify user is deleted.
story_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.story]
)
metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
'%s-1' % self.STORY_1_ID)
self.assertEqual(
metadata_model.committer_id, story_mappings[self.STORY_1_ID])
commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
'story-%s-1' % self.STORY_1_ID)
self.assertEqual(
commit_log_model.user_id, story_mappings[self.STORY_1_ID])
def test_one_story_with_missing_snapshot_is_pseudonymized(self):
story_models.StoryCommitLogEntryModel(
id='story-%s-1' % self.STORY_2_ID,
story_id=self.STORY_2_ID,
user_id=self.user_1_id,
commit_type='create_new',
commit_cmds=[{}],
post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
version=1
).put_for_human()
with self.capture_logging(min_level=logging.ERROR) as log_messages:
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertEqual(
log_messages,
['[WIPEOUT] The commit log model \'StoryCommitLogEntryModel\' and '
'snapshot models [\'StorySnapshotMetadataModel\'] IDs differ. '
'Snapshots without commit logs: [], '
'commit logs without snapshots: [u\'%s\'].' % self.STORY_2_ID])
# Verify user is deleted.
story_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.story]
)
metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
'%s-1' % self.STORY_1_ID)
self.assertEqual(
metadata_model.committer_id, story_mappings[self.STORY_1_ID])
commit_log_model_1 = story_models.StoryCommitLogEntryModel.get_by_id(
'story-%s-1' % self.STORY_1_ID)
self.assertEqual(
commit_log_model_1.user_id, story_mappings[self.STORY_1_ID])
commit_log_model_2 = story_models.StoryCommitLogEntryModel.get_by_id(
'story-%s-1' % self.STORY_2_ID)
self.assertEqual(
commit_log_model_2.user_id, story_mappings[self.STORY_2_ID])
def test_one_story_when_the_deletion_is_repeated_is_pseudonymized(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Return metadata model to the original user ID.
metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
'%s-1' % self.STORY_1_ID)
metadata_model.committer_id = self.user_1_id
metadata_model.put_for_human()
# Run the user deletion again.
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify that both the commit and the metadata have the same
# pseudonymous user ID.
story_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.story]
)
metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
'%s-1' % self.STORY_1_ID)
self.assertEqual(
metadata_model.committer_id, story_mappings[self.STORY_1_ID])
commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
'story-%s-1' % self.STORY_1_ID)
self.assertEqual(
commit_log_model.user_id, story_mappings[self.STORY_1_ID])
def test_multiple_stories_are_pseudonymized(self):
self.save_new_topic(
self.TOPIC_1_ID, self.user_1_id, name='Topic 2',
abbreviated_name='abbrev-two', url_fragment='frag-two')
self.save_new_story(self.STORY_2_ID, self.user_1_id, self.TOPIC_1_ID)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
story_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.story]
)
metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
'%s-1' % self.STORY_1_ID)
self.assertEqual(
metadata_model.committer_id, story_mappings[self.STORY_1_ID])
commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
'story-%s-1' % self.STORY_1_ID)
self.assertEqual(
commit_log_model.user_id, story_mappings[self.STORY_1_ID])
metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
'%s-1' % self.STORY_2_ID)
self.assertEqual(
metadata_model.committer_id, story_mappings[self.STORY_2_ID])
commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
'story-%s-1' % self.STORY_2_ID)
self.assertEqual(
commit_log_model.user_id, story_mappings[self.STORY_2_ID])
def test_multiple_stories_with_multiple_users_are_pseudonymized(self):
self.save_new_topic(
self.TOPIC_1_ID, self.user_2_id, name='Topic 2',
abbreviated_name='abbrev-three', url_fragment='frag-three')
self.save_new_story(self.STORY_2_ID, self.user_2_id, self.TOPIC_1_ID)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify first user is deleted.
story_mappings_1 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.story]
)
metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
'%s-1' % self.STORY_1_ID)
self.assertEqual(
metadata_model.committer_id, story_mappings_1[self.STORY_1_ID])
commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
'story-%s-1' % self.STORY_1_ID)
self.assertEqual(
commit_log_model.user_id, story_mappings_1[self.STORY_1_ID])
# Verify second user is not yet deleted.
metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
'%s-1' % self.STORY_2_ID)
self.assertEqual(metadata_model.committer_id, self.user_2_id)
commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
'story-%s-1' % self.STORY_2_ID)
self.assertEqual(commit_log_model.user_id, self.user_2_id)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
# Verify second user is deleted.
story_mappings_2 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_2_id
).pseudonymizable_entity_mappings[models.NAMES.story]
)
metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
'%s-1' % self.STORY_2_ID)
self.assertEqual(
metadata_model.committer_id, story_mappings_2[self.STORY_2_ID])
commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
'story-%s-1' % self.STORY_2_ID)
self.assertEqual(
commit_log_model.user_id, story_mappings_2[self.STORY_2_ID])
def test_one_story_with_multiple_users_is_pseudonymized(self):
story_services.update_story(
self.user_2_id,
self.STORY_1_ID,
[story_domain.StoryChange({
'cmd': story_domain.CMD_ADD_STORY_NODE,
'node_id': 'node_1',
'title': 'Title 2'
})],
'Add node.'
)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify first user is deleted.
story_mappings_1 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.story]
)
metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
'%s-1' % self.STORY_1_ID)
self.assertEqual(
metadata_model.committer_id, story_mappings_1[self.STORY_1_ID])
commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
'story-%s-1' % self.STORY_1_ID)
self.assertEqual(
commit_log_model.user_id, story_mappings_1[self.STORY_1_ID])
# Verify second user is not yet deleted.
metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
'%s-2' % self.STORY_1_ID)
self.assertEqual(metadata_model.committer_id, self.user_2_id)
commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
'story-%s-2' % self.STORY_1_ID)
self.assertEqual(commit_log_model.user_id, self.user_2_id)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
# Verify second user is deleted.
story_mappings_2 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_2_id
).pseudonymizable_entity_mappings[models.NAMES.story]
)
metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
'%s-2' % self.STORY_1_ID)
self.assertEqual(
metadata_model.committer_id, story_mappings_2[self.STORY_1_ID])
commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
'story-%s-2' % self.STORY_1_ID)
self.assertEqual(
commit_log_model.user_id, story_mappings_2[self.STORY_1_ID])
class WipeoutServiceVerifyDeleteStoryModelsTests(test_utils.GenericTestBase):
"""Provides testing of the verification part of wipeout service."""
TOPIC_1_ID = 'topic_1_id'
TOPIC_2_ID = 'topic_2_id'
STORY_1_ID = 'story_1_id'
STORY_2_ID = 'story_2_id'
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
def setUp(self):
super(WipeoutServiceVerifyDeleteStoryModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
self.save_new_topic(
self.TOPIC_1_ID, self.user_1_id, abbreviated_name='abbrev-four',
url_fragment='frag-four')
self.save_new_story(self.STORY_1_ID, self.user_1_id, self.TOPIC_1_ID)
self.save_new_topic(
self.TOPIC_2_ID,
self.user_2_id,
name='Topic 2',
abbreviated_name='abbrev-five',
url_fragment='frag-five',
canonical_story_ids=[self.STORY_2_ID])
self.save_new_story(self.STORY_2_ID, self.user_2_id, self.TOPIC_2_ID)
wipeout_service.pre_delete_user(self.user_1_id)
wipeout_service.pre_delete_user(self.user_2_id)
self.process_and_flush_pending_tasks()
def test_verification_is_successful(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
def test_verification_when_deletion_failed_is_unsuccessful(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_2_id))
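        # Simulate an edit made on behalf of the deleted user; this creates
        # new story models that still reference the original user ID.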
story_services.update_story(
self.user_2_id,
self.STORY_2_ID,
[story_domain.StoryChange({
'cmd': story_domain.CMD_ADD_STORY_NODE,
'node_id': 'node_1',
'title': 'Title 2'
})],
'Add node.'
)
self.assertFalse(wipeout_service.verify_user_deleted(self.user_2_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_2_id))
class WipeoutServiceDeleteSubtopicModelsTests(test_utils.GenericTestBase):
"""Provides testing of the deletion part of wipeout service."""
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
TOP_1_ID = 'top_1_id'
SUBTOP_1_ID = 'subtop_1_id'
SUBTOP_2_ID = 'subtop_2_id'
def setUp(self):
super(WipeoutServiceDeleteSubtopicModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
self.save_new_topic(self.TOP_1_ID, self.user_1_id)
self.subtopic_page = self.save_new_subtopic(
self.SUBTOP_1_ID, self.user_1_id, self.TOP_1_ID)
wipeout_service.pre_delete_user(self.user_1_id)
wipeout_service.pre_delete_user(self.user_2_id)
self.process_and_flush_pending_tasks()
def test_one_subtopic_is_pseudonymized(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify user is deleted.
subtopic_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.subtopic]
)
metadata_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
'%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(
metadata_model.committer_id,
subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
commit_log_model = (
subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
'subtopicpage-%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(
commit_log_model.user_id,
subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
def test_one_subtopic_with_missing_snapshot_is_pseudonymized(self):
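        # Create a commit log entry that has no corresponding snapshot
        # metadata model, so the deletion has to handle the mismatch.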
subtopic_models.SubtopicPageCommitLogEntryModel(
id='%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_2_ID),
subtopic_page_id=self.SUBTOP_2_ID,
user_id=self.user_1_id,
commit_type='create_new',
commit_cmds=[{}],
post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
version=1
).put_for_human()
with self.capture_logging(min_level=logging.ERROR) as log_messages:
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertEqual(
log_messages,
['[WIPEOUT] The commit log model '
'\'SubtopicPageCommitLogEntryModel\' and snapshot models '
'[\'SubtopicPageSnapshotMetadataModel\'] IDs differ. '
'Snapshots without commit logs: [], '
'commit logs without snapshots: [u\'%s\'].' % self.SUBTOP_2_ID])
# Verify user is deleted.
subtopic_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.subtopic]
)
metadata_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
'%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(
metadata_model.committer_id,
subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
commit_log_model = (
subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
'subtopicpage-%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(
commit_log_model.user_id,
subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
def test_one_subtopic_when_the_deletion_is_repeated_is_pseudonymized(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Return metadata model to the original user ID.
metadata_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
'%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
metadata_model.committer_id = self.user_1_id
metadata_model.put_for_human()
# Run the user deletion again.
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify that both the commit and the metadata have the same
# pseudonymous user ID.
subtopic_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.subtopic]
)
metadata_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
'%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(
metadata_model.committer_id,
subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
commit_log_model = (
subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
'subtopicpage-%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(
commit_log_model.user_id,
subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
def test_multiple_subtopics_are_pseudonymized(self):
self.save_new_subtopic(self.SUBTOP_2_ID, self.user_1_id, self.TOP_1_ID)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
subtopic_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.subtopic]
)
metadata_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
'%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(
metadata_model.committer_id,
subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
commit_log_model = (
subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
'subtopicpage-%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(
commit_log_model.user_id,
subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
metadata_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
'%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_2_ID)))
self.assertEqual(
metadata_model.committer_id,
subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_2_ID)])
commit_log_model = (
subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
'subtopicpage-%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_2_ID)))
self.assertEqual(
commit_log_model.user_id,
subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_2_ID)])
def test_multiple_subtopics_with_multiple_users_are_pseudonymized(self):
self.save_new_subtopic(self.SUBTOP_2_ID, self.user_2_id, self.TOP_1_ID)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify first user is deleted.
subtopic_mappings_1 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.subtopic]
)
metadata_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
'%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(
metadata_model.committer_id,
subtopic_mappings_1['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
commit_log_model = (
subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
'subtopicpage-%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(
commit_log_model.user_id,
subtopic_mappings_1['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
# Verify second user is not yet deleted.
metadata_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
'%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_2_ID)))
self.assertEqual(metadata_model.committer_id, self.user_2_id)
commit_log_model = (
subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
'subtopicpage-%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_2_ID)))
self.assertEqual(commit_log_model.user_id, self.user_2_id)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
# Verify second user is deleted.
subtopic_mappings_2 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_2_id
).pseudonymizable_entity_mappings[models.NAMES.subtopic]
)
metadata_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
'%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_2_ID)))
self.assertEqual(
metadata_model.committer_id,
subtopic_mappings_2['%s-%s' % (self.TOP_1_ID, self.SUBTOP_2_ID)])
commit_log_model = (
subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
'subtopicpage-%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_2_ID)))
self.assertEqual(
commit_log_model.user_id,
subtopic_mappings_2['%s-%s' % (self.TOP_1_ID, self.SUBTOP_2_ID)])
def test_one_subtopic_with_multiple_users_is_pseudonymized(self):
subtopic_page_services.save_subtopic_page(
self.user_2_id,
self.subtopic_page,
'Change subtopic',
[
subtopic_page_domain.SubtopicPageChange({
'cmd': (
subtopic_page_domain.CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY),
'property_name': (
subtopic_page_domain
.SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML),
'new_value': 'new value',
'old_value': 'old value',
'subtopic_id': self.SUBTOP_1_ID
})
]
)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify first user is deleted.
subtopic_mappings_1 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.subtopic]
)
metadata_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
'%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(
metadata_model.committer_id,
subtopic_mappings_1['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
commit_log_model = (
subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
'subtopicpage-%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(
commit_log_model.user_id,
subtopic_mappings_1['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
# Verify second user is not yet deleted.
metadata_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
'%s-%s-2' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(metadata_model.committer_id, self.user_2_id)
commit_log_model = (
subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
'subtopicpage-%s-%s-2' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(commit_log_model.user_id, self.user_2_id)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
# Verify second user is deleted.
subtopic_mappings_2 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_2_id
).pseudonymizable_entity_mappings[models.NAMES.subtopic]
)
metadata_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
'%s-%s-2' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(
metadata_model.committer_id,
subtopic_mappings_2['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
commit_log_model = (
subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
'subtopicpage-%s-%s-2' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(
commit_log_model.user_id,
subtopic_mappings_2['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
class WipeoutServiceVerifyDeleteSubtopicModelsTests(test_utils.GenericTestBase):
"""Provides testing of the verification part of wipeout service."""
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
TOP_1_ID = 'top_1_id'
SUBTOP_1_ID = 'subtop_1_id'
def setUp(self):
super(WipeoutServiceVerifyDeleteSubtopicModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.save_new_topic(self.TOP_1_ID, self.user_1_id)
self.save_new_subtopic(self.SUBTOP_1_ID, self.user_1_id, self.TOP_1_ID)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
def test_verification_is_successful(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
def test_verification_when_deletion_failed_is_unsuccessful(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
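        # Re-create a snapshot metadata model that still references the
        # original user ID to simulate an incomplete deletion.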
subtopic_models.SubtopicPageSnapshotMetadataModel(
id='%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID),
committer_id=self.user_1_id,
commit_message='123',
commit_type='create',
commit_cmds={}
).put_for_human()
self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
class WipeoutServiceDeleteSuggestionModelsTests(test_utils.GenericTestBase):
"""Provides testing of the deletion part of wipeout service."""
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
VOICEOVER_1_ID = 'voiceover_1_id'
VOICEOVER_2_ID = 'voiceover_2_id'
EXP_1_ID = 'exp_1_id'
EXP_2_ID = 'exp_2_id'
def setUp(self):
super(WipeoutServiceDeleteSuggestionModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
suggestion_models.GeneralVoiceoverApplicationModel(
id=self.VOICEOVER_1_ID,
target_type=feconf.ENTITY_TYPE_EXPLORATION,
target_id=self.EXP_1_ID,
language_code='en',
status=suggestion_models.STATUS_IN_REVIEW,
content='Text',
filename='filename.txt',
author_id=self.user_1_id,
final_reviewer_id=self.user_2_id,
).put()
suggestion_models.GeneralVoiceoverApplicationModel(
id=self.VOICEOVER_2_ID,
target_type=feconf.ENTITY_TYPE_EXPLORATION,
target_id=self.EXP_2_ID,
language_code='en',
status=suggestion_models.STATUS_IN_REVIEW,
content='Text',
filename='filename.txt',
author_id=self.user_2_id,
final_reviewer_id=self.user_1_id,
).put()
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
def test_voiceover_application_is_pseudonymized(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
suggestion_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.suggestion]
)
# Verify user is pseudonymized.
voiceover_application_model_1 = (
suggestion_models.GeneralVoiceoverApplicationModel.get_by_id(
self.VOICEOVER_1_ID)
)
self.assertEqual(
voiceover_application_model_1.author_id,
suggestion_mappings[self.VOICEOVER_1_ID]
)
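        # User 1 reviewed (rather than authored) the second application, so
        # its final_reviewer_id is the field that gets pseudonymized.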
voiceover_application_model_2 = (
suggestion_models.GeneralVoiceoverApplicationModel.get_by_id(
self.VOICEOVER_2_ID)
)
self.assertEqual(
voiceover_application_model_2.final_reviewer_id,
suggestion_mappings[self.VOICEOVER_2_ID]
)
class WipeoutServiceVerifyDeleteSuggestionModelsTests(
test_utils.GenericTestBase):
"""Provides testing of the verification part of wipeout service."""
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
VOICEOVER_1_ID = 'voiceover_1_id'
VOICEOVER_2_ID = 'voiceover_2_id'
EXP_1_ID = 'exp_1_id'
EXP_2_ID = 'exp_2_id'
def setUp(self):
super(WipeoutServiceVerifyDeleteSuggestionModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
suggestion_models.GeneralVoiceoverApplicationModel(
id=self.VOICEOVER_1_ID,
target_type=feconf.ENTITY_TYPE_EXPLORATION,
target_id=self.EXP_1_ID,
language_code='en',
status=suggestion_models.STATUS_IN_REVIEW,
content='Text',
filename='filename.txt',
author_id=self.user_1_id,
final_reviewer_id=self.user_2_id,
).put()
suggestion_models.GeneralVoiceoverApplicationModel(
id=self.VOICEOVER_2_ID,
target_type=feconf.ENTITY_TYPE_EXPLORATION,
target_id=self.EXP_2_ID,
language_code='en',
status=suggestion_models.STATUS_IN_REVIEW,
content='Text',
filename='filename.txt',
author_id=self.user_2_id,
final_reviewer_id=self.user_1_id,
).put()
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
def test_verify_user_delete_when_user_is_deleted_returns_true(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
def test_verify_user_delete_when_user_is_not_deleted_returns_false(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
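        # Re-add the voiceover application with the original author ID to
        # simulate a deletion that did not reach all models.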
suggestion_models.GeneralVoiceoverApplicationModel(
id=self.VOICEOVER_1_ID,
target_type=feconf.ENTITY_TYPE_EXPLORATION,
target_id=self.EXP_1_ID,
language_code='en',
status=suggestion_models.STATUS_IN_REVIEW,
content='Text',
filename='filename.txt',
author_id=self.user_1_id,
final_reviewer_id=self.user_2_id,
).put()
self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
class WipeoutServiceDeleteTopicModelsTests(test_utils.GenericTestBase):
"""Provides testing of the deletion part of wipeout service."""
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
TOP_1_ID = 'top_1_id'
TOP_2_ID = 'top_2_id'
def setUp(self):
super(WipeoutServiceDeleteTopicModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
user_services.update_user_role(
self.user_1_id, feconf.ROLE_ID_ADMIN)
user_services.update_user_role(
self.user_2_id, feconf.ROLE_ID_TOPIC_MANAGER)
self.user_1_actions = user_services.UserActionsInfo(self.user_1_id)
self.user_2_actions = user_services.UserActionsInfo(self.user_2_id)
self.save_new_topic(self.TOP_1_ID, self.user_1_id)
topic_services.assign_role(
self.user_1_actions,
self.user_1_actions,
topic_domain.ROLE_MANAGER,
self.TOP_1_ID)
topic_services.assign_role(
self.user_1_actions,
self.user_2_actions,
topic_domain.ROLE_MANAGER,
self.TOP_1_ID)
def test_one_topic_snapshot_metadata_is_pseudonymized(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify user is deleted.
topic_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.topic]
)
metadata_model = (
topic_models.TopicSnapshotMetadataModel.get_by_id(
'%s-1' % self.TOP_1_ID)
)
self.assertEqual(
metadata_model.committer_id, topic_mappings[self.TOP_1_ID])
rights_metadata_model_1 = (
topic_models.TopicRightsSnapshotMetadataModel.get_by_id(
'%s-1' % self.TOP_1_ID)
)
self.assertEqual(
rights_metadata_model_1.committer_id, topic_mappings[self.TOP_1_ID])
self.assertEqual(
rights_metadata_model_1.content_user_ids, [])
self.assertEqual(rights_metadata_model_1.commit_cmds_user_ids, [])
rights_metadata_model_2 = (
topic_models.TopicRightsSnapshotMetadataModel.get_by_id(
'%s-2' % self.TOP_1_ID)
)
self.assertEqual(
rights_metadata_model_2.committer_id, topic_mappings[self.TOP_1_ID])
self.assertEqual(
rights_metadata_model_2.content_user_ids,
[topic_mappings[self.TOP_1_ID]])
self.assertEqual(
rights_metadata_model_2.commit_cmds_user_ids,
[topic_mappings[self.TOP_1_ID]])
def test_one_topic_snapshot_content_is_pseudonymized(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify user is deleted.
topic_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.topic]
)
rights_content_model_1 = (
topic_models.TopicRightsSnapshotContentModel.get_by_id(
'%s-1' % self.TOP_1_ID)
)
self.assertEqual(
rights_content_model_1.content['manager_ids'], [])
rights_content_model_2 = (
topic_models.TopicRightsSnapshotContentModel.get_by_id(
'%s-3' % self.TOP_1_ID)
)
self.assertItemsEqual(
rights_content_model_2.content['manager_ids'],
[
topic_mappings[self.TOP_1_ID],
self.user_2_id
])
def test_one_topic_commit_log_is_pseudonymized(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify user is deleted.
topic_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.topic]
)
commit_log_model_1 = (
topic_models.TopicCommitLogEntryModel.get_by_id(
'rights-%s-2' % self.TOP_1_ID)
)
self.assertEqual(
commit_log_model_1.user_id, topic_mappings[self.TOP_1_ID])
def test_one_topic_with_missing_snapshot_is_pseudonymized(self):
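        # Create a topic commit log entry without a matching snapshot model;
        # the deletion should log the mismatch and still pseudonymize it.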
topic_models.TopicCommitLogEntryModel(
id='topic-%s-1' % self.TOP_2_ID,
topic_id=self.TOP_2_ID,
user_id=self.user_1_id,
commit_type='create_new',
commit_cmds=[{}],
post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
version=1
).put_for_human()
with self.capture_logging(min_level=logging.ERROR) as log_messages:
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertItemsEqual(
log_messages,
[
'[WIPEOUT] The commit log model \'TopicCommitLogEntryModel\' '
'and snapshot models [\'TopicSnapshotMetadataModel\', '
'\'TopicRightsSnapshotMetadataModel\'] IDs differ. '
'Snapshots without commit logs: [], '
'commit logs without snapshots: [u\'%s\'].' % self.TOP_2_ID
]
)
# Verify user is deleted.
topic_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.topic]
)
metadata_model = (
topic_models.TopicSnapshotMetadataModel.get_by_id(
'%s-1' % self.TOP_1_ID
)
)
self.assertEqual(
metadata_model.committer_id, topic_mappings[self.TOP_1_ID])
commit_log_model_1 = (
topic_models.TopicCommitLogEntryModel.get_by_id(
'topic-%s-1' % self.TOP_1_ID
)
)
self.assertEqual(
commit_log_model_1.user_id, topic_mappings[self.TOP_1_ID])
commit_log_model_2 = (
topic_models.TopicCommitLogEntryModel.get_by_id(
'topic-%s-1' % self.TOP_2_ID
)
)
self.assertEqual(
commit_log_model_2.user_id, topic_mappings[self.TOP_2_ID])
def test_one_topic_when_the_deletion_is_repeated_is_pseudonymized(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Return metadata model to the original user ID.
metadata_model = (
topic_models.TopicSnapshotMetadataModel.get_by_id(
'%s-1' % self.TOP_1_ID
)
)
metadata_model.committer_id = self.user_1_id
metadata_model.put_for_human()
# Run the user deletion again.
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify that both the commit and the metadata have the same
# pseudonymous user ID.
topic_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.topic]
)
metadata_model = (
topic_models.TopicSnapshotMetadataModel.get_by_id(
'%s-1' % self.TOP_1_ID
)
)
self.assertEqual(
metadata_model.committer_id, topic_mappings[self.TOP_1_ID])
commit_log_model = (
topic_models.TopicCommitLogEntryModel.get_by_id(
'topic-%s-1' % self.TOP_1_ID)
)
self.assertEqual(
commit_log_model.user_id, topic_mappings[self.TOP_1_ID])
def test_multiple_topics_are_pseudonymized(self):
self.save_new_topic(
self.TOP_2_ID,
self.user_1_id,
name='topic2',
url_fragment='topic-two')
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
topic_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.topic]
)
metadata_model = (
topic_models.TopicSnapshotMetadataModel.get_by_id(
'%s-1' % self.TOP_1_ID
)
)
self.assertEqual(
metadata_model.committer_id, topic_mappings[self.TOP_1_ID])
commit_log_model = (
topic_models.TopicCommitLogEntryModel.get_by_id(
'topic-%s-1' % self.TOP_1_ID
)
)
self.assertEqual(
commit_log_model.user_id, topic_mappings[self.TOP_1_ID])
metadata_model = (
topic_models.TopicSnapshotMetadataModel.get_by_id(
'%s-1' % self.TOP_2_ID
)
)
self.assertEqual(
metadata_model.committer_id, topic_mappings[self.TOP_2_ID])
commit_log_model = (
topic_models.TopicCommitLogEntryModel.get_by_id(
'topic-%s-1' % self.TOP_2_ID
)
)
self.assertEqual(
commit_log_model.user_id, topic_mappings[self.TOP_2_ID])
class WipeoutServiceVerifyDeleteTopicModelsTests(test_utils.GenericTestBase):
"""Provides testing of the verification part of wipeout service."""
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
TOP_1_ID = 'top_1_id'
TOP_2_ID = 'top_2_id'
SUBTOP_1_ID = 'subtop_1_id'
def setUp(self):
super(WipeoutServiceVerifyDeleteTopicModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.save_new_topic(self.TOP_1_ID, self.user_1_id)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
def test_verify_user_delete_when_user_is_deleted_returns_true(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
def test_verify_user_delete_when_user_is_not_deleted_returns_false(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
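        # Write a topic snapshot metadata model that again uses the original
        # user ID, simulating a deletion that did not complete.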
topic_models.TopicSnapshotMetadataModel(
id='%s-1' % self.TOP_1_ID,
committer_id=self.user_1_id,
commit_message='123',
commit_type='create',
commit_cmds={}
).put_for_human()
self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
class WipeoutServiceDeleteUserModelsTests(test_utils.GenericTestBase):
"""Provides testing of the deletion part of wipeout service."""
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
COLLECTION_1_ID = 'col_1_id'
COLLECTION_2_ID = 'col_2_id'
EXPLORATION_1_ID = 'exp_1_id'
EXPLORATION_2_ID = 'exp_2_id'
def setUp(self):
super(WipeoutServiceDeleteUserModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
user_models.CompletedActivitiesModel(
id=self.user_2_id, exploration_ids=[], collection_ids=[]
).put()
user_models.IncompleteActivitiesModel(
id=self.user_2_id, exploration_ids=[], collection_ids=[]
).put()
user_models.LearnerPlaylistModel(
id=self.user_2_id, exploration_ids=[], collection_ids=[]
).put()
self.user_1_auth_id = self.get_auth_id_from_email(self.USER_1_EMAIL)
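        # Set up a profile user linked to user 1 so that deletion of both
        # full users and their profiles can be exercised.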
user_data_dict = {
'schema_version': 1,
'display_alias': 'display_alias',
'pin': '12345',
'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE],
'preferred_site_language_code': None,
'preferred_audio_language_code': None,
'user_id': self.user_1_id,
}
new_user_data_dict = {
'schema_version': 1,
'display_alias': 'display_alias3',
'pin': '12345',
'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE],
'preferred_site_language_code': None,
'preferred_audio_language_code': None,
'user_id': None,
}
self.modifiable_user_data = (
user_domain.ModifiableUserData.from_raw_dict(user_data_dict))
self.modifiable_new_user_data = (
user_domain.ModifiableUserData.from_raw_dict(new_user_data_dict))
user_services.update_multiple_users_data(
[self.modifiable_user_data])
self.modifiable_new_user_data.display_alias = 'name'
self.modifiable_new_user_data.pin = '123'
self.profile_user_id = user_services.create_new_profiles(
self.user_1_auth_id, self.USER_1_EMAIL,
[self.modifiable_new_user_data]
)[0].user_id
user_models.CompletedActivitiesModel(
id=self.profile_user_id, exploration_ids=[], collection_ids=[]
).put()
user_models.IncompleteActivitiesModel(
id=self.profile_user_id, exploration_ids=[], collection_ids=[]
).put()
user_models.LearnerPlaylistModel(
id=self.profile_user_id, exploration_ids=[], collection_ids=[]
).put()
def test_delete_user_for_profile_user_is_successful(self):
wipeout_service.pre_delete_user(self.profile_user_id)
self.process_and_flush_pending_tasks()
self.assertIsNone(
auth_services.get_auth_id_from_user_id(self.profile_user_id))
self.assertTrue(
auth_services.verify_external_auth_associations_are_deleted(
self.profile_user_id))
self.assertIsNotNone(
user_models.CompletedActivitiesModel.get_by_id(
self.profile_user_id)
)
self.assertIsNotNone(
user_models.IncompleteActivitiesModel.get_by_id(
self.profile_user_id)
)
self.assertIsNotNone(
user_models.LearnerPlaylistModel.get_by_id(self.profile_user_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.profile_user_id))
self.assertIsNone(
user_models.CompletedActivitiesModel.get_by_id(
self.profile_user_id)
)
self.assertIsNone(
user_models.IncompleteActivitiesModel.get_by_id(
self.profile_user_id)
)
self.assertIsNone(
user_models.LearnerPlaylistModel.get_by_id(self.profile_user_id))
def test_delete_user_for_full_user_and_its_profiles_is_successful(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
self.assertIsNone(
auth_services.get_auth_id_from_user_id(self.user_1_id))
# External auth associations should not have been deleted yet.
self.assertFalse(
auth_services.verify_external_auth_associations_are_deleted(
self.user_1_id))
self.assertIsNotNone(
user_models.CompletedActivitiesModel.get_by_id(
self.profile_user_id))
self.assertIsNotNone(
user_models.IncompleteActivitiesModel.get_by_id(
self.profile_user_id))
self.assertIsNotNone(
user_models.LearnerPlaylistModel.get_by_id(self.profile_user_id))
self.assertIsNotNone(
user_models.UserEmailPreferencesModel.get_by_id(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.profile_user_id))
self.assertIsNone(
user_models.CompletedActivitiesModel.get_by_id(
self.profile_user_id))
self.assertIsNone(
user_models.IncompleteActivitiesModel.get_by_id(
self.profile_user_id))
self.assertIsNone(
user_models.LearnerPlaylistModel.get_by_id(self.profile_user_id))
self.assertIsNone(
user_models.UserEmailPreferencesModel.get_by_id(self.user_1_id))
def test_delete_user_with_collection_and_exploration_is_successful(self):
self.save_new_valid_exploration(
self.EXPLORATION_1_ID,
self.user_1_id)
self.save_new_valid_collection(
self.COLLECTION_1_ID,
self.user_1_id,
exploration_id=self.EXPLORATION_1_ID)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
self.assertIsNone(
collection_models.CollectionModel.get_by_id(self.COLLECTION_1_ID))
self.assertIsNone(
exp_models.ExplorationModel.get_by_id(self.EXPLORATION_1_ID))
self.assertIsNotNone(
user_models.UserEmailPreferencesModel.get_by_id(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.profile_user_id))
self.assertIsNone(
user_models.UserEmailPreferencesModel.get_by_id(self.user_1_id))
def test_delete_user_with_collections_and_explorations_is_successful(self):
self.save_new_valid_exploration(
self.EXPLORATION_1_ID,
self.user_1_id)
self.save_new_valid_collection(
self.COLLECTION_1_ID,
self.user_1_id,
exploration_id=self.EXPLORATION_1_ID)
self.save_new_valid_exploration(
self.EXPLORATION_2_ID,
self.user_1_id)
self.save_new_valid_collection(
self.COLLECTION_2_ID,
self.user_1_id,
exploration_id=self.EXPLORATION_2_ID)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
self.assertIsNotNone(
user_models.UserEmailPreferencesModel.get_by_id(self.user_1_id))
self.assertIsNone(
collection_models.CollectionModel.get_by_id(self.COLLECTION_1_ID))
self.assertIsNone(
exp_models.ExplorationModel.get_by_id(self.EXPLORATION_1_ID))
self.assertIsNone(
collection_models.CollectionModel.get_by_id(self.COLLECTION_2_ID))
self.assertIsNone(
exp_models.ExplorationModel.get_by_id(self.EXPLORATION_2_ID))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.profile_user_id))
self.assertIsNone(
user_models.UserEmailPreferencesModel.get_by_id(self.user_1_id))
self.assertIsNone(
collection_models.CollectionModel.get_by_id(self.COLLECTION_1_ID))
self.assertIsNone(
exp_models.ExplorationModel.get_by_id(self.EXPLORATION_1_ID))
self.assertIsNone(
collection_models.CollectionModel.get_by_id(self.COLLECTION_2_ID))
self.assertIsNone(
exp_models.ExplorationModel.get_by_id(self.EXPLORATION_2_ID))
def test_delete_user_with_collection_and_exploration_repeated_is_successful(
self):
self.save_new_valid_exploration(
self.EXPLORATION_1_ID,
self.user_1_id)
self.save_new_valid_collection(
self.COLLECTION_1_ID,
self.user_1_id,
exploration_id=self.EXPLORATION_1_ID)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
self.assertIsNotNone(
user_models.UserEmailPreferencesModel.get_by_id(self.user_1_id))
self.assertIsNone(
collection_models.CollectionModel.get_by_id(self.COLLECTION_1_ID))
self.assertIsNone(
exp_models.ExplorationModel.get_by_id(self.EXPLORATION_1_ID))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertIsNone(
user_models.UserEmailPreferencesModel.get_by_id(self.user_1_id))
self.save_new_valid_exploration(
self.EXPLORATION_1_ID,
self.user_1_id)
self.save_new_valid_collection(
self.COLLECTION_1_ID,
self.user_1_id,
exploration_id=self.EXPLORATION_1_ID)
self.assertIsNotNone(
collection_models.CollectionModel.get_by_id(self.COLLECTION_1_ID))
self.assertIsNotNone(
exp_models.ExplorationModel.get_by_id(self.EXPLORATION_1_ID))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertIsNone(
collection_models.CollectionModel.get_by_id(self.COLLECTION_1_ID))
self.assertIsNone(
exp_models.ExplorationModel.get_by_id(self.EXPLORATION_1_ID))
def test_delete_user_with_multiple_users_is_successful(self):
wipeout_service.pre_delete_user(self.user_2_id)
self.process_and_flush_pending_tasks()
self.assertIsNotNone(
user_models.UserEmailPreferencesModel.get_by_id(self.user_2_id))
self.assertIsNotNone(
user_models.CompletedActivitiesModel.get_by_id(self.user_2_id))
self.assertIsNotNone(
user_models.IncompleteActivitiesModel.get_by_id(self.user_2_id))
self.assertIsNotNone(
user_models.LearnerPlaylistModel.get_by_id(self.user_2_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
self.assertIsNone(
user_models.UserEmailPreferencesModel.get_by_id(self.user_2_id))
self.assertIsNone(
user_models.CompletedActivitiesModel.get_by_id(self.user_2_id))
self.assertIsNone(
user_models.IncompleteActivitiesModel.get_by_id(self.user_2_id))
self.assertIsNone(
user_models.LearnerPlaylistModel.get_by_id(self.user_2_id))
def test_after_deletion_user_and_its_profiles_cannot_do_anything(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.profile_user_id))
self.assertIsNone(user_services.get_user_settings(self.user_1_id))
self.assertIsNone(user_services.get_user_settings(self.profile_user_id))
with self.assertRaisesRegexp(Exception, 'User not found.'):
# Try to do some action with the deleted user.
user_services.update_preferred_language_codes(
self.user_1_id, ['en'])
with self.assertRaisesRegexp(Exception, 'User not found.'):
# Try to do some action with the deleted user.
user_services.update_preferred_language_codes(
self.profile_user_id, ['en'])
class WipeoutServiceVerifyDeleteUserModelsTests(test_utils.GenericTestBase):
"""Provides testing of the verification part of wipeout service."""
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
def setUp(self):
super(WipeoutServiceVerifyDeleteUserModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
self.user_1_auth_id = self.get_auth_id_from_email(self.USER_1_EMAIL)
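        # Set up a profile user linked to user 1 so that verification can
        # also be checked for profile users.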
user_data_dict = {
'schema_version': 1,
'display_alias': 'display_alias',
'pin': '12345',
'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE],
'preferred_site_language_code': None,
'preferred_audio_language_code': None,
'user_id': self.user_1_id,
}
new_user_data_dict = {
'schema_version': 1,
'display_alias': 'display_alias3',
'pin': '12345',
'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE],
'preferred_site_language_code': None,
'preferred_audio_language_code': None,
'user_id': None,
}
self.modifiable_user_data = (
user_domain.ModifiableUserData.from_raw_dict(user_data_dict))
self.modifiable_new_user_data = (
user_domain.ModifiableUserData.from_raw_dict(new_user_data_dict))
user_services.update_multiple_users_data(
[self.modifiable_user_data])
self.modifiable_new_user_data.display_alias = 'name'
self.modifiable_new_user_data.pin = '123'
self.profile_user_id = user_services.create_new_profiles(
self.user_1_auth_id, self.USER_1_EMAIL,
[self.modifiable_new_user_data]
)[0].user_id
wipeout_service.pre_delete_user(self.user_2_id)
self.process_and_flush_pending_tasks()
def test_verify_user_delete_when_profile_user_deleted_returns_true(self):
wipeout_service.pre_delete_user(self.profile_user_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.profile_user_id))
self.assertTrue(
wipeout_service.verify_user_deleted(self.profile_user_id))
def test_verify_user_delete_when_user_is_deleted_returns_true(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.profile_user_id)
)
self.assertTrue(
wipeout_service.verify_user_deleted(self.profile_user_id))
def test_verify_user_delete_when_user_is_not_deleted_returns_false(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_2_id))
user_models.CompletedActivitiesModel(
id=self.user_2_id, exploration_ids=[], collection_ids=[]
).put()
user_models.IncompleteActivitiesModel(
id=self.user_2_id, exploration_ids=[], collection_ids=[]
).put()
user_models.LearnerPlaylistModel(
id=self.user_2_id, exploration_ids=[], collection_ids=[]
).put()
self.assertFalse(wipeout_service.verify_user_deleted(self.user_2_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_2_id))
def test_verify_user_delete_when_profile_user_not_deleted_is_false(self):
wipeout_service.pre_delete_user(self.profile_user_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.profile_user_id))
self.assertTrue(
wipeout_service.verify_user_deleted(self.profile_user_id))
user_models.CompletedActivitiesModel(
id=self.profile_user_id, exploration_ids=[], collection_ids=[]
).put()
user_models.IncompleteActivitiesModel(
id=self.profile_user_id, exploration_ids=[], collection_ids=[]
).put()
user_models.LearnerPlaylistModel(
id=self.profile_user_id, exploration_ids=[], collection_ids=[]
).put()
self.assertFalse(
wipeout_service.verify_user_deleted(self.profile_user_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.profile_user_id))
self.assertTrue(
wipeout_service.verify_user_deleted(self.profile_user_id))
def test_verify_user_delete_when_external_auth_associations_are_not_deleted(
self):
self.assertFalse(
auth_services.verify_external_auth_associations_are_deleted(
self.user_1_id))
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
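        # Make delete_external_auth_associations a no-op so that the external
        # auth associations survive the deletion run.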
delete_external_auth_associations_swap = self.swap_to_always_return(
auth_services, 'delete_external_auth_associations')
with delete_external_auth_associations_swap:
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))
| 41.629367
| 80
| 0.676327
|
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
import logging
from constants import constants
from core.domain import auth_services
from core.domain import collection_services
from core.domain import email_manager
from core.domain import exp_services
from core.domain import question_domain
from core.domain import question_services
from core.domain import rights_domain
from core.domain import rights_manager
from core.domain import skill_domain
from core.domain import skill_services
from core.domain import story_domain
from core.domain import story_services
from core.domain import subtopic_page_domain
from core.domain import subtopic_page_services
from core.domain import topic_domain
from core.domain import topic_services
from core.domain import user_domain
from core.domain import user_services
from core.domain import wipeout_domain
from core.domain import wipeout_service
from core.platform import models
from core.tests import test_utils
import feconf
import python_utils
(
auth_models, base_models, collection_models,
config_models, email_models, exp_models,
feedback_models, improvements_models, question_models,
skill_models, story_models, subtopic_models,
suggestion_models, topic_models, user_models
) = models.Registry.import_models([
models.NAMES.auth, models.NAMES.base_model, models.NAMES.collection,
models.NAMES.config, models.NAMES.email, models.NAMES.exploration,
models.NAMES.feedback, models.NAMES.improvements, models.NAMES.question,
models.NAMES.skill, models.NAMES.story, models.NAMES.subtopic,
models.NAMES.suggestion, models.NAMES.topic, models.NAMES.user
])
datastore_services = models.Registry.import_datastore_services()
class WipeoutServiceHelpersTests(test_utils.GenericTestBase):
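    """Provides testing of the pending deletion request helper functions."""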
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
def setUp(self):
super(WipeoutServiceHelpersTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_1_role = user_services.get_user_settings(self.user_1_id).role
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
self.user_2_role = user_services.get_user_settings(self.user_2_id).role
def test_gets_pending_deletion_request(self):
wipeout_service.save_pending_deletion_requests(
[
wipeout_domain.PendingDeletionRequest.create_default(
self.user_1_id, self.USER_1_EMAIL, self.user_1_role)
]
)
pending_deletion_request = (
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertEqual(pending_deletion_request.user_id, self.user_1_id)
self.assertEqual(pending_deletion_request.email, self.USER_1_EMAIL)
self.assertEqual(pending_deletion_request.deletion_complete, False)
self.assertEqual(
pending_deletion_request.pseudonymizable_entity_mappings, {})
def test_get_number_of_pending_deletion_requests_returns_correct_number(
self):
number_of_pending_deletion_requests = (
wipeout_service.get_number_of_pending_deletion_requests())
self.assertEqual(number_of_pending_deletion_requests, 0)
wipeout_service.save_pending_deletion_requests(
[
wipeout_domain.PendingDeletionRequest.create_default(
self.user_1_id, self.USER_1_EMAIL, self.user_1_role),
wipeout_domain.PendingDeletionRequest.create_default(
self.user_2_id, self.USER_2_EMAIL, self.user_2_role)
]
)
number_of_pending_deletion_requests = (
wipeout_service.get_number_of_pending_deletion_requests())
self.assertEqual(number_of_pending_deletion_requests, 2)
def test_saves_pending_deletion_request_when_new(self):
pending_deletion_request = (
wipeout_domain.PendingDeletionRequest.create_default(
self.user_1_id, self.USER_1_EMAIL, self.user_1_role))
wipeout_service.save_pending_deletion_requests(
[pending_deletion_request])
pending_deletion_request_model = (
user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id))
self.assertEqual(pending_deletion_request_model.id, self.user_1_id)
self.assertEqual(
pending_deletion_request_model.email, self.USER_1_EMAIL)
self.assertEqual(
pending_deletion_request_model.deletion_complete, False)
self.assertEqual(
pending_deletion_request_model.pseudonymizable_entity_mappings, {})
def test_saves_pending_deletion_request_when_already_existing(self):
pending_deletion_request_model_old = (
user_models.PendingDeletionRequestModel(
id=self.user_1_id,
email=self.USER_1_EMAIL,
role=self.user_1_role,
deletion_complete=False,
pseudonymizable_entity_mappings={}
)
)
pending_deletion_request_model_old.put()
pending_deletion_request = (
wipeout_domain.PendingDeletionRequest.create_default(
self.user_1_id, self.USER_1_EMAIL, self.user_1_role)
)
pending_deletion_request.deletion_complete = True
pending_deletion_request.pseudonymizable_entity_mappings = {
'story': {'story_id': 'user_id'}
}
wipeout_service.save_pending_deletion_requests(
[pending_deletion_request])
pending_deletion_request_model_new = (
user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id))
self.assertEqual(pending_deletion_request_model_new.id, self.user_1_id)
self.assertEqual(
pending_deletion_request_model_new.email, self.USER_1_EMAIL)
self.assertEqual(
pending_deletion_request_model_new.deletion_complete, True)
self.assertEqual(
pending_deletion_request_model_new.pseudonymizable_entity_mappings,
{'story': {'story_id': 'user_id'}})
self.assertEqual(
pending_deletion_request_model_old.created_on,
pending_deletion_request_model_new.created_on)
class WipeoutServicePreDeleteTests(test_utils.GenericTestBase):
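    """Provides testing of the pre-deletion part of the wipeout service."""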
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
USER_3_EMAIL = 'other@email.com'
USER_3_USERNAME = 'username3'
def setUp(self):
super(WipeoutServicePreDeleteTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.set_user_role(self.USER_1_USERNAME, feconf.ROLE_ID_TOPIC_MANAGER)
self.user_1_auth_id = self.get_auth_id_from_email(self.USER_1_EMAIL)
self.user_1_actions = user_services.UserActionsInfo(self.user_1_id)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
self.user_1_auth_id = self.get_auth_id_from_email(self.USER_1_EMAIL)
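        # Set up a profile user linked to user 1 for the profile-related
        # pre-deletion tests.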
user_data_dict = {
'schema_version': 1,
'display_alias': 'display_alias',
'pin': '12345',
'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE],
'preferred_site_language_code': None,
'preferred_audio_language_code': None,
'user_id': self.user_1_id,
}
new_user_data_dict = {
'schema_version': 1,
'display_alias': 'display_alias3',
'pin': '12345',
'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE],
'preferred_site_language_code': None,
'preferred_audio_language_code': None,
'user_id': None,
}
self.modifiable_user_data = (
user_domain.ModifiableUserData.from_raw_dict(user_data_dict))
self.modifiable_new_user_data = (
user_domain.ModifiableUserData.from_raw_dict(new_user_data_dict))
user_services.update_multiple_users_data(
[self.modifiable_user_data])
self.modifiable_user_data.display_alias = 'name'
self.modifiable_user_data.pin = '123'
self.profile_user_id = user_services.create_new_profiles(
self.user_1_auth_id, self.USER_1_EMAIL,
[self.modifiable_new_user_data]
)[0].user_id
def tearDown(self):
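        # Complete the wipeout for every pending deletion request so that
        # each test ends with all pre-deleted users fully removed.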
pending_deletion_request_models = (
user_models.PendingDeletionRequestModel.get_all())
for pending_deletion_request_model in pending_deletion_request_models:
pending_deletion_request = (
wipeout_service.get_pending_deletion_request(
pending_deletion_request_model.id))
self.assertEqual(
wipeout_service.run_user_deletion(pending_deletion_request),
wipeout_domain.USER_DELETION_SUCCESS)
self.assertEqual(
wipeout_service.run_user_deletion_completion(
pending_deletion_request),
wipeout_domain.USER_VERIFICATION_SUCCESS)
def test_pre_delete_user_email_subscriptions(self):
email_preferences = user_services.get_email_preferences(self.user_1_id)
self.assertEqual(
email_preferences.can_receive_email_updates,
feconf.DEFAULT_EMAIL_UPDATES_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_editor_role_email,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_feedback_message_email,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_subscription_email,
feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
email_preferences = user_services.get_email_preferences(self.user_1_id)
self.assertFalse(email_preferences.can_receive_email_updates)
self.assertFalse(email_preferences.can_receive_editor_role_email)
self.assertFalse(email_preferences.can_receive_feedback_message_email)
self.assertFalse(email_preferences.can_receive_subscription_email)
def test_pre_delete_profile_users_works_correctly(self):
user_settings = user_services.get_user_settings(self.profile_user_id)
self.assertFalse(user_settings.deleted)
wipeout_service.pre_delete_user(self.profile_user_id)
self.process_and_flush_pending_tasks()
user_settings = user_models.UserSettingsModel.get_by_id(
self.profile_user_id)
self.assertTrue(user_settings.deleted)
user_auth_details = (
auth_models.UserAuthDetailsModel.get_by_id(self.profile_user_id))
self.assertTrue(user_auth_details.deleted)
def test_pre_delete_user_for_full_user_also_deletes_all_profiles(self):
user_settings = user_services.get_user_settings(self.user_1_id)
self.assertFalse(user_settings.deleted)
profile_user_settings = user_services.get_user_settings(
self.profile_user_id)
self.assertFalse(profile_user_settings.deleted)
        profile_auth_details = (
            auth_models.UserAuthDetailsModel.get_by_id(self.profile_user_id))
self.assertFalse(profile_auth_details.deleted)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
user_settings = user_models.UserSettingsModel.get_by_id(self.user_1_id)
self.assertTrue(user_settings.deleted)
user_auth_details = (
auth_models.UserAuthDetailsModel.get_by_id(self.profile_user_id))
self.assertTrue(user_auth_details.deleted)
profile_user_settings = user_models.UserSettingsModel.get_by_id(
self.profile_user_id)
self.assertTrue(profile_user_settings.deleted)
profile_auth_details = (
auth_models.UserAuthDetailsModel.get_by_id(self.profile_user_id))
self.assertTrue(profile_auth_details.deleted)
def test_pre_delete_user_without_activities_works_correctly(self):
user_models.UserSubscriptionsModel(
id=self.user_1_id, exploration_ids=[], collection_ids=[]
).put()
user_settings = user_services.get_user_settings(self.user_1_id)
self.assertFalse(user_settings.deleted)
user_auth_details = auth_models.UserAuthDetailsModel.get(self.user_1_id)
self.assertFalse(user_auth_details.deleted)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
user_settings = user_models.UserSettingsModel.get_by_id(self.user_1_id)
self.assertTrue(user_settings.deleted)
self.assertIsNone(
auth_services.get_auth_id_from_user_id(self.user_1_id))
pending_deletion_model = (
user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id))
self.assertIsNotNone(pending_deletion_model)
def test_pre_delete_username_is_not_saved_for_user_younger_than_week(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
pending_deletion_request = (
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertIsNone(
pending_deletion_request.normalized_long_term_username)
def test_pre_delete_username_is_saved_for_user_older_than_week(self):
date_10_days_ago = (
datetime.datetime.utcnow() - datetime.timedelta(days=10))
with self.mock_datetime_utcnow(date_10_days_ago):
self.signup(self.USER_3_EMAIL, self.USER_3_USERNAME)
user_3_id = self.get_user_id_from_email(self.USER_3_EMAIL)
wipeout_service.pre_delete_user(user_3_id)
self.process_and_flush_pending_tasks()
pending_deletion_request = (
wipeout_service.get_pending_deletion_request(user_3_id))
self.assertEqual(
pending_deletion_request.normalized_long_term_username,
self.USER_3_USERNAME)
def test_pre_delete_user_with_activities_multiple_owners(self):
user_services.update_user_role(
self.user_1_id, feconf.ROLE_ID_COLLECTION_EDITOR)
self.save_new_valid_exploration('exp_id', self.user_1_id)
rights_manager.assign_role_for_exploration(
self.user_1_actions,
'exp_id',
self.user_2_id,
rights_domain.ROLE_OWNER)
self.save_new_valid_collection(
'col_id', self.user_1_id, exploration_id='exp_id')
rights_manager.assign_role_for_collection(
self.user_1_actions,
'col_id',
self.user_2_id,
rights_domain.ROLE_OWNER)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
pending_deletion_model = (
user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id))
self.assertIsNotNone(pending_deletion_model)
def test_pre_delete_user_collection_is_marked_deleted(self):
self.save_new_valid_collection('col_id', self.user_1_id)
collection_model = collection_models.CollectionModel.get_by_id('col_id')
self.assertFalse(collection_model.deleted)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
self.assertIsNone(collection_models.CollectionModel.get_by_id('col_id'))
def test_pre_delete_user_exploration_is_marked_deleted(self):
self.save_new_valid_exploration('exp_id', self.user_1_id)
exp_model = exp_models.ExplorationModel.get_by_id('exp_id')
self.assertFalse(exp_model.deleted)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
self.assertIsNone(exp_models.ExplorationModel.get_by_id('exp_id'))
def test_pre_delete_user_collection_ownership_is_released(self):
self.save_new_valid_collection('col_id', self.user_1_id)
self.publish_collection(self.user_1_id, 'col_id')
rights_manager.assign_role_for_collection(
user_services.get_system_user(),
'col_id',
self.user_2_id,
feconf.ROLE_EDITOR)
collection_summary_model = (
collection_models.CollectionSummaryModel.get_by_id('col_id'))
self.assertFalse(collection_summary_model.community_owned)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
collection_summary_model = (
collection_models.CollectionSummaryModel.get_by_id('col_id'))
self.assertTrue(collection_summary_model.community_owned)
def test_pre_delete_user_exploration_ownership_is_released(self):
self.save_new_valid_exploration('exp_id', self.user_1_id)
self.publish_exploration(self.user_1_id, 'exp_id')
rights_manager.assign_role_for_exploration(
user_services.get_system_user(),
'exp_id',
self.user_2_id,
feconf.ROLE_EDITOR)
exp_summary_model = exp_models.ExpSummaryModel.get_by_id('exp_id')
self.assertFalse(exp_summary_model.community_owned)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
exp_summary_model = exp_models.ExpSummaryModel.get_by_id('exp_id')
self.assertTrue(exp_summary_model.community_owned)
def test_pre_delete_user_collection_user_is_deassigned(self):
self.save_new_valid_collection('col_id', self.user_1_id)
rights_manager.assign_role_for_collection(
user_services.get_system_user(),
'col_id',
self.user_2_id,
feconf.ROLE_EDITOR)
collection_summary_model = (
collection_models.CollectionSummaryModel.get_by_id('col_id'))
self.assertEqual(collection_summary_model.editor_ids, [self.user_2_id])
wipeout_service.pre_delete_user(self.user_2_id)
self.process_and_flush_pending_tasks()
collection_summary_model = (
collection_models.CollectionSummaryModel.get_by_id('col_id'))
self.assertEqual(collection_summary_model.editor_ids, [])
def test_pre_delete_user_exploration_user_is_deassigned(self):
self.save_new_valid_exploration('exp_id', self.user_1_id)
rights_manager.assign_role_for_exploration(
user_services.get_system_user(),
'exp_id',
self.user_2_id,
feconf.ROLE_EDITOR)
exp_summary_model = exp_models.ExpSummaryModel.get_by_id('exp_id')
self.assertEqual(exp_summary_model.editor_ids, [self.user_2_id])
wipeout_service.pre_delete_user(self.user_2_id)
self.process_and_flush_pending_tasks()
exp_summary_model = exp_models.ExpSummaryModel.get_by_id('exp_id')
self.assertEqual(exp_summary_model.editor_ids, [])
def test_pre_delete_user_user_is_deassigned_from_topics(self):
self.save_new_topic('top_id', self.user_1_id)
topic_services.assign_role(
user_services.get_system_user(),
self.user_1_actions,
feconf.ROLE_MANAGER,
'top_id')
top_rights_model = topic_models.TopicRightsModel.get_by_id('top_id')
self.assertEqual(top_rights_model.manager_ids, [self.user_1_id])
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
top_rights_model = topic_models.TopicRightsModel.get_by_id('top_id')
self.assertEqual(top_rights_model.manager_ids, [])
class WipeoutServiceRunFunctionsTests(test_utils.GenericTestBase):
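# Tests for run_user_deletion and run_user_deletion_completion, covering
# users that are not yet deleted, already deleted, and wrongly deleted.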
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
def setUp(self):
super(WipeoutServiceRunFunctionsTests, self).setUp()
date_10_days_ago = (
datetime.datetime.utcnow() - datetime.timedelta(days=10))
with self.mock_datetime_utcnow(date_10_days_ago):
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.set_user_role(self.USER_1_USERNAME, feconf.ROLE_ID_TOPIC_MANAGER)
self.user_1_actions = user_services.UserActionsInfo(self.user_1_id)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
self.pending_deletion_request = (
wipeout_service.get_pending_deletion_request(self.user_1_id))
def test_run_user_deletion_with_user_not_deleted(self):
self.assertEqual(
wipeout_service.run_user_deletion(self.pending_deletion_request),
wipeout_domain.USER_DELETION_SUCCESS
)
def test_run_user_deletion_with_user_already_deleted(self):
wipeout_service.run_user_deletion(self.pending_deletion_request)
self.assertEqual(
wipeout_service.run_user_deletion(self.pending_deletion_request),
wipeout_domain.USER_DELETION_ALREADY_DONE
)
def test_run_user_deletion_completion_with_user_not_yet_deleted(self):
self.assertEqual(
wipeout_service.run_user_deletion_completion(
self.pending_deletion_request),
wipeout_domain.USER_VERIFICATION_NOT_DELETED)
self.assertIsNotNone(
user_models.UserSettingsModel.get_by_id(self.user_1_id))
self.assertIsNotNone(
user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id))
def test_run_user_deletion_completion_with_user_properly_deleted(self):
wipeout_service.run_user_deletion(self.pending_deletion_request)
self.assertEqual(
wipeout_service.run_user_deletion_completion(
self.pending_deletion_request),
wipeout_domain.USER_VERIFICATION_SUCCESS
)
self.assertIsNotNone(
user_models.DeletedUserModel.get_by_id(self.user_1_id))
self.assertTrue(user_services.is_username_taken(self.USER_1_USERNAME))
self.assertIsNone(
user_models.UserSettingsModel.get_by_id(self.user_1_id))
self.assertIsNone(
user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id))
self.assertIsNone(
auth_services.get_auth_id_from_user_id(self.user_1_id))
self.assertTrue(
auth_services.verify_external_auth_associations_are_deleted(
self.user_1_id))
def test_run_user_deletion_completion_with_user_wrongly_deleted(self):
wipeout_service.run_user_deletion(self.pending_deletion_request)
user_models.CompletedActivitiesModel(
id=self.user_1_id, exploration_ids=[], collection_ids=[]
).put()
email_content = (
'The Wipeout process failed for the user with ID \'%s\' '
'and email \'%s\'.' % (self.user_1_id, self.USER_1_EMAIL)
)
send_email_swap = self.swap_with_checks(
email_manager,
'send_mail_to_admin',
lambda x, y: None,
expected_args=[('WIPEOUT: Account deletion failed', email_content)]
)
with send_email_swap:
self.assertEqual(
wipeout_service.run_user_deletion_completion(
self.pending_deletion_request),
wipeout_domain.USER_VERIFICATION_FAILURE)
self.assertIsNotNone(
user_models.UserSettingsModel.get_by_id(self.user_1_id))
self.assertIsNotNone(
auth_models.UserAuthDetailsModel.get_by_id(self.user_1_id))
self.assertIsNotNone(
user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id))
class WipeoutServiceDeleteConfigModelsTests(test_utils.GenericTestBase):
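# Tests that deleting a user pseudonymizes the config property snapshot
# metadata models that the user committed to.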
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
CONFIG_1_ID = 'config_1_id'
CONFIG_2_ID = 'config_2_id'
def setUp(self):
super(WipeoutServiceDeleteConfigModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
config_models.ConfigPropertyModel(
id=self.CONFIG_1_ID, value='a'
).commit(self.user_1_id, [{'cmd': 'command'}])
wipeout_service.pre_delete_user(self.user_1_id)
wipeout_service.pre_delete_user(self.user_2_id)
self.process_and_flush_pending_tasks()
def test_one_config_property_is_pseudonymized(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
config_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.config]
)
metadata_model = (
config_models.ConfigPropertySnapshotMetadataModel.get_by_id(
'%s-1' % self.CONFIG_1_ID)
)
self.assertEqual(
metadata_model.committer_id, config_mappings[self.CONFIG_1_ID])
def test_one_config_property_when_the_deletion_is_repeated_is_pseudonymized(
self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
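# Restore the original user id on the snapshot metadata so that a repeated
# deletion has to pseudonymize it again with the same mapping.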
metadata_model = (
config_models.ConfigPropertySnapshotMetadataModel.get_by_id(
'%s-1' % self.CONFIG_1_ID)
)
metadata_model.committer_id = self.user_1_id
metadata_model.put_for_human()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
config_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.config]
)
self.assertEqual(
metadata_model.committer_id, config_mappings[self.CONFIG_1_ID])
def test_multiple_config_properties_are_pseudonymized(self):
config_models.ConfigPropertyModel(
id=self.CONFIG_2_ID, value='b'
).commit(self.user_1_id, [{'cmd': 'command'}])
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
config_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.config]
)
metadata_model_1 = (
config_models.ConfigPropertySnapshotMetadataModel.get_by_id(
'%s-1' % self.CONFIG_1_ID)
)
self.assertEqual(
metadata_model_1.committer_id, config_mappings[self.CONFIG_1_ID])
metadata_model_2 = (
config_models.ConfigPropertySnapshotMetadataModel.get_by_id(
'%s-1' % self.CONFIG_2_ID)
)
self.assertEqual(
metadata_model_2.committer_id, config_mappings[self.CONFIG_2_ID])
def test_multiple_config_properties_with_multiple_users_are_pseudonymized(
self):
config_models.ConfigPropertyModel(
id=self.CONFIG_2_ID, value='b'
).commit(self.user_2_id, [{'cmd': 'command'}])
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
config_mappings_1 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.config]
)
metadata_model_1 = (
config_models.ConfigPropertySnapshotMetadataModel.get_by_id(
'%s-1' % self.CONFIG_1_ID)
)
self.assertEqual(
metadata_model_1.committer_id, config_mappings_1[self.CONFIG_1_ID])
metadata_model_2 = (
config_models.ConfigPropertySnapshotMetadataModel.get_by_id(
'%s-1' % self.CONFIG_2_ID)
)
self.assertEqual(
metadata_model_2.committer_id, self.user_2_id)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
config_mappings_2 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_2_id
).pseudonymizable_entity_mappings[models.NAMES.config]
)
metadata_model_3 = (
config_models.ConfigPropertySnapshotMetadataModel.get_by_id(
'%s-1' % self.CONFIG_2_ID)
)
self.assertEqual(
metadata_model_3.committer_id, config_mappings_2[self.CONFIG_2_ID])
def test_one_config_property_with_multiple_users_is_pseudonymized(self):
config_models.ConfigPropertyModel.get_by_id(
self.CONFIG_1_ID
).commit(self.user_2_id, [{'cmd': 'command'}])
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
config_mappings_1 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.config]
)
metadata_model_1 = (
config_models.ConfigPropertySnapshotMetadataModel.get_by_id(
'%s-1' % self.CONFIG_1_ID)
)
self.assertEqual(
metadata_model_1.committer_id, config_mappings_1[self.CONFIG_1_ID])
metadata_model_2 = (
config_models.ConfigPropertySnapshotMetadataModel.get_by_id(
'%s-2' % self.CONFIG_1_ID)
)
self.assertEqual(metadata_model_2.committer_id, self.user_2_id)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
config_mappings_2 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_2_id
).pseudonymizable_entity_mappings[models.NAMES.config]
)
metadata_model_3 = (
config_models.ConfigPropertySnapshotMetadataModel.get_by_id(
'%s-2' % self.CONFIG_1_ID)
)
self.assertEqual(
metadata_model_3.committer_id, config_mappings_2[self.CONFIG_1_ID])
class WipeoutServiceVerifyDeleteConfigModelsTests(test_utils.GenericTestBase):
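# Tests that verify_user_deleted reports correctly for config property
# models.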
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
CONFIG_1_ID = 'config_1_id'
CONFIG_2_ID = 'config_2_id'
def setUp(self):
super(WipeoutServiceVerifyDeleteConfigModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
config_model = config_models.ConfigPropertyModel(
id=self.CONFIG_1_ID, value='a'
)
config_model.commit(self.user_1_id, [{'cmd': 'command'}])
config_model.commit(self.user_1_id, [{'cmd': 'command_2'}])
config_models.ConfigPropertyModel(
id=self.CONFIG_2_ID, value='a'
).commit(self.user_1_id, [{'cmd': 'command'}])
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
def test_verify_user_delete_when_user_is_deleted_returns_true(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
def test_verify_user_delete_when_user_is_not_deleted_returns_false(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
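# Committing with the original user id again should make verification fail
# until the user is deleted once more.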
config_models.ConfigPropertyModel(
id=self.CONFIG_2_ID, value='a'
).commit(self.user_1_id, [{'cmd': 'command'}])
self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
class WipeoutServiceDeleteCollectionModelsTests(test_utils.GenericTestBase):
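# Tests that deleting a user pseudonymizes collection snapshot, rights and
# commit log models and removes the user from contributor lists.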
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
COL_1_ID = 'col_1_id'
COL_2_ID = 'col_2_id'
def setUp(self):
super(WipeoutServiceDeleteCollectionModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
self.save_new_valid_collection(self.COL_1_ID, self.user_1_id)
self.publish_collection(self.user_1_id, self.COL_1_ID)
rights_manager.assign_role_for_collection(
user_services.UserActionsInfo(self.user_1_id),
self.COL_1_ID,
self.user_2_id,
feconf.ROLE_OWNER)
def test_one_collection_snapshot_metadata_is_pseudonymized(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
collection_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.collection]
)
metadata_model = (
collection_models.CollectionSnapshotMetadataModel.get_by_id(
'%s-1' % self.COL_1_ID)
)
self.assertEqual(
metadata_model.committer_id,
collection_mappings[self.COL_1_ID])
rights_metadata_model_1 = (
collection_models.CollectionRightsSnapshotMetadataModel.get_by_id(
'%s-1' % self.COL_1_ID)
)
self.assertEqual(
rights_metadata_model_1.committer_id,
collection_mappings[self.COL_1_ID])
self.assertEqual(
rights_metadata_model_1.content_user_ids,
[collection_mappings[self.COL_1_ID]])
self.assertEqual(rights_metadata_model_1.commit_cmds_user_ids, [])
rights_metadata_model_2 = (
collection_models.CollectionRightsSnapshotMetadataModel.get_by_id(
'%s-2' % self.COL_1_ID)
)
self.assertEqual(
rights_metadata_model_2.committer_id,
collection_mappings[self.COL_1_ID])
self.assertEqual(
rights_metadata_model_2.content_user_ids,
[collection_mappings[self.COL_1_ID]])
self.assertEqual(rights_metadata_model_2.commit_cmds_user_ids, [])
def test_one_collection_snapshot_content_is_pseudonymized(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
collection_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.collection]
)
rights_content_model_1 = (
collection_models.CollectionRightsSnapshotContentModel.get_by_id(
'%s-1' % self.COL_1_ID)
)
self.assertEqual(
rights_content_model_1.content['owner_ids'],
[collection_mappings[self.COL_1_ID]])
rights_content_model_2 = (
collection_models.CollectionRightsSnapshotContentModel.get_by_id(
'%s-3' % self.COL_1_ID)
)
self.assertItemsEqual(
rights_content_model_2.content['owner_ids'],
[
collection_mappings[self.COL_1_ID],
self.user_2_id
])
def test_one_collection_commit_log_is_pseudonymized(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
collection_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.collection]
)
commit_log_model_1 = (
collection_models.CollectionCommitLogEntryModel.get_by_id(
'rights-%s-2' % self.COL_1_ID)
)
self.assertEqual(
commit_log_model_1.user_id,
collection_mappings[self.COL_1_ID])
commit_log_model_2 = (
collection_models.CollectionCommitLogEntryModel.get_by_id(
'rights-%s-3' % self.COL_1_ID)
)
self.assertEqual(
commit_log_model_2.user_id,
collection_mappings[self.COL_1_ID])
def test_one_collection_with_missing_snapshot_is_pseudonymized(self):
collection_models.CollectionCommitLogEntryModel(
id='collection-%s-1' % self.COL_2_ID,
collection_id=self.COL_2_ID,
user_id=self.user_1_id,
commit_type='create_new',
commit_cmds=[{}],
post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
version=1
).put_for_human()
with self.capture_logging(min_level=logging.ERROR) as log_messages:
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertItemsEqual(
log_messages,
[
'[WIPEOUT] The commit log model '
'\'CollectionCommitLogEntryModel\' and '
'snapshot models [\'CollectionSnapshotMetadataModel\', '
'\'CollectionRightsSnapshotMetadataModel\'] IDs differ. '
'Snapshots without commit logs: [], '
'commit logs without snapshots: [u\'%s\'].' % self.COL_2_ID,
'[WIPEOUT] The commit log model '
'\'ExplorationCommitLogEntryModel\' and '
'snapshot models [\'ExplorationSnapshotMetadataModel\', '
'\'ExplorationRightsSnapshotMetadataModel\'] IDs differ. '
'Snapshots without commit logs: [], '
'commit logs without snapshots: [u\'an_exploration_id\'].'
]
)
collection_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.collection]
)
metadata_model = (
collection_models.CollectionSnapshotMetadataModel.get_by_id(
'%s-1' % self.COL_1_ID
)
)
self.assertEqual(
metadata_model.committer_id,
collection_mappings[self.COL_1_ID])
commit_log_model_1 = (
collection_models.CollectionCommitLogEntryModel.get_by_id(
'collection-%s-1' % self.COL_1_ID
)
)
self.assertEqual(
commit_log_model_1.user_id,
collection_mappings[self.COL_1_ID])
commit_log_model_2 = (
collection_models.CollectionCommitLogEntryModel.get_by_id(
'collection-%s-1' % self.COL_2_ID
)
)
self.assertEqual(
commit_log_model_2.user_id,
collection_mappings[self.COL_2_ID])
def test_one_collection_when_the_deletion_is_repeated_is_pseudonymized(
self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
metadata_model = (
collection_models.CollectionSnapshotMetadataModel.get_by_id(
'%s-1' % self.COL_1_ID
)
)
metadata_model.committer_id = self.user_1_id
metadata_model.put_for_human()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
collection_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.collection]
)
metadata_model = (
collection_models.CollectionSnapshotMetadataModel.get_by_id(
'%s-1' % self.COL_1_ID
)
)
self.assertEqual(
metadata_model.committer_id,
collection_mappings[self.COL_1_ID])
commit_log_model = (
collection_models.CollectionCommitLogEntryModel.get_by_id(
'collection-%s-1' % self.COL_1_ID)
)
self.assertEqual(
commit_log_model.user_id,
collection_mappings[self.COL_1_ID])
def test_collection_user_is_removed_from_contributors(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
old_summary_model = (
collection_models.CollectionSummaryModel.get_by_id(self.COL_1_ID))
self.assertNotIn(self.user_1_id, old_summary_model.contributor_ids)
self.assertNotIn(self.user_1_id, old_summary_model.contributors_summary)
old_summary_model.contributor_ids = [self.user_1_id]
old_summary_model.contributors_summary = {self.user_1_id: 2}
old_summary_model.put()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
new_summary_model = (
collection_models.CollectionSummaryModel.get_by_id(self.COL_1_ID))
self.assertNotIn(self.user_1_id, new_summary_model.contributor_ids)
self.assertNotIn(self.user_1_id, new_summary_model.contributors_summary)
def test_col_user_is_removed_from_contributor_ids_when_missing_from_summary(
self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
old_summary_model = (
collection_models.CollectionSummaryModel.get_by_id(self.COL_1_ID))
self.assertNotIn(self.user_1_id, old_summary_model.contributor_ids)
self.assertNotIn(self.user_1_id, old_summary_model.contributors_summary)
old_summary_model.contributor_ids = [self.user_1_id]
old_summary_model.contributors_summary = {}
old_summary_model.put()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
new_summary_model = (
collection_models.CollectionSummaryModel.get_by_id(self.COL_1_ID))
self.assertNotIn(self.user_1_id, new_summary_model.contributor_ids)
self.assertNotIn(self.user_1_id, new_summary_model.contributors_summary)
def test_delete_col_where_user_has_role_when_rights_model_marked_as_deleted(
self):
self.save_new_valid_collection(self.COL_2_ID, self.user_1_id)
collection_services.delete_collection(self.user_1_id, self.COL_2_ID)
collection_rights_model = (
collection_models.CollectionRightsModel.get_by_id(self.COL_2_ID))
self.assertTrue(collection_rights_model.deleted)
collection_model = (
collection_models.CollectionModel.get_by_id(self.COL_2_ID))
self.assertTrue(collection_model.deleted)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertIsNone(
collection_models.CollectionRightsModel.get_by_id(self.COL_2_ID))
self.assertIsNone(
collection_models.CollectionModel.get_by_id(self.COL_2_ID))
def test_multiple_collections_are_pseudonymized(self):
self.save_new_valid_collection(self.COL_2_ID, self.user_1_id)
self.publish_collection(self.user_1_id, self.COL_2_ID)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
collection_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.collection]
)
metadata_model = (
collection_models.CollectionSnapshotMetadataModel.get_by_id(
'%s-1' % self.COL_1_ID
)
)
self.assertEqual(
metadata_model.committer_id,
collection_mappings[self.COL_1_ID])
commit_log_model = (
collection_models.CollectionCommitLogEntryModel.get_by_id(
'collection-%s-1' % self.COL_1_ID
)
)
self.assertEqual(
commit_log_model.user_id,
collection_mappings[self.COL_1_ID])
metadata_model = (
collection_models.CollectionSnapshotMetadataModel.get_by_id(
'%s-1' % self.COL_2_ID
)
)
self.assertEqual(
metadata_model.committer_id,
collection_mappings[self.COL_2_ID])
commit_log_model = (
collection_models.CollectionCommitLogEntryModel.get_by_id(
'collection-%s-1' % self.COL_2_ID
)
)
self.assertEqual(
commit_log_model.user_id,
collection_mappings[self.COL_2_ID])
class WipeoutServiceVerifyDeleteCollectionModelsTests(
test_utils.GenericTestBase):
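# Tests that verify_user_deleted reports correctly for collection models.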
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
COL_1_ID = 'col_1_id'
COL_2_ID = 'col_2_id'
def setUp(self):
super(WipeoutServiceVerifyDeleteCollectionModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.save_new_valid_collection(self.COL_1_ID, self.user_1_id)
self.save_new_valid_collection(self.COL_2_ID, self.user_1_id)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
def test_verify_user_delete_when_user_is_deleted_returns_true(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
def test_verify_user_delete_when_user_is_not_deleted_returns_false(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
collection_models.CollectionSnapshotMetadataModel(
id='%s-1' % self.COL_1_ID,
committer_id=self.user_1_id,
commit_message='123',
commit_type='create',
commit_cmds={}
).put_for_human()
self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
class WipeoutServiceDeleteExplorationModelsTests(test_utils.GenericTestBase):
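# Tests that deleting a user pseudonymizes exploration snapshot, rights and
# commit log models and removes the user from contributor lists.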
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
EXP_1_ID = 'exp_1_id'
EXP_2_ID = 'exp_2_id'
def setUp(self):
super(WipeoutServiceDeleteExplorationModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
self.save_new_valid_exploration(self.EXP_1_ID, self.user_1_id)
self.publish_exploration(self.user_1_id, self.EXP_1_ID)
rights_manager.assign_role_for_exploration(
user_services.UserActionsInfo(self.user_1_id),
self.EXP_1_ID,
self.user_2_id,
feconf.ROLE_OWNER)
def test_one_exploration_snapshot_metadata_is_pseudonymized(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
exploration_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.exploration]
)
metadata_model = (
exp_models.ExplorationSnapshotMetadataModel.get_by_id(
'%s-1' % self.EXP_1_ID)
)
self.assertEqual(
metadata_model.committer_id,
exploration_mappings[self.EXP_1_ID])
rights_metadata_model_1 = (
exp_models.ExplorationRightsSnapshotMetadataModel.get_by_id(
'%s-1' % self.EXP_1_ID)
)
self.assertEqual(
rights_metadata_model_1.committer_id,
exploration_mappings[self.EXP_1_ID])
self.assertEqual(
rights_metadata_model_1.content_user_ids,
[exploration_mappings[self.EXP_1_ID]])
self.assertEqual(rights_metadata_model_1.commit_cmds_user_ids, [])
rights_metadata_model_2 = (
exp_models.ExplorationRightsSnapshotMetadataModel.get_by_id(
'%s-2' % self.EXP_1_ID)
)
self.assertEqual(
rights_metadata_model_2.committer_id,
exploration_mappings[self.EXP_1_ID])
self.assertEqual(
rights_metadata_model_2.content_user_ids,
[exploration_mappings[self.EXP_1_ID]])
self.assertEqual(rights_metadata_model_2.commit_cmds_user_ids, [])
def test_one_exploration_snapshot_content_is_pseudonymized(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
exploration_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.exploration]
)
rights_content_model_1 = (
exp_models.ExplorationRightsSnapshotContentModel.get_by_id(
'%s-1' % self.EXP_1_ID)
)
self.assertEqual(
rights_content_model_1.content['owner_ids'],
[exploration_mappings[self.EXP_1_ID]])
rights_content_model_2 = (
exp_models.ExplorationRightsSnapshotContentModel.get_by_id(
'%s-3' % self.EXP_1_ID)
)
self.assertItemsEqual(
rights_content_model_2.content['owner_ids'],
[
exploration_mappings[self.EXP_1_ID],
self.user_2_id
])
def test_one_exploration_commit_log_is_pseudonymized(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
exploration_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.exploration]
)
commit_log_model_1 = (
exp_models.ExplorationCommitLogEntryModel.get_by_id(
'rights-%s-2' % self.EXP_1_ID)
)
self.assertEqual(
commit_log_model_1.user_id, exploration_mappings[self.EXP_1_ID])
commit_log_model_2 = (
exp_models.ExplorationCommitLogEntryModel.get_by_id(
'rights-%s-3' % self.EXP_1_ID)
)
self.assertEqual(
commit_log_model_2.user_id, exploration_mappings[self.EXP_1_ID])
def test_one_exploration_with_missing_snapshot_is_pseudonymized(self):
exp_models.ExplorationCommitLogEntryModel(
id='exploration-%s-1' % self.EXP_2_ID,
exploration_id=self.EXP_2_ID,
user_id=self.user_1_id,
commit_type='create_new',
commit_cmds=[{}],
post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
version=1
).put_for_human()
with self.capture_logging(min_level=logging.ERROR) as log_messages:
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertItemsEqual(
log_messages,
[
'[WIPEOUT] The commit log model '
'\'ExplorationCommitLogEntryModel\' and '
'snapshot models [\'ExplorationSnapshotMetadataModel\', '
'\'ExplorationRightsSnapshotMetadataModel\'] IDs differ. '
'Snapshots without commit logs: [], '
'commit logs without snapshots: [u\'%s\'].' % self.EXP_2_ID
]
)
exploration_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.exploration]
)
metadata_model = (
exp_models.ExplorationSnapshotMetadataModel.get_by_id(
'%s-1' % self.EXP_1_ID
)
)
self.assertEqual(
metadata_model.committer_id, exploration_mappings[self.EXP_1_ID])
commit_log_model_1 = (
exp_models.ExplorationCommitLogEntryModel.get_by_id(
'exploration-%s-1' % self.EXP_1_ID
)
)
self.assertEqual(
commit_log_model_1.user_id, exploration_mappings[self.EXP_1_ID])
commit_log_model_2 = (
exp_models.ExplorationCommitLogEntryModel.get_by_id(
'exploration-%s-1' % self.EXP_2_ID
)
)
self.assertEqual(
commit_log_model_2.user_id, exploration_mappings[self.EXP_2_ID])
def test_one_exploration_when_the_deletion_is_repeated_is_pseudonymized(
self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
metadata_model = (
exp_models.ExplorationSnapshotMetadataModel.get_by_id(
'%s-1' % self.EXP_1_ID
)
)
metadata_model.committer_id = self.user_1_id
metadata_model.put_for_human()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
exploration_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.exploration]
)
metadata_model = (
exp_models.ExplorationSnapshotMetadataModel.get_by_id(
'%s-1' % self.EXP_1_ID
)
)
self.assertEqual(
metadata_model.committer_id, exploration_mappings[self.EXP_1_ID])
commit_log_model = (
exp_models.ExplorationCommitLogEntryModel.get_by_id(
'exploration-%s-1' % self.EXP_1_ID)
)
self.assertEqual(
commit_log_model.user_id, exploration_mappings[self.EXP_1_ID])
def test_exploration_user_is_removed_from_contributors(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
old_summary_model = exp_models.ExpSummaryModel.get_by_id(self.EXP_1_ID)
self.assertNotIn(self.user_1_id, old_summary_model.contributor_ids)
self.assertNotIn(self.user_1_id, old_summary_model.contributors_summary)
old_summary_model.contributor_ids = [self.user_1_id]
old_summary_model.contributors_summary = {self.user_1_id: 2}
old_summary_model.put()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
new_summary_model = exp_models.ExpSummaryModel.get_by_id(self.EXP_1_ID)
self.assertNotIn(self.user_1_id, new_summary_model.contributor_ids)
self.assertNotIn(self.user_1_id, new_summary_model.contributors_summary)
def test_exp_user_is_removed_from_contributor_ids_when_missing_from_summary(
self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
old_summary_model = exp_models.ExpSummaryModel.get_by_id(self.EXP_1_ID)
self.assertNotIn(self.user_1_id, old_summary_model.contributor_ids)
self.assertNotIn(self.user_1_id, old_summary_model.contributors_summary)
old_summary_model.contributor_ids = [self.user_1_id]
old_summary_model.contributors_summary = {}
old_summary_model.put()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
new_summary_model = exp_models.ExpSummaryModel.get_by_id(self.EXP_1_ID)
self.assertNotIn(self.user_1_id, new_summary_model.contributor_ids)
self.assertNotIn(self.user_1_id, new_summary_model.contributors_summary)
def test_delete_exp_where_user_has_role_when_rights_model_marked_as_deleted(
self):
self.save_new_valid_exploration(self.EXP_2_ID, self.user_1_id)
exp_services.delete_exploration(self.user_1_id, self.EXP_2_ID)
exp_rights_model = (
exp_models.ExplorationRightsModel.get_by_id(self.EXP_2_ID))
self.assertTrue(exp_rights_model.deleted)
exp_model = (
exp_models.ExplorationModel.get_by_id(self.EXP_2_ID))
self.assertTrue(exp_model.deleted)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertIsNone(
exp_models.ExplorationRightsModel.get_by_id(self.EXP_2_ID))
self.assertIsNone(
exp_models.ExplorationModel.get_by_id(self.EXP_2_ID))
def test_multiple_explorations_are_pseudonymized(self):
self.save_new_valid_exploration(self.EXP_2_ID, self.user_1_id)
self.publish_exploration(self.user_1_id, self.EXP_2_ID)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
exploration_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.exploration]
)
metadata_model = (
exp_models.ExplorationSnapshotMetadataModel.get_by_id(
'%s-1' % self.EXP_1_ID
)
)
self.assertEqual(
metadata_model.committer_id, exploration_mappings[self.EXP_1_ID])
commit_log_model = (
exp_models.ExplorationCommitLogEntryModel.get_by_id(
'exploration-%s-1' % self.EXP_1_ID
)
)
self.assertEqual(
commit_log_model.user_id, exploration_mappings[self.EXP_1_ID])
metadata_model = (
exp_models.ExplorationSnapshotMetadataModel.get_by_id(
'%s-1' % self.EXP_2_ID
)
)
self.assertEqual(
metadata_model.committer_id, exploration_mappings[self.EXP_2_ID])
commit_log_model = (
exp_models.ExplorationCommitLogEntryModel.get_by_id(
'exploration-%s-1' % self.EXP_2_ID
)
)
self.assertEqual(
commit_log_model.user_id, exploration_mappings[self.EXP_2_ID])
class WipeoutServiceVerifyDeleteExplorationModelsTests(
test_utils.GenericTestBase):
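# Tests that verify_user_deleted reports correctly for exploration models.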
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
EXP_1_ID = 'exp_1_id'
EXP_2_ID = 'exp_2_id'
def setUp(self):
super(WipeoutServiceVerifyDeleteExplorationModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.save_new_valid_exploration(self.EXP_1_ID, self.user_1_id)
self.save_new_valid_exploration(self.EXP_2_ID, self.user_1_id)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
def test_verify_user_delete_when_user_is_deleted_returns_true(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
def test_verify_user_delete_when_user_is_not_deleted_returns_false(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
exp_models.ExplorationSnapshotMetadataModel(
id='%s-1' % self.EXP_1_ID,
committer_id=self.user_1_id,
commit_message='123',
commit_type='create',
commit_cmds={}
).put_for_human()
self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
class WipeoutServiceDeleteEmailModelsTests(test_utils.GenericTestBase):
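# Tests that deleting a user deletes their feedback email reply-to-id models.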
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
THREAD_1_ID = 'thread_1_id'
THREAD_2_ID = 'thread_2_id'
REPLY_1_ID = 'reply_1_id'
REPLY_2_ID = 'reply_2_id'
def setUp(self):
super(WipeoutServiceDeleteEmailModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
email_models.GeneralFeedbackEmailReplyToIdModel(
id='%s.%s' % (self.user_1_id, self.THREAD_1_ID),
user_id=self.user_1_id,
thread_id=self.THREAD_1_ID,
reply_to_id=self.REPLY_1_ID
).put()
wipeout_service.pre_delete_user(self.user_1_id)
wipeout_service.pre_delete_user(self.user_2_id)
self.process_and_flush_pending_tasks()
def test_one_email_is_deleted(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertIsNone(
email_models.GeneralFeedbackEmailReplyToIdModel.get_by_id(
'%s.%s' % (self.user_1_id, self.THREAD_1_ID)))
def test_multiple_emails_are_deleted(self):
email_models.GeneralFeedbackEmailReplyToIdModel(
id='%s.%s' % (self.user_1_id, self.THREAD_2_ID),
user_id=self.user_1_id,
thread_id=self.THREAD_2_ID,
reply_to_id=self.REPLY_2_ID
).put()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertIsNone(
email_models.GeneralFeedbackEmailReplyToIdModel.get_by_id(
'%s.%s' % (self.user_1_id, self.THREAD_1_ID)))
self.assertIsNone(
email_models.GeneralFeedbackEmailReplyToIdModel.get_by_id(
'%s.%s' % (self.user_1_id, self.THREAD_2_ID)))
def test_multiple_emails_from_multiple_users_are_deleted(self):
email_models.GeneralFeedbackEmailReplyToIdModel(
id='%s.%s' % (self.user_2_id, self.THREAD_2_ID),
user_id=self.user_2_id,
thread_id=self.THREAD_2_ID,
reply_to_id=self.REPLY_2_ID
).put()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertIsNone(
email_models.GeneralFeedbackEmailReplyToIdModel.get_by_id(
'%s.%s' % (self.user_1_id, self.THREAD_1_ID)))
self.assertIsNotNone(
email_models.GeneralFeedbackEmailReplyToIdModel.get_by_id(
'%s.%s' % (self.user_2_id, self.THREAD_2_ID)))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
self.assertIsNone(
email_models.GeneralFeedbackEmailReplyToIdModel.get_by_id(
'%s.%s' % (self.user_2_id, self.THREAD_2_ID)))
class WipeoutServiceVerifyDeleteEmailModelsTests(test_utils.GenericTestBase):
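# Tests that verify_user_deleted reports correctly for email models.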
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
THREAD_1_ID = 'thread_1_id'
THREAD_2_ID = 'thread_2_id'
REPLY_1_ID = 'reply_1_id'
REPLY_2_ID = 'reply_2_id'
def setUp(self):
super(WipeoutServiceVerifyDeleteEmailModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
email_models.GeneralFeedbackEmailReplyToIdModel(
id='%s.%s' % (self.user_1_id, self.THREAD_1_ID),
user_id=self.user_1_id,
thread_id=self.THREAD_1_ID,
reply_to_id=self.REPLY_1_ID
).put()
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
def test_verify_user_delete_when_user_is_deleted_returns_true(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
def test_verify_user_delete_when_user_is_not_deleted_returns_false(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
email_models.GeneralFeedbackEmailReplyToIdModel(
id='%s.%s' % (self.user_1_id, self.THREAD_1_ID),
user_id=self.user_1_id,
thread_id=self.THREAD_1_ID,
reply_to_id=self.REPLY_1_ID
).put()
self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
class WipeoutServiceDeleteFeedbackModelsTests(test_utils.GenericTestBase):
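# Tests that deleting a user pseudonymizes feedback thread, feedback message
# and suggestion models.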
FEEDBACK_1_ID = 'feedback_1_id'
FEEDBACK_2_ID = 'feedback_2_id'
MESSAGE_1_ID = 'message_1_id'
MESSAGE_2_ID = 'message_2_id'
EXP_1_ID = 'exp_1_id'
EXP_2_ID = 'exp_2_id'
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
NUMBER_OF_MODELS = 150
def setUp(self):
super(WipeoutServiceDeleteFeedbackModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
feedback_models.GeneralFeedbackThreadModel(
id=self.FEEDBACK_1_ID,
entity_type=feconf.ENTITY_TYPE_EXPLORATION,
entity_id=self.EXP_1_ID,
original_author_id=self.user_1_id,
subject='Wrong state name',
has_suggestion=True,
last_nonempty_message_text='Some text',
last_nonempty_message_author_id=self.user_2_id
).put_for_human()
feedback_models.GeneralFeedbackMessageModel(
id=self.MESSAGE_1_ID,
thread_id=self.FEEDBACK_1_ID,
message_id=0,
author_id=self.user_2_id,
text='Some text'
).put_for_human()
suggestion_models.GeneralSuggestionModel(
id=self.FEEDBACK_1_ID,
suggestion_type=(
feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT),
target_type=feconf.ENTITY_TYPE_EXPLORATION,
target_id=self.EXP_1_ID,
target_version_at_submission=1,
status=suggestion_models.STATUS_IN_REVIEW,
author_id=self.user_1_id,
final_reviewer_id=self.user_2_id,
change_cmd={},
score_category=suggestion_models.SCORE_TYPE_CONTENT
).put_for_human()
wipeout_service.pre_delete_user(self.user_1_id)
wipeout_service.pre_delete_user(self.user_2_id)
self.process_and_flush_pending_tasks()
def test_one_feedback_is_pseudonymized(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
feedback_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.feedback]
)
feedback_thread_model = (
feedback_models.GeneralFeedbackThreadModel.get_by_id(
self.FEEDBACK_1_ID)
)
self.assertEqual(
feedback_thread_model.original_author_id,
feedback_mappings[self.FEEDBACK_1_ID]
)
suggestion_model = (
suggestion_models.GeneralSuggestionModel.get_by_id(
self.FEEDBACK_1_ID)
)
self.assertEqual(
suggestion_model.author_id,
feedback_mappings[self.FEEDBACK_1_ID]
)
def test_one_feedback_when_the_deletion_is_repeated_is_pseudonymized(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
feedback_thread_model = (
feedback_models.GeneralFeedbackThreadModel.get_by_id(
self.FEEDBACK_1_ID)
)
feedback_thread_model.original_author_id = self.user_1_id
feedback_thread_model.put_for_human()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
feedback_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.feedback]
)
new_feedback_thread_model = (
feedback_models.GeneralFeedbackThreadModel.get_by_id(
self.FEEDBACK_1_ID)
)
self.assertEqual(
new_feedback_thread_model.original_author_id,
feedback_mappings[self.FEEDBACK_1_ID]
)
def test_multiple_feedbacks_are_pseudonymized(self):
feedback_thread_models = []
for i in python_utils.RANGE(self.NUMBER_OF_MODELS):
feedback_thread_models.append(
feedback_models.GeneralFeedbackThreadModel(
id='feedback-%s' % i,
entity_type=feconf.ENTITY_TYPE_EXPLORATION,
entity_id=self.EXP_1_ID,
original_author_id=self.user_1_id,
subject='Too short exploration',
last_nonempty_message_text='Some text',
last_nonempty_message_author_id=self.user_2_id
)
)
feedback_message_models = []
for i in python_utils.RANGE(self.NUMBER_OF_MODELS):
feedback_message_models.append(
feedback_models.GeneralFeedbackMessageModel(
id='message-%s' % i,
thread_id='feedback-%s' % i,
message_id=i,
author_id=self.user_1_id,
text='Some text'
)
)
base_models.BaseHumanMaintainedModel.put_multi_for_human(
feedback_thread_models + feedback_message_models)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
feedback_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.feedback]
)
pseudonymized_feedback_thread_models = (
feedback_models.GeneralFeedbackThreadModel.get_multi(
[model.id for model in feedback_thread_models]
)
)
for feedback_thread_model in pseudonymized_feedback_thread_models:
self.assertEqual(
feedback_thread_model.original_author_id,
feedback_mappings[feedback_thread_model.id]
)
pseudonymized_feedback_message_models = (
feedback_models.GeneralFeedbackMessageModel.get_multi(
[model.id for model in feedback_message_models]
)
)
for feedback_message_model in pseudonymized_feedback_message_models:
self.assertEqual(
feedback_message_model.author_id,
feedback_mappings[feedback_message_model.thread_id]
)
def test_one_feedback_with_multiple_users_is_pseudonymized(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
feedback_mappings_1 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.feedback]
)
feedback_thread_model = (
feedback_models.GeneralFeedbackThreadModel.get_by_id(
self.FEEDBACK_1_ID)
)
self.assertEqual(
feedback_thread_model.original_author_id,
feedback_mappings_1[self.FEEDBACK_1_ID]
)
self.assertEqual(
feedback_thread_model.last_nonempty_message_author_id,
self.user_2_id
)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
feedback_mappings_2 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_2_id
).pseudonymizable_entity_mappings[models.NAMES.feedback]
)
self.assertEqual(
feedback_thread_model.last_nonempty_message_author_id,
feedback_mappings_2[self.FEEDBACK_1_ID]
)
class WipeoutServiceVerifyDeleteFeedbackModelsTests(test_utils.GenericTestBase):
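# Tests that verify_user_deleted reports correctly for feedback and
# suggestion models.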
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
FEEDBACK_1_ID = 'feedback_1_id'
MESSAGE_1_ID = 'message_1_id'
EXP_1_ID = 'exp_1_id'
def setUp(self):
super(WipeoutServiceVerifyDeleteFeedbackModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
feedback_models.GeneralFeedbackThreadModel(
id=self.FEEDBACK_1_ID,
entity_type=feconf.ENTITY_TYPE_EXPLORATION,
entity_id=self.EXP_1_ID,
original_author_id=self.user_1_id,
subject='Wrong state name',
has_suggestion=True,
last_nonempty_message_text='Some text',
last_nonempty_message_author_id=self.user_1_id
).put_for_human()
feedback_models.GeneralFeedbackMessageModel(
id=self.MESSAGE_1_ID,
thread_id=self.FEEDBACK_1_ID,
message_id=0,
author_id=self.user_1_id,
text='Some text'
).put_for_human()
suggestion_models.GeneralSuggestionModel(
id=self.FEEDBACK_1_ID,
suggestion_type=(
feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT),
target_type=feconf.ENTITY_TYPE_EXPLORATION,
target_id=self.EXP_1_ID,
target_version_at_submission=1,
status=suggestion_models.STATUS_IN_REVIEW,
author_id=self.user_1_id,
final_reviewer_id=self.user_1_id,
change_cmd={},
score_category=suggestion_models.SCORE_TYPE_CONTENT
).put_for_human()
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
def test_verify_user_delete_when_user_is_deleted_returns_true(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
def test_verify_user_delete_when_user_is_not_deleted_returns_false(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
feedback_models.GeneralFeedbackThreadModel(
id=self.FEEDBACK_1_ID,
entity_type=feconf.ENTITY_TYPE_EXPLORATION,
entity_id=self.EXP_1_ID,
original_author_id=self.user_1_id,
subject='Wrong state name',
has_suggestion=True,
last_nonempty_message_text='Some text',
last_nonempty_message_author_id=self.user_1_id
).put_for_human()
self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
class WipeoutServiceDeleteImprovementsModelsTests(test_utils.GenericTestBase):
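# Tests that deleting a user deletes the improvements task entries that the
# user resolved.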
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
EXP_1_ID = 'exp_1_id'
EXP_2_ID = 'exp_2_id'
def setUp(self):
super(WipeoutServiceDeleteImprovementsModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.improvements_model_1_id = (
improvements_models.TaskEntryModel.create(
entity_type=constants.TASK_ENTITY_TYPE_EXPLORATION,
entity_id=self.EXP_1_ID,
entity_version=1,
task_type=constants.TASK_TYPE_HIGH_BOUNCE_RATE,
target_type=constants.TASK_TARGET_TYPE_STATE,
target_id='State',
issue_description=None,
status=constants.TASK_STATUS_RESOLVED,
resolver_id=self.user_1_id
)
)
self.improvements_model_2_id = (
improvements_models.TaskEntryModel.create(
entity_type=constants.TASK_ENTITY_TYPE_EXPLORATION,
entity_id=self.EXP_2_ID,
entity_version=1,
task_type=constants.TASK_TYPE_HIGH_BOUNCE_RATE,
target_type=constants.TASK_TARGET_TYPE_STATE,
target_id='State',
issue_description=None,
status=constants.TASK_STATUS_RESOLVED,
resolver_id=self.user_1_id
)
)
def test_delete_user_is_successful(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
self.assertIsNotNone(
improvements_models.TaskEntryModel.get_by_id(
self.improvements_model_1_id))
self.assertIsNotNone(
improvements_models.TaskEntryModel.get_by_id(
self.improvements_model_2_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertIsNone(
improvements_models.TaskEntryModel.get_by_id(
self.improvements_model_1_id))
self.assertIsNone(
improvements_models.TaskEntryModel.get_by_id(
self.improvements_model_2_id))
class WipeoutServiceVerifyDeleteImprovementsModelsTests(
test_utils.GenericTestBase):
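# Tests that verify_user_deleted reports correctly for improvements task
# entry models.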
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
EXP_1_ID = 'exp_1_id'
EXP_2_ID = 'exp_2_id'
EXP_3_ID = 'exp_3_id'
def setUp(self):
super(WipeoutServiceVerifyDeleteImprovementsModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
improvements_models.TaskEntryModel.create(
entity_type=constants.TASK_ENTITY_TYPE_EXPLORATION,
entity_id=self.EXP_1_ID,
entity_version=1,
task_type=constants.TASK_TYPE_HIGH_BOUNCE_RATE,
target_type=constants.TASK_TARGET_TYPE_STATE,
target_id='State',
issue_description=None,
status=constants.TASK_STATUS_RESOLVED,
resolver_id=self.user_1_id
)
improvements_models.TaskEntryModel.create(
entity_type=constants.TASK_ENTITY_TYPE_EXPLORATION,
entity_id=self.EXP_2_ID,
entity_version=1,
task_type=constants.TASK_TYPE_HIGH_BOUNCE_RATE,
target_type=constants.TASK_TARGET_TYPE_STATE,
target_id='State',
issue_description=None,
status=constants.TASK_STATUS_RESOLVED,
resolver_id=self.user_1_id
)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
def test_verify_user_delete_when_user_is_deleted_returns_true(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
def test_verify_user_delete_when_user_is_not_deleted_returns_false(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
improvements_models.TaskEntryModel.create(
entity_type=constants.TASK_ENTITY_TYPE_EXPLORATION,
entity_id=self.EXP_3_ID,
entity_version=1,
task_type=constants.TASK_TYPE_HIGH_BOUNCE_RATE,
target_type=constants.TASK_TARGET_TYPE_STATE,
target_id='State',
issue_description=None,
status=constants.TASK_STATUS_RESOLVED,
resolver_id=self.user_1_id
)
self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
class WipeoutServiceDeleteQuestionModelsTests(test_utils.GenericTestBase):
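# Tests that deleting a user pseudonymizes question snapshot metadata and
# commit log models.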
SKILL_1_ID = 'skill_1_id'
QUESTION_1_ID = 'question_1_id'
QUESTION_2_ID = 'question_2_id'
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
def setUp(self):
super(WipeoutServiceDeleteQuestionModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.set_admins((self.USER_1_USERNAME, self.USER_2_USERNAME))
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
self.save_new_skill(self.SKILL_1_ID, self.user_1_id)
self.save_new_question(
self.QUESTION_1_ID,
self.user_1_id,
self._create_valid_question_data('ABC'),
[self.SKILL_1_ID]
)
wipeout_service.pre_delete_user(self.user_1_id)
wipeout_service.pre_delete_user(self.user_2_id)
self.process_and_flush_pending_tasks()
def test_one_question_is_pseudonymized(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
question_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.question]
)
metadata_model = (
question_models.QuestionSnapshotMetadataModel.get_by_id(
'%s-1' % self.QUESTION_1_ID)
)
self.assertEqual(
metadata_model.committer_id, question_mappings[self.QUESTION_1_ID])
commit_log_model = (
question_models.QuestionCommitLogEntryModel.get_by_id(
'question-%s-1' % self.QUESTION_1_ID)
)
self.assertEqual(
commit_log_model.user_id, question_mappings[self.QUESTION_1_ID])
def test_one_question_with_missing_snapshot_is_pseudonymized(self):
question_models.QuestionCommitLogEntryModel(
id='question-%s-1' % self.QUESTION_2_ID,
question_id=self.QUESTION_2_ID,
user_id=self.user_1_id,
commit_type='create_new',
commit_cmds=[{}],
post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
version=1
).put_for_human()
with self.capture_logging(min_level=logging.ERROR) as log_messages:
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertEqual(
log_messages,
['[WIPEOUT] The commit log model \'QuestionCommitLogEntryModel\' '
'and snapshot models [\'QuestionSnapshotMetadataModel\'] IDs '
'differ. Snapshots without commit logs: [], '
'commit logs without snapshots: [u\'%s\'].' % self.QUESTION_2_ID])
question_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.question]
)
metadata_model = (
question_models.QuestionSnapshotMetadataModel.get_by_id(
'%s-1' % self.QUESTION_1_ID
)
)
self.assertEqual(
metadata_model.committer_id, question_mappings[self.QUESTION_1_ID])
commit_log_model_1 = (
question_models.QuestionCommitLogEntryModel.get_by_id(
'question-%s-1' % self.QUESTION_1_ID
)
)
self.assertEqual(
commit_log_model_1.user_id, question_mappings[self.QUESTION_1_ID])
commit_log_model_2 = (
question_models.QuestionCommitLogEntryModel.get_by_id(
'question-%s-1' % self.QUESTION_2_ID
)
)
self.assertEqual(
commit_log_model_2.user_id, question_mappings[self.QUESTION_2_ID])
def test_one_question_when_the_deletion_is_repeated_is_pseudonymized(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
metadata_model = (
question_models.QuestionSnapshotMetadataModel.get_by_id(
'%s-1' % self.QUESTION_1_ID
)
)
metadata_model.committer_id = self.user_1_id
metadata_model.put_for_human()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
question_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.question]
)
metadata_model = (
question_models.QuestionSnapshotMetadataModel.get_by_id(
'%s-1' % self.QUESTION_1_ID
)
)
self.assertEqual(
metadata_model.committer_id, question_mappings[self.QUESTION_1_ID])
commit_log_model = (
question_models.QuestionCommitLogEntryModel.get_by_id(
'question-%s-1' % self.QUESTION_1_ID
)
)
self.assertEqual(
commit_log_model.user_id, question_mappings[self.QUESTION_1_ID])
def test_multiple_questions_are_pseudonymized(self):
self.save_new_question(
self.QUESTION_2_ID,
self.user_1_id,
self._create_valid_question_data('ABC'),
[self.SKILL_1_ID]
)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
question_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.question]
)
metadata_model = (
question_models.QuestionSnapshotMetadataModel.get_by_id(
'%s-1' % self.QUESTION_1_ID
)
)
self.assertEqual(
metadata_model.committer_id, question_mappings[self.QUESTION_1_ID])
commit_log_model = (
question_models.QuestionCommitLogEntryModel.get_by_id(
'question-%s-1' % self.QUESTION_1_ID
)
)
self.assertEqual(
commit_log_model.user_id, question_mappings[self.QUESTION_1_ID])
metadata_model = (
question_models.QuestionSnapshotMetadataModel.get_by_id(
'%s-1' % self.QUESTION_2_ID
)
)
self.assertEqual(
metadata_model.committer_id, question_mappings[self.QUESTION_2_ID])
commit_log_model = (
question_models.QuestionCommitLogEntryModel.get_by_id(
'question-%s-1' % self.QUESTION_2_ID
)
)
self.assertEqual(
commit_log_model.user_id, question_mappings[self.QUESTION_2_ID])
def test_multiple_questions_with_multiple_users_are_pseudonymized(self):
self.save_new_question(
self.QUESTION_2_ID,
self.user_2_id,
self._create_valid_question_data('ABC'),
[self.SKILL_1_ID]
)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
question_mappings_1 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.question]
)
metadata_model = (
question_models.QuestionSnapshotMetadataModel.get_by_id(
'%s-1' % self.QUESTION_1_ID
)
)
self.assertEqual(
metadata_model.committer_id,
question_mappings_1[self.QUESTION_1_ID]
)
commit_log_model = (
question_models.QuestionCommitLogEntryModel.get_by_id(
'question-%s-1' % self.QUESTION_1_ID
)
)
self.assertEqual(
commit_log_model.user_id, question_mappings_1[self.QUESTION_1_ID])
metadata_model = (
question_models.QuestionSnapshotMetadataModel.get_by_id(
'%s-1' % self.QUESTION_2_ID
)
)
self.assertEqual(metadata_model.committer_id, self.user_2_id)
commit_log_model = (
question_models.QuestionCommitLogEntryModel.get_by_id(
'question-%s-1' % self.QUESTION_2_ID
)
)
self.assertEqual(commit_log_model.user_id, self.user_2_id)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
question_mappings_2 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_2_id
).pseudonymizable_entity_mappings[models.NAMES.question]
)
metadata_model = (
question_models.QuestionSnapshotMetadataModel.get_by_id(
'%s-1' % self.QUESTION_2_ID
)
)
self.assertEqual(
metadata_model.committer_id,
question_mappings_2[self.QUESTION_2_ID]
)
commit_log_model = (
question_models.QuestionCommitLogEntryModel.get_by_id(
'question-%s-1' % self.QUESTION_2_ID
)
)
self.assertEqual(
commit_log_model.user_id, question_mappings_2[self.QUESTION_2_ID])
def test_one_question_with_multiple_users_is_pseudonymized(self):
question_services.update_question(
self.user_2_id,
self.QUESTION_1_ID,
[question_domain.QuestionChange({
'cmd': question_domain.CMD_UPDATE_QUESTION_PROPERTY,
'property_name': (
question_domain.QUESTION_PROPERTY_LANGUAGE_CODE),
'new_value': 'cs',
'old_value': 'en'
})],
'Change language.'
)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
question_mappings_1 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.question]
)
metadata_model = (
question_models.QuestionSnapshotMetadataModel.get_by_id(
'%s-1' % self.QUESTION_1_ID
)
)
self.assertEqual(
metadata_model.committer_id,
question_mappings_1[self.QUESTION_1_ID]
)
commit_log_model = (
question_models.QuestionCommitLogEntryModel.get_by_id(
'question-%s-1' % self.QUESTION_1_ID
)
)
self.assertEqual(
commit_log_model.user_id, question_mappings_1[self.QUESTION_1_ID])
metadata_model = (
question_models.QuestionSnapshotMetadataModel.get_by_id(
'%s-2' % self.QUESTION_1_ID
)
)
self.assertEqual(metadata_model.committer_id, self.user_2_id)
commit_log_model = (
question_models.QuestionCommitLogEntryModel.get_by_id(
'question-%s-2' % self.QUESTION_1_ID
)
)
self.assertEqual(commit_log_model.user_id, self.user_2_id)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
question_mappings_2 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_2_id
).pseudonymizable_entity_mappings[models.NAMES.question]
)
metadata_model = (
question_models.QuestionSnapshotMetadataModel.get_by_id(
'%s-2' % self.QUESTION_1_ID
)
)
self.assertEqual(
metadata_model.committer_id,
question_mappings_2[self.QUESTION_1_ID]
)
commit_log_model = (
question_models.QuestionCommitLogEntryModel.get_by_id(
'question-%s-2' % self.QUESTION_1_ID
)
)
self.assertEqual(
commit_log_model.user_id, question_mappings_2[self.QUESTION_1_ID])
class WipeoutServiceVerifyDeleteQuestionModelsTests(test_utils.GenericTestBase):
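    """Tests verification of question models deletion."""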
SKILL_1_ID = 'SKILL_1_ID'
QUESTION_1_ID = 'QUESTION_1_ID'
QUESTION_2_ID = 'QUESTION_2_ID'
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
def setUp(self):
super(WipeoutServiceVerifyDeleteQuestionModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.set_admins((self.USER_1_USERNAME, self.USER_2_USERNAME))
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
self.save_new_skill(self.SKILL_1_ID, self.user_1_id)
self.save_new_question(
self.QUESTION_1_ID,
self.user_1_id,
self._create_valid_question_data('ABC'),
[self.SKILL_1_ID]
)
self.save_new_question(
self.QUESTION_2_ID,
self.user_2_id,
self._create_valid_question_data('ABC'),
[self.SKILL_1_ID]
)
wipeout_service.pre_delete_user(self.user_1_id)
wipeout_service.pre_delete_user(self.user_2_id)
self.process_and_flush_pending_tasks()
def test_verification_is_successful(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
def test_verification_when_deletion_failed_is_unsuccessful(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_2_id))
        question_services.update_question(
            self.user_2_id,
            self.QUESTION_2_ID,
            [question_domain.QuestionChange({
                'cmd': question_domain.CMD_UPDATE_QUESTION_PROPERTY,
                'property_name': (
                    question_domain.QUESTION_PROPERTY_LANGUAGE_CODE),
                'new_value': 'cs',
                'old_value': 'en'
            })],
            'Change language.'
        )
        # The update above re-introduces data referencing the user, so
        # verification should fail until the user is deleted again.
        self.assertFalse(wipeout_service.verify_user_deleted(self.user_2_id))
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_2_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_2_id))
class WipeoutServiceDeleteSkillModelsTests(test_utils.GenericTestBase):
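    """Tests pseudonymization of skill models during wipeout."""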
SKILL_1_ID = 'skill_1_id'
SKILL_2_ID = 'skill_2_id'
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
def setUp(self):
super(WipeoutServiceDeleteSkillModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.set_admins((self.USER_1_USERNAME, self.USER_2_USERNAME))
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
self.save_new_skill(self.SKILL_1_ID, self.user_1_id)
wipeout_service.pre_delete_user(self.user_1_id)
wipeout_service.pre_delete_user(self.user_2_id)
self.process_and_flush_pending_tasks()
def test_one_skill_is_pseudonymized(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
skill_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.skill]
)
metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
'%s-1' % self.SKILL_1_ID)
self.assertEqual(
metadata_model.committer_id, skill_mappings[self.SKILL_1_ID])
commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
'skill-%s-1' % self.SKILL_1_ID)
self.assertEqual(
commit_log_model.user_id, skill_mappings[self.SKILL_1_ID])
def test_one_skill_with_missing_snapshot_is_pseudonymized(self):
skill_models.SkillCommitLogEntryModel(
id='skill-%s-1' % self.SKILL_2_ID,
skill_id=self.SKILL_2_ID,
user_id=self.user_1_id,
commit_type='create_new',
commit_cmds=[{}],
post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
version=1
).put_for_human()
with self.capture_logging(min_level=logging.ERROR) as log_messages:
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertEqual(
log_messages,
['[WIPEOUT] The commit log model \'SkillCommitLogEntryModel\' and '
'snapshot models [\'SkillSnapshotMetadataModel\'] IDs differ. '
'Snapshots without commit logs: [], '
'commit logs without snapshots: [u\'%s\'].' % self.SKILL_2_ID])
skill_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.skill]
)
metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
'%s-1' % self.SKILL_1_ID)
self.assertEqual(
metadata_model.committer_id, skill_mappings[self.SKILL_1_ID])
commit_log_model_1 = skill_models.SkillCommitLogEntryModel.get_by_id(
'skill-%s-1' % self.SKILL_1_ID)
self.assertEqual(
commit_log_model_1.user_id, skill_mappings[self.SKILL_1_ID])
commit_log_model_2 = skill_models.SkillCommitLogEntryModel.get_by_id(
'skill-%s-1' % self.SKILL_2_ID)
self.assertEqual(
commit_log_model_2.user_id, skill_mappings[self.SKILL_2_ID])
def test_one_skill_when_the_deletion_is_repeated_is_pseudonymized(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
'%s-1' % self.SKILL_1_ID)
metadata_model.committer_id = self.user_1_id
metadata_model.put_for_human()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
skill_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.skill]
)
metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
'%s-1' % self.SKILL_1_ID)
self.assertEqual(
metadata_model.committer_id, skill_mappings[self.SKILL_1_ID])
commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
'skill-%s-1' % self.SKILL_1_ID)
self.assertEqual(
commit_log_model.user_id, skill_mappings[self.SKILL_1_ID])
def test_multiple_skills_are_pseudonymized(self):
self.save_new_skill(self.SKILL_2_ID, self.user_1_id)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
skill_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.skill]
)
metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
'%s-1' % self.SKILL_1_ID)
self.assertEqual(
metadata_model.committer_id, skill_mappings[self.SKILL_1_ID])
commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
'skill-%s-1' % self.SKILL_1_ID)
self.assertEqual(
commit_log_model.user_id, skill_mappings[self.SKILL_1_ID])
metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
'%s-1' % self.SKILL_2_ID)
self.assertEqual(
metadata_model.committer_id, skill_mappings[self.SKILL_2_ID])
commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
'skill-%s-1' % self.SKILL_2_ID)
self.assertEqual(
commit_log_model.user_id, skill_mappings[self.SKILL_2_ID])
def test_multiple_skills_with_multiple_users_are_pseudonymized(self):
self.save_new_skill(self.SKILL_2_ID, self.user_2_id)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
skill_mappings_1 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.skill]
)
metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
'%s-1' % self.SKILL_1_ID)
self.assertEqual(
metadata_model.committer_id, skill_mappings_1[self.SKILL_1_ID])
commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
'skill-%s-1' % self.SKILL_1_ID)
self.assertEqual(
commit_log_model.user_id, skill_mappings_1[self.SKILL_1_ID])
metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
'%s-1' % self.SKILL_2_ID)
self.assertEqual(metadata_model.committer_id, self.user_2_id)
commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
'skill-%s-1' % self.SKILL_2_ID)
self.assertEqual(commit_log_model.user_id, self.user_2_id)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
skill_mappings_2 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_2_id
).pseudonymizable_entity_mappings[models.NAMES.skill]
)
metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
'%s-1' % self.SKILL_2_ID)
self.assertEqual(
metadata_model.committer_id, skill_mappings_2[self.SKILL_2_ID])
commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
'skill-%s-1' % self.SKILL_2_ID)
self.assertEqual(
commit_log_model.user_id, skill_mappings_2[self.SKILL_2_ID])
def test_one_skill_with_multiple_users_is_pseudonymized(self):
skill_services.update_skill(
self.user_2_id,
self.SKILL_1_ID,
[skill_domain.SkillChange({
'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY,
'property_name': skill_domain.SKILL_PROPERTY_LANGUAGE_CODE,
'new_value': 'cs',
'old_value': 'en'
})],
'Change language.'
)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
skill_mappings_1 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.skill]
)
metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
'%s-1' % self.SKILL_1_ID)
self.assertEqual(
metadata_model.committer_id, skill_mappings_1[self.SKILL_1_ID])
commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
'skill-%s-1' % self.SKILL_1_ID)
self.assertEqual(
commit_log_model.user_id, skill_mappings_1[self.SKILL_1_ID])
metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
'%s-2' % self.SKILL_1_ID)
self.assertEqual(metadata_model.committer_id, self.user_2_id)
commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
'skill-%s-2' % self.SKILL_1_ID)
self.assertEqual(commit_log_model.user_id, self.user_2_id)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
skill_mappings_2 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_2_id
).pseudonymizable_entity_mappings[models.NAMES.skill]
)
metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
'%s-2' % self.SKILL_1_ID)
self.assertEqual(
metadata_model.committer_id, skill_mappings_2[self.SKILL_1_ID])
commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
'skill-%s-2' % self.SKILL_1_ID)
self.assertEqual(
commit_log_model.user_id, skill_mappings_2[self.SKILL_1_ID])
class WipeoutServiceVerifyDeleteSkillModelsTests(test_utils.GenericTestBase):
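    """Tests verification of skill models deletion."""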
SKILL_1_ID = 'skill_1_id'
SKILL_2_ID = 'skill_2_id'
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
def setUp(self):
super(WipeoutServiceVerifyDeleteSkillModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.set_admins((self.USER_1_USERNAME, self.USER_2_USERNAME))
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
self.save_new_skill(self.SKILL_1_ID, self.user_1_id)
self.save_new_skill(self.SKILL_2_ID, self.user_2_id)
wipeout_service.pre_delete_user(self.user_1_id)
wipeout_service.pre_delete_user(self.user_2_id)
self.process_and_flush_pending_tasks()
def test_verification_is_successful(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
def test_verification_when_deletion_failed_is_unsuccessful(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_2_id))
skill_services.update_skill(
self.user_2_id,
self.SKILL_2_ID,
[skill_domain.SkillChange({
'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY,
'property_name': skill_domain.SKILL_PROPERTY_LANGUAGE_CODE,
'new_value': 'cs',
'old_value': 'en'
})],
'Change language.'
)
self.assertFalse(wipeout_service.verify_user_deleted(self.user_2_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_2_id))
class WipeoutServiceDeleteStoryModelsTests(test_utils.GenericTestBase):
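    """Tests pseudonymization of story models during wipeout."""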
TOPIC_1_ID = 'topic_1_id'
STORY_1_ID = 'story_1_id'
STORY_2_ID = 'story_2_id'
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
def setUp(self):
super(WipeoutServiceDeleteStoryModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
self.save_new_topic(
self.TOPIC_1_ID,
self.user_1_id,
abbreviated_name='abbrev-one',
url_fragment='frag-one',
canonical_story_ids=[self.STORY_1_ID])
self.save_new_story(self.STORY_1_ID, self.user_1_id, self.TOPIC_1_ID)
wipeout_service.pre_delete_user(self.user_1_id)
wipeout_service.pre_delete_user(self.user_2_id)
self.process_and_flush_pending_tasks()
def test_one_story_is_pseudonymized(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
story_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.story]
)
metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
'%s-1' % self.STORY_1_ID)
self.assertEqual(
metadata_model.committer_id, story_mappings[self.STORY_1_ID])
commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
'story-%s-1' % self.STORY_1_ID)
self.assertEqual(
commit_log_model.user_id, story_mappings[self.STORY_1_ID])
def test_one_story_with_missing_snapshot_is_pseudonymized(self):
story_models.StoryCommitLogEntryModel(
id='story-%s-1' % self.STORY_2_ID,
story_id=self.STORY_2_ID,
user_id=self.user_1_id,
commit_type='create_new',
commit_cmds=[{}],
post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
version=1
).put_for_human()
with self.capture_logging(min_level=logging.ERROR) as log_messages:
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertEqual(
log_messages,
['[WIPEOUT] The commit log model \'StoryCommitLogEntryModel\' and '
'snapshot models [\'StorySnapshotMetadataModel\'] IDs differ. '
'Snapshots without commit logs: [], '
'commit logs without snapshots: [u\'%s\'].' % self.STORY_2_ID])
story_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.story]
)
metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
'%s-1' % self.STORY_1_ID)
self.assertEqual(
metadata_model.committer_id, story_mappings[self.STORY_1_ID])
commit_log_model_1 = story_models.StoryCommitLogEntryModel.get_by_id(
'story-%s-1' % self.STORY_1_ID)
self.assertEqual(
commit_log_model_1.user_id, story_mappings[self.STORY_1_ID])
commit_log_model_2 = story_models.StoryCommitLogEntryModel.get_by_id(
'story-%s-1' % self.STORY_2_ID)
self.assertEqual(
commit_log_model_2.user_id, story_mappings[self.STORY_2_ID])
def test_one_story_when_the_deletion_is_repeated_is_pseudonymized(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
'%s-1' % self.STORY_1_ID)
metadata_model.committer_id = self.user_1_id
metadata_model.put_for_human()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
story_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.story]
)
metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
'%s-1' % self.STORY_1_ID)
self.assertEqual(
metadata_model.committer_id, story_mappings[self.STORY_1_ID])
commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
'story-%s-1' % self.STORY_1_ID)
self.assertEqual(
commit_log_model.user_id, story_mappings[self.STORY_1_ID])
def test_multiple_stories_are_pseudonymized(self):
self.save_new_topic(
self.TOPIC_1_ID, self.user_1_id, name='Topic 2',
abbreviated_name='abbrev-two', url_fragment='frag-two')
self.save_new_story(self.STORY_2_ID, self.user_1_id, self.TOPIC_1_ID)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
story_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.story]
)
metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
'%s-1' % self.STORY_1_ID)
self.assertEqual(
metadata_model.committer_id, story_mappings[self.STORY_1_ID])
commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
'story-%s-1' % self.STORY_1_ID)
self.assertEqual(
commit_log_model.user_id, story_mappings[self.STORY_1_ID])
metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
'%s-1' % self.STORY_2_ID)
self.assertEqual(
metadata_model.committer_id, story_mappings[self.STORY_2_ID])
commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
'story-%s-1' % self.STORY_2_ID)
self.assertEqual(
commit_log_model.user_id, story_mappings[self.STORY_2_ID])
def test_multiple_stories_with_multiple_users_are_pseudonymized(self):
self.save_new_topic(
self.TOPIC_1_ID, self.user_2_id, name='Topic 2',
abbreviated_name='abbrev-three', url_fragment='frag-three')
self.save_new_story(self.STORY_2_ID, self.user_2_id, self.TOPIC_1_ID)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
story_mappings_1 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.story]
)
metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
'%s-1' % self.STORY_1_ID)
self.assertEqual(
metadata_model.committer_id, story_mappings_1[self.STORY_1_ID])
commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
'story-%s-1' % self.STORY_1_ID)
self.assertEqual(
commit_log_model.user_id, story_mappings_1[self.STORY_1_ID])
metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
'%s-1' % self.STORY_2_ID)
self.assertEqual(metadata_model.committer_id, self.user_2_id)
commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
'story-%s-1' % self.STORY_2_ID)
self.assertEqual(commit_log_model.user_id, self.user_2_id)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
story_mappings_2 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_2_id
).pseudonymizable_entity_mappings[models.NAMES.story]
)
metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
'%s-1' % self.STORY_2_ID)
self.assertEqual(
metadata_model.committer_id, story_mappings_2[self.STORY_2_ID])
commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
'story-%s-1' % self.STORY_2_ID)
self.assertEqual(
commit_log_model.user_id, story_mappings_2[self.STORY_2_ID])
def test_one_story_with_multiple_users_is_pseudonymized(self):
story_services.update_story(
self.user_2_id,
self.STORY_1_ID,
[story_domain.StoryChange({
'cmd': story_domain.CMD_ADD_STORY_NODE,
'node_id': 'node_1',
'title': 'Title 2'
})],
'Add node.'
)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
story_mappings_1 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.story]
)
metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
'%s-1' % self.STORY_1_ID)
self.assertEqual(
metadata_model.committer_id, story_mappings_1[self.STORY_1_ID])
commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
'story-%s-1' % self.STORY_1_ID)
self.assertEqual(
commit_log_model.user_id, story_mappings_1[self.STORY_1_ID])
metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
'%s-2' % self.STORY_1_ID)
self.assertEqual(metadata_model.committer_id, self.user_2_id)
commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
'story-%s-2' % self.STORY_1_ID)
self.assertEqual(commit_log_model.user_id, self.user_2_id)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
story_mappings_2 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_2_id
).pseudonymizable_entity_mappings[models.NAMES.story]
)
metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
'%s-2' % self.STORY_1_ID)
self.assertEqual(
metadata_model.committer_id, story_mappings_2[self.STORY_1_ID])
commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
'story-%s-2' % self.STORY_1_ID)
self.assertEqual(
commit_log_model.user_id, story_mappings_2[self.STORY_1_ID])
class WipeoutServiceVerifyDeleteStoryModelsTests(test_utils.GenericTestBase):
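    """Tests verification of story models deletion."""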
TOPIC_1_ID = 'topic_1_id'
TOPIC_2_ID = 'topic_2_id'
STORY_1_ID = 'story_1_id'
STORY_2_ID = 'story_2_id'
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
def setUp(self):
super(WipeoutServiceVerifyDeleteStoryModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
self.save_new_topic(
self.TOPIC_1_ID, self.user_1_id, abbreviated_name='abbrev-four',
url_fragment='frag-four')
self.save_new_story(self.STORY_1_ID, self.user_1_id, self.TOPIC_1_ID)
self.save_new_topic(
self.TOPIC_2_ID,
self.user_2_id,
name='Topic 2',
abbreviated_name='abbrev-five',
url_fragment='frag-five',
canonical_story_ids=[self.STORY_2_ID])
self.save_new_story(self.STORY_2_ID, self.user_2_id, self.TOPIC_2_ID)
wipeout_service.pre_delete_user(self.user_1_id)
wipeout_service.pre_delete_user(self.user_2_id)
self.process_and_flush_pending_tasks()
def test_verification_is_successful(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
def test_verification_when_deletion_failed_is_unsuccessful(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_2_id))
story_services.update_story(
self.user_2_id,
self.STORY_2_ID,
[story_domain.StoryChange({
'cmd': story_domain.CMD_ADD_STORY_NODE,
'node_id': 'node_1',
'title': 'Title 2'
})],
'Add node.'
)
self.assertFalse(wipeout_service.verify_user_deleted(self.user_2_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_2_id))
class WipeoutServiceDeleteSubtopicModelsTests(test_utils.GenericTestBase):
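    """Tests pseudonymization of subtopic page models during wipeout."""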
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
TOP_1_ID = 'top_1_id'
SUBTOP_1_ID = 'subtop_1_id'
SUBTOP_2_ID = 'subtop_2_id'
def setUp(self):
super(WipeoutServiceDeleteSubtopicModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
self.save_new_topic(self.TOP_1_ID, self.user_1_id)
self.subtopic_page = self.save_new_subtopic(
self.SUBTOP_1_ID, self.user_1_id, self.TOP_1_ID)
wipeout_service.pre_delete_user(self.user_1_id)
wipeout_service.pre_delete_user(self.user_2_id)
self.process_and_flush_pending_tasks()
def test_one_subtopic_is_pseudonymized(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
subtopic_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.subtopic]
)
metadata_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
'%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(
metadata_model.committer_id,
subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
commit_log_model = (
subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
'subtopicpage-%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(
commit_log_model.user_id,
subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
def test_one_subtopic_with_missing_snapshot_is_pseudonymized(self):
subtopic_models.SubtopicPageCommitLogEntryModel(
id='%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_2_ID),
subtopic_page_id=self.SUBTOP_2_ID,
user_id=self.user_1_id,
commit_type='create_new',
commit_cmds=[{}],
post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
version=1
).put_for_human()
with self.capture_logging(min_level=logging.ERROR) as log_messages:
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertEqual(
log_messages,
['[WIPEOUT] The commit log model '
'\'SubtopicPageCommitLogEntryModel\' and snapshot models '
'[\'SubtopicPageSnapshotMetadataModel\'] IDs differ. '
'Snapshots without commit logs: [], '
'commit logs without snapshots: [u\'%s\'].' % self.SUBTOP_2_ID])
subtopic_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.subtopic]
)
metadata_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
'%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(
metadata_model.committer_id,
subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
commit_log_model = (
subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
'subtopicpage-%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(
commit_log_model.user_id,
subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
def test_one_subtopic_when_the_deletion_is_repeated_is_pseudonymized(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
metadata_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
'%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
metadata_model.committer_id = self.user_1_id
metadata_model.put_for_human()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
subtopic_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.subtopic]
)
metadata_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
'%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(
metadata_model.committer_id,
subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
commit_log_model = (
subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
'subtopicpage-%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(
commit_log_model.user_id,
subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
def test_multiple_subtopics_are_pseudonymized(self):
self.save_new_subtopic(self.SUBTOP_2_ID, self.user_1_id, self.TOP_1_ID)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
subtopic_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.subtopic]
)
metadata_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
'%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(
metadata_model.committer_id,
subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
commit_log_model = (
subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
'subtopicpage-%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(
commit_log_model.user_id,
subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
metadata_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
'%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_2_ID)))
self.assertEqual(
metadata_model.committer_id,
subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_2_ID)])
commit_log_model = (
subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
'subtopicpage-%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_2_ID)))
self.assertEqual(
commit_log_model.user_id,
subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_2_ID)])
def test_multiple_subtopics_with_multiple_users_are_pseudonymized(self):
self.save_new_subtopic(self.SUBTOP_2_ID, self.user_2_id, self.TOP_1_ID)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
subtopic_mappings_1 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.subtopic]
)
metadata_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
'%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(
metadata_model.committer_id,
subtopic_mappings_1['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
commit_log_model = (
subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
'subtopicpage-%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(
commit_log_model.user_id,
subtopic_mappings_1['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
metadata_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
'%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_2_ID)))
self.assertEqual(metadata_model.committer_id, self.user_2_id)
commit_log_model = (
subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
'subtopicpage-%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_2_ID)))
self.assertEqual(commit_log_model.user_id, self.user_2_id)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
subtopic_mappings_2 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_2_id
).pseudonymizable_entity_mappings[models.NAMES.subtopic]
)
metadata_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
'%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_2_ID)))
self.assertEqual(
metadata_model.committer_id,
subtopic_mappings_2['%s-%s' % (self.TOP_1_ID, self.SUBTOP_2_ID)])
commit_log_model = (
subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
'subtopicpage-%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_2_ID)))
self.assertEqual(
commit_log_model.user_id,
subtopic_mappings_2['%s-%s' % (self.TOP_1_ID, self.SUBTOP_2_ID)])
def test_one_subtopic_with_multiple_users_is_pseudonymized(self):
subtopic_page_services.save_subtopic_page(
self.user_2_id,
self.subtopic_page,
'Change subtopic',
[
subtopic_page_domain.SubtopicPageChange({
'cmd': (
subtopic_page_domain.CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY),
'property_name': (
subtopic_page_domain
.SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML),
'new_value': 'new value',
'old_value': 'old value',
'subtopic_id': self.SUBTOP_1_ID
})
]
)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
subtopic_mappings_1 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.subtopic]
)
metadata_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
'%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(
metadata_model.committer_id,
subtopic_mappings_1['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
commit_log_model = (
subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
'subtopicpage-%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(
commit_log_model.user_id,
subtopic_mappings_1['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
metadata_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
'%s-%s-2' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(metadata_model.committer_id, self.user_2_id)
commit_log_model = (
subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
'subtopicpage-%s-%s-2' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(commit_log_model.user_id, self.user_2_id)
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
subtopic_mappings_2 = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_2_id
).pseudonymizable_entity_mappings[models.NAMES.subtopic]
)
metadata_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
'%s-%s-2' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(
metadata_model.committer_id,
subtopic_mappings_2['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
commit_log_model = (
subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
'subtopicpage-%s-%s-2' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
self.assertEqual(
commit_log_model.user_id,
subtopic_mappings_2['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
class WipeoutServiceVerifyDeleteSubtopicModelsTests(test_utils.GenericTestBase):
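    """Tests verification of subtopic page models deletion."""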
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
TOP_1_ID = 'top_1_id'
SUBTOP_1_ID = 'subtop_1_id'
def setUp(self):
super(WipeoutServiceVerifyDeleteSubtopicModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.save_new_topic(self.TOP_1_ID, self.user_1_id)
self.save_new_subtopic(self.SUBTOP_1_ID, self.user_1_id, self.TOP_1_ID)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
def test_verification_is_successful(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
def test_verification_when_deletion_failed_is_unsuccessful(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
subtopic_models.SubtopicPageSnapshotMetadataModel(
id='%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID),
committer_id=self.user_1_id,
commit_message='123',
commit_type='create',
commit_cmds={}
).put_for_human()
self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
class WipeoutServiceDeleteSuggestionModelsTests(test_utils.GenericTestBase):
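    """Tests pseudonymization of suggestion models during wipeout."""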
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
VOICEOVER_1_ID = 'voiceover_1_id'
VOICEOVER_2_ID = 'voiceover_2_id'
EXP_1_ID = 'exp_1_id'
EXP_2_ID = 'exp_2_id'
def setUp(self):
super(WipeoutServiceDeleteSuggestionModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
suggestion_models.GeneralVoiceoverApplicationModel(
id=self.VOICEOVER_1_ID,
target_type=feconf.ENTITY_TYPE_EXPLORATION,
target_id=self.EXP_1_ID,
language_code='en',
status=suggestion_models.STATUS_IN_REVIEW,
content='Text',
filename='filename.txt',
author_id=self.user_1_id,
final_reviewer_id=self.user_2_id,
).put()
suggestion_models.GeneralVoiceoverApplicationModel(
id=self.VOICEOVER_2_ID,
target_type=feconf.ENTITY_TYPE_EXPLORATION,
target_id=self.EXP_2_ID,
language_code='en',
status=suggestion_models.STATUS_IN_REVIEW,
content='Text',
filename='filename.txt',
author_id=self.user_2_id,
final_reviewer_id=self.user_1_id,
).put()
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
def test_voiceover_application_is_pseudonymized(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
suggestion_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.suggestion]
)
voiceover_application_model_1 = (
suggestion_models.GeneralVoiceoverApplicationModel.get_by_id(
self.VOICEOVER_1_ID)
)
self.assertEqual(
voiceover_application_model_1.author_id,
suggestion_mappings[self.VOICEOVER_1_ID]
)
voiceover_application_model_2 = (
suggestion_models.GeneralVoiceoverApplicationModel.get_by_id(
self.VOICEOVER_2_ID)
)
self.assertEqual(
voiceover_application_model_2.final_reviewer_id,
suggestion_mappings[self.VOICEOVER_2_ID]
)
class WipeoutServiceVerifyDeleteSuggestionModelsTests(
test_utils.GenericTestBase):
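    """Tests verification of suggestion models deletion."""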
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
VOICEOVER_1_ID = 'voiceover_1_id'
VOICEOVER_2_ID = 'voiceover_2_id'
EXP_1_ID = 'exp_1_id'
EXP_2_ID = 'exp_2_id'
def setUp(self):
super(WipeoutServiceVerifyDeleteSuggestionModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
suggestion_models.GeneralVoiceoverApplicationModel(
id=self.VOICEOVER_1_ID,
target_type=feconf.ENTITY_TYPE_EXPLORATION,
target_id=self.EXP_1_ID,
language_code='en',
status=suggestion_models.STATUS_IN_REVIEW,
content='Text',
filename='filename.txt',
author_id=self.user_1_id,
final_reviewer_id=self.user_2_id,
).put()
suggestion_models.GeneralVoiceoverApplicationModel(
id=self.VOICEOVER_2_ID,
target_type=feconf.ENTITY_TYPE_EXPLORATION,
target_id=self.EXP_2_ID,
language_code='en',
status=suggestion_models.STATUS_IN_REVIEW,
content='Text',
filename='filename.txt',
author_id=self.user_2_id,
final_reviewer_id=self.user_1_id,
).put()
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
def test_verify_user_delete_when_user_is_deleted_returns_true(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
def test_verify_user_delete_when_user_is_not_deleted_returns_false(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
suggestion_models.GeneralVoiceoverApplicationModel(
id=self.VOICEOVER_1_ID,
target_type=feconf.ENTITY_TYPE_EXPLORATION,
target_id=self.EXP_1_ID,
language_code='en',
status=suggestion_models.STATUS_IN_REVIEW,
content='Text',
filename='filename.txt',
author_id=self.user_1_id,
final_reviewer_id=self.user_2_id,
).put()
self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
class WipeoutServiceDeleteTopicModelsTests(test_utils.GenericTestBase):
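    """Tests pseudonymization of topic models during wipeout."""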
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
TOP_1_ID = 'top_1_id'
TOP_2_ID = 'top_2_id'
def setUp(self):
super(WipeoutServiceDeleteTopicModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
user_services.update_user_role(
self.user_1_id, feconf.ROLE_ID_ADMIN)
user_services.update_user_role(
self.user_2_id, feconf.ROLE_ID_TOPIC_MANAGER)
self.user_1_actions = user_services.UserActionsInfo(self.user_1_id)
self.user_2_actions = user_services.UserActionsInfo(self.user_2_id)
self.save_new_topic(self.TOP_1_ID, self.user_1_id)
topic_services.assign_role(
self.user_1_actions,
self.user_1_actions,
topic_domain.ROLE_MANAGER,
self.TOP_1_ID)
topic_services.assign_role(
self.user_1_actions,
self.user_2_actions,
topic_domain.ROLE_MANAGER,
self.TOP_1_ID)
def test_one_topic_snapshot_metadata_is_pseudonymized(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
topic_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.topic]
)
metadata_model = (
topic_models.TopicSnapshotMetadataModel.get_by_id(
'%s-1' % self.TOP_1_ID)
)
self.assertEqual(
metadata_model.committer_id, topic_mappings[self.TOP_1_ID])
rights_metadata_model_1 = (
topic_models.TopicRightsSnapshotMetadataModel.get_by_id(
'%s-1' % self.TOP_1_ID)
)
self.assertEqual(
rights_metadata_model_1.committer_id, topic_mappings[self.TOP_1_ID])
self.assertEqual(
rights_metadata_model_1.content_user_ids, [])
self.assertEqual(rights_metadata_model_1.commit_cmds_user_ids, [])
rights_metadata_model_2 = (
topic_models.TopicRightsSnapshotMetadataModel.get_by_id(
'%s-2' % self.TOP_1_ID)
)
self.assertEqual(
rights_metadata_model_2.committer_id, topic_mappings[self.TOP_1_ID])
self.assertEqual(
rights_metadata_model_2.content_user_ids,
[topic_mappings[self.TOP_1_ID]])
self.assertEqual(
rights_metadata_model_2.commit_cmds_user_ids,
[topic_mappings[self.TOP_1_ID]])
def test_one_topic_snapshot_content_is_pseudonymized(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
topic_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.topic]
)
rights_content_model_1 = (
topic_models.TopicRightsSnapshotContentModel.get_by_id(
'%s-1' % self.TOP_1_ID)
)
self.assertEqual(
rights_content_model_1.content['manager_ids'], [])
rights_content_model_2 = (
topic_models.TopicRightsSnapshotContentModel.get_by_id(
'%s-3' % self.TOP_1_ID)
)
self.assertItemsEqual(
rights_content_model_2.content['manager_ids'],
[
topic_mappings[self.TOP_1_ID],
self.user_2_id
])
def test_one_topic_commit_log_is_pseudonymized(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
topic_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.topic]
)
commit_log_model_1 = (
topic_models.TopicCommitLogEntryModel.get_by_id(
'rights-%s-2' % self.TOP_1_ID)
)
self.assertEqual(
commit_log_model_1.user_id, topic_mappings[self.TOP_1_ID])
def test_one_topic_with_missing_snapshot_is_pseudonymized(self):
topic_models.TopicCommitLogEntryModel(
id='topic-%s-1' % self.TOP_2_ID,
topic_id=self.TOP_2_ID,
user_id=self.user_1_id,
commit_type='create_new',
commit_cmds=[{}],
post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
version=1
).put_for_human()
with self.capture_logging(min_level=logging.ERROR) as log_messages:
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertItemsEqual(
log_messages,
[
'[WIPEOUT] The commit log model \'TopicCommitLogEntryModel\' '
'and snapshot models [\'TopicSnapshotMetadataModel\', '
'\'TopicRightsSnapshotMetadataModel\'] IDs differ. '
'Snapshots without commit logs: [], '
'commit logs without snapshots: [u\'%s\'].' % self.TOP_2_ID
]
)
topic_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.topic]
)
metadata_model = (
topic_models.TopicSnapshotMetadataModel.get_by_id(
'%s-1' % self.TOP_1_ID
)
)
self.assertEqual(
metadata_model.committer_id, topic_mappings[self.TOP_1_ID])
commit_log_model_1 = (
topic_models.TopicCommitLogEntryModel.get_by_id(
'topic-%s-1' % self.TOP_1_ID
)
)
self.assertEqual(
commit_log_model_1.user_id, topic_mappings[self.TOP_1_ID])
commit_log_model_2 = (
topic_models.TopicCommitLogEntryModel.get_by_id(
'topic-%s-1' % self.TOP_2_ID
)
)
self.assertEqual(
commit_log_model_2.user_id, topic_mappings[self.TOP_2_ID])
def test_one_topic_when_the_deletion_is_repeated_is_pseudonymized(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
metadata_model = (
topic_models.TopicSnapshotMetadataModel.get_by_id(
'%s-1' % self.TOP_1_ID
)
)
metadata_model.committer_id = self.user_1_id
metadata_model.put_for_human()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
topic_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.topic]
)
metadata_model = (
topic_models.TopicSnapshotMetadataModel.get_by_id(
'%s-1' % self.TOP_1_ID
)
)
self.assertEqual(
metadata_model.committer_id, topic_mappings[self.TOP_1_ID])
commit_log_model = (
topic_models.TopicCommitLogEntryModel.get_by_id(
'topic-%s-1' % self.TOP_1_ID)
)
self.assertEqual(
commit_log_model.user_id, topic_mappings[self.TOP_1_ID])
def test_multiple_topics_are_pseudonymized(self):
self.save_new_topic(
self.TOP_2_ID,
self.user_1_id,
name='topic2',
url_fragment='topic-two')
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
topic_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.topic]
)
metadata_model = (
topic_models.TopicSnapshotMetadataModel.get_by_id(
'%s-1' % self.TOP_1_ID
)
)
self.assertEqual(
metadata_model.committer_id, topic_mappings[self.TOP_1_ID])
commit_log_model = (
topic_models.TopicCommitLogEntryModel.get_by_id(
'topic-%s-1' % self.TOP_1_ID
)
)
self.assertEqual(
commit_log_model.user_id, topic_mappings[self.TOP_1_ID])
metadata_model = (
topic_models.TopicSnapshotMetadataModel.get_by_id(
'%s-1' % self.TOP_2_ID
)
)
self.assertEqual(
metadata_model.committer_id, topic_mappings[self.TOP_2_ID])
commit_log_model = (
topic_models.TopicCommitLogEntryModel.get_by_id(
'topic-%s-1' % self.TOP_2_ID
)
)
self.assertEqual(
commit_log_model.user_id, topic_mappings[self.TOP_2_ID])
class WipeoutServiceVerifyDeleteTopicModelsTests(test_utils.GenericTestBase):
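    """Tests verification of topic models deletion."""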
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
TOP_1_ID = 'top_1_id'
TOP_2_ID = 'top_2_id'
SUBTOP_1_ID = 'subtop_1_id'
def setUp(self):
super(WipeoutServiceVerifyDeleteTopicModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.save_new_topic(self.TOP_1_ID, self.user_1_id)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
def test_verify_user_delete_when_user_is_deleted_returns_true(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
def test_verify_user_delete_when_user_is_not_deleted_returns_false(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
topic_models.TopicSnapshotMetadataModel(
id='%s-1' % self.TOP_1_ID,
committer_id=self.user_1_id,
commit_message='123',
commit_type='create',
commit_cmds={}
).put_for_human()
self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
class WipeoutServiceDeleteUserModelsTests(test_utils.GenericTestBase):
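    """Tests deletion of user models during wipeout."""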
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
COLLECTION_1_ID = 'col_1_id'
COLLECTION_2_ID = 'col_2_id'
EXPLORATION_1_ID = 'exp_1_id'
EXPLORATION_2_ID = 'exp_2_id'
def setUp(self):
super(WipeoutServiceDeleteUserModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
user_models.CompletedActivitiesModel(
id=self.user_2_id, exploration_ids=[], collection_ids=[]
).put()
user_models.IncompleteActivitiesModel(
id=self.user_2_id, exploration_ids=[], collection_ids=[]
).put()
user_models.LearnerPlaylistModel(
id=self.user_2_id, exploration_ids=[], collection_ids=[]
).put()
self.user_1_auth_id = self.get_auth_id_from_email(self.USER_1_EMAIL)
user_data_dict = {
'schema_version': 1,
'display_alias': 'display_alias',
'pin': '12345',
'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE],
'preferred_site_language_code': None,
'preferred_audio_language_code': None,
'user_id': self.user_1_id,
}
new_user_data_dict = {
'schema_version': 1,
'display_alias': 'display_alias3',
'pin': '12345',
'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE],
'preferred_site_language_code': None,
'preferred_audio_language_code': None,
'user_id': None,
}
self.modifiable_user_data = (
user_domain.ModifiableUserData.from_raw_dict(user_data_dict))
self.modifiable_new_user_data = (
user_domain.ModifiableUserData.from_raw_dict(new_user_data_dict))
user_services.update_multiple_users_data(
[self.modifiable_user_data])
self.modifiable_new_user_data.display_alias = 'name'
self.modifiable_new_user_data.pin = '123'
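        # Create a profile user that is linked to USER_1's auth account.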
self.profile_user_id = user_services.create_new_profiles(
self.user_1_auth_id, self.USER_1_EMAIL,
[self.modifiable_new_user_data]
)[0].user_id
user_models.CompletedActivitiesModel(
id=self.profile_user_id, exploration_ids=[], collection_ids=[]
).put()
user_models.IncompleteActivitiesModel(
id=self.profile_user_id, exploration_ids=[], collection_ids=[]
).put()
user_models.LearnerPlaylistModel(
id=self.profile_user_id, exploration_ids=[], collection_ids=[]
).put()
def test_delete_user_for_profile_user_is_successful(self):
wipeout_service.pre_delete_user(self.profile_user_id)
self.process_and_flush_pending_tasks()
self.assertIsNone(
auth_services.get_auth_id_from_user_id(self.profile_user_id))
self.assertTrue(
auth_services.verify_external_auth_associations_are_deleted(
self.profile_user_id))
self.assertIsNotNone(
user_models.CompletedActivitiesModel.get_by_id(
self.profile_user_id)
)
self.assertIsNotNone(
user_models.IncompleteActivitiesModel.get_by_id(
self.profile_user_id)
)
self.assertIsNotNone(
user_models.LearnerPlaylistModel.get_by_id(self.profile_user_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.profile_user_id))
self.assertIsNone(
user_models.CompletedActivitiesModel.get_by_id(
self.profile_user_id)
)
self.assertIsNone(
user_models.IncompleteActivitiesModel.get_by_id(
self.profile_user_id)
)
self.assertIsNone(
user_models.LearnerPlaylistModel.get_by_id(self.profile_user_id))
def test_delete_user_for_full_user_and_its_profiles_is_successful(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
self.assertIsNone(
auth_services.get_auth_id_from_user_id(self.user_1_id))
self.assertFalse(
auth_services.verify_external_auth_associations_are_deleted(
self.user_1_id))
self.assertIsNotNone(
user_models.CompletedActivitiesModel.get_by_id(
self.profile_user_id))
self.assertIsNotNone(
user_models.IncompleteActivitiesModel.get_by_id(
self.profile_user_id))
self.assertIsNotNone(
user_models.LearnerPlaylistModel.get_by_id(self.profile_user_id))
self.assertIsNotNone(
user_models.UserEmailPreferencesModel.get_by_id(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.profile_user_id))
self.assertIsNone(
user_models.CompletedActivitiesModel.get_by_id(
self.profile_user_id))
self.assertIsNone(
user_models.IncompleteActivitiesModel.get_by_id(
self.profile_user_id))
self.assertIsNone(
user_models.LearnerPlaylistModel.get_by_id(self.profile_user_id))
self.assertIsNone(
user_models.UserEmailPreferencesModel.get_by_id(self.user_1_id))
def test_delete_user_with_collection_and_exploration_is_successful(self):
self.save_new_valid_exploration(
self.EXPLORATION_1_ID,
self.user_1_id)
self.save_new_valid_collection(
self.COLLECTION_1_ID,
self.user_1_id,
exploration_id=self.EXPLORATION_1_ID)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
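        # The user's exploration and collection are already removed by the
        # pre-deletion tasks; only the user models remain.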
self.assertIsNone(
collection_models.CollectionModel.get_by_id(self.COLLECTION_1_ID))
self.assertIsNone(
exp_models.ExplorationModel.get_by_id(self.EXPLORATION_1_ID))
self.assertIsNotNone(
user_models.UserEmailPreferencesModel.get_by_id(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.profile_user_id))
self.assertIsNone(
user_models.UserEmailPreferencesModel.get_by_id(self.user_1_id))
def test_delete_user_with_collections_and_explorations_is_successful(self):
self.save_new_valid_exploration(
self.EXPLORATION_1_ID,
self.user_1_id)
self.save_new_valid_collection(
self.COLLECTION_1_ID,
self.user_1_id,
exploration_id=self.EXPLORATION_1_ID)
self.save_new_valid_exploration(
self.EXPLORATION_2_ID,
self.user_1_id)
self.save_new_valid_collection(
self.COLLECTION_2_ID,
self.user_1_id,
exploration_id=self.EXPLORATION_2_ID)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
self.assertIsNotNone(
user_models.UserEmailPreferencesModel.get_by_id(self.user_1_id))
self.assertIsNone(
collection_models.CollectionModel.get_by_id(self.COLLECTION_1_ID))
self.assertIsNone(
exp_models.ExplorationModel.get_by_id(self.EXPLORATION_1_ID))
self.assertIsNone(
collection_models.CollectionModel.get_by_id(self.COLLECTION_2_ID))
self.assertIsNone(
exp_models.ExplorationModel.get_by_id(self.EXPLORATION_2_ID))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.profile_user_id))
self.assertIsNone(
user_models.UserEmailPreferencesModel.get_by_id(self.user_1_id))
self.assertIsNone(
collection_models.CollectionModel.get_by_id(self.COLLECTION_1_ID))
self.assertIsNone(
exp_models.ExplorationModel.get_by_id(self.EXPLORATION_1_ID))
self.assertIsNone(
collection_models.CollectionModel.get_by_id(self.COLLECTION_2_ID))
self.assertIsNone(
exp_models.ExplorationModel.get_by_id(self.EXPLORATION_2_ID))
def test_delete_user_with_collection_and_exploration_repeated_is_successful(
self):
self.save_new_valid_exploration(
self.EXPLORATION_1_ID,
self.user_1_id)
self.save_new_valid_collection(
self.COLLECTION_1_ID,
self.user_1_id,
exploration_id=self.EXPLORATION_1_ID)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
self.assertIsNotNone(
user_models.UserEmailPreferencesModel.get_by_id(self.user_1_id))
self.assertIsNone(
collection_models.CollectionModel.get_by_id(self.COLLECTION_1_ID))
self.assertIsNone(
exp_models.ExplorationModel.get_by_id(self.EXPLORATION_1_ID))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertIsNone(
user_models.UserEmailPreferencesModel.get_by_id(self.user_1_id))
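        # Re-create the same exploration and collection, then delete the user again.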
self.save_new_valid_exploration(
self.EXPLORATION_1_ID,
self.user_1_id)
self.save_new_valid_collection(
self.COLLECTION_1_ID,
self.user_1_id,
exploration_id=self.EXPLORATION_1_ID)
self.assertIsNotNone(
collection_models.CollectionModel.get_by_id(self.COLLECTION_1_ID))
self.assertIsNotNone(
exp_models.ExplorationModel.get_by_id(self.EXPLORATION_1_ID))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertIsNone(
collection_models.CollectionModel.get_by_id(self.COLLECTION_1_ID))
self.assertIsNone(
exp_models.ExplorationModel.get_by_id(self.EXPLORATION_1_ID))
def test_delete_user_with_multiple_users_is_successful(self):
wipeout_service.pre_delete_user(self.user_2_id)
self.process_and_flush_pending_tasks()
self.assertIsNotNone(
user_models.UserEmailPreferencesModel.get_by_id(self.user_2_id))
self.assertIsNotNone(
user_models.CompletedActivitiesModel.get_by_id(self.user_2_id))
self.assertIsNotNone(
user_models.IncompleteActivitiesModel.get_by_id(self.user_2_id))
self.assertIsNotNone(
user_models.LearnerPlaylistModel.get_by_id(self.user_2_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
self.assertIsNone(
user_models.UserEmailPreferencesModel.get_by_id(self.user_2_id))
self.assertIsNone(
user_models.CompletedActivitiesModel.get_by_id(self.user_2_id))
self.assertIsNone(
user_models.IncompleteActivitiesModel.get_by_id(self.user_2_id))
self.assertIsNone(
user_models.LearnerPlaylistModel.get_by_id(self.user_2_id))
def test_after_deletion_user_and_its_profiles_cannot_do_anything(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.profile_user_id))
self.assertIsNone(user_services.get_user_settings(self.user_1_id))
self.assertIsNone(user_services.get_user_settings(self.profile_user_id))
with self.assertRaisesRegexp(Exception, 'User not found.'):
user_services.update_preferred_language_codes(
self.user_1_id, ['en'])
with self.assertRaisesRegexp(Exception, 'User not found.'):
user_services.update_preferred_language_codes(
self.profile_user_id, ['en'])
class WipeoutServiceVerifyDeleteUserModelsTests(test_utils.GenericTestBase):
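    """Tests that the deletion of user models can be verified."""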
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
def setUp(self):
super(WipeoutServiceVerifyDeleteUserModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
self.user_1_auth_id = self.get_auth_id_from_email(self.USER_1_EMAIL)
user_data_dict = {
'schema_version': 1,
'display_alias': 'display_alias',
'pin': '12345',
'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE],
'preferred_site_language_code': None,
'preferred_audio_language_code': None,
'user_id': self.user_1_id,
}
new_user_data_dict = {
'schema_version': 1,
'display_alias': 'display_alias3',
'pin': '12345',
'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE],
'preferred_site_language_code': None,
'preferred_audio_language_code': None,
'user_id': None,
}
self.modifiable_user_data = (
user_domain.ModifiableUserData.from_raw_dict(user_data_dict))
self.modifiable_new_user_data = (
user_domain.ModifiableUserData.from_raw_dict(new_user_data_dict))
user_services.update_multiple_users_data(
[self.modifiable_user_data])
self.modifiable_new_user_data.display_alias = 'name'
self.modifiable_new_user_data.pin = '123'
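        # Create a profile user that is linked to USER_1's auth account.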
self.profile_user_id = user_services.create_new_profiles(
self.user_1_auth_id, self.USER_1_EMAIL,
[self.modifiable_new_user_data]
)[0].user_id
wipeout_service.pre_delete_user(self.user_2_id)
self.process_and_flush_pending_tasks()
def test_verify_user_delete_when_profile_user_deleted_returns_true(self):
wipeout_service.pre_delete_user(self.profile_user_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.profile_user_id))
self.assertTrue(
wipeout_service.verify_user_deleted(self.profile_user_id))
def test_verify_user_delete_when_user_is_deleted_returns_true(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.profile_user_id)
)
self.assertTrue(
wipeout_service.verify_user_deleted(self.profile_user_id))
def test_verify_user_delete_when_user_is_not_deleted_returns_false(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_2_id))
user_models.CompletedActivitiesModel(
id=self.user_2_id, exploration_ids=[], collection_ids=[]
).put()
user_models.IncompleteActivitiesModel(
id=self.user_2_id, exploration_ids=[], collection_ids=[]
).put()
user_models.LearnerPlaylistModel(
id=self.user_2_id, exploration_ids=[], collection_ids=[]
).put()
self.assertFalse(wipeout_service.verify_user_deleted(self.user_2_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_2_id))
def test_verify_user_delete_when_profile_user_not_deleted_is_false(self):
wipeout_service.pre_delete_user(self.profile_user_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.profile_user_id))
self.assertTrue(
wipeout_service.verify_user_deleted(self.profile_user_id))
user_models.CompletedActivitiesModel(
id=self.profile_user_id, exploration_ids=[], collection_ids=[]
).put()
user_models.IncompleteActivitiesModel(
id=self.profile_user_id, exploration_ids=[], collection_ids=[]
).put()
user_models.LearnerPlaylistModel(
id=self.profile_user_id, exploration_ids=[], collection_ids=[]
).put()
self.assertFalse(
wipeout_service.verify_user_deleted(self.profile_user_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.profile_user_id))
self.assertTrue(
wipeout_service.verify_user_deleted(self.profile_user_id))
def test_verify_user_delete_when_external_auth_associations_are_not_deleted(
self):
self.assertFalse(
auth_services.verify_external_auth_associations_are_deleted(
self.user_1_id))
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
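        # Stub out external auth deletion so the associations remain and the
        # deletion cannot be verified.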
delete_external_auth_associations_swap = self.swap_to_always_return(
auth_services, 'delete_external_auth_associations')
with delete_external_auth_associations_swap:
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))
| true
| true
|
790770e901bb26f50dd54f9075e5f71496273ebc
| 483
|
py
|
Python
|
market_place/users/migrations/0003_auto_20160708_0036.py
|
otherland8/market-place
|
ebf21a77cf9b3998e270ebd2d4422d7ce997e472
|
[
"MIT"
] | null | null | null |
market_place/users/migrations/0003_auto_20160708_0036.py
|
otherland8/market-place
|
ebf21a77cf9b3998e270ebd2d4422d7ce997e472
|
[
"MIT"
] | null | null | null |
market_place/users/migrations/0003_auto_20160708_0036.py
|
otherland8/market-place
|
ebf21a77cf9b3998e270ebd2d4422d7ce997e472
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-07-07 21:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20160706_2232'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='picture_path',
field=models.CharField(blank=True, max_length=128, null=True),
),
]
| 23
| 74
| 0.6294
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20160706_2232'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='picture_path',
field=models.CharField(blank=True, max_length=128, null=True),
),
]
| true
| true
|
7907712acf0f4f60945654d9a3fc61cc31231794
| 3,502
|
py
|
Python
|
workstation-backend/workstation/settings.py
|
cindy21td/WorkStation
|
fd8cfd86d7538f55cfbeee8e17c273546691f5a8
|
[
"MIT"
] | null | null | null |
workstation-backend/workstation/settings.py
|
cindy21td/WorkStation
|
fd8cfd86d7538f55cfbeee8e17c273546691f5a8
|
[
"MIT"
] | null | null | null |
workstation-backend/workstation/settings.py
|
cindy21td/WorkStation
|
fd8cfd86d7538f55cfbeee8e17c273546691f5a8
|
[
"MIT"
] | null | null | null |
"""
Django settings for workstation project.
Generated by 'django-admin startproject' using Django 4.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
from dotenv import load_dotenv
from pathlib import Path
import os
load_dotenv()
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = str(os.getenv("SECRET_KEY"))
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"rest_framework",
"account",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework_simplejwt.authentication.JWTAuthentication",
),
"DEFAULT_RENDERER_CLASSES": ("rest_framework.renderers.JSONRenderer",),
}
ROOT_URLCONF = "workstation.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "workstation.wsgi.application"
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = "static/"
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
| 25.75
| 91
| 0.705026
|
from dotenv import load_dotenv
from pathlib import Path
import os
load_dotenv()
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = str(os.getenv("SECRET_KEY"))
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"rest_framework",
"account",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework_simplejwt.authentication.JWTAuthentication",
),
"DEFAULT_RENDERER_CLASSES": ("rest_framework.renderers.JSONRenderer",),
}
ROOT_URLCONF = "workstation.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "workstation.wsgi.application"
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = "static/"
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
| true
| true
|
7907713a1697df73607a8f7af9e4231797bc9ceb
| 6,830
|
py
|
Python
|
venv/Lib/site-packages/pandas/tests/tslibs/test_parsing.py
|
OliviaNabbosa89/Disaster_Responses
|
1e66d77c303cec685dfc2ca94f4fca4cc9400570
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/pandas/tests/tslibs/test_parsing.py
|
OliviaNabbosa89/Disaster_Responses
|
1e66d77c303cec685dfc2ca94f4fca4cc9400570
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/pandas/tests/tslibs/test_parsing.py
|
OliviaNabbosa89/Disaster_Responses
|
1e66d77c303cec685dfc2ca94f4fca4cc9400570
|
[
"MIT"
] | null | null | null |
"""
Tests for Timestamp parsing, aimed at pandas/_libs/tslibs/parsing.pyx
"""
from datetime import datetime
import re
from dateutil.parser import parse
import numpy as np
import pytest
from pandas._libs.tslibs import parsing
from pandas._libs.tslibs.parsing import parse_time_string
import pandas.util._test_decorators as td
import pandas._testing as tm
def test_parse_time_string():
(parsed, reso) = parse_time_string("4Q1984")
(parsed_lower, reso_lower) = parse_time_string("4q1984")
assert reso == reso_lower
assert parsed == parsed_lower
def test_parse_time_string_invalid_type():
# Raise on invalid input, don't just return it
msg = "Argument 'arg' has incorrect type (expected str, got tuple)"
with pytest.raises(TypeError, match=re.escape(msg)):
parse_time_string((4, 5))
@pytest.mark.parametrize(
"dashed,normal", [("1988-Q2", "1988Q2"), ("2Q-1988", "2Q1988")]
)
def test_parse_time_quarter_with_dash(dashed, normal):
# see gh-9688
(parsed_dash, reso_dash) = parse_time_string(dashed)
(parsed, reso) = parse_time_string(normal)
assert parsed_dash == parsed
assert reso_dash == reso
@pytest.mark.parametrize("dashed", ["-2Q1992", "2-Q1992", "4-4Q1992"])
def test_parse_time_quarter_with_dash_error(dashed):
msg = f"Unknown datetime string format, unable to parse: {dashed}"
with pytest.raises(parsing.DateParseError, match=msg):
parse_time_string(dashed)
@pytest.mark.parametrize(
"date_string,expected",
[
("123.1234", False),
("-50000", False),
("999", False),
("m", False),
("T", False),
("Mon Sep 16, 2013", True),
("2012-01-01", True),
("01/01/2012", True),
("01012012", True),
("0101", True),
("1-1", True),
],
)
def test_does_not_convert_mixed_integer(date_string, expected):
assert parsing._does_string_look_like_datetime(date_string) is expected
@pytest.mark.parametrize(
"date_str,kwargs,msg",
[
(
"2013Q5",
dict(),
(
"Incorrect quarterly string is given, "
"quarter must be between 1 and 4: 2013Q5"
),
),
# see gh-5418
(
"2013Q1",
dict(freq="INVLD-L-DEC-SAT"),
(
"Unable to retrieve month information "
"from given freq: INVLD-L-DEC-SAT"
),
),
],
)
def test_parsers_quarterly_with_freq_error(date_str, kwargs, msg):
with pytest.raises(parsing.DateParseError, match=msg):
parsing.parse_time_string(date_str, **kwargs)
@pytest.mark.parametrize(
"date_str,freq,expected",
[
("2013Q2", None, datetime(2013, 4, 1)),
("2013Q2", "A-APR", datetime(2012, 8, 1)),
("2013-Q2", "A-DEC", datetime(2013, 4, 1)),
],
)
def test_parsers_quarterly_with_freq(date_str, freq, expected):
result, _ = parsing.parse_time_string(date_str, freq=freq)
assert result == expected
@pytest.mark.parametrize(
"date_str", ["2Q 2005", "2Q-200A", "2Q-200", "22Q2005", "2Q200.", "6Q-20"]
)
def test_parsers_quarter_invalid(date_str):
if date_str == "6Q-20":
msg = (
"Incorrect quarterly string is given, quarter "
f"must be between 1 and 4: {date_str}"
)
else:
msg = f"Unknown datetime string format, unable to parse: {date_str}"
with pytest.raises(ValueError, match=msg):
parsing.parse_time_string(date_str)
@pytest.mark.parametrize(
"date_str,expected",
[("201101", datetime(2011, 1, 1, 0, 0)), ("200005", datetime(2000, 5, 1, 0, 0))],
)
def test_parsers_month_freq(date_str, expected):
result, _ = parsing.parse_time_string(date_str, freq="M")
assert result == expected
@td.skip_if_not_us_locale
@pytest.mark.parametrize(
"string,fmt",
[
("20111230", "%Y%m%d"),
("2011-12-30", "%Y-%m-%d"),
("30-12-2011", "%d-%m-%Y"),
("2011-12-30 00:00:00", "%Y-%m-%d %H:%M:%S"),
("2011-12-30T00:00:00", "%Y-%m-%dT%H:%M:%S"),
("2011-12-30 00:00:00.000000", "%Y-%m-%d %H:%M:%S.%f"),
],
)
def test_guess_datetime_format_with_parseable_formats(string, fmt):
result = parsing._guess_datetime_format(string)
assert result == fmt
@pytest.mark.parametrize("dayfirst,expected", [(True, "%d/%m/%Y"), (False, "%m/%d/%Y")])
def test_guess_datetime_format_with_dayfirst(dayfirst, expected):
ambiguous_string = "01/01/2011"
result = parsing._guess_datetime_format(ambiguous_string, dayfirst=dayfirst)
assert result == expected
@td.skip_if_has_locale
@pytest.mark.parametrize(
"string,fmt",
[
("30/Dec/2011", "%d/%b/%Y"),
("30/December/2011", "%d/%B/%Y"),
("30/Dec/2011 00:00:00", "%d/%b/%Y %H:%M:%S"),
],
)
def test_guess_datetime_format_with_locale_specific_formats(string, fmt):
result = parsing._guess_datetime_format(string)
assert result == fmt
@pytest.mark.parametrize(
"invalid_dt",
[
"2013",
"01/2013",
"12:00:00",
"1/1/1/1",
"this_is_not_a_datetime",
"51a",
9,
datetime(2011, 1, 1),
],
)
def test_guess_datetime_format_invalid_inputs(invalid_dt):
# A datetime string must include a year, month and a day for it to be
# guessable, in addition to being a string that looks like a datetime.
assert parsing._guess_datetime_format(invalid_dt) is None
@pytest.mark.parametrize(
"string,fmt",
[
("2011-1-1", "%Y-%m-%d"),
("1/1/2011", "%m/%d/%Y"),
("30-1-2011", "%d-%m-%Y"),
("2011-1-1 0:0:0", "%Y-%m-%d %H:%M:%S"),
("2011-1-3T00:00:0", "%Y-%m-%dT%H:%M:%S"),
("2011-1-1 00:00:00", "%Y-%m-%d %H:%M:%S"),
],
)
def test_guess_datetime_format_no_padding(string, fmt):
# see gh-11142
result = parsing._guess_datetime_format(string)
assert result == fmt
def test_try_parse_dates():
arr = np.array(["5/1/2000", "6/1/2000", "7/1/2000"], dtype=object)
result = parsing.try_parse_dates(arr, dayfirst=True)
expected = np.array([parse(d, dayfirst=True) for d in arr])
tm.assert_numpy_array_equal(result, expected)
def test_parse_time_string_check_instance_type_raise_exception():
# issue 20684
msg = "Argument 'arg' has incorrect type (expected str, got tuple)"
with pytest.raises(TypeError, match=re.escape(msg)):
parse_time_string((1, 2, 3))
result = parse_time_string("2019")
expected = (datetime(2019, 1, 1), "year")
assert result == expected
| 29.825328
| 89
| 0.599122
|
from datetime import datetime
import re
from dateutil.parser import parse
import numpy as np
import pytest
from pandas._libs.tslibs import parsing
from pandas._libs.tslibs.parsing import parse_time_string
import pandas.util._test_decorators as td
import pandas._testing as tm
def test_parse_time_string():
(parsed, reso) = parse_time_string("4Q1984")
(parsed_lower, reso_lower) = parse_time_string("4q1984")
assert reso == reso_lower
assert parsed == parsed_lower
def test_parse_time_string_invalid_type():
msg = "Argument 'arg' has incorrect type (expected str, got tuple)"
with pytest.raises(TypeError, match=re.escape(msg)):
parse_time_string((4, 5))
@pytest.mark.parametrize(
"dashed,normal", [("1988-Q2", "1988Q2"), ("2Q-1988", "2Q1988")]
)
def test_parse_time_quarter_with_dash(dashed, normal):
# see gh-9688
(parsed_dash, reso_dash) = parse_time_string(dashed)
(parsed, reso) = parse_time_string(normal)
assert parsed_dash == parsed
assert reso_dash == reso
@pytest.mark.parametrize("dashed", ["-2Q1992", "2-Q1992", "4-4Q1992"])
def test_parse_time_quarter_with_dash_error(dashed):
msg = f"Unknown datetime string format, unable to parse: {dashed}"
with pytest.raises(parsing.DateParseError, match=msg):
parse_time_string(dashed)
@pytest.mark.parametrize(
"date_string,expected",
[
("123.1234", False),
("-50000", False),
("999", False),
("m", False),
("T", False),
("Mon Sep 16, 2013", True),
("2012-01-01", True),
("01/01/2012", True),
("01012012", True),
("0101", True),
("1-1", True),
],
)
def test_does_not_convert_mixed_integer(date_string, expected):
assert parsing._does_string_look_like_datetime(date_string) is expected
@pytest.mark.parametrize(
"date_str,kwargs,msg",
[
(
"2013Q5",
dict(),
(
"Incorrect quarterly string is given, "
"quarter must be between 1 and 4: 2013Q5"
),
),
# see gh-5418
(
"2013Q1",
dict(freq="INVLD-L-DEC-SAT"),
(
"Unable to retrieve month information "
"from given freq: INVLD-L-DEC-SAT"
),
),
],
)
def test_parsers_quarterly_with_freq_error(date_str, kwargs, msg):
with pytest.raises(parsing.DateParseError, match=msg):
parsing.parse_time_string(date_str, **kwargs)
@pytest.mark.parametrize(
"date_str,freq,expected",
[
("2013Q2", None, datetime(2013, 4, 1)),
("2013Q2", "A-APR", datetime(2012, 8, 1)),
("2013-Q2", "A-DEC", datetime(2013, 4, 1)),
],
)
def test_parsers_quarterly_with_freq(date_str, freq, expected):
result, _ = parsing.parse_time_string(date_str, freq=freq)
assert result == expected
@pytest.mark.parametrize(
"date_str", ["2Q 2005", "2Q-200A", "2Q-200", "22Q2005", "2Q200.", "6Q-20"]
)
def test_parsers_quarter_invalid(date_str):
if date_str == "6Q-20":
msg = (
"Incorrect quarterly string is given, quarter "
f"must be between 1 and 4: {date_str}"
)
else:
msg = f"Unknown datetime string format, unable to parse: {date_str}"
with pytest.raises(ValueError, match=msg):
parsing.parse_time_string(date_str)
@pytest.mark.parametrize(
"date_str,expected",
[("201101", datetime(2011, 1, 1, 0, 0)), ("200005", datetime(2000, 5, 1, 0, 0))],
)
def test_parsers_month_freq(date_str, expected):
result, _ = parsing.parse_time_string(date_str, freq="M")
assert result == expected
@td.skip_if_not_us_locale
@pytest.mark.parametrize(
"string,fmt",
[
("20111230", "%Y%m%d"),
("2011-12-30", "%Y-%m-%d"),
("30-12-2011", "%d-%m-%Y"),
("2011-12-30 00:00:00", "%Y-%m-%d %H:%M:%S"),
("2011-12-30T00:00:00", "%Y-%m-%dT%H:%M:%S"),
("2011-12-30 00:00:00.000000", "%Y-%m-%d %H:%M:%S.%f"),
],
)
def test_guess_datetime_format_with_parseable_formats(string, fmt):
result = parsing._guess_datetime_format(string)
assert result == fmt
@pytest.mark.parametrize("dayfirst,expected", [(True, "%d/%m/%Y"), (False, "%m/%d/%Y")])
def test_guess_datetime_format_with_dayfirst(dayfirst, expected):
ambiguous_string = "01/01/2011"
result = parsing._guess_datetime_format(ambiguous_string, dayfirst=dayfirst)
assert result == expected
@td.skip_if_has_locale
@pytest.mark.parametrize(
"string,fmt",
[
("30/Dec/2011", "%d/%b/%Y"),
("30/December/2011", "%d/%B/%Y"),
("30/Dec/2011 00:00:00", "%d/%b/%Y %H:%M:%S"),
],
)
def test_guess_datetime_format_with_locale_specific_formats(string, fmt):
result = parsing._guess_datetime_format(string)
assert result == fmt
@pytest.mark.parametrize(
"invalid_dt",
[
"2013",
"01/2013",
"12:00:00",
"1/1/1/1",
"this_is_not_a_datetime",
"51a",
9,
datetime(2011, 1, 1),
],
)
def test_guess_datetime_format_invalid_inputs(invalid_dt):
# A datetime string must include a year, month and a day for it to be
# guessable, in addition to being a string that looks like a datetime.
assert parsing._guess_datetime_format(invalid_dt) is None
@pytest.mark.parametrize(
"string,fmt",
[
("2011-1-1", "%Y-%m-%d"),
("1/1/2011", "%m/%d/%Y"),
("30-1-2011", "%d-%m-%Y"),
("2011-1-1 0:0:0", "%Y-%m-%d %H:%M:%S"),
("2011-1-3T00:00:0", "%Y-%m-%dT%H:%M:%S"),
("2011-1-1 00:00:00", "%Y-%m-%d %H:%M:%S"),
],
)
def test_guess_datetime_format_no_padding(string, fmt):
# see gh-11142
result = parsing._guess_datetime_format(string)
assert result == fmt
def test_try_parse_dates():
arr = np.array(["5/1/2000", "6/1/2000", "7/1/2000"], dtype=object)
result = parsing.try_parse_dates(arr, dayfirst=True)
expected = np.array([parse(d, dayfirst=True) for d in arr])
tm.assert_numpy_array_equal(result, expected)
def test_parse_time_string_check_instance_type_raise_exception():
# issue 20684
msg = "Argument 'arg' has incorrect type (expected str, got tuple)"
with pytest.raises(TypeError, match=re.escape(msg)):
parse_time_string((1, 2, 3))
result = parse_time_string("2019")
expected = (datetime(2019, 1, 1), "year")
assert result == expected
| true
| true
|
79077298425f95eefa6233036c8a6843840ee10f
| 2,515
|
py
|
Python
|
plot_conditionals_with_tis.py
|
samiemostafavi/conditional-latency-probability-prediction
|
a196f2db8c6f30f8613797b6a23bffd77a01e1e3
|
[
"MIT"
] | null | null | null |
plot_conditionals_with_tis.py
|
samiemostafavi/conditional-latency-probability-prediction
|
a196f2db8c6f30f8613797b6a23bffd77a01e1e3
|
[
"MIT"
] | null | null | null |
plot_conditionals_with_tis.py
|
samiemostafavi/conditional-latency-probability-prediction
|
a196f2db8c6f30f8613797b6a23bffd77a01e1e3
|
[
"MIT"
] | null | null | null |
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pyarrow.compute as pc
import matplotlib.pyplot as plt
import seaborn as sns
from pr3d.nonbayesian import ConditionalGammaEVM
# load dataset first
file_addresses = ['dataset_onehop_processed.parquet']
table = pa.concat_tables(
pq.read_table(
file_address,columns=None,
) for file_address in file_addresses
)
df = table.to_pandas()
print(df)
# load the trained model
dtype = 'float64'
conditional_delay_model = ConditionalGammaEVM(
h5_addr = "onehop_tis_model.h5",
)
# find n most common queue_length occurances
n = 3
values_count = df[['queue_length']].value_counts()[:n].index.tolist()
print("{0} most common queue states: {1}".format(n,values_count))
# divide the service delay into n segments based on quantiles
m = 5
service_delays = np.squeeze(df[['service_delay']].to_numpy())
quants = np.linspace(0, 1, num=m+1)
intervals = [ (quant,quants[idx+1]) for idx, quant in enumerate(quants) if (idx+1)<len(quants) ]
print("{0} longer_delay_prob intervals: {1}".format(n,intervals))
#sns.set_palette("rocket")
# plot the conditional distributions of them
fig, axes = plt.subplots(nrows=n, ncols=m, figsize=(m*4,n*4))
for i in range(n):
for j in range(m):
ax = axes[i,j]
# take the empirical samples
conditional_df = df[
(df.queue_length==values_count[i][0]) &
(df.longer_delay_prob>=intervals[j][0]) &
(df.longer_delay_prob<intervals[j][1])
]
# sample the predictor with x (conditions) from the empirical data
X = np.squeeze(conditional_df[['queue_length','longer_delay_prob']].to_numpy())
conditional_samples = conditional_delay_model.sample_n(
x = X,
random_generator=np.random.default_rng(0),
)
# insert it to the dataset
conditional_df['predicted distribution'] = conditional_samples
conditional_df.rename(columns = {'end2end_delay':'empirical distribution'}, inplace = True)
# plot
sns.histplot(
conditional_df[['empirical distribution','predicted distribution']],
kde=True,
ax=ax,
stat="density",
).set(title="x={}, interval={}, count={}".format(
values_count[i],
["{:0.2f}".format(inter) for inter in intervals[j]],
len(conditional_df))
)
ax.title.set_size(10)
fig.tight_layout()
plt.savefig('conditional_delay_tis.png')
| 31.049383
| 99
| 0.662425
|
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pyarrow.compute as pc
import matplotlib.pyplot as plt
import seaborn as sns
from pr3d.nonbayesian import ConditionalGammaEVM
file_addresses = ['dataset_onehop_processed.parquet']
table = pa.concat_tables(
pq.read_table(
file_address,columns=None,
) for file_address in file_addresses
)
df = table.to_pandas()
print(df)
dtype = 'float64'
conditional_delay_model = ConditionalGammaEVM(
h5_addr = "onehop_tis_model.h5",
)
n = 3
values_count = df[['queue_length']].value_counts()[:n].index.tolist()
print("{0} most common queue states: {1}".format(n,values_count))
m = 5
service_delays = np.squeeze(df[['service_delay']].to_numpy())
quants = np.linspace(0, 1, num=m+1)
intervals = [ (quant,quants[idx+1]) for idx, quant in enumerate(quants) if (idx+1)<len(quants) ]
print("{0} longer_delay_prob intervals: {1}".format(n,intervals))
fig, axes = plt.subplots(nrows=n, ncols=m, figsize=(m*4,n*4))
for i in range(n):
for j in range(m):
ax = axes[i,j]
conditional_df = df[
(df.queue_length==values_count[i][0]) &
(df.longer_delay_prob>=intervals[j][0]) &
(df.longer_delay_prob<intervals[j][1])
]
X = np.squeeze(conditional_df[['queue_length','longer_delay_prob']].to_numpy())
conditional_samples = conditional_delay_model.sample_n(
x = X,
random_generator=np.random.default_rng(0),
)
conditional_df['predicted distribution'] = conditional_samples
conditional_df.rename(columns = {'end2end_delay':'empirical distribution'}, inplace = True)
sns.histplot(
conditional_df[['empirical distribution','predicted distribution']],
kde=True,
ax=ax,
stat="density",
).set(title="x={}, interval={}, count={}".format(
values_count[i],
["{:0.2f}".format(inter) for inter in intervals[j]],
len(conditional_df))
)
ax.title.set_size(10)
fig.tight_layout()
plt.savefig('conditional_delay_tis.png')
| true
| true
|
790772e2333aaf3d9ac9b0a5be9f73acb7efcb30
| 353
|
py
|
Python
|
wagtailstreamforms/utils/requests.py
|
LogicalAddress/wagtailstreamforms
|
d2e9519de643f18e2d879b2cd648f532c9673580
|
[
"MIT"
] | null | null | null |
wagtailstreamforms/utils/requests.py
|
LogicalAddress/wagtailstreamforms
|
d2e9519de643f18e2d879b2cd648f532c9673580
|
[
"MIT"
] | null | null | null |
wagtailstreamforms/utils/requests.py
|
LogicalAddress/wagtailstreamforms
|
d2e9519de643f18e2d879b2cd648f532c9673580
|
[
"MIT"
] | 1
|
2020-05-13T16:26:38.000Z
|
2020-05-13T16:26:38.000Z
|
from wagtailstreamforms.models import Form
def get_form_instance_from_request(request):
""" Get the form class from the request. """
form_id = request.POST.get("form_id")
if form_id and form_id.isdigit():
try:
return Form.objects.get(pk=int(form_id))
except Form.DoesNotExist:
pass
return None
| 25.214286
| 52
| 0.657224
|
from wagtailstreamforms.models import Form
def get_form_instance_from_request(request):
form_id = request.POST.get("form_id")
if form_id and form_id.isdigit():
try:
return Form.objects.get(pk=int(form_id))
except Form.DoesNotExist:
pass
return None
| true
| true
|