hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f724d089bc635ac3025f0392ab99da036fdef499 | 3,249 | py | Python | main.py | deeso/slow-hitter | 1fd3c7effaf532a828f30908715157b188ef5884 | [
"Apache-2.0"
] | null | null | null | main.py | deeso/slow-hitter | 1fd3c7effaf532a828f30908715157b188ef5884 | [
"Apache-2.0"
] | null | null | null | main.py | deeso/slow-hitter | 1fd3c7effaf532a828f30908715157b188ef5884 | [
"Apache-2.0"
] | null | null | null | import logging
import argparse
import sys
from slow.hitter import HitterService as Hitter
from slow.hitter import KnownHosts
from slow.etl import ETL, DEFAULT_NAMES, DEFAULT_PATTERNS, DEFAULT_CONFIG
from slow.mongo_backend import MongoConnection
parser = argparse.ArgumentParser(description='Start syslog-grok-mongo captures.')
parser.add_argument('-name', type=str, default=Hitter.NAME,
help='name of the service')
# Mongo configs
parser.add_argument('-muri', type=str, default='mongo://127.0.0.1:27017',
help='mongo uri')
parser.add_argument('-mdb', type=str, default=MongoConnection.DB_NAME,
help='mongo db name')
# ETL stuff
parser.add_argument('-cpdir', type=str, default=DEFAULT_PATTERNS,
help='directory containing custom grok patterns directory')
parser.add_argument('-names', type=str, default=DEFAULT_NAMES,
help='file containing all the names for rule patterns')
parser.add_argument('-gconfig', type=str, default=DEFAULT_CONFIG,
help='Grok frontend configuration for rule chains')
# Hitter stuff
parser.add_argument('-broker_uri', type=str, default=Hitter.BROKER_URI,
help='kombu queue address')
parser.add_argument('-broker_queue', type=str, default=Hitter.BROKER_QUEUE,
help='kombu queue name to publish to')
parser.add_argument('-buffer_uri', type=str, default=Hitter.BROKER_URI,
help='buffer uri for results')
parser.add_argument('-buffer_queue', type=str, default=Hitter.LOGSTASH_QUEUE,
help='kombu queue for results')
parser.add_argument('-known_hosts', type=str, default=KnownHosts.HOST_FILE,
help='hosts file to load')
parser.add_argument('-msg_limit', type=int, default=100,
help='limit the number of messages')
V = 'log levels: INFO: %d, DEBUG: %d, WARRNING: %d' % (logging.INFO,
logging.DEBUG,
logging.WARNING)
parser.add_argument('-log_level', type=int, default=logging.DEBUG,
help=V)
if __name__ == "__main__":
args = parser.parse_args()
logging.getLogger().setLevel(args.log_level)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
logging.getLogger().addHandler(ch)
mongo_backend = MongoConnection(uri=args.muri,
db_name=args.mdb)
ETL.setup_grokker(args)
etl_backend = ETL
service = Hitter(broker_uri=args.broker_uri,
broker_queue=args.broker_queue,
hosts_file=args.known_hosts,
mongo_backend=mongo_backend,
etl_backend=etl_backend,
store_uri=args.buffer_uri,
store_queue=args.buffer_queue,
msg_limit=args.msg_limit)
try:
logging.debug("Starting the syslog listener")
service.serve_forever(poll_interval=0.5)
except (IOError, SystemExit):
raise
except KeyboardInterrupt:
raise
| 39.621951 | 81 | 0.634349 | import logging
import argparse
import sys
from slow.hitter import HitterService as Hitter
from slow.hitter import KnownHosts
from slow.etl import ETL, DEFAULT_NAMES, DEFAULT_PATTERNS, DEFAULT_CONFIG
from slow.mongo_backend import MongoConnection
parser = argparse.ArgumentParser(description='Start syslog-grok-mongo captures.')
parser.add_argument('-name', type=str, default=Hitter.NAME,
help='name of the service')
parser.add_argument('-muri', type=str, default='mongo://127.0.0.1:27017',
help='mongo uri')
parser.add_argument('-mdb', type=str, default=MongoConnection.DB_NAME,
help='mongo db name')
parser.add_argument('-cpdir', type=str, default=DEFAULT_PATTERNS,
help='directory containing custom grok patterns directory')
parser.add_argument('-names', type=str, default=DEFAULT_NAMES,
help='file containing all the names for rule patterns')
parser.add_argument('-gconfig', type=str, default=DEFAULT_CONFIG,
help='Grok frontend configuration for rule chains')
parser.add_argument('-broker_uri', type=str, default=Hitter.BROKER_URI,
help='kombu queue address')
parser.add_argument('-broker_queue', type=str, default=Hitter.BROKER_QUEUE,
help='kombu queue name to publish to')
parser.add_argument('-buffer_uri', type=str, default=Hitter.BROKER_URI,
help='buffer uri for results')
parser.add_argument('-buffer_queue', type=str, default=Hitter.LOGSTASH_QUEUE,
help='kombu queue for results')
parser.add_argument('-known_hosts', type=str, default=KnownHosts.HOST_FILE,
help='hosts file to load')
parser.add_argument('-msg_limit', type=int, default=100,
help='limit the number of messages')
V = 'log levels: INFO: %d, DEBUG: %d, WARRNING: %d' % (logging.INFO,
logging.DEBUG,
logging.WARNING)
parser.add_argument('-log_level', type=int, default=logging.DEBUG,
help=V)
if __name__ == "__main__":
args = parser.parse_args()
logging.getLogger().setLevel(args.log_level)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
logging.getLogger().addHandler(ch)
mongo_backend = MongoConnection(uri=args.muri,
db_name=args.mdb)
ETL.setup_grokker(args)
etl_backend = ETL
service = Hitter(broker_uri=args.broker_uri,
broker_queue=args.broker_queue,
hosts_file=args.known_hosts,
mongo_backend=mongo_backend,
etl_backend=etl_backend,
store_uri=args.buffer_uri,
store_queue=args.buffer_queue,
msg_limit=args.msg_limit)
try:
logging.debug("Starting the syslog listener")
service.serve_forever(poll_interval=0.5)
except (IOError, SystemExit):
raise
except KeyboardInterrupt:
raise
| true | true |
f724d09925aef79360ace23f3ceeeecc66e5dc5d | 21,173 | py | Python | infra/libs/gerrit_api/test/gerrit_api_test.py | eunchong/infra | ce3728559112bfb3e8b32137eada517aec6d22f9 | [
"BSD-3-Clause"
] | null | null | null | infra/libs/gerrit_api/test/gerrit_api_test.py | eunchong/infra | ce3728559112bfb3e8b32137eada517aec6d22f9 | [
"BSD-3-Clause"
] | null | null | null | infra/libs/gerrit_api/test/gerrit_api_test.py | eunchong/infra | ce3728559112bfb3e8b32137eada517aec6d22f9 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for gerrit_api.py"""
import copy
import json
import mock
import requests
import tempfile
import time
import unittest
from infra.libs import gerrit_api
GERRIT_JSON_HEADER = ')]}\'\n'
HEADERS = {
'Accept': 'application/json',
'Accept-encoding': 'gzip',
'Authorization': 'Basic Z2l0LWNvbW1pdC1ib3RAY2hyb21pdW0ub3JnOnNlY3JldA==',
}
HEADERS_WITH_CONTENT_TYPE = HEADERS.copy()
HEADERS_WITH_CONTENT_TYPE['Content-Type'] = 'application/json;charset=UTF-8'
TEST_PAYLOAD = {
'labels': {
'Code-Review': 1,
},
'message': 'Test message.',
'notify': 'NONE',
}
TEST_PAYLOAD_LABELS_ONLY = {
'labels': {
'Code-Review': 1,
},
'notify': 'OWNER',
}
TEST_CHANGE_INFO = {
'id': 'project~branch~12345~change',
'change_id': 12345,
'created': '2014-02-11 12:14:28.135200000',
'updated': '2014-03-11 00:20:08.946000000',
'current_revision': 'THIRD',
'owner': {
'name': 'Some Person',
},
'revisions': {
'THIRD': {
'_number': 3,
},
'SECOND': {
'_number': 2,
},
'FIRST': {
'_number': 1,
},
},
'labels': {
'Commit-Queue': {
'recommended': { '_account_id': 1 }
},
'Test-Label': {
'disliked': { '_account_id' : 42 }
},
'Code-Review': {
'approved': { '_account_id': 2 }
},
},
'messages': [
{
'id': 1,
'author': 'test-user@test.org',
'date': '2014-02-11 12:10:14.311200000',
'message': 'MESSAGE1',
},
{
'id': 2,
'date': '2014-02-11 12:11:14.311200000',
'message': 'MESSAGE2',
'_revision_number': 2,
},
],
}
MOCK_AUTH=('git-commit-bot@chromium.org', 'secret')
def _create_mock_return(content, code):
r = requests.Response()
r._content = content
r.status_code = code
return r
# TODO(akuegel): Add more test cases and remove the pragma no covers.
class GerritAgentTestCase(unittest.TestCase):
def setUp(self):
self.gerrit = gerrit_api.Gerrit('chromium-review.googlesource.com',
gerrit_api.Credentials(auth=MOCK_AUTH))
self.gerrit_read_only = gerrit_api.Gerrit(
'chromium-review.googlesource.com',
gerrit_api.Credentials(auth=MOCK_AUTH),
read_only=True)
@mock.patch.object(requests.Session, 'request')
def test_request_no_leading_slash(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s[]' % GERRIT_JSON_HEADER, 200)
result = self.gerrit._request(method='GET',
request_path='changes/?q=query:no_results')
mock_method.assert_called_once_with(
data=None,
method='GET',
params=None,
url=('https://chromium-review.googlesource.com/a/changes/'
'?q=query:no_results'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEqual(result, (200, []))
@mock.patch.object(gerrit_api.Gerrit, '_sleep')
@mock.patch.object(time, 'time')
@mock.patch.object(requests.Session, 'request')
def test_request_throttled(self, mock_method, time_mock_method, sleep_mock):
gerrit_throttled = gerrit_api.Gerrit('chromium-review.googlesource.com',
gerrit_api.Credentials(auth=MOCK_AUTH),
0.1)
mock_method.return_value = _create_mock_return(None, 404)
time_mock_method.return_value = 100
gerrit_throttled._request(method='GET', request_path='/accounts/self')
# Call it twice to test the throttling.
gerrit_throttled._request(method='GET', request_path='/accounts/self')
sleep_mock.assert_called_once_with(0)
time_mock_method.return_value = 101
# Call it again after exceeding the throttle to cover the other branch.
gerrit_throttled._request(method='GET', request_path='/accounts/self')
@mock.patch.object(requests.Session, 'request')
def test_get_account(self, mock_method):
mock_method.return_value = _create_mock_return(
('%s{"_account_id":1000096,"name":"John Doe","email":'
'"john.doe@test.com","username":"john"}') % GERRIT_JSON_HEADER,
200)
result = self.gerrit.get_account('self')
mock_method.assert_called_once_with(
data=None,
method='GET',
params=None,
url='https://chromium-review.googlesource.com/a/accounts/self',
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
expected_result = {
'_account_id': 1000096,
'name': 'John Doe',
'email': 'john.doe@test.com',
'username': 'john'
}
self.assertEqual(result, expected_result)
@mock.patch.object(requests.Session, 'request')
def test_get_account_404(self, mock_method):
mock_method.return_value = _create_mock_return(None, 404)
result = self.gerrit.get_account('does.not@exist.com')
mock_method.assert_called_once_with(
data=None,
method='GET',
params=None,
url=('https://chromium-review.googlesource.com'
'/a/accounts/does.not@exist.com'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEqual(result, None)
@mock.patch.object(requests.Session, 'request')
def test_get_account_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 201)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.get_account, 'self')
@mock.patch.object(requests.Session, 'request')
def test_list_group_members(self, mock_method):
mock_method.return_value = _create_mock_return(
('%s[{"_account_id":1000057,"name":"Jane Roe","email":'
'"jane.roe@example.com","username": "jane"}]') % GERRIT_JSON_HEADER,
200)
result = self.gerrit.list_group_members('test-group')
mock_method.assert_called_once_with(
data=None,
method='GET',
params=None,
url=('https://chromium-review.googlesource.com/a/groups/'
'test-group/members'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
expected_result = [{
'_account_id': 1000057,
'name': 'Jane Roe',
'email': 'jane.roe@example.com',
'username': 'jane'
}]
self.assertEqual(result, expected_result)
@mock.patch.object(requests.Session, 'request')
def test_list_group_members_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 400)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.list_group_members, 'test-group')
def test_list_group_members_wrong_group(self):
self.assertRaises(ValueError, self.gerrit.list_group_members, 'a/b/c')
@mock.patch.object(requests.Session, 'request')
def test_add_group_members(self, mock_method):
mock_method.return_value = _create_mock_return(
('%s[{"_account_id":1000057,"name":"Jane Roe","email":'
'"jane.roe@example.com","username": "jane"}]') % GERRIT_JSON_HEADER,
200)
members = ['jane.roe@example.com']
payload = { 'members': members }
result = self.gerrit.add_group_members('test-group', members)
mock_method.assert_called_once_with(
data=json.dumps(payload),
method='POST',
params=None,
url=('https://chromium-review.googlesource.com/a/groups/'
'test-group/members.add'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
expected_result = [{
'_account_id': 1000057,
'name': 'Jane Roe',
'email': 'jane.roe@example.com',
'username': 'jane'
}]
self.assertEqual(result, expected_result)
@mock.patch.object(requests.Session, 'request')
def test_add_group_members_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 400)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.add_group_members, 'test-group', ['a@b.com'])
def test_add_group_members_wrong_group(self):
self.assertRaises(ValueError, self.gerrit.add_group_members, 'a/b/c', [])
def test_add_group_members_read_only(self):
self.assertRaises(gerrit_api.AccessViolationException,
self.gerrit_read_only.add_group_members,
'test-group', ['a@b.com'])
@mock.patch.object(requests.Session, 'request')
def test_delete_group_members(self, mock_method):
mock_method.return_value = _create_mock_return(
('%s[{"_account_id":1000057,"name":"Jane Roe","email":'
'"jane.roe@example.com","username": "jane"}]') % GERRIT_JSON_HEADER,
204)
members = ['jane.roe@example.com']
payload = { 'members': members }
result = self.gerrit.delete_group_members('test-group', members)
mock_method.assert_called_once_with(
data=json.dumps(payload),
method='POST',
params=None,
url=('https://chromium-review.googlesource.com/a/groups/'
'test-group/members.delete'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
expected_result = [{
'_account_id': 1000057,
'name': 'Jane Roe',
'email': 'jane.roe@example.com',
'username': 'jane'
}]
self.assertEqual(result, expected_result)
@mock.patch.object(requests.Session, 'request')
def test_delete_group_members_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 400)
self.assertRaises(
gerrit_api.UnexpectedResponseException,
self.gerrit.delete_group_members, 'test-group', ['a@b.com'])
def test_delete_group_members_wrong_group(self):
self.assertRaises(ValueError, self.gerrit.delete_group_members, 'a/b/c', [])
def test_delete_group_members_read_only(self):
self.assertRaises(gerrit_api.AccessViolationException,
self.gerrit_read_only.delete_group_members,
'test-group', ['a@b.com'])
@mock.patch.object(requests.Session, 'request')
def test_set_project_parent(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s"parent"' % GERRIT_JSON_HEADER, 200)
result = self.gerrit.set_project_parent('project', 'parent')
payload = {
'parent': 'parent',
'commit_message': 'Changing parent project to parent'
}
mock_method.assert_called_once_with(
data=json.dumps(payload),
method='PUT',
params=None,
url=('https://chromium-review.googlesource.com/a/projects/'
'project/parent'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
self.assertEqual(result, 'parent')
@mock.patch.object(requests.Session, 'request')
def test_set_project_parent_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 400)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.set_project_parent, 'a', 'b')
@mock.patch.object(requests.Session, 'request')
def test_query(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps([TEST_CHANGE_INFO])), 200)
result = self.gerrit.query(project='test',
with_labels=False, with_revisions=False,
owner='test@chromium.org')
mock_method.assert_called_once_with(
data=None,
method='GET',
params={'q':'project:test owner:test@chromium.org', 'o': ['MESSAGES']},
url='https://chromium-review.googlesource.com/a/changes/',
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, [TEST_CHANGE_INFO])
@mock.patch.object(requests.Session, 'request')
def test_query_with_query_name(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps([TEST_CHANGE_INFO])), 200)
result = self.gerrit.query(project='test', query_name='pending_cls',
owner='1012155')
mock_method.assert_called_once_with(
data=None,
method='GET',
params={'q':'project:test query:pending_cls owner:1012155',
'o': ['CURRENT_REVISION', 'LABELS', 'MESSAGES']},
url='https://chromium-review.googlesource.com/a/changes/',
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, [TEST_CHANGE_INFO])
@mock.patch.object(requests.Session, 'request')
def test_query_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 400)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.query, 'a', with_messages=False,
with_labels=False, with_revisions=False)
@mock.patch.object(requests.Session, 'request')
def test_get_issue(self, mock_method):
# By default, Gerrit doesn't return revisions data.
info_without_revisions = TEST_CHANGE_INFO.copy()
info_without_revisions.pop('revisions')
info_without_revisions.pop('current_revision')
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps(info_without_revisions)), 200)
result = self.gerrit.get_issue('test/project~weird/branch~hash')
mock_method.assert_called_once_with(
data=None,
method='GET',
params=None,
url=('https://chromium-review.googlesource.com/a/changes/'
'test%2Fproject~weird%2Fbranch~hash/detail'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, info_without_revisions)
@mock.patch.object(requests.Session, 'request')
def test_get_issue_with_files(self, mock_method):
info_with_files = copy.deepcopy(TEST_CHANGE_INFO)
current = info_with_files['current_revision']
info_with_files['revisions'][current]['files'] = {
"first.py": {
"lines_deleted": 8,
"size_delta": -412,
"size": 7782
},
"first.java": {
"lines_inserted": 1,
"size_delta": 23,
"size": 6762
},
}
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps(info_with_files)), 200)
result = self.gerrit.get_issue('test/project~weird/branch~hash',
current_files=True)
mock_method.assert_called_once_with(
data=None,
method='GET',
params={'o': ['CURRENT_FILES', 'CURRENT_REVISION']},
url=('https://chromium-review.googlesource.com/a/changes/'
'test%2Fproject~weird%2Fbranch~hash/detail'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, info_with_files)
@mock.patch.object(requests.Session, 'request')
def test_get_issue_with_files_and_revisions(self, mock_method):
info = copy.deepcopy(TEST_CHANGE_INFO)
current = info['current_revision']
info['revisions'][current]['files'] = {
"first.py": {
"lines_deleted": 8,
"size_delta": -412,
"size": 7782
},
"first.java": {
"lines_inserted": 1,
"size_delta": 23,
"size": 6762
},
}
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps(info)), 200)
result = self.gerrit.get_issue('test/project~weird/branch~hash',
current_files=True,
revisions='ALL_REVISIONS')
mock_method.assert_called_once_with(
data=None,
method='GET',
params={'o': ['CURRENT_FILES', 'ALL_REVISIONS']},
url=('https://chromium-review.googlesource.com/a/changes/'
'test%2Fproject~weird%2Fbranch~hash/detail'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, info)
@mock.patch.object(requests.Session, 'request')
def test_get_issue_with_all_revisions(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps(TEST_CHANGE_INFO)), 200)
result = self.gerrit.get_issue('test/project~weird/branch~hash',
revisions='ALL_REVISIONS')
mock_method.assert_called_once_with(
data=None,
method='GET',
params={'o': ['ALL_REVISIONS']},
url=('https://chromium-review.googlesource.com/a/changes/'
'test%2Fproject~weird%2Fbranch~hash/detail'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, TEST_CHANGE_INFO)
@mock.patch.object(requests.Session, 'request')
def test_get_issue_not_found(self, mock_method):
mock_method.return_value = _create_mock_return('Not found', 404)
result = self.gerrit.get_issue('unknown~branch~hash')
mock_method.assert_called_once_with(
data=None,
method='GET',
params=None,
url=('https://chromium-review.googlesource.com/a/changes/'
'unknown~branch~hash/detail'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, None)
@mock.patch.object(requests.Session, 'request')
def test_get_issue_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 500)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.get_issue, 'issue')
@mock.patch.object(requests.Session, 'request')
def test_set_review(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER,
json.dumps({'labels':{'Code-Review':1}})), 200)
self.gerrit.set_review('change_id', 'revision_id', 'Test message.',
{ 'Code-Review': 1 })
mock_method.assert_called_once_with(
data=json.dumps(TEST_PAYLOAD),
method='POST',
params=None,
url=('https://chromium-review.googlesource.com/a/changes/'
'change_id/revisions/revision_id/review'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
@mock.patch.object(requests.Session, 'request')
def test_set_review_only_label(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER,
json.dumps({'labels':{'Code-Review':1}})), 200)
self.gerrit.set_review('change_id', 'revision_id',
labels={ 'Code-Review': 1 }, notify='OWNER')
mock_method.assert_called_once_with(
data=json.dumps(TEST_PAYLOAD_LABELS_ONLY),
method='POST',
params=None,
url=('https://chromium-review.googlesource.com/a/changes/'
'change_id/revisions/revision_id/review'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
@mock.patch.object(requests.Session, 'request')
def test_set_review_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 500)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.set_review, 'change_id', 'revision_id')
@mock.patch.object(requests.Session, 'request')
def test_submit_revision(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER,
json.dumps({'status': 'MERGE'})), 200)
self.gerrit.submit_revision('change_id', 'current_revision_id')
mock_method.assert_called_once_with(
data=json.dumps({'wait_for_merge': True}),
method='POST',
params=None,
url=('https://chromium-review.googlesource.com/a/changes/'
'change_id/revisions/current_revision_id/submit'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
@mock.patch.object(requests.Session, 'request')
def test_submit_revision_revision_conflict(self, mock_method):
mock_method.return_value = _create_mock_return(
'revision revision_id is not current revision', 409)
self.assertRaises(gerrit_api.RevisionConflictException,
self.gerrit.submit_revision, 'change_id', 'revision_id')
@mock.patch.object(requests.Session, 'request')
def test_submit_revision_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 500)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.submit_revision, 'change_id', 'revision_id')
| 39.064576 | 80 | 0.654277 |
import copy
import json
import mock
import requests
import tempfile
import time
import unittest
from infra.libs import gerrit_api
GERRIT_JSON_HEADER = ')]}\'\n'
HEADERS = {
'Accept': 'application/json',
'Accept-encoding': 'gzip',
'Authorization': 'Basic Z2l0LWNvbW1pdC1ib3RAY2hyb21pdW0ub3JnOnNlY3JldA==',
}
HEADERS_WITH_CONTENT_TYPE = HEADERS.copy()
HEADERS_WITH_CONTENT_TYPE['Content-Type'] = 'application/json;charset=UTF-8'
TEST_PAYLOAD = {
'labels': {
'Code-Review': 1,
},
'message': 'Test message.',
'notify': 'NONE',
}
TEST_PAYLOAD_LABELS_ONLY = {
'labels': {
'Code-Review': 1,
},
'notify': 'OWNER',
}
TEST_CHANGE_INFO = {
'id': 'project~branch~12345~change',
'change_id': 12345,
'created': '2014-02-11 12:14:28.135200000',
'updated': '2014-03-11 00:20:08.946000000',
'current_revision': 'THIRD',
'owner': {
'name': 'Some Person',
},
'revisions': {
'THIRD': {
'_number': 3,
},
'SECOND': {
'_number': 2,
},
'FIRST': {
'_number': 1,
},
},
'labels': {
'Commit-Queue': {
'recommended': { '_account_id': 1 }
},
'Test-Label': {
'disliked': { '_account_id' : 42 }
},
'Code-Review': {
'approved': { '_account_id': 2 }
},
},
'messages': [
{
'id': 1,
'author': 'test-user@test.org',
'date': '2014-02-11 12:10:14.311200000',
'message': 'MESSAGE1',
},
{
'id': 2,
'date': '2014-02-11 12:11:14.311200000',
'message': 'MESSAGE2',
'_revision_number': 2,
},
],
}
MOCK_AUTH=('git-commit-bot@chromium.org', 'secret')
def _create_mock_return(content, code):
r = requests.Response()
r._content = content
r.status_code = code
return r
# TODO(akuegel): Add more test cases and remove the pragma no covers.
class GerritAgentTestCase(unittest.TestCase):
def setUp(self):
self.gerrit = gerrit_api.Gerrit('chromium-review.googlesource.com',
gerrit_api.Credentials(auth=MOCK_AUTH))
self.gerrit_read_only = gerrit_api.Gerrit(
'chromium-review.googlesource.com',
gerrit_api.Credentials(auth=MOCK_AUTH),
read_only=True)
@mock.patch.object(requests.Session, 'request')
def test_request_no_leading_slash(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s[]' % GERRIT_JSON_HEADER, 200)
result = self.gerrit._request(method='GET',
request_path='changes/?q=query:no_results')
mock_method.assert_called_once_with(
data=None,
method='GET',
params=None,
url=('https://chromium-review.googlesource.com/a/changes/'
'?q=query:no_results'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEqual(result, (200, []))
@mock.patch.object(gerrit_api.Gerrit, '_sleep')
@mock.patch.object(time, 'time')
@mock.patch.object(requests.Session, 'request')
def test_request_throttled(self, mock_method, time_mock_method, sleep_mock):
gerrit_throttled = gerrit_api.Gerrit('chromium-review.googlesource.com',
gerrit_api.Credentials(auth=MOCK_AUTH),
0.1)
mock_method.return_value = _create_mock_return(None, 404)
time_mock_method.return_value = 100
gerrit_throttled._request(method='GET', request_path='/accounts/self')
# Call it twice to test the throttling.
gerrit_throttled._request(method='GET', request_path='/accounts/self')
sleep_mock.assert_called_once_with(0)
time_mock_method.return_value = 101
# Call it again after exceeding the throttle to cover the other branch.
gerrit_throttled._request(method='GET', request_path='/accounts/self')
@mock.patch.object(requests.Session, 'request')
def test_get_account(self, mock_method):
mock_method.return_value = _create_mock_return(
('%s{"_account_id":1000096,"name":"John Doe","email":'
'"john.doe@test.com","username":"john"}') % GERRIT_JSON_HEADER,
200)
result = self.gerrit.get_account('self')
mock_method.assert_called_once_with(
data=None,
method='GET',
params=None,
url='https://chromium-review.googlesource.com/a/accounts/self',
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
expected_result = {
'_account_id': 1000096,
'name': 'John Doe',
'email': 'john.doe@test.com',
'username': 'john'
}
self.assertEqual(result, expected_result)
@mock.patch.object(requests.Session, 'request')
def test_get_account_404(self, mock_method):
mock_method.return_value = _create_mock_return(None, 404)
result = self.gerrit.get_account('does.not@exist.com')
mock_method.assert_called_once_with(
data=None,
method='GET',
params=None,
url=('https://chromium-review.googlesource.com'
'/a/accounts/does.not@exist.com'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEqual(result, None)
@mock.patch.object(requests.Session, 'request')
def test_get_account_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 201)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.get_account, 'self')
@mock.patch.object(requests.Session, 'request')
def test_list_group_members(self, mock_method):
mock_method.return_value = _create_mock_return(
('%s[{"_account_id":1000057,"name":"Jane Roe","email":'
'"jane.roe@example.com","username": "jane"}]') % GERRIT_JSON_HEADER,
200)
result = self.gerrit.list_group_members('test-group')
mock_method.assert_called_once_with(
data=None,
method='GET',
params=None,
url=('https://chromium-review.googlesource.com/a/groups/'
'test-group/members'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
expected_result = [{
'_account_id': 1000057,
'name': 'Jane Roe',
'email': 'jane.roe@example.com',
'username': 'jane'
}]
self.assertEqual(result, expected_result)
@mock.patch.object(requests.Session, 'request')
def test_list_group_members_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 400)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.list_group_members, 'test-group')
def test_list_group_members_wrong_group(self):
self.assertRaises(ValueError, self.gerrit.list_group_members, 'a/b/c')
@mock.patch.object(requests.Session, 'request')
def test_add_group_members(self, mock_method):
mock_method.return_value = _create_mock_return(
('%s[{"_account_id":1000057,"name":"Jane Roe","email":'
'"jane.roe@example.com","username": "jane"}]') % GERRIT_JSON_HEADER,
200)
members = ['jane.roe@example.com']
payload = { 'members': members }
result = self.gerrit.add_group_members('test-group', members)
mock_method.assert_called_once_with(
data=json.dumps(payload),
method='POST',
params=None,
url=('https://chromium-review.googlesource.com/a/groups/'
'test-group/members.add'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
expected_result = [{
'_account_id': 1000057,
'name': 'Jane Roe',
'email': 'jane.roe@example.com',
'username': 'jane'
}]
self.assertEqual(result, expected_result)
@mock.patch.object(requests.Session, 'request')
def test_add_group_members_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 400)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.add_group_members, 'test-group', ['a@b.com'])
def test_add_group_members_wrong_group(self):
self.assertRaises(ValueError, self.gerrit.add_group_members, 'a/b/c', [])
def test_add_group_members_read_only(self):
self.assertRaises(gerrit_api.AccessViolationException,
self.gerrit_read_only.add_group_members,
'test-group', ['a@b.com'])
@mock.patch.object(requests.Session, 'request')
def test_delete_group_members(self, mock_method):
mock_method.return_value = _create_mock_return(
('%s[{"_account_id":1000057,"name":"Jane Roe","email":'
'"jane.roe@example.com","username": "jane"}]') % GERRIT_JSON_HEADER,
204)
members = ['jane.roe@example.com']
payload = { 'members': members }
result = self.gerrit.delete_group_members('test-group', members)
mock_method.assert_called_once_with(
data=json.dumps(payload),
method='POST',
params=None,
url=('https://chromium-review.googlesource.com/a/groups/'
'test-group/members.delete'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
expected_result = [{
'_account_id': 1000057,
'name': 'Jane Roe',
'email': 'jane.roe@example.com',
'username': 'jane'
}]
self.assertEqual(result, expected_result)
@mock.patch.object(requests.Session, 'request')
def test_delete_group_members_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 400)
self.assertRaises(
gerrit_api.UnexpectedResponseException,
self.gerrit.delete_group_members, 'test-group', ['a@b.com'])
def test_delete_group_members_wrong_group(self):
self.assertRaises(ValueError, self.gerrit.delete_group_members, 'a/b/c', [])
def test_delete_group_members_read_only(self):
self.assertRaises(gerrit_api.AccessViolationException,
self.gerrit_read_only.delete_group_members,
'test-group', ['a@b.com'])
@mock.patch.object(requests.Session, 'request')
def test_set_project_parent(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s"parent"' % GERRIT_JSON_HEADER, 200)
result = self.gerrit.set_project_parent('project', 'parent')
payload = {
'parent': 'parent',
'commit_message': 'Changing parent project to parent'
}
mock_method.assert_called_once_with(
data=json.dumps(payload),
method='PUT',
params=None,
url=('https://chromium-review.googlesource.com/a/projects/'
'project/parent'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
self.assertEqual(result, 'parent')
@mock.patch.object(requests.Session, 'request')
def test_set_project_parent_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 400)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.set_project_parent, 'a', 'b')
@mock.patch.object(requests.Session, 'request')
def test_query(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps([TEST_CHANGE_INFO])), 200)
result = self.gerrit.query(project='test',
with_labels=False, with_revisions=False,
owner='test@chromium.org')
mock_method.assert_called_once_with(
data=None,
method='GET',
params={'q':'project:test owner:test@chromium.org', 'o': ['MESSAGES']},
url='https://chromium-review.googlesource.com/a/changes/',
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, [TEST_CHANGE_INFO])
@mock.patch.object(requests.Session, 'request')
def test_query_with_query_name(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps([TEST_CHANGE_INFO])), 200)
result = self.gerrit.query(project='test', query_name='pending_cls',
owner='1012155')
mock_method.assert_called_once_with(
data=None,
method='GET',
params={'q':'project:test query:pending_cls owner:1012155',
'o': ['CURRENT_REVISION', 'LABELS', 'MESSAGES']},
url='https://chromium-review.googlesource.com/a/changes/',
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, [TEST_CHANGE_INFO])
@mock.patch.object(requests.Session, 'request')
def test_query_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 400)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.query, 'a', with_messages=False,
with_labels=False, with_revisions=False)
@mock.patch.object(requests.Session, 'request')
def test_get_issue(self, mock_method):
# By default, Gerrit doesn't return revisions data.
info_without_revisions = TEST_CHANGE_INFO.copy()
info_without_revisions.pop('revisions')
info_without_revisions.pop('current_revision')
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps(info_without_revisions)), 200)
result = self.gerrit.get_issue('test/project~weird/branch~hash')
mock_method.assert_called_once_with(
data=None,
method='GET',
params=None,
url=('https://chromium-review.googlesource.com/a/changes/'
'test%2Fproject~weird%2Fbranch~hash/detail'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, info_without_revisions)
@mock.patch.object(requests.Session, 'request')
def test_get_issue_with_files(self, mock_method):
info_with_files = copy.deepcopy(TEST_CHANGE_INFO)
current = info_with_files['current_revision']
info_with_files['revisions'][current]['files'] = {
"first.py": {
"lines_deleted": 8,
"size_delta": -412,
"size": 7782
},
"first.java": {
"lines_inserted": 1,
"size_delta": 23,
"size": 6762
},
}
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps(info_with_files)), 200)
result = self.gerrit.get_issue('test/project~weird/branch~hash',
current_files=True)
mock_method.assert_called_once_with(
data=None,
method='GET',
params={'o': ['CURRENT_FILES', 'CURRENT_REVISION']},
url=('https://chromium-review.googlesource.com/a/changes/'
'test%2Fproject~weird%2Fbranch~hash/detail'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, info_with_files)
@mock.patch.object(requests.Session, 'request')
def test_get_issue_with_files_and_revisions(self, mock_method):
info = copy.deepcopy(TEST_CHANGE_INFO)
current = info['current_revision']
info['revisions'][current]['files'] = {
"first.py": {
"lines_deleted": 8,
"size_delta": -412,
"size": 7782
},
"first.java": {
"lines_inserted": 1,
"size_delta": 23,
"size": 6762
},
}
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps(info)), 200)
result = self.gerrit.get_issue('test/project~weird/branch~hash',
current_files=True,
revisions='ALL_REVISIONS')
mock_method.assert_called_once_with(
data=None,
method='GET',
params={'o': ['CURRENT_FILES', 'ALL_REVISIONS']},
url=('https://chromium-review.googlesource.com/a/changes/'
'test%2Fproject~weird%2Fbranch~hash/detail'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, info)
@mock.patch.object(requests.Session, 'request')
def test_get_issue_with_all_revisions(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps(TEST_CHANGE_INFO)), 200)
result = self.gerrit.get_issue('test/project~weird/branch~hash',
revisions='ALL_REVISIONS')
mock_method.assert_called_once_with(
data=None,
method='GET',
params={'o': ['ALL_REVISIONS']},
url=('https://chromium-review.googlesource.com/a/changes/'
'test%2Fproject~weird%2Fbranch~hash/detail'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, TEST_CHANGE_INFO)
@mock.patch.object(requests.Session, 'request')
def test_get_issue_not_found(self, mock_method):
mock_method.return_value = _create_mock_return('Not found', 404)
result = self.gerrit.get_issue('unknown~branch~hash')
mock_method.assert_called_once_with(
data=None,
method='GET',
params=None,
url=('https://chromium-review.googlesource.com/a/changes/'
'unknown~branch~hash/detail'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, None)
@mock.patch.object(requests.Session, 'request')
def test_get_issue_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 500)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.get_issue, 'issue')
@mock.patch.object(requests.Session, 'request')
def test_set_review(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER,
json.dumps({'labels':{'Code-Review':1}})), 200)
self.gerrit.set_review('change_id', 'revision_id', 'Test message.',
{ 'Code-Review': 1 })
mock_method.assert_called_once_with(
data=json.dumps(TEST_PAYLOAD),
method='POST',
params=None,
url=('https://chromium-review.googlesource.com/a/changes/'
'change_id/revisions/revision_id/review'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
@mock.patch.object(requests.Session, 'request')
def test_set_review_only_label(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER,
json.dumps({'labels':{'Code-Review':1}})), 200)
self.gerrit.set_review('change_id', 'revision_id',
labels={ 'Code-Review': 1 }, notify='OWNER')
mock_method.assert_called_once_with(
data=json.dumps(TEST_PAYLOAD_LABELS_ONLY),
method='POST',
params=None,
url=('https://chromium-review.googlesource.com/a/changes/'
'change_id/revisions/revision_id/review'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
@mock.patch.object(requests.Session, 'request')
def test_set_review_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 500)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.set_review, 'change_id', 'revision_id')
@mock.patch.object(requests.Session, 'request')
def test_submit_revision(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER,
json.dumps({'status': 'MERGE'})), 200)
self.gerrit.submit_revision('change_id', 'current_revision_id')
mock_method.assert_called_once_with(
data=json.dumps({'wait_for_merge': True}),
method='POST',
params=None,
url=('https://chromium-review.googlesource.com/a/changes/'
'change_id/revisions/current_revision_id/submit'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
@mock.patch.object(requests.Session, 'request')
def test_submit_revision_revision_conflict(self, mock_method):
mock_method.return_value = _create_mock_return(
'revision revision_id is not current revision', 409)
self.assertRaises(gerrit_api.RevisionConflictException,
self.gerrit.submit_revision, 'change_id', 'revision_id')
@mock.patch.object(requests.Session, 'request')
def test_submit_revision_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 500)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.submit_revision, 'change_id', 'revision_id')
| true | true |
f724d0f2012370079322010867b41194ad671123 | 330 | py | Python | Python-3/basic_examples/strings/trim-string.py | ghiloufibelgacem/jornaldev | b9b27f9f7da595892520314b4ed1d2675556310a | [
"MIT"
] | 1,139 | 2018-05-09T11:54:36.000Z | 2022-03-31T06:52:50.000Z | Python-3/basic_examples/strings/trim-string.py | ghiloufibelgacem/jornaldev | b9b27f9f7da595892520314b4ed1d2675556310a | [
"MIT"
] | 56 | 2018-06-20T03:52:53.000Z | 2022-02-09T22:57:41.000Z | Python-3/basic_examples/strings/trim-string.py | ghiloufibelgacem/jornaldev | b9b27f9f7da595892520314b4ed1d2675556310a | [
"MIT"
] | 2,058 | 2018-05-09T09:32:17.000Z | 2022-03-29T13:19:42.000Z | s1 = ' abc '
# Show the three trimming methods on the padded string defined above.
print(f'String =\'{s1}\'')
print(f'After Removing Leading Whitespaces String =\'{s1.lstrip()}\'')
print(f'After Removing Trailing Whitespaces String =\'{s1.rstrip()}\'')
print(f'After Trimming Whitespaces String =\'{s1.strip()}\'')
# string with new line and tab: strip() removes surrounding whitespace of
# every kind (spaces, newlines, tabs) but leaves internal whitespace alone
s1 = ' X\n Y \nZ \t'
print(s1)
print(s1.strip())
| 19.411765 | 71 | 0.633333 | s1 = ' abc '
print(f'String =\'{s1}\'')
print(f'After Removing Leading Whitespaces String =\'{s1.lstrip()}\'')
print(f'After Removing Trailing Whitespaces String =\'{s1.rstrip()}\'')
print(f'After Trimming Whitespaces String =\'{s1.strip()}\'')
s1 = ' X\n Y \nZ \t'
print(s1)
print(s1.strip())
| true | true |
f724d12f6d6351caa87e074ea046e25613b6fe8c | 413 | py | Python | Task1E.py | ginnylaw/138-floodwarningsystem | dc9b674c5517761904062c5b35729d8f14504c48 | [
"MIT"
] | null | null | null | Task1E.py | ginnylaw/138-floodwarningsystem | dc9b674c5517761904062c5b35729d8f14504c48 | [
"MIT"
] | 1 | 2022-01-21T22:07:02.000Z | 2022-01-22T11:19:31.000Z | Task1E.py | ginnylaw/138-floodwarningsystem | dc9b674c5517761904062c5b35729d8f14504c48 | [
"MIT"
] | null | null | null | # Not Copyright (¬C) 2022 Greg S. Kurzepa
from floodsystem.geo import rivers_by_station_number
from floodsystem.stationdata import build_station_list
def run():
    """Task 1E demo: print the rivers carrying the most monitoring stations."""
    stations = build_station_list()
    print(rivers_by_station_number(stations, 9))
if __name__ == "__main__":
print("*** Task 1E: CUED Part IA Flood Warning System ***")
run() | 27.533333 | 63 | 0.72155 |
from floodsystem.geo import rivers_by_station_number
from floodsystem.stationdata import build_station_list
def run():
station_list = build_station_list()
output = rivers_by_station_number(station_list, 9)
print(output)
if __name__ == "__main__":
print("*** Task 1E: CUED Part IA Flood Warning System ***")
run() | true | true |
f724d145f5fb4bdcfe48b20384224152e82d9a51 | 127 | py | Python | oss4blog/__init__.py | JianxunRao/oss4blog | 9e328ad5d2bc23806ef1c4d149f0bcc916674d03 | [
"MIT"
] | 3 | 2019-01-02T03:00:17.000Z | 2021-06-06T02:00:44.000Z | oss4blog/__init__.py | JianxunRao/oss4blog | 9e328ad5d2bc23806ef1c4d149f0bcc916674d03 | [
"MIT"
] | null | null | null | oss4blog/__init__.py | JianxunRao/oss4blog | 9e328ad5d2bc23806ef1c4d149f0bcc916674d03 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/2/5 0005 上午 8:56
# @Author : Trojx
# @File : __init__.py.py | 25.4 | 34 | 0.559055 | true | true | |
f724d19652f09efe12713994a7c76259c5afea06 | 3,189 | py | Python | ParlAI/parlai/tasks/mutualfriends/agents.py | UmaTaru/run | be29e4d41a4de3dee27cd6796801bfe51382d294 | [
"MIT"
] | 163 | 2019-06-23T14:07:57.000Z | 2022-02-25T23:06:07.000Z | ParlAI/parlai/tasks/mutualfriends/agents.py | UmaTaru/run | be29e4d41a4de3dee27cd6796801bfe51382d294 | [
"MIT"
] | 8 | 2019-07-24T12:41:31.000Z | 2022-02-10T00:17:20.000Z | ParlAI/parlai/tasks/mutualfriends/agents.py | UmaTaru/run | be29e4d41a4de3dee27cd6796801bfe51382d294 | [
"MIT"
] | 31 | 2019-06-26T01:21:07.000Z | 2021-09-06T17:23:24.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.teachers import DialogTeacher
from .build import build
import json
import os
class DefaultTeacher(DialogTeacher):
    """MutualFriends dataset.

    Presents each logged MutualFriends dialogue as a ParlAI episode: the
    first teacher turn lists one agent's friends, then the original chat
    turns alternate between teacher and student, with final item
    selections rendered as SELECT(...) markers.
    """
    def __init__(self, opt, shared=None):
        self.datatype = opt['datatype']
        build(opt)
        if not opt['datatype'].startswith('train'):
            raise RuntimeError('MutualFriends only has a training set.')
        opt['datafile'] = os.path.join(opt['datapath'], 'MutualFriends', 'data.json')
        self.id = 'mutualfriends'
        super().__init__(opt, shared)
    def act(self):
        """Use DialogTeacher act but set id to "Teacher" for intro message."""
        reply = super().act()
        # The scenario preamble is attributed to the generic "Teacher" id
        # rather than the dataset id.
        if reply.get('text', '').startswith('You have the following friends'):
            reply['id'] = 'Teacher'
        return reply
    def setup_data(self, path):
        """Load json data of conversations and yield (text, labels) pairs."""
        print('loading: ' + path)
        with open(path) as data_file:
            self.loaded_data = json.load(data_file)
        for ex in self.loaded_data:
            if len(ex['events']) > 0:
                # TODO: add reverse conversation as well
                curr_agent = ex['events'][0]['agent']
                # First teacher turn: describe this agent's friend list.
                conversation = [
                    (
                        'You have the following friends:\n'
                        + '\n'.join(
                            ', '.join('{}={}'.format(k, v) for k, v in person.items())
                            for person in ex['scenario']['kbs'][int(curr_agent)]
                        )
                        + '\nTry to find out which friend the other person has in common.'
                    )
                ]
                curr = ''
                idx = 0
                # Merge consecutive events from the same agent into one turn.
                while idx < len(ex['events']):
                    msg = ex['events'][idx]['data']
                    if type(msg) == dict:
                        # A dict event is an item selection; render it as a
                        # SELECT(...) marker.
                        msg = 'SELECT({})'.format(
                            ', '.join('{}={}'.format(k, v) for k, v in msg.items())
                        )
                    next_agent = ex['events'][idx]['agent']
                    if curr_agent == next_agent:
                        curr += '\n' + msg
                        curr = curr.strip()
                    else:
                        conversation.append(curr)
                        curr = msg
                        curr_agent = next_agent
                    idx += 1
                conversation.append(curr)
                # Pair up turns; attach the episode outcome to the last pair.
                for i in range(0, len(conversation), 2):
                    if i + 1 < len(conversation) - 1:
                        yield (conversation[i], [conversation[i + 1]]), i == 0
                    elif i + 1 == len(conversation) - 1:
                        yield (
                            (conversation[i], [conversation[i + 1]], ex['outcome']),
                            False,
                        )
                    else:
                        yield (conversation[i], None, ex['outcome']), False
| 39.37037 | 90 | 0.461587 |
from parlai.core.teachers import DialogTeacher
from .build import build
import json
import os
class DefaultTeacher(DialogTeacher):
def __init__(self, opt, shared=None):
self.datatype = opt['datatype']
build(opt)
if not opt['datatype'].startswith('train'):
raise RuntimeError('MutualFriends only has a training set.')
opt['datafile'] = os.path.join(opt['datapath'], 'MutualFriends', 'data.json')
self.id = 'mutualfriends'
super().__init__(opt, shared)
def act(self):
reply = super().act()
if reply.get('text', '').startswith('You have the following friends'):
reply['id'] = 'Teacher'
return reply
def setup_data(self, path):
print('loading: ' + path)
with open(path) as data_file:
self.loaded_data = json.load(data_file)
for ex in self.loaded_data:
if len(ex['events']) > 0:
curr_agent = ex['events'][0]['agent']
conversation = [
(
'You have the following friends:\n'
+ '\n'.join(
', '.join('{}={}'.format(k, v) for k, v in person.items())
for person in ex['scenario']['kbs'][int(curr_agent)]
)
+ '\nTry to find out which friend the other person has in common.'
)
]
curr = ''
idx = 0
while idx < len(ex['events']):
msg = ex['events'][idx]['data']
if type(msg) == dict:
msg = 'SELECT({})'.format(
', '.join('{}={}'.format(k, v) for k, v in msg.items())
)
next_agent = ex['events'][idx]['agent']
if curr_agent == next_agent:
curr += '\n' + msg
curr = curr.strip()
else:
conversation.append(curr)
curr = msg
curr_agent = next_agent
idx += 1
conversation.append(curr)
for i in range(0, len(conversation), 2):
if i + 1 < len(conversation) - 1:
yield (conversation[i], [conversation[i + 1]]), i == 0
elif i + 1 == len(conversation) - 1:
yield (
(conversation[i], [conversation[i + 1]], ex['outcome']),
False,
)
else:
yield (conversation[i], None, ex['outcome']), False
| true | true |
f724d1efb6cc2a309577cdfab02d22ed387da3a1 | 6,306 | py | Python | py-polars/polars/utils.py | JakobGM/polars | fe10d4a180e59e5e34f4ab17303f12f1cd64e6c8 | [
"MIT"
] | null | null | null | py-polars/polars/utils.py | JakobGM/polars | fe10d4a180e59e5e34f4ab17303f12f1cd64e6c8 | [
"MIT"
] | null | null | null | py-polars/polars/utils.py | JakobGM/polars | fe10d4a180e59e5e34f4ab17303f12f1cd64e6c8 | [
"MIT"
] | null | null | null | import ctypes
import os
import sys
from datetime import date, datetime, timedelta, timezone
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Type, Union
import numpy as np
from polars.datatypes import DataType, Date, Datetime
if sys.version_info >= (3, 10):
from typing import TypeGuard
else:
from typing_extensions import TypeGuard # pragma: no cover
def _process_null_values(
null_values: Union[None, str, List[str], Dict[str, str]] = None,
) -> Union[None, str, List[str], List[Tuple[str, str]]]:
if isinstance(null_values, dict):
return list(null_values.items())
else:
return null_values
# https://stackoverflow.com/questions/4355524/getting-data-from-ctypes-array-into-numpy
def _ptr_to_numpy(ptr: int, len: int, ptr_type: Any) -> np.ndarray:
"""
Parameters
----------
ptr
C/Rust ptr casted to usize.
len
Length of the array values.
ptr_type
Example:
f32: ctypes.c_float)
Returns
-------
View of memory block as numpy array.
"""
ptr_ctype = ctypes.cast(ptr, ctypes.POINTER(ptr_type))
return np.ctypeslib.as_array(ptr_ctype, (len,))
def _timedelta_to_pl_duration(td: timedelta) -> str:
return f"{td.days}d{td.seconds}s{td.microseconds}us"
def in_nanoseconds_window(dt: datetime) -> bool:
    """Whether ``dt`` fits in an i64 epoch-nanosecond timestamp (years 1387-2553)."""
    return 1387 <= dt.year <= 2553
def timedelta_in_nanoseconds_window(td: timedelta) -> bool:
    """Whether the instant ``epoch + td`` still fits in nanosecond range."""
    return 1386 < (datetime(1970, 1, 1) + td).year < 2554
def _datetime_to_pl_timestamp(dt: datetime, tu: Optional[str]) -> int:
"""
Converts a python datetime to a timestamp in nanoseconds
"""
if tu == "ns":
return int(dt.replace(tzinfo=timezone.utc).timestamp() * 1e9)
elif tu == "us":
return int(dt.replace(tzinfo=timezone.utc).timestamp() * 1e6)
elif tu == "ms":
return int(dt.replace(tzinfo=timezone.utc).timestamp() * 1e3)
if tu is None:
# python has us precision
return int(dt.replace(tzinfo=timezone.utc).timestamp() * 1e6)
else:
raise ValueError("expected on of {'ns', 'ms'}")
def _timedelta_to_pl_timedelta(td: timedelta, tu: Optional[str] = None) -> int:
if tu == "ns":
return int(td.total_seconds() * 1e9)
elif tu == "us":
return int(td.total_seconds() * 1e6)
elif tu == "ms":
return int(td.total_seconds() * 1e3)
if tu is None:
if timedelta_in_nanoseconds_window(td):
return int(td.total_seconds() * 1e9)
else:
return int(td.total_seconds() * 1e3)
else:
raise ValueError("expected one of {'ns', 'us, 'ms'}")
def _date_to_pl_date(d: date) -> int:
dt = datetime.combine(d, datetime.min.time()).replace(tzinfo=timezone.utc)
return int(dt.timestamp()) // (3600 * 24)
def is_str_sequence(
val: Sequence[object], allow_str: bool = False
) -> TypeGuard[Sequence[str]]:
"""
Checks that `val` is a sequence of strings. Note that a single string is a sequence of strings
by definition, use `allow_str=False` to return False on a single string
"""
if (not allow_str) and isinstance(val, str):
return False
return _is_iterable_of(val, Sequence, str)
def is_int_sequence(val: Sequence[object]) -> TypeGuard[Sequence[int]]:
return _is_iterable_of(val, Sequence, int)
def _is_iterable_of(val: Iterable, itertype: Type, eltype: Type) -> bool:
return isinstance(val, itertype) and all(isinstance(x, eltype) for x in val)
def range_to_slice(rng: range) -> slice:
    """Return the slice equivalent to a range.

    A unit step is expressed as None so downstream code can slice instead
    of taking by indices.
    """
    return slice(rng.start, rng.stop, rng.step if rng.step != 1 else None)
def handle_projection_columns(
    columns: Optional[Union[List[str], List[int]]]
) -> Tuple[Optional[List[int]], Optional[List[str]]]:
    """Split a ``columns`` argument into (index projection, column names).

    A list of ints selects columns by position and becomes the projection;
    a list of strings selects by name and is returned as the columns.
    """
    if not columns:
        return None, columns  # type: ignore
    if is_int_sequence(columns):
        return columns, None  # type: ignore
    if is_str_sequence(columns):
        return None, columns  # type: ignore
    raise ValueError(
        "columns arg should contain a list of all integers or all strings values."
    )
def _to_python_timedelta(
value: Union[int, float], tu: Optional[str] = "ns"
) -> timedelta:
if tu == "ns":
return timedelta(microseconds=value // 1e3)
elif tu == "us":
return timedelta(microseconds=value)
elif tu == "ms":
return timedelta(milliseconds=value)
else:
raise ValueError(f"time unit: {tu} not expected")
def _prepare_row_count_args(
row_count_name: Optional[str] = None,
row_count_offset: int = 0,
) -> Optional[Tuple[str, int]]:
if row_count_name is not None:
return (row_count_name, row_count_offset)
else:
return None
# Naive (tz-stripped) UNIX epoch reference used for fast datetime arithmetic.
EPOCH = datetime(1970, 1, 1).replace(tzinfo=None)
def _to_python_datetime(
    value: Union[int, float], dtype: Type[DataType], tu: Optional[str] = "ns"
) -> Union[date, datetime]:
    """Convert a raw polars value into a python date/datetime.

    ``dtype`` selects the target type (Date -> date, Datetime -> datetime);
    ``tu`` is the time unit the raw value is expressed in.
    """
    if dtype == Date:
        # Value counts days since the epoch.  Build from utc so the result
        # does not depend on the local timezone.
        return datetime.utcfromtimestamp(value * 3600 * 24).date()
    if dtype == Datetime:
        if tu == "ns":
            # nanoseconds -> microseconds for timedelta
            return EPOCH + timedelta(microseconds=value / 1000)
        if tu == "us":
            return EPOCH + timedelta(microseconds=value)
        if tu == "ms":
            # milliseconds -> seconds
            return datetime.utcfromtimestamp(value / 1_000)
        raise ValueError(f"time unit: {tu} not expected")
    raise NotImplementedError  # pragma: no cover
def _in_notebook() -> bool:
    """Best-effort detection of a Jupyter/IPython notebook kernel."""
    try:
        from IPython import get_ipython

        if "IPKernelApp" not in get_ipython().config:  # pragma: no cover
            return False
    except ImportError:
        # IPython is not installed at all.
        return False
    except AttributeError:
        # get_ipython() returned None: IPython installed but not active.
        return False
    return True
def format_path(path: Union[str, Path]) -> str:
    """Return a string path, expanding the home directory if present."""
    # (Fixed docstring typo: "Returnsa" -> "Returns a".)
    return os.path.expanduser(path)
| 29.605634 | 98 | 0.64288 | import ctypes
import os
import sys
from datetime import date, datetime, timedelta, timezone
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Type, Union
import numpy as np
from polars.datatypes import DataType, Date, Datetime
if sys.version_info >= (3, 10):
from typing import TypeGuard
else:
from typing_extensions import TypeGuard
def _process_null_values(
null_values: Union[None, str, List[str], Dict[str, str]] = None,
) -> Union[None, str, List[str], List[Tuple[str, str]]]:
if isinstance(null_values, dict):
return list(null_values.items())
else:
return null_values
def _ptr_to_numpy(ptr: int, len: int, ptr_type: Any) -> np.ndarray:
ptr_ctype = ctypes.cast(ptr, ctypes.POINTER(ptr_type))
return np.ctypeslib.as_array(ptr_ctype, (len,))
def _timedelta_to_pl_duration(td: timedelta) -> str:
return f"{td.days}d{td.seconds}s{td.microseconds}us"
def in_nanoseconds_window(dt: datetime) -> bool:
return 1386 < dt.year < 2554
def timedelta_in_nanoseconds_window(td: timedelta) -> bool:
return in_nanoseconds_window(datetime(1970, 1, 1) + td)
def _datetime_to_pl_timestamp(dt: datetime, tu: Optional[str]) -> int:
if tu == "ns":
return int(dt.replace(tzinfo=timezone.utc).timestamp() * 1e9)
elif tu == "us":
return int(dt.replace(tzinfo=timezone.utc).timestamp() * 1e6)
elif tu == "ms":
return int(dt.replace(tzinfo=timezone.utc).timestamp() * 1e3)
if tu is None:
return int(dt.replace(tzinfo=timezone.utc).timestamp() * 1e6)
else:
raise ValueError("expected on of {'ns', 'ms'}")
def _timedelta_to_pl_timedelta(td: timedelta, tu: Optional[str] = None) -> int:
if tu == "ns":
return int(td.total_seconds() * 1e9)
elif tu == "us":
return int(td.total_seconds() * 1e6)
elif tu == "ms":
return int(td.total_seconds() * 1e3)
if tu is None:
if timedelta_in_nanoseconds_window(td):
return int(td.total_seconds() * 1e9)
else:
return int(td.total_seconds() * 1e3)
else:
raise ValueError("expected one of {'ns', 'us, 'ms'}")
def _date_to_pl_date(d: date) -> int:
dt = datetime.combine(d, datetime.min.time()).replace(tzinfo=timezone.utc)
return int(dt.timestamp()) // (3600 * 24)
def is_str_sequence(
val: Sequence[object], allow_str: bool = False
) -> TypeGuard[Sequence[str]]:
if (not allow_str) and isinstance(val, str):
return False
return _is_iterable_of(val, Sequence, str)
def is_int_sequence(val: Sequence[object]) -> TypeGuard[Sequence[int]]:
return _is_iterable_of(val, Sequence, int)
def _is_iterable_of(val: Iterable, itertype: Type, eltype: Type) -> bool:
return isinstance(val, itertype) and all(isinstance(x, eltype) for x in val)
def range_to_slice(rng: range) -> slice:
step: Optional[int]
# maybe we can slice instead of take by indices
if rng.step != 1:
step = rng.step
else:
step = None
return slice(rng.start, rng.stop, step)
def handle_projection_columns(
columns: Optional[Union[List[str], List[int]]]
) -> Tuple[Optional[List[int]], Optional[List[str]]]:
projection: Optional[List[int]] = None
if columns:
if is_int_sequence(columns):
projection = columns # type: ignore
columns = None
elif not is_str_sequence(columns):
raise ValueError(
"columns arg should contain a list of all integers or all strings values."
)
return projection, columns # type: ignore
def _to_python_timedelta(
value: Union[int, float], tu: Optional[str] = "ns"
) -> timedelta:
if tu == "ns":
return timedelta(microseconds=value // 1e3)
elif tu == "us":
return timedelta(microseconds=value)
elif tu == "ms":
return timedelta(milliseconds=value)
else:
raise ValueError(f"time unit: {tu} not expected")
def _prepare_row_count_args(
row_count_name: Optional[str] = None,
row_count_offset: int = 0,
) -> Optional[Tuple[str, int]]:
if row_count_name is not None:
return (row_count_name, row_count_offset)
else:
return None
EPOCH = datetime(1970, 1, 1).replace(tzinfo=None)
def _to_python_datetime(
value: Union[int, float], dtype: Type[DataType], tu: Optional[str] = "ns"
) -> Union[date, datetime]:
if dtype == Date:
# days to seconds
# important to create from utc. Not doing this leads
# to inconsistencies dependent on the timezone you are in.
return datetime.utcfromtimestamp(value * 3600 * 24).date()
elif dtype == Datetime:
if tu == "ns":
# nanoseconds to seconds
return EPOCH + timedelta(microseconds=value / 1000)
if tu == "us":
return EPOCH + timedelta(microseconds=value)
elif tu == "ms":
# milliseconds to seconds
return datetime.utcfromtimestamp(value / 1_000)
else:
raise ValueError(f"time unit: {tu} not expected")
else:
raise NotImplementedError # pragma: no cover
def _in_notebook() -> bool:
try:
from IPython import get_ipython
if "IPKernelApp" not in get_ipython().config: # pragma: no cover
return False
except ImportError:
return False
except AttributeError:
return False
return True
def format_path(path: Union[str, Path]) -> str:
return os.path.expanduser(path)
| true | true |
f724d251d69499fc6e1ec87430fba69964909b5d | 2,310 | py | Python | tests/test_datasets/test_dataset_wrapper.py | pallgeuer/mmpose | d3c17d5e6bdb9dbaca19f3bf53aa2802105355fd | [
"Apache-2.0"
] | 1 | 2022-02-13T12:27:40.000Z | 2022-02-13T12:27:40.000Z | tests/test_datasets/test_dataset_wrapper.py | pallgeuer/mmpose | d3c17d5e6bdb9dbaca19f3bf53aa2802105355fd | [
"Apache-2.0"
] | null | null | null | tests/test_datasets/test_dataset_wrapper.py | pallgeuer/mmpose | d3c17d5e6bdb9dbaca19f3bf53aa2802105355fd | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv import Config
from mmpose.datasets.builder import build_dataset
def test_concat_dataset():
    """ConcatDataset can be built three equivalent ways; each doubles the size."""
    # build COCO-like dataset config
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/coco.py').dataset_info
    channel_cfg = dict(
        num_output_channels=17,
        dataset_joints=17,
        dataset_channel=[
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
        ],
        inference_channel=[
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
        ])
    data_cfg = dict(
        image_size=[192, 256],
        heatmap_size=[48, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        soft_nms=False,
        nms_thr=1.0,
        oks_thr=0.9,
        vis_thr=0.2,
        use_gt_bbox=True,
        det_bbox_thr=0.0,
        bbox_file='tests/data/coco/test_coco_det_AP_H_56.json',
    )
    dataset_cfg = dict(
        type='TopDownCocoDataset',
        ann_file='tests/data/coco/test_coco.json',
        img_prefix='tests/data/coco/',
        data_cfg=data_cfg,
        pipeline=[],
        dataset_info=dataset_info)
    dataset = build_dataset(dataset_cfg)
    # Case 1: build ConcatDataset explicitly
    concat_dataset_cfg = dict(
        type='ConcatDataset', datasets=[dataset_cfg, dataset_cfg])
    concat_dataset = build_dataset(concat_dataset_cfg)
    assert len(concat_dataset) == 2 * len(dataset)
    # Case 2: build ConcatDataset from cfg sequence
    concat_dataset = build_dataset([dataset_cfg, dataset_cfg])
    assert len(concat_dataset) == 2 * len(dataset)
    # Case 3: build ConcatDataset from ann_file sequence
    # (duplicate the per-dataset fields so each entry describes one dataset)
    concat_dataset_cfg = dataset_cfg.copy()
    for key in ['ann_file', 'type', 'img_prefix', 'dataset_info']:
        val = concat_dataset_cfg[key]
        concat_dataset_cfg[key] = [val] * 2
    for key in ['num_joints', 'dataset_channel']:
        val = concat_dataset_cfg['data_cfg'][key]
        concat_dataset_cfg['data_cfg'][key] = [val] * 2
    concat_dataset = build_dataset(concat_dataset_cfg)
    assert len(concat_dataset) == 2 * len(dataset)
| 33.970588 | 71 | 0.646753 |
from mmcv import Config
from mmpose.datasets.builder import build_dataset
def test_concat_dataset():
    """Build ConcatDataset three equivalent ways; each must be twice the
    length of the single underlying COCO dataset."""
    # COCO keypoint metadata shipped with mmpose.
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/coco.py').dataset_info
    # 17 COCO keypoints; all channels used for output and inference.
    channel_cfg = dict(
        num_output_channels=17,
        dataset_joints=17,
        dataset_channel=[
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
        ],
        inference_channel=[
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
        ])
    data_cfg = dict(
        image_size=[192, 256],
        heatmap_size=[48, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        soft_nms=False,
        nms_thr=1.0,
        oks_thr=0.9,
        vis_thr=0.2,
        use_gt_bbox=True,
        det_bbox_thr=0.0,
        bbox_file='tests/data/coco/test_coco_det_AP_H_56.json',
    )
    dataset_cfg = dict(
        type='TopDownCocoDataset',
        ann_file='tests/data/coco/test_coco.json',
        img_prefix='tests/data/coco/',
        data_cfg=data_cfg,
        pipeline=[],
        dataset_info=dataset_info)
    # Baseline dataset used for the length comparisons below.
    dataset = build_dataset(dataset_cfg)
    # Case 1: explicit ConcatDataset config.
    concat_dataset_cfg = dict(
        type='ConcatDataset', datasets=[dataset_cfg, dataset_cfg])
    concat_dataset = build_dataset(concat_dataset_cfg)
    assert len(concat_dataset) == 2 * len(dataset)
    # Case 2: a plain sequence of dataset configs.
    concat_dataset = build_dataset([dataset_cfg, dataset_cfg])
    assert len(concat_dataset) == 2 * len(dataset)
    # Case 3: one config whose scalar fields become length-2 sequences.
    concat_dataset_cfg = dataset_cfg.copy()
    for key in ['ann_file', 'type', 'img_prefix', 'dataset_info']:
        val = concat_dataset_cfg[key]
        concat_dataset_cfg[key] = [val] * 2
    for key in ['num_joints', 'dataset_channel']:
        val = concat_dataset_cfg['data_cfg'][key]
        concat_dataset_cfg['data_cfg'][key] = [val] * 2
    concat_dataset = build_dataset(concat_dataset_cfg)
    assert len(concat_dataset) == 2 * len(dataset)
| true | true |
f724d3be4fab7267380619189339e046a243a317 | 741 | py | Python | face_recon_deform/PhotoAvatarLib_exe/run.py | halfjoe/3D-Portrait-Stylization | ccf0edd5cf7764d67d2740aa0e2cd18cc503c937 | [
"MIT"
] | 38 | 2022-01-12T14:17:25.000Z | 2022-03-23T06:34:23.000Z | face_recon_deform/PhotoAvatarLib_exe/run.py | halfjoe/3D-Portrait-Stylization | ccf0edd5cf7764d67d2740aa0e2cd18cc503c937 | [
"MIT"
] | 5 | 2022-01-19T12:14:45.000Z | 2022-03-22T15:59:12.000Z | face_recon_deform/PhotoAvatarLib_exe/run.py | halfjoe/3D-Portrait-Stylization | ccf0edd5cf7764d67d2740aa0e2cd18cc503c937 | [
"MIT"
] | 6 | 2022-01-14T06:59:37.000Z | 2022-03-15T03:58:54.000Z | import os
# Batch-drive PhotoAvatarLib.exe over every JPEG in ./upload, then patch the
# OBJ/MTL pair it writes under ./result so the mesh references its texture.
#
# Fixes vs. the previous version: file handles are now closed even on error
# (context managers instead of bare open/close), and the base filename is
# computed once instead of on every line.
for file in os.listdir("upload"):
    if file.endswith(".jpg"):
        base = file.rsplit('.', 1)[0]  # image name without the '.jpg' suffix
        print(base)
        # External reconstruction tool; writes its outputs under ./result.
        os.system('PhotoAvatarLib.exe ' + base)
        # Minimal material file mapping the generated face texture.
        with open(os.path.join('result', base + '.mtl'), "w") as fp:
            fp.write('newmtl material_1\nmap_Kd %s_face.jpg' % base)
        # Prepend mtllib/usemtl directives to the generated OBJ.
        obj_path = os.path.join('result', base + '_face_fit_ortho.obj')
        with open(obj_path, "r") as fp:
            fstr = fp.read()
        with open(obj_path, "w") as fp:
            fp.write('mtllib %s.mtl\nusemtl material_1\n' % base)
            fp.write(fstr)
| 35.285714 | 95 | 0.522267 | import os
for file in os.listdir("upload"):
if file.endswith(".jpg"):
print(file.rsplit('.', 1)[0])
os.system('PhotoAvatarLib.exe ' + file.rsplit('.', 1)[0])
fp = open(os.path.join('result', file.rsplit('.', 1)[0] + '.mtl'), "w")
fp.write('newmtl material_1\nmap_Kd %s_face.jpg' % file.rsplit('.', 1)[0])
fp.close()
fp = open(os.path.join('result', file.rsplit('.', 1)[0] + '_face_fit_ortho.obj'), "r")
fstr = fp.read()
fp.close()
fp = open(os.path.join('result', file.rsplit('.', 1)[0] + '_face_fit_ortho.obj'), "w")
fp.write('mtllib %s.mtl\nusemtl material_1\n' % file.rsplit('.', 1)[0])
fp.write(fstr)
fp.close()
| true | true |
f724d762255165511edcd4f30973356a4b81b6a1 | 964 | py | Python | tests/test_main.py | thorgate/pyevr | 168f2e9459020212213ed0291882a285ebb53839 | [
"MIT"
] | 3 | 2020-04-18T19:45:51.000Z | 2022-03-01T19:48:11.000Z | tests/test_main.py | thorgate/pyevr | 168f2e9459020212213ed0291882a285ebb53839 | [
"MIT"
] | 39 | 2019-11-16T01:35:35.000Z | 2021-11-18T12:58:41.000Z | tests/test_main.py | thorgate/pyevr | 168f2e9459020212213ed0291882a285ebb53839 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `pyevr.main`."""
import pytest
from click.testing import CliRunner
from pyevr.main import main
@pytest.fixture
def response():
    """Sample pytest fixture (currently a stub returning None).

    See more at: http://doc.pytest.org/en/latest/fixture.html
    The commented lines sketch the intended real implementation.
    """
    # import requests
    # return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
    """Sample pytest test function with the pytest fixture as an argument.

    Currently a no-op; the commented lines sketch the intended check.
    """
    # from bs4 import BeautifulSoup
    # assert 'GitHub' in BeautifulSoup(response.content).title.string
def test_command_line_interface():
    """Invoke the CLI entry point bare and with ``--help``."""
    cli_runner = CliRunner()
    # A bare invocation must exit cleanly and report the default callback.
    outcome = cli_runner.invoke(main)
    assert outcome.exit_code == 0
    assert 'pyevr.cli.main' in outcome.output
    # The --help flag must also exit cleanly and print the usage epilogue.
    outcome = cli_runner.invoke(main, ['--help'])
    assert outcome.exit_code == 0
    assert '--help Show this message and exit.' in outcome.output
import pytest
from click.testing import CliRunner
from pyevr.main import main
# Fix: in this copy the bodies of response() and test_content() were empty,
# which is a SyntaxError in Python; restore minimal valid stub bodies.
@pytest.fixture
def response():
    """Sample pytest fixture; a stub that yields None until a real
    response object is wired in."""


def test_content(response):
    """Sample pytest test consuming the ``response`` fixture (stub)."""


def test_command_line_interface():
    """Invoke the CLI entry point bare and with ``--help``."""
    runner = CliRunner()
    result = runner.invoke(main)
    assert result.exit_code == 0
    assert 'pyevr.cli.main' in result.output
    help_result = runner.invoke(main, ['--help'])
    assert help_result.exit_code == 0
    assert '--help Show this message and exit.' in help_result.output
f724d7d23d4236fb0d0aeead2ccfc8a44b4b705c | 17,325 | py | Python | ansible/venv/lib/python2.7/site-packages/ansible/modules/network/ios/ios_user.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 17 | 2017-06-07T23:15:01.000Z | 2021-08-30T14:32:36.000Z | ansible/venv/lib/python2.7/site-packages/ansible/modules/network/ios/ios_user.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 9 | 2017-06-25T03:31:52.000Z | 2021-05-17T23:43:12.000Z | ansible/venv/lib/python2.7/site-packages/ansible/modules/network/ios/ios_user.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 3 | 2018-05-26T21:31:22.000Z | 2019-09-28T17:00:45.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: ios_user
version_added: "2.4"
author: "Trishna Guha (@trishnaguha)"
short_description: Manage the aggregate of local users on Cisco IOS device
description:
- This module provides declarative management of the local usernames
configured on network devices. It allows playbooks to manage
either individual usernames or the aggregate of usernames in the
current running config. It also supports purging usernames from the
configuration that are not explicitly defined.
notes:
- Tested against IOS 15.6
options:
aggregate:
description:
- The set of username objects to be configured on the remote
Cisco IOS device. The list entries can either be the username
or a hash of username and properties. This argument is mutually
exclusive with the C(name) argument.
aliases: ['users', 'collection']
name:
description:
- The username to be configured on the Cisco IOS device.
This argument accepts a string value and is mutually exclusive
with the C(aggregate) argument.
Please note that this option is not same as C(provider username).
configured_password:
description:
- The password to be configured on the Cisco IOS device. The
password needs to be provided in clear and it will be encrypted
on the device.
Please note that this option is not same as C(provider password).
update_password:
description:
- Since passwords are encrypted in the device running config, this
argument will instruct the module when to change the password. When
set to C(always), the password will always be updated in the device
and when set to C(on_create) the password will be updated only if
the username is created.
default: always
choices: ['on_create', 'always']
password_type:
description:
- This argument determines whether a 'password' or 'secret' will be
configured.
default: secret
choices: ['secret', 'password']
version_added: "2.8"
hashed_password:
description:
- This option allows configuring hashed passwords on Cisco IOS devices.
suboptions:
type:
description:
- Specifies the type of hash (e.g., 5 for MD5, 8 for PBKDF2, etc.)
- For this to work, the device needs to support the desired hash type
type: int
required: True
value:
description:
- The actual hashed password to be configured on the device
required: True
version_added: "2.8"
privilege:
description:
- The C(privilege) argument configures the privilege level of the
user when logged into the system. This argument accepts integer
values in the range of 1 to 15.
view:
description:
- Configures the view for the username in the
device running configuration. The argument accepts a string value
defining the view name. This argument does not check if the view
has been configured on the device.
aliases: ['role']
sshkey:
description:
- Specifies one or more SSH public key(s) to configure
for the given username.
- This argument accepts a valid SSH key value.
version_added: "2.7"
nopassword:
description:
- Defines the username without assigning
a password. This will allow the user to login to the system
without being authenticated by a password.
type: bool
purge:
description:
- Instructs the module to consider the
resource definition absolute. It will remove any previously
configured usernames on the device with the exception of the
`admin` user (the current defined set of users).
type: bool
default: false
state:
description:
- Configures the state of the username definition
as it relates to the device operational configuration. When set
to I(present), the username(s) should be configured in the device active
configuration and when set to I(absent) the username(s) should not be
in the device active configuration
default: present
choices: ['present', 'absent']
extends_documentation_fragment: ios
"""
EXAMPLES = """
- name: create a new user
ios_user:
name: ansible
nopassword: True
sshkey: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
state: present
- name: create a new user with multiple keys
ios_user:
name: ansible
sshkey:
- "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
- "{{ lookup('file', '~/path/to/public_key') }}"
state: present
- name: remove all users except admin
ios_user:
purge: yes
- name: remove all users except admin and these listed users
ios_user:
aggregate:
- name: testuser1
- name: testuser2
- name: testuser3
purge: yes
- name: set multiple users to privilege level 15
ios_user:
aggregate:
- name: netop
- name: netend
privilege: 15
state: present
- name: set user view/role
ios_user:
name: netop
view: network-operator
state: present
- name: Change Password for User netop
ios_user:
name: netop
configured_password: "{{ new_password }}"
update_password: always
state: present
- name: Aggregate of users
ios_user:
aggregate:
- name: ansibletest2
- name: ansibletest3
view: network-admin
- name: Add a user specifying password type
ios_user:
name: ansibletest4
configured_password: "{{ new_password }}"
password_type: password
- name: Add a user with MD5 hashed password
ios_user:
name: ansibletest5
hashed_password:
type: 5
value: $3$8JcDilcYgFZi.yz4ApaqkHG2.8/
- name: Delete users with aggregate
ios_user:
aggregate:
- name: ansibletest1
- name: ansibletest2
- name: ansibletest3
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- username ansible secret password
- username admin secret admin
"""
import base64
import hashlib
import re
from copy import deepcopy
from functools import partial
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.ios.ios import get_config, load_config
from ansible.module_utils.network.ios.ios import ios_argument_spec, check_args
from ansible.module_utils.six import iteritems
def validate_privilege(value, module):
    """Fail the module run unless *value* is a privilege level in 1..15.

    Falsy values (None, 0) are treated as "not supplied" and pass through.
    """
    if not value:
        return
    if value < 1 or value > 15:
        module.fail_json(msg='privilege must be between 1 and 15, got %s' % value)
def user_del_cmd(username):
    """Build the interactive command descriptor that deletes *username*,
    answering 'y' to the confirmation prompt IOS raises."""
    descriptor = dict()
    descriptor['command'] = 'no username %s' % username
    descriptor['prompt'] = ('This operation will remove all username related'
                            ' configurations with same name')
    descriptor['answer'] = 'y'
    descriptor['newline'] = False
    return descriptor
def sshkey_fingerprint(sshkey):
    """Return *sshkey* with its base64 body replaced by an upper-case MD5
    hex digest — the single-line fingerprint form IOS accepts."""
    if not sshkey:
        return None

    def _md5_hex(b64_body):
        # IOS stores the MD5 of the decoded key bytes, upper-cased.
        return hashlib.md5(base64.b64decode(b64_body)).hexdigest().upper()

    parts = sshkey.split(' ')
    if len(parts) > 1:
        # 'ssh-rsa AAAA...== comment' -> hash only the key body (field 2).
        parts[1] = _md5_hex(parts[1])
        return ' '.join(parts)
    # Bare key body with no type/comment: assume RSA.
    return 'ssh-rsa %s' % _md5_hex(sshkey)
def map_obj_to_commands(updates, module):
    """Build the IOS config commands that converge each (want, have) pair
    produced by ``update_objects``; returns the flat command list."""
    commands = list()
    update_password = module.params['update_password']
    password_type = module.params['password_type']
    def needs_update(want, have, x):
        # Truthy desired value that differs from the device's current value.
        return want.get(x) and (want.get(x) != have.get(x))
    def add(command, want, x):
        # Append a 'username <name> <suffix>' line.
        command.append('username %s %s' % (want['name'], x))
    def add_hashed_password(command, want, x):
        # x is a {'type': <hash-type>, 'value': <hash>} dict.
        command.append('username %s secret %s %s' % (want['name'], x.get('type'),
                                                     x.get('value')))
    def add_ssh(command, want, x=None):
        # Enter the pubkey-chain context: with keys, (re)write them; with
        # none, remove the user's key entry entirely.
        command.append('ip ssh pubkey-chain')
        if x:
            command.append('username %s' % want['name'])
            for item in x:
                command.append('key-hash %s' % item)
            command.append('exit')
        else:
            command.append('no username %s' % want['name'])
        command.append('exit')
    for update in updates:
        want, have = update
        if want['state'] == 'absent':
            if have['sshkey']:
                # Key entries must be removed via the pubkey-chain context.
                add_ssh(commands, want)
            else:
                commands.append(user_del_cmd(want['name']))
        if needs_update(want, have, 'view'):
            add(commands, want, 'view %s' % want['view'])
        if needs_update(want, have, 'privilege'):
            add(commands, want, 'privilege %s' % want['privilege'])
        if needs_update(want, have, 'sshkey'):
            add_ssh(commands, want, want['sshkey'])
        if needs_update(want, have, 'configured_password'):
            if update_password == 'always' or not have:
                # Refuse to mix 'password' and 'secret' styles on one user.
                if have and password_type != have['password_type']:
                    module.fail_json(msg='Can not have both a user password and a user secret.' +
                                     ' Please choose one or the other.')
                add(commands, want, '%s %s' % (password_type, want['configured_password']))
        if needs_update(want, have, 'hashed_password'):
            add_hashed_password(commands, want, want['hashed_password'])
        if needs_update(want, have, 'nopassword'):
            if want['nopassword']:
                add(commands, want, 'nopassword')
            else:
                # NOTE(review): unreachable — needs_update() already required
                # want['nopassword'] to be truthy; would also misformat the
                # dict returned by user_del_cmd().
                add(commands, want, user_del_cmd(want['name']))
    return commands
def parse_view(data):
    """Extract the parser-view name from a username config blob, or None."""
    found = re.search(r'view (\S+)', data, re.M)
    return found.group(1) if found else None
def parse_sshkey(data, user):
    """Collect the key-hash entries listed under *user* in the device's
    'ip ssh pubkey-chain' configuration; empty list when absent."""
    chain = re.search(r'username %s(\n\s+key-hash .+$)+' % user, data, re.M)
    if not chain:
        return []
    # findall returns [] by itself when no key-hash lines match.
    return re.findall(r'key-hash (\S+ \S+(?: .+)?)$', chain.group(), re.M)
def parse_privilege(data):
    """Return the privilege level found in *data* as an int, else None."""
    found = re.search(r'privilege (\S+)', data, re.M)
    return int(found.group(1)) if found else None
def parse_password_type(data):
    """Return 'password' or 'secret' when *data* is a username config line
    using one of those keywords, else None."""
    kind = None
    if data:
        tokens = data.split()
        # On 'username <name> (password|secret) <enc> <value>' lines the
        # keyword sits third from the end.
        if tokens[-3] in ('password', 'secret'):
            kind = tokens[-3]
    return kind
def map_config_to_obj(module):
    """Parse the device's running config into a list of current-user dicts."""
    data = get_config(module, flags=['| section username'])
    # Match 'username <name>' at column 0 or indented by two spaces (the
    # latter occurs inside 'ip ssh pubkey-chain' blocks).
    match = re.findall(r'(?:^(?:u|\s{2}u))sername (\S+)', data, re.M)
    if not match:
        return list()
    instances = list()
    for user in set(match):
        # Collect every 'username <user> ...' line for this user.
        regex = r'username %s .+$' % user
        cfg = re.findall(regex, data, re.M)
        cfg = '\n'.join(cfg)
        obj = {
            'name': user,
            'state': 'present',
            'nopassword': 'nopassword' in cfg,
            # Passwords cannot be recovered from the config, so these stay
            # None; only their *type* is parsed out below.
            'configured_password': None,
            'hashed_password': None,
            'password_type': parse_password_type(cfg),
            'sshkey': parse_sshkey(data, user),
            'privilege': parse_privilege(cfg),
            'view': parse_view(cfg)
        }
        instances.append(obj)
    return instances
def get_param_value(key, item, module):
    """Resolve *key* for one aggregate entry, falling back to module params.

    Per-item values are run through Ansible's type checker; the final value
    is passed to a module-level ``validate_<key>`` function when one exists.
    """
    # if key doesn't exist in the item, get it from module.params
    # NOTE(review): this is a truthiness test, so falsy per-item values
    # (False, 0, '') also fall back to the module-level parameter.
    if not item.get(key):
        value = module.params[key]
    # if key does exist, do a type check on it to validate it
    else:
        value_type = module.argument_spec[key].get('type', 'str')
        type_checker = module._CHECK_ARGUMENT_TYPES_DISPATCHER[value_type]
        type_checker(item[key])
        value = item[key]
    # validate the param value (if validator func exists)
    validator = globals().get('validate_%s' % key)
    if all((value, validator)):
        validator(value, module)
    return value
def map_params_to_obj(module):
    """Normalize module parameters into a list of desired-user dicts.

    Accepts either a single ``name`` or an ``aggregate`` list whose items
    may be bare names or dicts; fails the run when no usable name is given
    (unless this is a purge-only invocation).
    """
    users = module.params['aggregate']
    if not users:
        if not module.params['name'] and module.params['purge']:
            # Purge-only run: no users are desired.
            return list()
        elif not module.params['name']:
            module.fail_json(msg='username is required')
        else:
            aggregate = [{'name': module.params['name']}]
    else:
        aggregate = list()
        for item in users:
            if not isinstance(item, dict):
                # Bare string entry: treat it as the username.
                aggregate.append({'name': item})
            elif 'name' not in item:
                module.fail_json(msg='name is required')
            else:
                aggregate.append(item)
    objects = list()
    for item in aggregate:
        # Fill in every attribute, falling back to module-level parameters.
        get_value = partial(get_param_value, item=item, module=module)
        item['configured_password'] = get_value('configured_password')
        item['hashed_password'] = get_value('hashed_password')
        item['nopassword'] = get_value('nopassword')
        item['privilege'] = get_value('privilege')
        item['view'] = get_value('view')
        item['sshkey'] = render_key_list(get_value('sshkey'))
        item['state'] = get_value('state')
        objects.append(item)
    return objects
def render_key_list(ssh_keys):
    """Fingerprint every key in *ssh_keys*; empty list when none given."""
    if not ssh_keys:
        return []
    return [sshkey_fingerprint(key) for key in ssh_keys]
def update_objects(want, have):
    """Pair each desired user with its current state whenever a change is
    needed; new users pair with an empty dict."""
    updates = list()
    for entry in want:
        current = None
        for candidate in have:
            if candidate['name'] == entry['name']:
                current = candidate
                break
        if current is None:
            # Unknown user: schedule creation only when it should exist.
            if entry['state'] == 'present':
                updates.append((entry, {}))
        else:
            # One (entry, current) pair is appended per differing attribute,
            # mirroring the original behavior.
            for key, value in iteritems(entry):
                if value and value != current[key]:
                    updates.append((entry, current))
    return updates
def main():
    """Entry point: build the argument spec, diff desired vs. current
    users, and push the resulting commands to the device."""
    # Spec for the nested hashed_password dict argument.
    hashed_password_spec = dict(
        type=dict(type='int', required=True),
        value=dict(no_log=True, required=True)
    )
    # Per-user options, shared by top-level params and aggregate entries.
    element_spec = dict(
        name=dict(),
        configured_password=dict(no_log=True),
        hashed_password=dict(no_log=True, type='dict', options=hashed_password_spec),
        nopassword=dict(type='bool'),
        update_password=dict(default='always', choices=['on_create', 'always']),
        password_type=dict(default='secret', choices=['secret', 'password']),
        privilege=dict(type='int'),
        view=dict(aliases=['role']),
        sshkey=dict(type='list'),
        state=dict(default='present', choices=['present', 'absent'])
    )
    aggregate_spec = deepcopy(element_spec)
    aggregate_spec['name'] = dict(required=True)
    # remove default in aggregate spec, to handle common arguments
    remove_default_spec(aggregate_spec)
    argument_spec = dict(
        aggregate=dict(type='list', elements='dict', options=aggregate_spec, aliases=['users', 'collection']),
        purge=dict(type='bool', default=False)
    )
    argument_spec.update(element_spec)
    argument_spec.update(ios_argument_spec)
    mutually_exclusive = [('name', 'aggregate'), ('nopassword', 'hashed_password', 'configured_password')]
    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)
    warnings = list()
    # Warn on a common mix-up between the provider (connection) password
    # and the user password being configured.
    if module.params['password'] and not module.params['configured_password']:
        warnings.append(
            'The "password" argument is used to authenticate the current connection. ' +
            'To set a user password use "configured_password" instead.'
        )
    check_args(module, warnings)
    result = {'changed': False}
    if warnings:
        result['warnings'] = warnings
    # Diff desired users (want) against the device's current users (have).
    want = map_params_to_obj(module)
    have = map_config_to_obj(module)
    commands = map_obj_to_commands(update_objects(want, have), module)
    # Purge removes every configured user not in 'want', except 'admin'.
    if module.params['purge']:
        want_users = [x['name'] for x in want]
        have_users = [x['name'] for x in have]
        for item in set(have_users).difference(want_users):
            if item != 'admin':
                commands.append(user_del_cmd(item))
    result['commands'] = commands
    if commands:
        # Apply unless in check mode; either way the run reports 'changed'.
        if not module.check_mode:
            load_config(module, commands)
        result['changed'] = True
    module.exit_json(**result)
if __name__ == '__main__':
    main()
| 31.847426 | 110 | 0.633709 |
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: ios_user
version_added: "2.4"
author: "Trishna Guha (@trishnaguha)"
short_description: Manage the aggregate of local users on Cisco IOS device
description:
- This module provides declarative management of the local usernames
configured on network devices. It allows playbooks to manage
either individual usernames or the aggregate of usernames in the
current running config. It also supports purging usernames from the
configuration that are not explicitly defined.
notes:
- Tested against IOS 15.6
options:
aggregate:
description:
- The set of username objects to be configured on the remote
Cisco IOS device. The list entries can either be the username
or a hash of username and properties. This argument is mutually
exclusive with the C(name) argument.
aliases: ['users', 'collection']
name:
description:
- The username to be configured on the Cisco IOS device.
This argument accepts a string value and is mutually exclusive
with the C(aggregate) argument.
Please note that this option is not same as C(provider username).
configured_password:
description:
- The password to be configured on the Cisco IOS device. The
password needs to be provided in clear and it will be encrypted
on the device.
Please note that this option is not same as C(provider password).
update_password:
description:
- Since passwords are encrypted in the device running config, this
argument will instruct the module when to change the password. When
set to C(always), the password will always be updated in the device
and when set to C(on_create) the password will be updated only if
the username is created.
default: always
choices: ['on_create', 'always']
password_type:
description:
- This argument determines whether a 'password' or 'secret' will be
configured.
default: secret
choices: ['secret', 'password']
version_added: "2.8"
hashed_password:
description:
- This option allows configuring hashed passwords on Cisco IOS devices.
suboptions:
type:
description:
- Specifies the type of hash (e.g., 5 for MD5, 8 for PBKDF2, etc.)
- For this to work, the device needs to support the desired hash type
type: int
required: True
value:
description:
- The actual hashed password to be configured on the device
required: True
version_added: "2.8"
privilege:
description:
- The C(privilege) argument configures the privilege level of the
user when logged into the system. This argument accepts integer
values in the range of 1 to 15.
view:
description:
- Configures the view for the username in the
device running configuration. The argument accepts a string value
defining the view name. This argument does not check if the view
has been configured on the device.
aliases: ['role']
sshkey:
description:
- Specifies one or more SSH public key(s) to configure
for the given username.
- This argument accepts a valid SSH key value.
version_added: "2.7"
nopassword:
description:
- Defines the username without assigning
a password. This will allow the user to login to the system
without being authenticated by a password.
type: bool
purge:
description:
- Instructs the module to consider the
resource definition absolute. It will remove any previously
configured usernames on the device with the exception of the
`admin` user (the current defined set of users).
type: bool
default: false
state:
description:
- Configures the state of the username definition
as it relates to the device operational configuration. When set
to I(present), the username(s) should be configured in the device active
configuration and when set to I(absent) the username(s) should not be
in the device active configuration
default: present
choices: ['present', 'absent']
extends_documentation_fragment: ios
"""
EXAMPLES = """
- name: create a new user
ios_user:
name: ansible
nopassword: True
sshkey: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
state: present
- name: create a new user with multiple keys
ios_user:
name: ansible
sshkey:
- "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
- "{{ lookup('file', '~/path/to/public_key') }}"
state: present
- name: remove all users except admin
ios_user:
purge: yes
- name: remove all users except admin and these listed users
ios_user:
aggregate:
- name: testuser1
- name: testuser2
- name: testuser3
purge: yes
- name: set multiple users to privilege level 15
ios_user:
aggregate:
- name: netop
- name: netend
privilege: 15
state: present
- name: set user view/role
ios_user:
name: netop
view: network-operator
state: present
- name: Change Password for User netop
ios_user:
name: netop
configured_password: "{{ new_password }}"
update_password: always
state: present
- name: Aggregate of users
ios_user:
aggregate:
- name: ansibletest2
- name: ansibletest3
view: network-admin
- name: Add a user specifying password type
ios_user:
name: ansibletest4
configured_password: "{{ new_password }}"
password_type: password
- name: Add a user with MD5 hashed password
ios_user:
name: ansibletest5
hashed_password:
type: 5
value: $3$8JcDilcYgFZi.yz4ApaqkHG2.8/
- name: Delete users with aggregate
ios_user:
aggregate:
- name: ansibletest1
- name: ansibletest2
- name: ansibletest3
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- username ansible secret password
- username admin secret admin
"""
import base64
import hashlib
import re
from copy import deepcopy
from functools import partial
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.ios.ios import get_config, load_config
from ansible.module_utils.network.ios.ios import ios_argument_spec, check_args
from ansible.module_utils.six import iteritems
def validate_privilege(value, module):
    """Fail the run unless *value* is a privilege level in 1..15; falsy
    values count as "not supplied" and pass through."""
    if not value:
        return
    if not (1 <= value <= 15):
        module.fail_json(msg='privilege must be between 1 and 15, got %s' % value)
def user_del_cmd(username):
    """Return the clear-prompt command dict that deletes *username*."""
    delete = dict()
    delete['command'] = 'no username %s' % username
    delete['prompt'] = ('This operation will remove all username related'
                        ' configurations with same name')
    delete['answer'] = 'y'
    delete['newline'] = False
    return delete
def sshkey_fingerprint(sshkey):
    """Rewrite *sshkey* as the upper-case MD5 fingerprint line IOS accepts."""
    if not sshkey:
        return None

    def _digest(b64_body):
        return hashlib.md5(base64.b64decode(b64_body)).hexdigest().upper()

    fields = sshkey.split(' ')
    if len(fields) > 1:
        # 'ssh-rsa AAAA...== comment': hash only the key body (field 2).
        fields[1] = _digest(fields[1])
        return ' '.join(fields)
    # Bare key body: assume RSA.
    return 'ssh-rsa %s' % _digest(sshkey)
def map_obj_to_commands(updates, module):
    """Translate (want, have) user pairs from ``update_objects`` into the
    IOS configuration command list that converges them."""
    commands = list()
    update_password = module.params['update_password']
    password_type = module.params['password_type']
    def needs_update(want, have, x):
        # Truthy desired value differing from the current one.
        return want.get(x) and (want.get(x) != have.get(x))
    def add(command, want, x):
        command.append('username %s %s' % (want['name'], x))
    def add_hashed_password(command, want, x):
        # x is a {'type': <hash-type>, 'value': <hash>} dict.
        command.append('username %s secret %s %s' % (want['name'], x.get('type'),
                                                     x.get('value')))
    def add_ssh(command, want, x=None):
        # pubkey-chain context: write the keys, or remove the user's entry.
        command.append('ip ssh pubkey-chain')
        if x:
            command.append('username %s' % want['name'])
            for item in x:
                command.append('key-hash %s' % item)
            command.append('exit')
        else:
            command.append('no username %s' % want['name'])
        command.append('exit')
    for update in updates:
        want, have = update
        if want['state'] == 'absent':
            if have['sshkey']:
                # SSH key entries must be removed via pubkey-chain.
                add_ssh(commands, want)
            else:
                commands.append(user_del_cmd(want['name']))
        if needs_update(want, have, 'view'):
            add(commands, want, 'view %s' % want['view'])
        if needs_update(want, have, 'privilege'):
            add(commands, want, 'privilege %s' % want['privilege'])
        if needs_update(want, have, 'sshkey'):
            add_ssh(commands, want, want['sshkey'])
        if needs_update(want, have, 'configured_password'):
            if update_password == 'always' or not have:
                # Never mix 'password' and 'secret' styles on one user.
                if have and password_type != have['password_type']:
                    module.fail_json(msg='Can not have both a user password and a user secret.' +
                                     ' Please choose one or the other.')
                add(commands, want, '%s %s' % (password_type, want['configured_password']))
        if needs_update(want, have, 'hashed_password'):
            add_hashed_password(commands, want, want['hashed_password'])
        if needs_update(want, have, 'nopassword'):
            if want['nopassword']:
                add(commands, want, 'nopassword')
            else:
                # NOTE(review): unreachable — needs_update() already required
                # want['nopassword'] to be truthy.
                add(commands, want, user_del_cmd(want['name']))
    return commands
def parse_view(data):
    """Return the parser-view name from a username config blob, or None."""
    hit = re.search(r'view (\S+)', data, re.M)
    if hit is None:
        return None
    return hit.group(1)
def parse_sshkey(data, user):
    """Return the key-hash entries for *user* in the pubkey-chain config."""
    section = re.search(r'username %s(\n\s+key-hash .+$)+' % user, data, re.M)
    if section is None:
        return []
    return re.findall(r'key-hash (\S+ \S+(?: .+)?)$', section.group(), re.M)
def parse_privilege(data):
    """Return the privilege level in *data* as an int, or None if absent."""
    hit = re.search(r'privilege (\S+)', data, re.M)
    if hit is None:
        return None
    return int(hit.group(1))
def parse_password_type(data):
    """Return 'password' or 'secret' when the username line uses one."""
    if not data:
        return None
    tokens = data.split()
    # The keyword sits third from the end on
    # 'username <name> (password|secret) <enc> <value>' lines.
    keyword = tokens[-3]
    return keyword if keyword in ('password', 'secret') else None
def map_config_to_obj(module):
    """Parse the device running config into a list of current-user dicts."""
    data = get_config(module, flags=['| section username'])
    # 'username <name>' at column 0, or two-space indented inside an
    # 'ip ssh pubkey-chain' block.
    match = re.findall(r'(?:^(?:u|\s{2}u))sername (\S+)', data, re.M)
    if not match:
        return list()
    instances = list()
    for user in set(match):
        # Every 'username <user> ...' line for this user, joined.
        regex = r'username %s .+$' % user
        cfg = re.findall(regex, data, re.M)
        cfg = '\n'.join(cfg)
        obj = {
            'name': user,
            'state': 'present',
            'nopassword': 'nopassword' in cfg,
            # Password values cannot be read back from the config.
            'configured_password': None,
            'hashed_password': None,
            'password_type': parse_password_type(cfg),
            'sshkey': parse_sshkey(data, user),
            'privilege': parse_privilege(cfg),
            'view': parse_view(cfg)
        }
        instances.append(obj)
    return instances
def get_param_value(key, item, module):
    """Resolve *key* for one aggregate entry, falling back to module
    params; type-checks per-item values and runs ``validate_<key>``
    (when defined at module level) on the final value."""
    # Falsy/absent per-item value: fall back to the module-level parameter.
    if not item.get(key):
        value = module.params[key]
    # if key does exist, do a type check on it to validate it
    else:
        value_type = module.argument_spec[key].get('type', 'str')
        type_checker = module._CHECK_ARGUMENT_TYPES_DISPATCHER[value_type]
        type_checker(item[key])
        value = item[key]
    # validate the param value (if validator func exists)
    validator = globals().get('validate_%s' % key)
    if all((value, validator)):
        validator(value, module)
    return value
def map_params_to_obj(module):
    """Normalize ``name``/``aggregate`` parameters into desired-user dicts;
    fails when no usable name is given (unless purge-only)."""
    users = module.params['aggregate']
    if not users:
        if not module.params['name'] and module.params['purge']:
            # Purge-only run: no users desired.
            return list()
        elif not module.params['name']:
            module.fail_json(msg='username is required')
        else:
            aggregate = [{'name': module.params['name']}]
    else:
        aggregate = list()
        for item in users:
            if not isinstance(item, dict):
                # Bare string entry is the username itself.
                aggregate.append({'name': item})
            elif 'name' not in item:
                module.fail_json(msg='name is required')
            else:
                aggregate.append(item)
    objects = list()
    for item in aggregate:
        # Fill every attribute, falling back to module-level parameters.
        get_value = partial(get_param_value, item=item, module=module)
        item['configured_password'] = get_value('configured_password')
        item['hashed_password'] = get_value('hashed_password')
        item['nopassword'] = get_value('nopassword')
        item['privilege'] = get_value('privilege')
        item['view'] = get_value('view')
        item['sshkey'] = render_key_list(get_value('sshkey'))
        item['state'] = get_value('state')
        objects.append(item)
    return objects
def render_key_list(ssh_keys):
    """Fingerprint each supplied SSH key; empty list when none given."""
    return [sshkey_fingerprint(key) for key in ssh_keys] if ssh_keys else []
def update_objects(want, have):
    """Pair each desired entry with the existing record it must update.

    New users (absent from *have* but desired present) are paired with an
    empty dict.  Existing users are paired with their current record for
    every truthy desired value that differs from the current one.
    """
    updates = list()
    for entry in want:
        current = next((rec for rec in have if rec['name'] == entry['name']),
                       None)
        if current is None and entry['state'] == 'present':
            updates.append((entry, {}))
        elif current:
            # NOTE: one append per differing field, mirroring the original
            # behaviour (an entry may therefore appear multiple times).
            for key, value in iteritems(entry):
                if value and value != current[key]:
                    updates.append((entry, current))
    return updates
def main():
    """Entry point for the ios_user Ansible module.

    Builds the argument spec, computes desired vs. current user sets,
    generates the needed device commands (including purges) and applies
    them unless running in check mode.
    """
    # Sub-spec for a pre-hashed password: hash algorithm number plus value.
    hashed_password_spec = dict(
        type=dict(type='int', required=True),
        value=dict(no_log=True, required=True)
    )
    # Options accepted both at the top level and per-aggregate entry.
    element_spec = dict(
        name=dict(),
        configured_password=dict(no_log=True),
        hashed_password=dict(no_log=True, type='dict', options=hashed_password_spec),
        nopassword=dict(type='bool'),
        update_password=dict(default='always', choices=['on_create', 'always']),
        password_type=dict(default='secret', choices=['secret', 'password']),
        privilege=dict(type='int'),
        view=dict(aliases=['role']),
        sshkey=dict(type='list'),
        state=dict(default='present', choices=['present', 'absent'])
    )
    # Per-entry spec: name becomes mandatory inside an aggregate.
    aggregate_spec = deepcopy(element_spec)
    aggregate_spec['name'] = dict(required=True)
    # remove default in aggregate spec, to handle common arguments
    remove_default_spec(aggregate_spec)
    argument_spec = dict(
        aggregate=dict(type='list', elements='dict', options=aggregate_spec, aliases=['users', 'collection']),
        purge=dict(type='bool', default=False)
    )
    argument_spec.update(element_spec)
    argument_spec.update(ios_argument_spec)
    mutually_exclusive = [('name', 'aggregate'), ('nopassword', 'hashed_password', 'configured_password')]
    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)
    warnings = list()
    # Guard against a common mix-up: "password" authenticates the current
    # connection, it does not set the managed user's password.
    if module.params['password'] and not module.params['configured_password']:
        warnings.append(
            'The "password" argument is used to authenticate the current connection. ' +
            'To set a user password use "configured_password" instead.'
        )
    check_args(module, warnings)
    result = {'changed': False}
    if warnings:
        result['warnings'] = warnings
    # Desired state from parameters vs. current state parsed from device config.
    want = map_params_to_obj(module)
    have = map_config_to_obj(module)
    commands = map_obj_to_commands(update_objects(want, have), module)
    # With purge enabled, remove device users absent from the desired set
    # (the 'admin' account is never deleted).
    if module.params['purge']:
        want_users = [x['name'] for x in want]
        have_users = [x['name'] for x in have]
        for item in set(have_users).difference(want_users):
            if item != 'admin':
                commands.append(user_del_cmd(item))
    result['commands'] = commands
    if commands:
        # Only push configuration when not in check mode.
        if not module.check_mode:
            load_config(module, commands)
        result['changed'] = True
    module.exit_json(**result)
# Standard module entry point.
if __name__ == '__main__':
    main()
| true | true |
f724d87fd763688168a55ea7c5a6817849d45718 | 125 | py | Python | src/hist/numpy.py | andrzejnovak/hist | 15a41565ac9a3683bff74b98803c4b88ad8a19ae | [
"BSD-3-Clause"
] | null | null | null | src/hist/numpy.py | andrzejnovak/hist | 15a41565ac9a3683bff74b98803c4b88ad8a19ae | [
"BSD-3-Clause"
] | null | null | null | src/hist/numpy.py | andrzejnovak/hist | 15a41565ac9a3683bff74b98803c4b88ad8a19ae | [
"BSD-3-Clause"
] | null | null | null | from boost_histogram.numpy import histogram, histogram2d, histogramdd
__all__ = ("histogram", "histogram2d", "histogramdd")
| 31.25 | 69 | 0.792 | from boost_histogram.numpy import histogram, histogram2d, histogramdd
__all__ = ("histogram", "histogram2d", "histogramdd")
| true | true |
f724d8ab5bf6fadc70a44f28e9bffcf70edecf16 | 1,732 | py | Python | tools/randomData.py | Tandelajr/mr.tandela | 096cce682de58f2a7035d3e114787a78a1015a9b | [
"MIT"
] | 3 | 2020-06-23T11:59:14.000Z | 2020-12-03T15:20:18.000Z | tools/randomData.py | Tandelajr/mr.tandela | 096cce682de58f2a7035d3e114787a78a1015a9b | [
"MIT"
] | 1 | 2020-06-23T12:01:41.000Z | 2020-06-23T12:01:41.000Z | tools/randomData.py | Tandelajr/mr.tandela | 096cce682de58f2a7035d3e114787a78a1015a9b | [
"MIT"
] | 1 | 2020-12-03T15:20:26.000Z | 2020-12-03T15:20:26.000Z | #!/usr/bin/env https://github.com/Tandelajr/mr.tandela
# MIT License
#
# Copyright (C) 2020, Entynetproject. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import random
# Get random IP
def random_IP():
    """Return a random dotted-quad IPv4 address (each octet in 1-255)."""
    octets = (str(random.randint(1, 255)) for _ in range(4))
    return ".".join(octets)
# Get random referer
def random_referer():
    """Pick a random referer line from the bundled referers list.

    Lines keep their trailing newline, matching ``readlines()``.
    """
    with open("tools/other/referers.txt", 'r') as handle:
        lines = handle.readlines()
    return random.choice(lines)
# Get random user agent
def random_useragent():
    """Pick a random user-agent string from the bundled JSON list."""
    with open("tools/other/user_agents.json", 'r') as handle:
        agents = json.load(handle)["agents"]
    return random.choice(agents)
| 37.652174 | 80 | 0.741917 |
import json
import random
def random_IP():
    # Build a dotted-quad IPv4 string; each octet is drawn from 1-255.
    ip = []
    for _ in range(0, 4):
        ip.append(str(random.randint(1,255)))
    return ".".join(ip)
def random_referer():
    # Choose a random line (trailing newline included) from the referers file.
    with open("tools/other/referers.txt", 'r') as referers:
        referers = referers.readlines()
    return random.choice(referers)
def random_useragent():
    # Choose a random user-agent from the "agents" list in the JSON file.
    with open("tools/other/user_agents.json", 'r') as agents:
        user_agents = json.load(agents)["agents"]
    return random.choice(user_agents)
| true | true |
f724d950f3f0f4ab4df0111f810aec962a3b5e21 | 149,021 | py | Python | scipy/stats/stats.py | Dapid/scipy | dde07a64407ffaa9442b3d8298c6c26ff91fb384 | [
"BSD-3-Clause"
] | null | null | null | scipy/stats/stats.py | Dapid/scipy | dde07a64407ffaa9442b3d8298c6c26ff91fb384 | [
"BSD-3-Clause"
] | null | null | null | scipy/stats/stats.py | Dapid/scipy | dde07a64407ffaa9442b3d8298c6c26ff91fb384 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) Gary Strangman. All rights reserved
#
# Disclaimer
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
#
#
# Heavily adapted for use by SciPy 2002 by Travis Oliphant
"""
A collection of basic statistical functions for python. The function
names appear below.
Some scalar functions defined here are also available in the scipy.special
package where they work on arbitrary sized arrays.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful.
Central Tendency
----------------
.. autosummary::
:toctree: generated/
gmean
hmean
mode
Moments
-------
.. autosummary::
:toctree: generated/
moment
variation
skew
kurtosis
normaltest
Moments Handling NaN:
.. autosummary::
:toctree: generated/
nanmean
nanmedian
nanstd
Altered Versions
----------------
.. autosummary::
:toctree: generated/
tmean
tvar
tstd
tsem
describe
Frequency Stats
---------------
.. autosummary::
:toctree: generated/
itemfreq
scoreatpercentile
percentileofscore
histogram
cumfreq
relfreq
Variability
-----------
.. autosummary::
:toctree: generated/
obrientransform
signaltonoise
sem
Trimming Functions
------------------
.. autosummary::
:toctree: generated/
threshold
trimboth
trim1
Correlation Functions
---------------------
.. autosummary::
:toctree: generated/
pearsonr
fisher_exact
spearmanr
pointbiserialr
kendalltau
linregress
theilslopes
Inferential Stats
-----------------
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
chisquare
power_divergence
ks_2samp
mannwhitneyu
ranksums
wilcoxon
kruskal
friedmanchisquare
combine_pvalues
Probability Calculations
------------------------
.. autosummary::
:toctree: generated/
chisqprob
betai
ANOVA Functions
---------------
.. autosummary::
:toctree: generated/
f_oneway
f_value
Support Functions
-----------------
.. autosummary::
:toctree: generated/
ss
square_of_sums
rankdata
References
----------
.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
from __future__ import division, print_function, absolute_import
import warnings
import math
from collections import namedtuple
from scipy._lib.six import xrange
# Scipy imports.
from scipy._lib.six import callable, string_types
from numpy import array, asarray, ma, zeros
import scipy.special as special
import scipy.linalg as linalg
import numpy as np
from . import futil
from . import distributions
from ._rank import rankdata, tiecorrect
__all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar',
'tmin', 'tmax', 'tstd', 'tsem', 'moment', 'variation',
'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
'normaltest', 'jarque_bera', 'itemfreq',
'scoreatpercentile', 'percentileofscore', 'histogram',
'histogram2', 'cumfreq', 'relfreq', 'obrientransform',
'signaltonoise', 'sem', 'zmap', 'zscore', 'threshold',
'sigmaclip', 'trimboth', 'trim1', 'trim_mean', 'f_oneway',
'pearsonr', 'fisher_exact', 'spearmanr', 'pointbiserialr',
'kendalltau', 'linregress', 'theilslopes', 'ttest_1samp',
'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', 'kstest',
'chisquare', 'power_divergence', 'ks_2samp', 'mannwhitneyu',
'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
'chisqprob', 'betai',
'f_value_wilks_lambda', 'f_value', 'f_value_multivariate',
'ss', 'square_of_sums', 'fastsort', 'rankdata', 'nanmean',
'nanstd', 'nanmedian', 'combine_pvalues', ]
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
return a, outaxis
def _chk2_asarray(a, b, axis):
if axis is None:
a = np.ravel(a)
b = np.ravel(b)
outaxis = 0
else:
a = np.asarray(a)
b = np.asarray(b)
outaxis = axis
return a, b, outaxis
def find_repeats(arr):
    """
    Find repeats and repeat counts.

    Parameters
    ----------
    arr : array_like
        Input array

    Returns
    -------
    find_repeats : tuple
        Returns a tuple of two 1-D ndarrays.  The first ndarray are the repeats
        as sorted, unique values that are repeated in `arr`.  The second
        ndarray are the counts mapped one-to-one of the repeated values
        in the first ndarray.

    Examples
    --------
    >>> from scipy import stats
    >>> stats.find_repeats([2, 1, 2, 3, 2, 2, 5])
    (array([ 2.]), array([4], dtype=int32))

    >>> stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]])
    (array([ 4.,  5.]), array([2, 2], dtype=int32))

    """
    # futil.dfreps returns fixed-size output buffers together with the
    # number of valid entries ``n``; trim both buffers to that length.
    v1, v2, n = futil.dfreps(arr)
    return v1[:n], v2[:n]
#######
# NAN friendly functions
########
@np.deprecate(message="scipy.stats.nanmean is deprecated in scipy 0.15.0 "
                      "in favour of numpy.nanmean.")
def nanmean(x, axis=0):
    """
    Compute the mean over the given axis ignoring nans.

    Parameters
    ----------
    x : ndarray
        Input array.
    axis : int or None, optional
        Axis along which the mean is computed. Default is 0.
        If None, compute over the whole array `x`.

    Returns
    -------
    m : float
        The mean of `x`, ignoring nans.

    See Also
    --------
    nanstd, nanmedian

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.linspace(0, 4, 3)
    >>> a
    array([ 0.,  2.,  4.])
    >>> a[-1] = np.nan
    >>> stats.nanmean(a)
    1.0

    """
    x, axis = _chk_asarray(x, axis)
    x = x.copy()
    Norig = x.shape[axis]
    mask = np.isnan(x)
    # Fraction of valid (non-NaN) entries along `axis`; the mean is taken
    # with NaNs zeroed out and rescaled by this factor so it equals the
    # mean of the valid entries alone.
    factor = 1.0 - np.sum(mask, axis) / Norig
    x[mask] = 0.0
    return np.mean(x, axis) / factor
@np.deprecate(message="scipy.stats.nanstd is deprecated in scipy 0.15 "
                      "in favour of numpy.nanstd.\nNote that numpy.nanstd "
                      "has a different signature.")
def nanstd(x, axis=0, bias=False):
    """
    Compute the standard deviation over the given axis, ignoring nans.

    Parameters
    ----------
    x : array_like
        Input array.
    axis : int or None, optional
        Axis along which the standard deviation is computed. Default is 0.
        If None, compute over the whole array `x`.
    bias : bool, optional
        If True, the biased (normalized by N) definition is used. If False
        (default), the unbiased definition is used.

    Returns
    -------
    s : float
        The standard deviation.

    See Also
    --------
    nanmean, nanmedian

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(10, dtype=float)
    >>> a[1:3] = np.nan
    >>> np.std(a)
    nan
    >>> stats.nanstd(a)
    2.9154759474226504
    >>> stats.nanstd(a.reshape(2, 5), axis=1)
    array([ 2.0817,  1.5811])
    >>> stats.nanstd(a.reshape(2, 5), axis=None)
    2.9154759474226504

    """
    x, axis = _chk_asarray(x, axis)
    x = x.copy()
    Norig = x.shape[axis]
    mask = np.isnan(x)
    Nnan = np.sum(mask, axis) * 1.0
    n = Norig - Nnan  # number of valid (non-NaN) entries per slice
    x[mask] = 0.0
    m1 = np.sum(x, axis) / n  # mean of the valid entries
    if axis:
        d = x - np.expand_dims(m1, axis)
    else:
        d = x - m1
    d *= d
    # Each zeroed NaN position spuriously contributed m1**2 to the summed
    # squared deviations; subtract that contribution.
    m2 = np.sum(d, axis) - m1 * m1 * Nnan
    if bias:
        m2c = m2 / n  # biased: normalized by n
    else:
        m2c = m2 / (n - 1.0)  # unbiased sample variance
    return np.sqrt(m2c)
def _nanmedian(arr1d): # This only works on 1d arrays
"""Private function for rank a arrays. Compute the median ignoring Nan.
Parameters
----------
arr1d : ndarray
Input array, of rank 1.
Results
-------
m : float
The median.
"""
x = arr1d.copy()
c = np.isnan(x)
s = np.where(c)[0]
if s.size == x.size:
warnings.warn("All-NaN slice encountered", RuntimeWarning)
return np.nan
elif s.size != 0:
# select non-nans at end of array
enonan = x[-s.size:][~c[-s.size:]]
# fill nans in beginning of array with non-nans of end
x[s[:enonan.size]] = enonan
# slice nans away
x = x[:-s.size]
return np.median(x, overwrite_input=True)
@np.deprecate(message="scipy.stats.nanmedian is deprecated in scipy 0.15 "
                      "in favour of numpy.nanmedian.")
def nanmedian(x, axis=0):
    """
    Compute the median along the given axis ignoring nan values.

    Parameters
    ----------
    x : array_like
        Input array.
    axis : int or None, optional
        Axis along which the median is computed. Default is 0.
        If None, compute over the whole array `x`.

    Returns
    -------
    m : float
        The median of `x` along `axis`.

    See Also
    --------
    nanstd, nanmean, numpy.nanmedian

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.array([0, 3, 1, 5, 5, np.nan])
    >>> stats.nanmedian(a)
    array(3.0)

    >>> b = np.array([0, 3, 1, 5, 5, np.nan, 5])
    >>> stats.nanmedian(b)
    array(4.0)

    Example with axis:

    >>> c = np.arange(30.).reshape(5,6)
    >>> idx = np.array([False, False, False, True, False] * 6).reshape(5,6)
    >>> c[idx] = np.nan
    >>> c
    array([[  0.,   1.,   2.,  nan,   4.,   5.],
           [  6.,   7.,  nan,   9.,  10.,  11.],
           [ 12.,  nan,  14.,  15.,  16.,  17.],
           [ nan,  19.,  20.,  21.,  22.,  nan],
           [ 24.,  25.,  26.,  27.,  nan,  29.]])
    >>> stats.nanmedian(c, axis=1)
    array([  2. ,   9. ,  15. ,  20.5,  26. ])

    """
    x, axis = _chk_asarray(x, axis)
    if x.ndim == 0:
        # 0-d input: return a plain Python float.
        return float(x.item())
    if hasattr(np, 'nanmedian'):  # numpy 1.9 faster for some cases
        return np.nanmedian(x, axis)
    x = np.apply_along_axis(_nanmedian, axis, x)
    if x.ndim == 0:
        # apply_along_axis can collapse to a 0-d array; unwrap it.
        x = float(x.item())
    return x
#####################################
# CENTRAL TENDENCY #
#####################################
def gmean(a, axis=0, dtype=None):
    """Geometric mean of *a* along *axis* (n-th root of the product).

    Parameters
    ----------
    a : array_like
        Input array or object convertible to an array.
    axis : int or None, optional
        Axis to reduce over; ``None`` means the whole array.  Default is 0.
    dtype : dtype, optional
        Accumulator/return dtype; defaults to the dtype of `a` (promoted
        to the platform default for low-precision integers).

    Returns
    -------
    gmean : ndarray

    Notes
    -----
    Computed as ``exp(mean(log(a)))``; use masked arrays to ignore any
    non-finite input values.
    """
    if isinstance(a, np.ndarray):
        if dtype:
            # Honour the requested dtype while preserving masked arrays.
            if isinstance(a, np.ma.MaskedArray):
                log_a = np.log(np.ma.asarray(a, dtype=dtype))
            else:
                log_a = np.log(np.asarray(a, dtype=dtype))
        else:
            log_a = np.log(a)
    else:
        # Not an ndarray yet: convert (honouring dtype) before the log.
        log_a = np.log(np.array(a, dtype=dtype))
    return np.exp(log_a.mean(axis=axis))
def hmean(a, axis=0, dtype=None):
    """Harmonic mean of *a* along *axis*: ``n / sum(1/x_i)``.

    Parameters
    ----------
    a : array_like
        Input array, masked array or object convertible to an array.
        All elements must be greater than zero.
    axis : int or None, optional
        Axis to reduce over; ``None`` means the whole array.  Default is 0.
    dtype : dtype, optional
        Accumulator/return dtype; defaults to the dtype of `a` (promoted
        to the platform default for low-precision integers).

    Returns
    -------
    hmean : ndarray

    Raises
    ------
    ValueError
        If any element of `a` is not strictly positive.
    """
    if not isinstance(a, np.ndarray):
        a = np.array(a, dtype=dtype)
    # Harmonic mean is only defined for strictly positive values.
    if not np.all(a > 0):
        raise ValueError(
            "Harmonic mean only defined if all elements greater than zero")
    if isinstance(a, np.ma.MaskedArray):
        size = a.count(axis)
    elif axis is None:
        a = a.ravel()
        size = a.shape[0]
    else:
        size = a.shape[axis]
    return size / np.sum(1.0/a, axis=axis, dtype=dtype)
def mode(a, axis=0):
    """Return the modal (most common) value along *axis* and its count.

    When several values tie for most common, the smallest one is reported.
    Empty input yields a pair of empty arrays.

    Parameters
    ----------
    a : array_like
        n-dimensional array of which to find mode(s).
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.

    Returns
    -------
    vals : ndarray
        Array of modal values.
    counts : ndarray
        Array of counts for each mode.
    """
    a, axis = _chk_asarray(a, axis)
    if a.size == 0:
        return np.array([]), np.array([])

    result_shape = list(a.shape)
    result_shape[axis] = 1
    best_value = np.zeros(result_shape, dtype=a.dtype)
    best_count = np.zeros(result_shape, dtype=int)
    # Scan every distinct value; np.unique returns them sorted, so on ties
    # the smallest value wins (`>` below is strict).
    for candidate in np.unique(np.ravel(a)):
        counts = np.expand_dims(np.sum(a == candidate, axis), axis)
        best_value = np.where(counts > best_count, candidate, best_value)
        best_count = np.maximum(counts, best_count)
    return best_value, best_count
def mask_to_limits(a, limits, inclusive):
    """Return *a* as a MaskedArray with values outside *limits* masked out.

    Parameters
    ----------
    a : array
    limits : (float or None, float or None)
        (lower, upper) bounds; ``None`` means unbounded on that side.
    inclusive : (bool, bool)
        Whether values exactly equal to the lower/upper bound are kept.

    Returns
    -------
    A MaskedArray.

    Raises
    ------
    ValueError
        If no values survive the masking.
    """
    lower, upper = limits
    lower_ok, upper_ok = inclusive
    masked = ma.MaskedArray(a)
    if lower is not None:
        # Inclusive bound keeps values equal to the limit.
        mask_fn = ma.masked_less if lower_ok else ma.masked_less_equal
        masked = mask_fn(masked, lower)
    if upper is not None:
        mask_fn = ma.masked_greater if upper_ok else ma.masked_greater_equal
        masked = mask_fn(masked, upper)
    if masked.count() == 0:
        raise ValueError("No array values within given limits")
    return masked
def tmean(a, limits=None, inclusive=(True, True)):
    """Arithmetic mean of *a*, ignoring values outside *limits*.

    Parameters
    ----------
    a : array_like
        Array of values.
    limits : None or (lower limit, upper limit), optional
        Values outside this range are ignored; ``None`` (default) uses all
        values, and either bound may individually be ``None``.
    inclusive : (bool, bool), optional
        Whether values exactly equal to the lower/upper limit are included.
        Default is (True, True).

    Returns
    -------
    tmean : float
    """
    arr = asarray(a)
    if limits is None:
        return np.mean(arr, None)
    trimmed = mask_to_limits(arr.ravel(), limits, inclusive)
    return trimmed.mean()
def masked_var(am):
    """Unbiased sample variance (ddof=1) of a masked array."""
    center = am.mean()
    sum_sq = ma.add.reduce((am - center)**2)
    return sum_sq / (am.count() - 1.0)
def tvar(a, limits=None, inclusive=(True, True)):
    """Trimmed sample variance of *a*, ignoring values outside *limits*.

    Parameters
    ----------
    a : array_like
        Array of values.
    limits : None or (lower limit, upper limit), optional
        Values outside this range are ignored; ``None`` (default) uses all
        values, and either bound may individually be ``None``.
    inclusive : (bool, bool), optional
        Whether values exactly equal to the lower/upper limit are included.
        Default is (True, True).

    Returns
    -------
    tvar : float
        Trimmed variance.

    Notes
    -----
    Uses the unbiased estimator, i.e. the ``n / (n - 1)`` correction.
    """
    a = asarray(a).astype(float).ravel()
    if limits is None:
        n = len(a)
        # Convert the biased ndarray.var() to the unbiased estimate.
        return a.var() * n / (n - 1.)
    return masked_var(mask_to_limits(a, limits, inclusive))
def tmin(a, lowerlimit=None, axis=0, inclusive=True):
    """Trimmed minimum of *a* along *axis*.

    Only values greater than (or, when `inclusive`, equal to) `lowerlimit`
    are considered.

    Parameters
    ----------
    a : array_like
        array of values
    lowerlimit : None or float, optional
        Values below this limit are ignored; ``None`` (default) uses all.
    axis : int or None, optional
        Axis along which to operate. Default is 0; ``None`` means the
        whole array.
    inclusive : {True, False}, optional
        Whether values exactly equal to the lower limit are included.
        Default is True.

    Returns
    -------
    tmin : float
    """
    arr, axis = _chk_asarray(a, axis)
    trimmed = mask_to_limits(arr, (lowerlimit, None), (inclusive, False))
    return ma.minimum.reduce(trimmed, axis)
def tmax(a, upperlimit=None, axis=0, inclusive=True):
    """Trimmed maximum of *a* along *axis*.

    Only values less than (or, when `inclusive`, equal to) `upperlimit`
    are considered.

    Parameters
    ----------
    a : array_like
        array of values
    upperlimit : None or float, optional
        Values above this limit are ignored; ``None`` (default) uses all.
    axis : int or None, optional
        Axis along which to operate. Default is 0; ``None`` means the
        whole array.
    inclusive : {True, False}, optional
        Whether values exactly equal to the upper limit are included.
        Default is True.

    Returns
    -------
    tmax : float
    """
    arr, axis = _chk_asarray(a, axis)
    trimmed = mask_to_limits(arr, (None, upperlimit), (False, inclusive))
    return ma.maximum.reduce(trimmed, axis)
def tstd(a, limits=None, inclusive=(True, True)):
    """Trimmed sample standard deviation of *a*.

    Parameters
    ----------
    a : array_like
        array of values
    limits : None or (lower limit, upper limit), optional
        Values outside this range are ignored; ``None`` (default) uses all
        values, and either bound may individually be ``None``.
    inclusive : (bool, bool), optional
        Whether values exactly equal to the lower/upper limit are included.
        Default is (True, True).

    Returns
    -------
    tstd : float

    Notes
    -----
    Uses the unbiased estimator, i.e. the ``n / (n - 1)`` correction
    (inherited from `tvar`).
    """
    trimmed_variance = tvar(a, limits, inclusive)
    return np.sqrt(trimmed_variance)
def tsem(a, limits=None, inclusive=(True, True)):
    """Trimmed standard error of the mean of *a*.

    Parameters
    ----------
    a : array_like
        array of values
    limits : None or (lower limit, upper limit), optional
        Values outside this range are ignored; ``None`` (default) uses all
        values, and either bound may individually be ``None``.
    inclusive : (bool, bool), optional
        Whether values exactly equal to the lower/upper limit are included.
        Default is (True, True).

    Returns
    -------
    tsem : float

    Notes
    -----
    Uses the unbiased sample standard deviation, i.e. the ``n / (n - 1)``
    correction.
    """
    arr = np.asarray(a).ravel()
    if limits is None:
        return arr.std(ddof=1) / np.sqrt(arr.size)
    trimmed = mask_to_limits(arr, limits, inclusive)
    return np.sqrt(masked_var(trimmed)) / np.sqrt(trimmed.count())
#####################################
# MOMENTS #
#####################################
def moment(a, moment=1, axis=0):
    """
    Calculates the nth moment about the mean for a sample.

    Generally used to calculate coefficients of skewness and
    kurtosis.

    Parameters
    ----------
    a : array_like
       data
    moment : int, optional
       order of central moment that is returned
    axis : int or None, optional
       Axis along which the central moment is computed. Default is 0.
       If None, compute over the whole array `a`.

    Returns
    -------
    n-th central moment : ndarray or float
       The appropriate moment along the given axis or over all values if axis
       is None. The denominator for the moment calculation is the number of
       observations, no degrees of freedom correction is done.

    """
    a, axis = _chk_asarray(a, axis)
    if moment == 1:
        # By definition the first moment about the mean is 0.
        shape = list(a.shape)
        del shape[axis]
        if shape:
            # return an actual array of the appropriate shape
            return np.zeros(shape, dtype=float)
        else:
            # the input was 1D, so return a scalar instead of a rank-0 array
            return np.float64(0.0)
    else:
        # Exponentiation by squares: form exponent sequence by repeated
        # halving so only O(log(moment)) array products are needed below.
        n_list = [moment]
        current_n = moment
        while current_n > 2:
            if current_n % 2:
                current_n = (current_n-1)/2
            else:
                current_n /= 2
            n_list.append(current_n)

        # Starting point for exponentiation by squares
        a_zero_mean = a - np.expand_dims(np.mean(a, axis), axis)
        if n_list[-1] == 1:
            s = a_zero_mean.copy()
        else:
            s = a_zero_mean**2

        # Perform multiplications: square each step, multiplying by the
        # base once more whenever the corresponding exponent was odd.
        for n in n_list[-2::-1]:
            s = s**2
            if n % 2:
                s *= a_zero_mean
        return np.mean(s, axis)
def variation(a, axis=0):
    """Coefficient of variation: biased standard deviation over the mean.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to compute; default 0, ``None`` means the whole
        array.

    References
    ----------
    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall: New
       York. 2000.
    """
    arr, axis = _chk_asarray(a, axis)
    return arr.std(axis) / arr.mean(axis)
def skew(a, axis=0, bias=True):
    """
    Computes the skewness of a data set.

    For normally distributed data, the skewness should be about 0. A skewness
    value > 0 means that there is more weight in the left tail of the
    distribution. The function `skewtest` can be used to determine if the
    skewness value is close enough to 0, statistically speaking.

    Parameters
    ----------
    a : ndarray
        data
    axis : int or None, optional
        Axis along which skewness is calculated. Default is 0.
        If None, compute over the whole array `a`.
    bias : bool, optional
        If False, then the calculations are corrected for statistical bias.

    Returns
    -------
    skewness : ndarray
        The skewness of values along an axis, returning 0 where all values are
        equal.

    References
    ----------
    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall: New
       York. 2000.
       Section 2.2.24.1

    """
    a, axis = _chk_asarray(a, axis)
    n = a.shape[axis]
    m2 = moment(a, 2, axis)
    m3 = moment(a, 3, axis)
    zero = (m2 == 0)
    # g1 = m3 / m2**1.5; slices with zero variance are defined to be 0.
    vals = np.where(zero, 0, m3 / m2**1.5)
    if not bias:
        # Bias correction is only possible with n > 2 and nonzero variance;
        # corrected values are written back in-place via np.place.
        can_correct = (n > 2) & (m2 > 0)
        if can_correct.any():
            m2 = np.extract(can_correct, m2)
            m3 = np.extract(can_correct, m3)
            nval = np.sqrt((n-1.0)*n) / (n-2.0) * m3/m2**1.5
            np.place(vals, can_correct, nval)
    if vals.ndim == 0:
        # 0-d result: return a scalar rather than a rank-0 array.
        return vals.item()
    return vals
def kurtosis(a, axis=0, fisher=True, bias=True):
    """
    Computes the kurtosis (Fisher or Pearson) of a dataset.

    Kurtosis is the fourth central moment divided by the square of the
    variance.  With Fisher's definition, 3.0 is subtracted from the result
    so that a normal distribution yields 0.0.  With ``bias=False`` the
    estimate is corrected using k statistics.  Use `kurtosistest` to see
    whether the result is close enough to normal.

    Parameters
    ----------
    a : array
        Data for which the kurtosis is calculated.
    axis : int or None, optional
        Axis along which the kurtosis is calculated.  Default is 0.
        If None, compute over the whole array `a`.
    fisher : bool, optional
        If True, Fisher's definition is used (normal ==> 0.0).  If False,
        Pearson's definition is used (normal ==> 3.0).
    bias : bool, optional
        If False, the calculations are corrected for statistical bias.

    Returns
    -------
    kurtosis : array
        The kurtosis of values along an axis.  If all values are equal,
        returns -3 for Fisher's definition and 0 for Pearson's definition.

    References
    ----------
    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall: New
       York. 2000.
    """
    a, axis = _chk_asarray(a, axis)
    n = a.shape[axis]
    mu2 = moment(a, 2, axis)
    mu4 = moment(a, 4, axis)
    # m4 / m2**2, defined to be 0 wherever the variance is 0; silence the
    # 0/0 warnings while evaluating the full array.
    with np.errstate(all='ignore'):
        vals = np.where(mu2 == 0, 0, mu4 / mu2**2.0)
    if not bias:
        # The k-statistics correction is only valid for n > 3 and
        # positive variance.
        can_correct = (n > 3) & (mu2 > 0)
        if can_correct.any():
            m2c = np.extract(can_correct, mu2)
            m4c = np.extract(can_correct, mu4)
            nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4c/m2c**2.0 - 3*(n-1)**2.0)
            np.place(vals, can_correct, nval + 3.0)
    if vals.ndim == 0:
        vals = vals.item()  # array scalar
    # Fisher's definition is the excess kurtosis (Pearson minus 3).
    return vals - 3 if fisher else vals
# Result type for `describe`; fields mirror the documented return values.
_DescribeResult = namedtuple('DescribeResult', ('nobs', 'minmax', 'mean',
                                                'variance', 'skewness',
                                                'kurtosis'))


def describe(a, axis=0, ddof=1):
    """
    Computes several descriptive statistics of the passed array.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : int or None, optional
        Axis along which statistics are calculated.  Default is 0.
        If None, compute over the whole array `a`.
    ddof : int, optional
        Delta degrees of freedom for the variance.  Default is 1.

    Returns
    -------
    nobs : int
        Number of observations (length of data along `axis`).
    minmax : tuple of ndarrays or floats
        Minimum and maximum value of the data array.
    mean : ndarray or float
        Arithmetic mean of the data along axis.
    variance : ndarray or float
        Unbiased variance of the data along axis (denominator is the
        number of observations minus one).
    skewness : ndarray or float
        Biased skewness (no degrees-of-freedom correction).
    kurtosis : ndarray or float
        Biased Fisher kurtosis (normalized so the normal distribution
        gives zero; no bias correction).

    See Also
    --------
    skew, kurtosis
    """
    a, axis = _chk_asarray(a, axis)
    nobs = a.shape[axis]
    minmax = (np.min(a, axis=axis), np.max(a, axis=axis))
    mean = np.mean(a, axis=axis)
    variance = np.var(a, axis=axis, ddof=ddof)
    skewness = skew(a, axis)
    kurt = kurtosis(a, axis)
    return _DescribeResult(nobs=nobs, minmax=minmax, mean=mean,
                           variance=variance, skewness=skewness,
                           kurtosis=kurt)
#####################################
# NORMALITY TESTS #
#####################################
def skewtest(a, axis=0):
    """
    Tests whether the skew is different from the normal distribution.
    This function tests the null hypothesis that the skewness of
    the population that the sample was drawn from is the same
    as that of a corresponding normal distribution.
    Parameters
    ----------
    a : array
        The data to be tested
    axis : int or None, optional
        Axis along which statistics are calculated. Default is 0.
        If None, compute over the whole array `a`.
    Returns
    -------
    z-score : float
        The computed z-score for this test.
    p-value : float
        a 2-sided p-value for the hypothesis test
    Notes
    -----
    The sample size must be at least 8.
    """
    a, axis = _chk_asarray(a, axis)
    if axis is None:
        # Flatten and test the whole array as a single sample.
        a = np.ravel(a)
        axis = 0
    b2 = skew(a, axis)
    n = float(a.shape[axis])
    if n < 8:
        raise ValueError(
            "skewtest is not valid with less than 8 samples; %i samples"
            " were given." % int(n))
    # Map the sample skewness b2 to an approximately standard-normal
    # statistic.  The Z below is delta * arcsinh(y / alpha), written out
    # via log(x + sqrt(x**2 + 1)); the constants depend only on n.
    y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
    beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
             ((n-2.0) * (n+5) * (n+7) * (n+9)))
    W2 = -1 + math.sqrt(2 * (beta2 - 1))
    delta = 1 / math.sqrt(0.5 * math.log(W2))
    alpha = math.sqrt(2.0 / (W2 - 1))
    # NOTE(review): y == 0 (exactly zero sample skewness) is replaced by 1
    # before the transform, yielding a nonzero Z in that edge case;
    # preserved as-is from the original implementation.
    y = np.where(y == 0, 1, y)
    Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1))
    # Two-sided p-value from the standard-normal survival function.
    return Z, 2 * distributions.norm.sf(np.abs(Z))
def kurtosistest(a, axis=0):
    """
    Tests whether a dataset has normal kurtosis
    This function tests the null hypothesis that the kurtosis
    of the population from which the sample was drawn is that
    of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``.
    Parameters
    ----------
    a : array
        array of the sample data
    axis : int or None, optional
        Axis along which to compute test. Default is 0. If None,
        compute over the whole array `a`.
    Returns
    -------
    z-score : float
        The computed z-score for this test.
    p-value : float
        The 2-sided p-value for the hypothesis test
    Notes
    -----
    Valid only for n>20. The Z-score is set to 0 for bad entries.
    """
    a, axis = _chk_asarray(a, axis)
    n = float(a.shape[axis])
    if n < 5:
        raise ValueError(
            "kurtosistest requires at least 5 observations; %i observations"
            " were given." % int(n))
    if n < 20:
        warnings.warn("kurtosistest only valid for n>=20 ... continuing "
                      "anyway, n=%i" % int(n))
    # Pearson kurtosis b2 of the sample, its expected value E and variance
    # varb2 under normality; x is b2 standardized to zero mean, unit sd.
    b2 = kurtosis(a, axis, fisher=False)
    E = 3.0*(n-1) / (n+1)
    varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5))
    x = (b2-E) / np.sqrt(varb2)
    # Normalize x toward a standard normal via a cube-root transform; the
    # constants sqrtbeta1 and A depend only on n.
    sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
                                                        (n*(n-2)*(n-3)))
    A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
    term1 = 1 - 2/(9.0*A)
    denom = 1 + x*np.sqrt(2/(A-4.0))
    # Bad entries (denom < 0) are flagged with the sentinel 99 and get Z=0
    # below.
    denom = np.where(denom < 0, 99, denom)
    # NOTE(review): after the sentinel substitution above, `denom < 0` can
    # never be true here, so the `term1` branch of this np.where is dead;
    # preserved as-is from the original implementation.
    term2 = np.where(denom < 0, term1, np.power((1-2.0/A)/denom, 1/3.0))
    Z = (term1 - term2) / np.sqrt(2/(9.0*A))
    Z = np.where(denom == 99, 0, Z)
    if Z.ndim == 0:
        # Unwrap 0-d arrays to a scalar.
        Z = Z[()]
    # zprob uses upper tail, so Z needs to be positive
    return Z, 2 * distributions.norm.sf(np.abs(Z))
def normaltest(a, axis=0):
    """
    Tests whether a sample differs from a normal distribution.

    Tests the null hypothesis that the sample comes from a normal
    distribution, using D'Agostino and Pearson's [1]_, [2]_ omnibus test:
    the squared z-scores of `skewtest` and `kurtosistest` are summed into
    a statistic that is chi-squared with 2 degrees of freedom under the
    null hypothesis.

    Parameters
    ----------
    a : array_like
        The array containing the data to be tested.
    axis : int or None, optional
        Axis along which to compute the test.  Default is 0.  If None,
        compute over the whole array `a`.

    Returns
    -------
    k2 : float or array
        `s^2 + k^2`, where `s` is the z-score returned by `skewtest` and
        `k` is the z-score returned by `kurtosistest`.
    p-value : float or array
        A 2-sided chi squared probability for the hypothesis test.

    References
    ----------
    .. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for
           moderate and large sample size," Biometrika, 58, 341-348
    .. [2] D'Agostino, R. and Pearson, E. S. (1973), "Testing for
           departures from normality," Biometrika, 60, 613-622
    """
    a, axis = _chk_asarray(a, axis)
    z_skew, _ = skewtest(a, axis)
    z_kurt, _ = kurtosistest(a, axis)
    statistic = z_skew * z_skew + z_kurt * z_kurt
    return statistic, chisqprob(statistic, 2)
def jarque_bera(x):
    """
    Perform the Jarque-Bera goodness of fit test on sample data.

    The Jarque-Bera test tests whether the sample data has the skewness and
    kurtosis matching a normal distribution.

    Note that this test only works for a large enough number of data samples
    (>2000) as the test statistic asymptotically has a Chi-squared
    distribution with 2 degrees of freedom.

    Parameters
    ----------
    x : array_like
        Observations of a random variable.

    Returns
    -------
    jb_value : float
        The test statistic.
    p : float
        The p-value for the hypothesis test.

    References
    ----------
    .. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality,
           homoscedasticity and serial independence of regression residuals",
           6 Econometric Letters 255-259.

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(987654321)
    >>> x = np.random.normal(0, 1, 100000)
    >>> y = np.random.rayleigh(1, 100000)
    >>> stats.jarque_bera(x)
    (4.7165707989581342, 0.09458225503041906)
    >>> stats.jarque_bera(y)
    (6713.7098548143422, 0.0)
    """
    x = np.asarray(x)
    n = float(x.size)
    if n == 0:
        raise ValueError('At least one observation is required.')

    mu = x.mean()
    diffx = x - mu
    # Sample skewness and (non-excess) kurtosis with 1/n normalization.
    skewness = (1 / n * np.sum(diffx**3)) / (1 / n * np.sum(diffx**2))**(3 / 2.)
    kurtosis = (1 / n * np.sum(diffx**4)) / (1 / n * np.sum(diffx**2))**2
    jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4)
    # Use the survival function rather than 1 - cdf: sf is evaluated
    # directly, so the p-value keeps full precision for large statistics
    # where 1 - cdf suffers catastrophic cancellation.
    p = distributions.chi2.sf(jb_value, 2)
    return jb_value, p
#####################################
# FREQUENCY FUNCTIONS #
#####################################
def itemfreq(a):
    """
    Returns a 2-D array of item frequencies.

    Parameters
    ----------
    a : (N,) array_like
        Input array.

    Returns
    -------
    itemfreq : (K, 2) ndarray
        A 2-D frequency table.  Column 1 contains sorted, unique values
        from `a`, column 2 contains their respective counts.

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4])
    >>> stats.itemfreq(a)
    array([[ 0.,  2.],
           [ 1.,  4.],
           [ 2.,  2.],
           [ 4.,  1.],
           [ 5.,  1.]])
    >>> np.bincount(a)
    array([2, 4, 2, 0, 1, 1])
    """
    # np.unique sorts the distinct values; the inverse indices let
    # np.bincount tally how often each distinct value occurs.
    values, inverse = np.unique(a, return_inverse=True)
    counts = np.bincount(inverse)
    return np.array([values, counts]).T
def scoreatpercentile(a, per, limit=(), interpolation_method='fraction',
                      axis=None):
    """
    Calculate the score at a given percentile of the input sequence.

    For example, the score at ``per=50`` is the median.  If the desired
    quantile lies between two data points, the result is interpolated
    according to `interpolation_method`.

    Parameters
    ----------
    a : array_like
        A 1-D array of values from which to extract the score.
    per : array_like
        Percentile(s) at which to extract the score, in the range [0, 100].
    limit : tuple, optional
        Tuple of two scalars; values of `a` outside this closed interval
        are ignored.
    interpolation_method : {'fraction', 'lower', 'higher'}, optional
        How to interpolate when the quantile falls between points `i`
        and `j`:

          - fraction: ``i + (j - i) * fraction`` where ``fraction`` is the
            fractional part of the index surrounded by ``i`` and ``j``.
          - lower: ``i``.
          - higher: ``j``.

    axis : int, optional
        Axis along which the percentiles are computed.  Default is None
        (compute over the whole array).

    Returns
    -------
    score : float or ndarray
        Score at percentile(s).

    See Also
    --------
    percentileofscore, numpy.percentile

    Notes
    -----
    For NumPy >= 1.9, `numpy.percentile` provides the same functionality
    and is significantly faster; prefer it there.

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(100)
    >>> stats.scoreatpercentile(a, 50)
    49.5
    """
    # Adapted from NumPy's percentile function; replaceable by
    # np.percentile once numpy >= 1.8 is required.
    a = np.asarray(a)
    if a.size == 0:
        # Empty input: return nan(s) with a shape matching `per`.
        if np.isscalar(per):
            return np.nan
        return np.ones(np.asarray(per).shape, dtype=np.float64) * np.nan

    if limit:
        # Keep only the values inside the closed interval [low, high].
        a = a[(limit[0] <= a) & (a <= limit[1])]

    sorted_a = np.sort(a, axis=axis)
    if axis is None:
        axis = 0
    return _compute_qth_percentile(sorted_a, per, interpolation_method, axis)
# handle sequence of per's without calling sort multiple times
def _compute_qth_percentile(sorted, per, interpolation_method, axis):
if not np.isscalar(per):
score = [_compute_qth_percentile(sorted, i, interpolation_method, axis)
for i in per]
return np.array(score)
if (per < 0) or (per > 100):
raise ValueError("percentile must be in the range [0, 100]")
indexer = [slice(None)] * sorted.ndim
idx = per / 100. * (sorted.shape[axis] - 1)
if int(idx) != idx:
# round fractional indices according to interpolation method
if interpolation_method == 'lower':
idx = int(np.floor(idx))
elif interpolation_method == 'higher':
idx = int(np.ceil(idx))
elif interpolation_method == 'fraction':
pass # keep idx as fraction and interpolate
else:
raise ValueError("interpolation_method can only be 'fraction', "
"'lower' or 'higher'")
i = int(idx)
if i == idx:
indexer[axis] = slice(i, i + 1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i + 2)
j = i + 1
weights = array([(j - idx), (idx - i)], float)
wshape = [1] * sorted.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use np.add.reduce (== np.sum but a little faster) to coerce data type
return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval
def percentileofscore(a, score, kind='rank'):
    """
    The percentile rank of a score relative to a list of scores.

    A `percentileofscore` of, for example, 80% means that 80% of the
    scores in `a` are below the given score.  In the case of gaps or
    ties, the exact definition depends on the optional keyword `kind`.

    Parameters
    ----------
    a : array_like
        Array of scores to which `score` is compared.
    score : int or float
        Score that is compared to the elements in `a`.
    kind : {'rank', 'weak', 'strict', 'mean'}, optional
        Interpretation of the resulting score:

        - "rank": Average percentage ranking of score.  In case of
                  multiple matches, average the percentage rankings of
                  all matching scores.
        - "weak": Corresponds to the definition of a cumulative
                  distribution function: 80% means that 80% of values
                  are less than or equal to the provided score.
        - "strict": Similar to "weak", except that only values strictly
                    less than the given score are counted.
        - "mean": The average of the "weak" and "strict" scores; often
                  used in testing.  See
                  http://en.wikipedia.org/wiki/Percentile_rank

    Returns
    -------
    pcos : float
        Percentile-position of score (0-100) relative to `a`.

    Raises
    ------
    ValueError
        If `kind` is not one of 'rank', 'strict', 'weak' or 'mean'.

    See Also
    --------
    numpy.percentile

    Examples
    --------
    >>> from scipy import stats
    >>> stats.percentileofscore([1, 2, 3, 4], 3)
    75.0
    >>> stats.percentileofscore([1, 2, 3, 3, 4], 3)
    70.0
    >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')
    40.0
    >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')
    80.0
    >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean')
    60.0
    """
    a = np.array(a)
    n = len(a)

    if kind == 'rank':
        if not np.any(a == score):
            # The score is absent: append it so it has a ranking position.
            a = np.append(a, score)
            a_len = np.array(list(range(len(a))))
        else:
            a_len = np.array(list(range(len(a)))) + 1.0

        a = np.sort(a)
        # Boolean mask of entries equal to `score`.  Index with the array
        # itself (not wrapped in a list): indexing with a list containing
        # an array is deprecated sequence indexing in newer numpy.
        idx = (a == score)
        pct = (np.mean(a_len[idx]) / n) * 100.0
        return pct

    elif kind == 'strict':
        return np.sum(a < score) / float(n) * 100
    elif kind == 'weak':
        return np.sum(a <= score) / float(n) * 100
    elif kind == 'mean':
        return (np.sum(a < score) + np.sum(a <= score)) * 50 / float(n)
    else:
        raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'")
def histogram2(a, bins):
    """
    Compute histogram using divisions in bins.

    Counts the number of times values from array `a` fall into numerical
    ranges defined by `bins`.  Range x is given by
    ``bins[x] <= range_x < bins[x+1]`` for x = 0..N where N is the length
    of `bins`; the last range is ``bins[N] <= range_N < infinity``.
    Values less than ``bins[0]`` are not included in the histogram.

    Parameters
    ----------
    a : array_like of rank 1
        The array of values to be assigned into bins.
    bins : array_like of rank 1
        Defines the ranges of values to use during histogramming.

    Returns
    -------
    histogram2 : ndarray of rank 1
        Each value represents the occurrences for a given bin (range) of
        values.
    """
    # Probably obsoleted by numpy.histogram().  searchsorted gives, for
    # each bin edge, how many sorted values fall below it; differencing
    # the cumulative counts yields per-bin counts.
    cumulative = np.searchsorted(np.sort(a), bins)
    cumulative = np.concatenate([cumulative, [len(a)]])
    return cumulative[1:] - cumulative[:-1]
def histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
    """
    Separates the range into several bins and returns the number of
    instances in each bin.

    Parameters
    ----------
    a : array_like
        Array of scores which will be put into bins.
    numbins : int, optional
        The number of bins to use for the histogram.  Default is 10.
    defaultlimits : tuple (lower, upper), optional
        The lower and upper values for the range of the histogram.
        When omitted, a range slightly larger than the range of the data
        is used: ``(a.min() - s, a.max() + s)``, where
        ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
    weights : array_like, optional
        The weights for each value in `a`.  Default is None, which gives
        each value a weight of 1.0.
    printextras : bool, optional
        If True, warn about points falling outside the bin limits.
        Default is False.

    Returns
    -------
    histogram : ndarray
        Number of points (or sum of weights) in each bin.
    low_range : float
        Lowest value of the histogram, the lower limit of the first bin.
    binsize : float
        The size of the bins (all bins have the same size).
    extrapoints : int
        The number of points outside the range of the histogram.

    See Also
    --------
    numpy.histogram

    Notes
    -----
    Based on numpy's histogram, but uses a slightly enlarged default range
    when no limits are given.
    """
    a = np.ravel(a)
    if defaultlimits is None:
        # Derive limits from the data, padded so the extreme values do
        # not sit exactly on the outer bin edges.
        data_min = a.min()
        data_max = a.max()
        s = (data_max - data_min) / (2. * (numbins - 1.))
        defaultlimits = (data_min - s, data_max + s)

    # Let numpy do the actual binning.
    hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,
                                   weights=weights)
    # Convert counts to float to keep the historical output type.
    hist = np.array(hist, dtype=float)
    # numpy produces equal-width bins for an integer `bins`, so the first
    # edge difference is the common bin width.
    binsize = bin_edges[1] - bin_edges[0]

    # Count the points that fell outside the requested range.
    extrapoints = int(((a < defaultlimits[0]) | (a > defaultlimits[1])).sum())
    if extrapoints > 0 and printextras:
        warnings.warn("Points outside given histogram range = %s"
                      % extrapoints)
    return hist, defaultlimits[0], binsize, extrapoints
def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
    """
    Returns a cumulative frequency histogram, using the histogram function.

    Parameters
    ----------
    a : array_like
        Input array.
    numbins : int, optional
        The number of bins to use for the histogram.  Default is 10.
    defaultreallimits : tuple (lower, upper), optional
        The lower and upper values for the range of the histogram.
        When omitted, a range slightly larger than the range of the data
        is used: ``(a.min() - s, a.max() + s)``, where
        ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
    weights : array_like, optional
        The weights for each value in `a`.  Default is None, which gives
        each value a weight of 1.0.

    Returns
    -------
    cumfreq : ndarray
        Binned values of cumulative frequency.
    lowerreallimit : float
        Lower real limit.
    binsize : float
        Width of each bin.
    extrapoints : int
        Extra points.

    Examples
    --------
    >>> from scipy import stats
    >>> x = [1, 4, 2, 1, 3, 1]
    >>> cumfreqs, lowlim, binsize, extrapoints = stats.cumfreq(x, numbins=4)
    >>> cumfreqs
    array([ 3.,  4.,  5.,  6.])
    >>> extrapoints
    0
    """
    counts, lowerlimit, binsize, extrapoints = histogram(
        a, numbins, defaultreallimits, weights=weights)
    # Running total of the per-bin counts gives the cumulative histogram.
    cumcounts = np.cumsum(counts, axis=0)
    return cumcounts, lowerlimit, binsize, extrapoints
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
    """
    Returns a relative frequency histogram, using the histogram function.

    Parameters
    ----------
    a : array_like
        Input array.
    numbins : int, optional
        The number of bins to use for the histogram.  Default is 10.
    defaultreallimits : tuple (lower, upper), optional
        The lower and upper values for the range of the histogram.
        When omitted, a range slightly larger than the range of the data
        is used: ``(a.min() - s, a.max() + s)``, where
        ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
    weights : array_like, optional
        The weights for each value in `a`.  Default is None, which gives
        each value a weight of 1.0.

    Returns
    -------
    relfreq : ndarray
        Binned values of relative frequency.
    lowerreallimit : float
        Lower real limit.
    binsize : float
        Width of each bin.
    extrapoints : int
        Extra points.

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.array([1, 4, 2, 1, 3, 1])
    >>> relfreqs, lowlim, binsize, extrapoints = stats.relfreq(a, numbins=4)
    >>> relfreqs
    array([ 0.5       ,  0.16666667,  0.16666667,  0.16666667])
    """
    counts, lowerlimit, binsize, extrapoints = histogram(
        a, numbins, defaultreallimits, weights=weights)
    # Normalize by the length of the first dimension of the input.
    total = float(np.array(a).shape[0])
    relative = np.array(counts / total)
    return relative, lowerlimit, binsize, extrapoints
#####################################
# VARIABILITY FUNCTIONS #
#####################################
def obrientransform(*args):
    """
    Computes the O'Brien transform on input data (any number of arrays).

    Used to test for homogeneity of variance prior to running one-way
    statistics.  Each array in ``*args`` is one level of a factor.  If
    `f_oneway` is run on the transformed data and found significant, the
    variances are unequal.  From Maxwell and Delaney [1]_, p.112.

    Parameters
    ----------
    args : tuple of array_like
        Any number of arrays.

    Returns
    -------
    obrientransform : ndarray
        Transformed data for use in an ANOVA.  The first dimension of the
        result indexes the transformed arrays.  If the arrays given are
        all 1-D of the same length, the return value is a 2-D array;
        otherwise it is a 1-D array of type object, with each element
        being an ndarray.

    Raises
    ------
    ValueError
        If the internal consistency check (mean of transformed data must
        equal the sample variance) fails.

    References
    ----------
    .. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and
           Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990.

    Examples
    --------
    >>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10]
    >>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15]
    >>> tx, ty = obrientransform(x, y)
    >>> from scipy.stats import f_oneway
    >>> F, p = f_oneway(tx, ty)
    >>> p
    0.1314139477040335
    """
    # Tolerance for the sanity check below.
    TINY = np.sqrt(np.finfo(float).eps)

    transformed = []
    for arg in args:
        data = np.asarray(arg)
        n = len(data)
        centered_sq = (data - np.mean(data))**2
        total_sq = centered_sq.sum()

        # The O'Brien transform.
        t = ((n - 1.5) * n * centered_sq - 0.5 * total_sq) / ((n - 1) * (n - 2))

        # The mean of the transformed data must equal the sample variance.
        sample_var = total_sq / (n - 1)
        if abs(sample_var - np.mean(t)) > TINY:
            raise ValueError('Lack of convergence in obrientransform.')

        transformed.append(t)

    # If the arrays are not all the same shape, force dtype=object so the
    # result builds consistently across numpy versions (numpy 1.5.x raised
    # an exception for ragged inputs otherwise).
    if len(transformed) < 2 or all(x.shape == transformed[0].shape
                                   for x in transformed[1:]):
        dt = None
    else:
        dt = object
    return np.array(transformed, dtype=dt)
def signaltonoise(a, axis=0, ddof=0):
    """
    The signal-to-noise ratio of the input data.

    Returns the signal-to-noise ratio of `a`, here defined as the mean
    divided by the standard deviation.

    Parameters
    ----------
    a : array_like
        An array_like object containing the sample data.
    axis : int or None, optional
        Axis along which to operate.  Default is 0.  If None, compute
        over the whole array `a`.
    ddof : int, optional
        Degrees of freedom correction for the standard deviation.
        Default is 0.

    Returns
    -------
    s2n : ndarray
        The mean to standard deviation ratio(s) along `axis`, or 0 where
        the standard deviation is 0.
    """
    a = np.asanyarray(a)
    mean = a.mean(axis)
    std = a.std(axis=axis, ddof=ddof)
    # Where the deviation is zero, define the ratio as 0 rather than inf.
    return np.where(std == 0, 0, mean / std)
def sem(a, axis=0, ddof=1):
    """
    Calculates the standard error of the mean (or standard error of
    measurement) of the values in the input array.

    Parameters
    ----------
    a : array_like
        An array containing the values for which the standard error is
        returned.
    axis : int or None, optional
        Axis along which to operate.  Default is 0.  If None, compute
        over the whole array `a`.
    ddof : int, optional
        Delta degrees-of-freedom: how many degrees of freedom to adjust
        for bias in limited samples relative to the population estimate
        of variance.  Defaults to 1.

    Returns
    -------
    s : ndarray or float
        The standard error of the mean in the sample(s), along the input
        axis.

    Notes
    -----
    The default value for `ddof` differs from the default (0) used by
    other ddof-accepting routines such as np.std and stats.nanstd.

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(20).reshape(5,4)
    >>> stats.sem(a)
    array([ 2.8284,  2.8284,  2.8284,  2.8284])
    >>> stats.sem(a, axis=None, ddof=0)
    1.2893796958227628
    """
    a, axis = _chk_asarray(a, axis)
    nobs = a.shape[axis]
    # Standard deviation of the sample scaled by sqrt(n).
    return np.std(a, axis=axis, ddof=ddof) / np.sqrt(nobs)
def zscore(a, axis=0, ddof=0):
    """
    Calculates the z score of each value in the sample, relative to the
    sample mean and standard deviation.

    Parameters
    ----------
    a : array_like
        An array like object containing the sample data.
    axis : int or None, optional
        Axis along which to operate.  Default is 0.  If None, compute
        over the whole array `a`.
    ddof : int, optional
        Degrees of freedom correction in the calculation of the standard
        deviation.  Default is 0.

    Returns
    -------
    zscore : array_like
        The z-scores, standardized by the mean and standard deviation of
        the input array `a`.

    Notes
    -----
    This function preserves ndarray subclasses, and works also with
    matrices and masked arrays (it uses `asanyarray` instead of `asarray`
    for parameters).

    Examples
    --------
    >>> a = np.array([ 0.7972,  0.0767,  0.4383,  0.7866,  0.8091,  0.1954,
                       0.6307, 0.6599,  0.1065,  0.0508])
    >>> from scipy import stats
    >>> stats.zscore(a)
    array([ 1.1273, -1.247 , -0.0552,  1.0923,  1.1664, -0.8559,  0.5786,
            0.6748, -1.1488, -1.3324])
    """
    a = np.asanyarray(a)
    mn = a.mean(axis=axis)
    std = a.std(axis=axis, ddof=ddof)
    # When reducing along a nonzero axis, re-insert the reduced dimension
    # so that broadcasting lines up with `a`.
    if axis and mn.ndim < a.ndim:
        mn = np.expand_dims(mn, axis=axis)
        std = np.expand_dims(std, axis=axis)
    return (a - mn) / std
def zmap(scores, compare, axis=0, ddof=0):
    """
    Calculates the relative z-scores.

    Returns an array of z-scores, i.e. scores standardized to zero mean
    and unit variance, where the mean and variance are calculated from
    the comparison array.

    Parameters
    ----------
    scores : array_like
        The input for which z-scores are calculated.
    compare : array_like
        The input from which the mean and standard deviation of the
        normalization are taken; assumed to have the same dimension as
        `scores`.
    axis : int or None, optional
        Axis over which mean and variance of `compare` are calculated.
        Default is 0.  If None, compute over the whole array `scores`.
    ddof : int, optional
        Degrees of freedom correction in the calculation of the standard
        deviation.  Default is 0.

    Returns
    -------
    zscore : array_like
        Z-scores, in the same shape as `scores`.

    Notes
    -----
    This function preserves ndarray subclasses, and works also with
    matrices and masked arrays (it uses `asanyarray` instead of `asarray`
    for parameters).

    Examples
    --------
    >>> a = [0.5, 2.0, 2.5, 3]
    >>> b = [0, 1, 2, 3, 4]
    >>> zmap(a, b)
    array([-1.06066017,  0.        ,  0.35355339,  0.70710678])
    """
    scores, compare = map(np.asanyarray, [scores, compare])
    mn = compare.mean(axis=axis)
    std = compare.std(axis=axis, ddof=ddof)
    # When reducing along a nonzero axis, re-insert the reduced dimension
    # so that broadcasting lines up with `scores`.
    if axis and mn.ndim < compare.ndim:
        mn = np.expand_dims(mn, axis=axis)
        std = np.expand_dims(std, axis=axis)
    return (scores - mn) / std
#####################################
# TRIMMING FUNCTIONS #
#####################################
def threshold(a, threshmin=None, threshmax=None, newval=0):
    """
    Clip array to a given value.

    Similar to numpy.clip(), except that values less than `threshmin` or
    greater than `threshmax` are replaced by `newval`, instead of by
    `threshmin` and `threshmax` respectively.

    Parameters
    ----------
    a : array_like
        Data to threshold.
    threshmin : float, int or None, optional
        Minimum threshold, defaults to None.
    threshmax : float, int or None, optional
        Maximum threshold, defaults to None.
    newval : float or int, optional
        Value to put in place of values in `a` outside of bounds.
        Defaults to 0.

    Returns
    -------
    out : ndarray
        The clipped input array, with values less than `threshmin` or
        greater than `threshmax` replaced with `newval`.

    Examples
    --------
    >>> a = np.array([9, 9, 6, 3, 1, 6, 1, 0, 0, 8])
    >>> from scipy import stats
    >>> stats.threshold(a, threshmin=2, threshmax=8, newval=-1)
    array([-1, -1,  6,  3, -1,  6, -1, -1, -1,  8])
    """
    # Work on a copy so the caller's array is untouched.
    a = asarray(a).copy()
    # Accumulate an out-of-bounds mask, then overwrite in one pass.
    outside = zeros(a.shape, dtype=bool)
    if threshmin is not None:
        outside |= (a < threshmin)
    if threshmax is not None:
        outside |= (a > threshmax)
    a[outside] = newval
    return a
def sigmaclip(a, low=4., high=4.):
    """
    Iterative sigma-clipping of array elements.

    The output array contains only those elements of the input array `c`
    that satisfy the conditions ::

        mean(c) - std(c)*low < c < mean(c) + std(c)*high

    Starting from the full sample, all elements outside the critical
    range are removed.  The iteration continues with a new critical range
    until no elements are outside the range.

    Parameters
    ----------
    a : array_like
        Data array, will be raveled if not 1-D.
    low : float, optional
        Lower bound factor of sigma clipping.  Default is 4.
    high : float, optional
        Upper bound factor of sigma clipping.  Default is 4.

    Returns
    -------
    c : ndarray
        Input array with clipped elements removed.
    critlower : float
        Lower threshold value used for clipping.
    critlupper : float
        Upper threshold value used for clipping.

    Examples
    --------
    >>> a = np.concatenate((np.linspace(9.5,10.5,31), np.linspace(0,20,5)))
    >>> fact = 1.5
    >>> c, low, upp = sigmaclip(a, fact, fact)
    >>> c
    array([  9.96666667,  10.        ,  10.03333333,  10.        ])
    >>> a = np.concatenate((np.linspace(9.5,10.5,11),
                            np.linspace(-100,-50,3)))
    >>> c, low, upp = sigmaclip(a, 1.8, 1.8)
    >>> (c == np.linspace(9.5,10.5,11)).all()
    True
    """
    c = np.asarray(a).ravel()
    while True:
        std = c.std()
        mean = c.mean()
        size_before = c.size
        critlower = mean - std * low
        critupper = mean + std * high
        # Keep only the strictly in-range elements, then stop once a pass
        # removes nothing.
        c = c[(c > critlower) & (c < critupper)]
        if c.size == size_before:
            break
    return c, critlower, critupper
def trimboth(a, proportiontocut, axis=0):
    """
    Slices off a proportion of items from both ends of an array.

    Slices off the passed proportion of items from both ends of the passed
    array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and**
    rightmost 10% of scores). You must pre-sort the array if you want
    'proper' trimming. Slices off less if proportion results in a
    non-integer slice index (i.e., conservatively slices off
    `proportiontocut`).

    Parameters
    ----------
    a : array_like
        Data to trim.
    proportiontocut : float
        Proportion (in range 0-1) of total data set to trim of each end.
    axis : int or None, optional
        Axis along which to trim data. Default is 0. If None, compute over
        the whole array `a`.

    Returns
    -------
    out : ndarray
        Trimmed version of array `a`.

    Raises
    ------
    ValueError
        If trimming would remove everything (proportion too big).

    See Also
    --------
    trim_mean
    """
    a = np.asarray(a)
    if axis is None:
        a = a.ravel()
        axis = 0
    nobs = a.shape[axis]
    lowercut = int(proportiontocut * nobs)
    uppercut = nobs - lowercut
    if (lowercut >= uppercut):
        raise ValueError("Proportion too big.")

    sl = [slice(None)] * a.ndim
    sl[axis] = slice(lowercut, uppercut)
    # Index with a tuple: indexing an ndarray with a *list* of slices is
    # deprecated since NumPy 1.15 and an error in modern NumPy.
    return a[tuple(sl)]
def trim1(a, proportiontocut, tail='right'):
    """
    Slices off a proportion of items from ONE end of the passed array
    distribution.

    If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'
    10% of scores. Slices off LESS if proportion results in a non-integer
    slice index (i.e., conservatively slices off `proportiontocut` ).

    Parameters
    ----------
    a : array_like
        Input array
    proportiontocut : float
        Fraction to cut off of 'left' or 'right' of distribution
    tail : {'left', 'right'}, optional
        Defaults to 'right'.

    Returns
    -------
    trim1 : ndarray
        Trimmed version of array `a`

    Raises
    ------
    ValueError
        If `tail` is neither 'left' nor 'right' (case-insensitive).
    """
    a = asarray(a)
    tail = tail.lower()
    if tail == 'right':
        lowercut = 0
        uppercut = len(a) - int(proportiontocut * len(a))
    elif tail == 'left':
        lowercut = int(proportiontocut * len(a))
        uppercut = len(a)
    else:
        # Previously an unrecognized `tail` fell through both branches and
        # raised NameError on the undefined slice bounds; fail explicitly.
        raise ValueError("tail must be 'left' or 'right'")

    return a[lowercut:uppercut]
def trim_mean(a, proportiontocut, axis=0):
    """
    Return mean of array after trimming distribution from both lower and upper
    tails.

    If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of
    scores. Slices off LESS if proportion results in a non-integer slice
    index (i.e., conservatively slices off `proportiontocut` ).

    Parameters
    ----------
    a : array_like
        Input array
    proportiontocut : float
        Fraction to cut off of both tails of the distribution
    axis : int or None, optional
        Axis along which the trimmed means are computed. Default is 0.
        If None, compute over the whole array `a`.

    Returns
    -------
    trim_mean : ndarray
        Mean of trimmed array.

    See Also
    --------
    trimboth

    Examples
    --------
    >>> from scipy import stats
    >>> stats.trim_mean(np.arange(20), 0.1)
    9.5
    """
    a = np.asarray(a)
    if axis is None:
        nobs = a.size
    else:
        nobs = a.shape[axis]
    lowercut = int(proportiontocut * nobs)
    uppercut = nobs - lowercut - 1
    if (lowercut > uppercut):
        raise ValueError("Proportion too big.")

    # A full sort is unnecessary: it is enough that everything below the
    # lower cut point and above the upper one is partitioned away.
    try:
        atmp = np.partition(a, (lowercut, uppercut), axis)
    except AttributeError:
        # Very old numpy without np.partition.
        atmp = np.sort(a, axis)

    # Trim both tails in place (inlined from `trimboth`, but indexing with a
    # tuple: a list of slices is deprecated/removed in modern NumPy).
    if axis is None:
        atmp = atmp.ravel()
        axis = 0
    n = atmp.shape[axis]
    lo = int(proportiontocut * n)
    hi = n - lo
    if lo >= hi:
        raise ValueError("Proportion too big.")
    sl = [slice(None)] * atmp.ndim
    sl[axis] = slice(lo, hi)
    return np.mean(atmp[tuple(sl)], axis=axis)
def f_oneway(*args):
    """
    Performs a 1-way ANOVA.

    The one-way ANOVA tests the null hypothesis that two or more groups have
    the same population mean. The test is applied to samples from two or
    more groups, possibly with differing sizes.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        The sample measurements for each group.

    Returns
    -------
    F-value : float
        The computed F-value of the test.
    p-value : float
        The associated p-value from the F-distribution.

    Notes
    -----
    The p-value is valid only under the usual ANOVA assumptions: independent
    samples, normally distributed populations and equal population standard
    deviations (homoscedasticity). If these do not hold, consider
    `scipy.stats.kruskal` (with some loss of power).

    The algorithm is from Heiman[2], pp.394-7.

    References
    ----------
    .. [1] Lowry, Richard.  "Concepts and Applications of Inferential
           Statistics". Chapter 14.
           http://faculty.vassar.edu/lowry/ch14pt1.html
    .. [2] Heiman, G.W.  Research Methods in Statistics. 2002.
    """
    groups = [np.asarray(sample, dtype=float) for sample in args]
    num_groups = len(groups)  # ANOVA on this many groups
    pooled = np.concatenate(groups)
    total_n = len(pooled)

    # Total sum of squares about the grand mean.
    sstot = ss(pooled) - (square_of_sums(pooled) / float(total_n))
    # Between-groups sum of squares.
    ssbn = 0
    for grp in groups:
        ssbn += square_of_sums(grp) / float(len(grp))
    ssbn -= (square_of_sums(pooled) / float(total_n))
    # Within-groups sum of squares is the remainder.
    sswn = sstot - ssbn

    dfbn = num_groups - 1
    dfwn = total_n - num_groups
    msb = ssbn / float(dfbn)
    msw = sswn / float(dfwn)
    f = msb / msw
    prob = special.fdtrc(dfbn, dfwn, f)  # equivalent to stats.f.sf
    return f, prob
def pearsonr(x, y):
    """
    Calculates a Pearson correlation coefficient and the p-value for testing
    non-correlation.

    The Pearson correlation coefficient measures the linear relationship
    between two datasets. It varies between -1 and +1 with 0 implying no
    correlation; -1 or +1 imply an exact linear relationship. Strictly
    speaking, Pearson's correlation requires each dataset to be normally
    distributed.

    The p-value roughly indicates the probability of an uncorrelated system
    producing datasets with a correlation at least as extreme as the one
    computed; it is not entirely reliable but reasonable for datasets larger
    than 500 or so.

    Parameters
    ----------
    x : (N,) array_like
        Input
    y : (N,) array_like
        Input

    Returns
    -------
    (Pearson's correlation coefficient,
     2-tailed p-value)

    References
    ----------
    http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation
    """
    # x and y should have same length.
    x = np.asarray(x)
    y = np.asarray(y)
    n = len(x)
    # Center both samples about their means.
    xdev = x - x.mean()
    ydev = y - y.mean()
    r_num = np.add.reduce(xdev * ydev)
    r_den = np.sqrt(ss(xdev) * ss(ydev))
    r = r_num / r_den

    # |r| can exceed 1 only through floating-point round-off; clamp it.
    r = max(min(r, 1.0), -1.0)
    df = n - 2
    if abs(r) == 1.0:
        return r, 0.0
    t_squared = r**2 * (df / ((1.0 - r) * (1.0 + r)))
    return r, betai(0.5*df, 0.5, df/(df+t_squared))
def fisher_exact(table, alternative='two-sided'):
    """Performs a Fisher exact test on a 2x2 contingency table.

    Parameters
    ----------
    table : array_like of ints
        A 2x2 contingency table. Elements should be non-negative integers.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Which alternative hypothesis to the null hypothesis the test uses.
        Default is 'two-sided'.

    Returns
    -------
    oddsratio : float
        This is prior odds ratio and not a posterior estimate.
    p_value : float
        P-value, the probability of obtaining a distribution at least as
        extreme as the one that was actually observed, assuming that the
        null hypothesis is true.

    See Also
    --------
    chi2_contingency : Chi-square test of independence of variables in a
        contingency table.

    Notes
    -----
    The calculated odds ratio is different from the one R uses. In R language,
    this implementation returns the (more common) "unconditional Maximum
    Likelihood Estimate", while R uses the "conditional Maximum Likelihood
    Estimate".

    For tables with large numbers the (inexact) chi-square test implemented
    in the function `chi2_contingency` can also be used.

    Examples
    --------
    Say we spend a few days counting whales and sharks in the Atlantic and
    Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the
    Indian ocean 2 whales and 5 sharks. Then our contingency table is::

                Atlantic  Indian
        whales     8        2
        sharks     1        5

    We use this table to find the p-value:

    >>> oddsratio, pvalue = stats.fisher_exact([[8, 2], [1, 5]])
    >>> pvalue
    0.0349...

    The probability that we would observe this or an even more imbalanced ratio
    by chance is about 3.5%. A commonly used significance level is 5%, if we
    adopt that we can therefore conclude that our observed imbalance is
    statistically significant; whales prefer the Atlantic while sharks prefer
    the Indian ocean.
    """
    hypergeom = distributions.hypergeom
    c = np.asarray(table, dtype=np.int64)  # int32 is not enough for the algorithm
    if not c.shape == (2, 2):
        raise ValueError("The input `table` must be of shape (2, 2).")

    if np.any(c < 0):
        raise ValueError("All values in `table` must be nonnegative.")

    if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
        # If both values in a row or column are zero, the p-value is 1 and
        # the odds ratio is NaN.
        return np.nan, 1.0

    # Unconditional MLE of the odds ratio; infinite when the off-diagonal
    # product is zero (division by zero avoided by the explicit check).
    if c[1,0] > 0 and c[0,1] > 0:
        oddsratio = c[0,0] * c[1,1] / float(c[1,0] * c[0,1])
    else:
        oddsratio = np.inf

    # Margins of the table: row sums n1, n2 and first-column sum n.  Under
    # the null, c[0,0] follows hypergeom(n1 + n2, n1, n).
    n1 = c[0,0] + c[0,1]
    n2 = c[1,0] + c[1,1]
    n = c[0,0] + c[1,0]

    def binary_search(n, n1, n2, side):
        """Binary search for where to begin lower/upper halves in two-sided
        test.

        NOTE: this closure reads `mode`, `pexact` and `epsilon` from the
        enclosing scope; they are assigned in the 'two-sided' branch below
        before any call is made.
        """
        if side == "upper":
            minval = mode
            maxval = n
        else:
            minval = 0
            maxval = mode
        guess = -1
        while maxval - minval > 1:
            if maxval == minval + 1 and guess == minval:
                guess = maxval
            else:
                guess = (maxval + minval) // 2
            pguess = hypergeom.pmf(guess, n1 + n2, n1, n)
            if side == "upper":
                ng = guess - 1
            else:
                ng = guess + 1
            # Stop once `guess` straddles pexact: its pmf is at most pexact
            # while its neighbour's pmf exceeds it.
            if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n):
                break
            elif pguess < pexact:
                maxval = guess
            else:
                minval = guess
        if guess == -1:
            guess = minval
        # Nudge the boundary so the included tail mass matches pexact up to
        # the relative tolerance `epsilon`.
        if side == "upper":
            while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
                guess -= 1
            while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
                guess += 1
        else:
            while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
                guess += 1
            while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
                guess -= 1
        return guess

    if alternative == 'less':
        pvalue = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
    elif alternative == 'greater':
        # Same formula as the 'less' case, but with the second column.
        pvalue = hypergeom.cdf(c[0,1], n1 + n2, n1, c[0,1] + c[1,1])
    elif alternative == 'two-sided':
        # Mode of the hypergeometric distribution of c[0,0].
        mode = int(float((n + 1) * (n1 + 1)) / (n1 + n2 + 2))
        pexact = hypergeom.pmf(c[0,0], n1 + n2, n1, n)
        pmode = hypergeom.pmf(mode, n1 + n2, n1, n)

        epsilon = 1 - 1e-4
        # Observed cell is (numerically) at the mode: every outcome is at
        # least as likely, so the two-sided p-value is 1.
        if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
            return oddsratio, 1.

        elif c[0,0] < mode:
            # Observed cell is in the lower tail; add the matching upper
            # tail found by binary search.
            plower = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
            if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon:
                return oddsratio, plower

            guess = binary_search(n, n1, n2, "upper")
            pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n)
        else:
            # Observed cell is in the upper tail; add the matching lower
            # tail found by binary search.
            pupper = hypergeom.sf(c[0,0] - 1, n1 + n2, n1, n)
            if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon:
                return oddsratio, pupper

            guess = binary_search(n, n1, n2, "lower")
            pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
    else:
        msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
        raise ValueError(msg)

    # Guard against round-off pushing the sum of two tails above 1.
    if pvalue > 1.0:
        pvalue = 1.0
    return oddsratio, pvalue
def spearmanr(a, b=None, axis=0):
    """
    Calculates a Spearman rank-order correlation coefficient and the p-value
    to test for non-correlation.

    The Spearman correlation is a nonparametric measure of the monotonicity
    of the relationship between two datasets. Unlike the Pearson correlation,
    it does not assume that both datasets are normally distributed. It varies
    between -1 and +1 with 0 implying no correlation; -1 or +1 imply an exact
    monotonic relationship.

    The p-value roughly indicates the probability of an uncorrelated system
    producing datasets with a Spearman correlation at least as extreme as the
    one computed; it is reasonably reliable for datasets larger than 500 or
    so.

    Parameters
    ----------
    a, b : 1D or 2D array_like, b is optional
        One or two 1-D or 2-D arrays containing multiple variables and
        observations. Each column of `a` and `b` represents a variable, and
        each row entry a single observation of those variables. See also
        `axis`. Both arrays need to have the same length in the `axis`
        dimension.
    axis : int or None, optional
        If axis=0 (default), then each column represents a variable, with
        observations in the rows. If axis=1, the relationship is transposed:
        each row represents a variable, while the columns contain
        observations. If axis=None, then both arrays will be raveled.

    Returns
    -------
    rho : float or ndarray (2-D square)
        Spearman correlation matrix or correlation coefficient (if only 2
        variables are given as parameters). The matrix is square with length
        equal to the total number of variables (columns or rows) in `a` and
        `b` combined.
    p-value : float
        The two-sided p-value for a hypothesis test whose null hypothesis is
        that two sets of data are uncorrelated; has the same shape as rho.

    References
    ----------
    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall: New
       York. 2000.
       Section  14.7
    """
    a, axisout = _chk_asarray(a, axis)
    rank_a = np.apply_along_axis(rankdata, axisout, a)

    if b is None:
        rank_b = None
    else:
        b, axisout = _chk_asarray(b, axis)
        rank_b = np.apply_along_axis(rankdata, axisout, b)

    n = a.shape[axisout]
    # Pearson correlation of the ranks is the Spearman rho.
    rs = np.corrcoef(rank_a, rank_b, rowvar=axisout)

    # rs can contain entries exactly equal to 1, making the divisor zero.
    olderr = np.seterr(divide='ignore')
    try:
        t = rs * np.sqrt((n - 2) / ((rs + 1.0) * (1.0 - rs)))
    finally:
        np.seterr(**olderr)
    prob = 2 * distributions.t.sf(np.abs(t), n - 2)

    if rs.shape == (2, 2):
        # Exactly two variables: return plain scalars.
        return rs[1, 0], prob[1, 0]
    return rs, prob
def pointbiserialr(x, y):
    """Calculates a point biserial correlation coefficient and the associated
    p-value.

    The point biserial correlation is used to measure the relationship
    between a binary variable, x, and a continuous variable, y. Like other
    correlation coefficients, this one varies between -1 and +1 with 0
    implying no correlation. Correlations of -1 or +1 imply a determinative
    relationship.

    This function uses a shortcut formula but produces the same result as
    `pearsonr`.

    Parameters
    ----------
    x : array_like of bools
        Input array.
    y : array_like
        Input array.

    Returns
    -------
    r : float
        R value
    p-value : float
        2-tailed p-value

    References
    ----------
    http://en.wikipedia.org/wiki/Point-biserial_correlation_coefficient
    """
    mask = np.asarray(x, dtype=bool)
    vals = np.asarray(y, dtype=float)
    n = len(mask)

    # Fraction of x values that are True.
    phat = mask.sum() / float(len(mask))
    mean_false = vals[~mask].mean()  # y where x is False
    mean_true = vals[mask].mean()    # y where x is True

    # phat - phat**2 is more stable than phat*(1-phat).
    rpb = (mean_true - mean_false) * np.sqrt(phat - phat**2) / vals.std()

    df = n - 2
    # fixme: see comment about TINY in pearsonr()
    TINY = 1e-20
    t = rpb * np.sqrt(df / ((1.0 - rpb + TINY) * (1.0 + rpb + TINY)))
    return rpb, betai(0.5*df, 0.5, df/(df+t*t))
def kendalltau(x, y, initial_lexsort=True):
    """
    Calculates Kendall's tau, a correlation measure for ordinal data.

    Kendall's tau is a measure of the correspondence between two rankings.
    Values close to 1 indicate strong agreement, values close to -1 indicate
    strong disagreement.  This is the tau-b version of Kendall's tau which
    accounts for ties.

    Parameters
    ----------
    x, y : array_like
        Arrays of rankings, of the same shape. If arrays are not 1-D, they will
        be flattened to 1-D.
    initial_lexsort : bool, optional
        Whether to use lexsort or quicksort as the sorting method for the
        initial sort of the inputs. Default is lexsort (True), for which
        `kendalltau` is of complexity O(n log(n)). If False, the complexity is
        O(n^2), but with a smaller pre-factor (so quicksort may be faster for
        small arrays).

    Returns
    -------
    Kendall's tau : float
       The tau statistic.
    p-value : float
       The two-sided p-value for a hypothesis test whose null hypothesis is
       an absence of association, tau = 0.

    Notes
    -----
    The definition of Kendall's tau that is used is::

      tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U))

    where P is the number of concordant pairs, Q the number of discordant
    pairs, T the number of ties only in `x`, and U the number of ties only in
    `y`.  If a tie occurs for the same pair in both `x` and `y`, it is not
    added to either T or U.

    References
    ----------
    W.R. Knight, "A Computer Method for Calculating Kendall's Tau with
    Ungrouped Data", Journal of the American Statistical Association, Vol. 61,
    No. 314, Part 1, pp. 436-439, 1966.

    Examples
    --------
    >>> from scipy import stats
    >>> x1 = [12, 2, 1, 12, 2]
    >>> x2 = [1, 4, 7, 1, 0]
    >>> tau, p_value = stats.kendalltau(x1, x2)
    >>> tau
    -0.47140452079103173
    >>> p_value
    0.24821309157521476
    """
    x = np.asarray(x).ravel()
    y = np.asarray(y).ravel()

    if not x.size or not y.size:
        return (np.nan, np.nan)  # Return NaN if arrays are empty

    n = np.int64(len(x))
    temp = list(range(n))  # support structure used by mergesort
    # this closure recursively sorts sections of perm[] by comparing
    # elements of y[perm[]] using temp[] as support
    # returns the number of swaps required by an equivalent bubble sort
    # NOTE(review): `perm` is read from the enclosing scope; it is assigned
    # below, before mergesort is first called.

    def mergesort(offs, length):
        exchcnt = 0
        if length == 1:
            return 0
        if length == 2:
            # Base case: swap the pair if out of order; one swap counted.
            if y[perm[offs]] <= y[perm[offs+1]]:
                return 0
            t = perm[offs]
            perm[offs] = perm[offs+1]
            perm[offs+1] = t
            return 1
        length0 = length // 2
        length1 = length - length0
        middle = offs + length0
        exchcnt += mergesort(offs, length0)
        exchcnt += mergesort(middle, length1)
        # If the two halves are already in order, no merge work is needed.
        if y[perm[middle - 1]] < y[perm[middle]]:
            return exchcnt
        # merging
        i = j = k = 0
        while j < length0 or k < length1:
            if k >= length1 or (j < length0 and y[perm[offs + j]] <=
                                y[perm[middle + k]]):
                temp[i] = perm[offs + j]
                d = i - j
                j += 1
            else:
                temp[i] = perm[middle + k]
                # d is the displacement of this element, i.e. the number of
                # bubble-sort swaps it would have required.
                d = (offs + i) - (middle + k)
                k += 1
            if d > 0:
                exchcnt += d
            i += 1
        perm[offs:offs+length] = temp[0:length]
        return exchcnt

    # initial sort on values of x and, if tied, on values of y
    if initial_lexsort:
        # sort implemented as mergesort, worst case: O(n log(n))
        perm = np.lexsort((y, x))
    else:
        # sort implemented as quicksort, 30% faster but with worst case: O(n^2)
        perm = list(range(n))
        perm.sort(key=lambda a: (x[a], y[a]))

    # compute joint ties (pairs tied in both x and y)
    first = 0
    t = 0
    # NOTE(review): `xrange` is not a Python 3 builtin — presumably supplied
    # by the module's Python 2/3 compatibility imports; confirm at file top.
    for i in xrange(1, n):
        if x[perm[first]] != x[perm[i]] or y[perm[first]] != y[perm[i]]:
            t += ((i - first) * (i - first - 1)) // 2
            first = i
    t += ((n - first) * (n - first - 1)) // 2

    # compute ties in x
    first = 0
    u = 0
    for i in xrange(1, n):
        if x[perm[first]] != x[perm[i]]:
            u += ((i - first) * (i - first - 1)) // 2
            first = i
    u += ((n - first) * (n - first - 1)) // 2

    # count exchanges (number of discordant-pair swaps while sorting by y)
    exchanges = mergesort(0, n)
    # compute ties in y after mergesort with counting
    first = 0
    v = 0
    for i in xrange(1, n):
        if y[perm[first]] != y[perm[i]]:
            v += ((i - first) * (i - first - 1)) // 2
            first = i
    v += ((n - first) * (n - first - 1)) // 2

    tot = (n * (n - 1)) // 2
    if tot == u or tot == v:
        return np.nan, np.nan  # Special case for all ties in both ranks

    # Prevent overflow; equal to np.sqrt((tot - u) * (tot - v))
    denom = np.exp(0.5 * (np.log(tot - u) + np.log(tot - v)))
    tau = ((tot - (v + u - t)) - 2.0 * exchanges) / denom

    # what follows reproduces the ending of Gary Strangman's original
    # stats.kendalltau() in SciPy
    svar = (4.0 * n + 10.0) / (9.0 * n * (n - 1))
    z = tau / np.sqrt(svar)
    # Normal approximation for the two-sided p-value; 1.4142136 ~ sqrt(2).
    prob = special.erfc(np.abs(z) / 1.4142136)

    return tau, prob
def linregress(x, y=None):
    """
    Calculate a regression line

    This computes a least-squares regression for two sets of measurements.

    Parameters
    ----------
    x, y : array_like
        two sets of measurements.  Both arrays should have the same length.
        If only x is given (and y=None), then it must be a two-dimensional
        array where one dimension has length 2.  The two sets of measurements
        are then found by splitting the array along the length-2 dimension.

    Returns
    -------
    slope : float
        slope of the regression line
    intercept : float
        intercept of the regression line
    r-value : float
        correlation coefficient
    p-value : float
        two-sided p-value for a hypothesis test whose null hypothesis is
        that the slope is zero.
    stderr : float
        Standard error of the estimate
    """
    TINY = 1.0e-20
    if y is None:
        # x is a (2, N) or (N, 2) shaped array_like; split it into x and y.
        x = np.asarray(x)
        if x.shape[0] == 2:
            x, y = x
        elif x.shape[1] == 2:
            x, y = x.T
        else:
            msg = ("If only `x` is given as input, it has to be of shape "
                   "(2, N) or (N, 2), provided shape was %s" % str(x.shape))
            raise ValueError(msg)
    else:
        x = np.asarray(x)
        y = np.asarray(y)

    n = len(x)
    xmean = np.mean(x, None)
    ymean = np.mean(y, None)

    # Average sums of squares from the (biased) covariance matrix.
    ssxm, ssxym, ssyxm, ssym = np.cov(x, y, bias=1).flat
    r_den = np.sqrt(ssxm * ssym)
    r = 0.0 if r_den == 0.0 else ssxym / r_den

    # Clamp numerical-error overshoot of the correlation coefficient.
    r = max(-1.0, min(1.0, r))

    df = n - 2
    t = r * np.sqrt(df / ((1.0 - r + TINY) * (1.0 + r + TINY)))
    prob = 2 * distributions.t.sf(np.abs(t), df)

    slope = ssxym / ssxm
    intercept = ymean - slope * xmean
    sterrest = np.sqrt((1 - r**2) * ssym / ssxm / df)
    return slope, intercept, r, prob, sterrest
def theilslopes(y, x=None, alpha=0.95):
    r"""
    Computes the Theil-Sen estimator for a set of points (x, y).

    `theilslopes` implements a method for robust linear regression.  It
    computes the slope as the median of all slopes between paired values.

    Parameters
    ----------
    y : array_like
        Dependent variable.
    x : array_like or None, optional
        Independent variable. If None, use ``arange(len(y))`` instead.
    alpha : float, optional
        Confidence degree between 0 and 1. Default is 95% confidence.
        Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are
        interpreted as "find the 90% confidence interval".

    Returns
    -------
    medslope : float
        Theil slope.
    medintercept : float
        Intercept of the Theil line, as ``median(y) - medslope*median(x)``.
    lo_slope : float
        Lower bound of the confidence interval on `medslope`.
    up_slope : float
        Upper bound of the confidence interval on `medslope`.

    Notes
    -----
    The implementation follows [1]_. The intercept is not defined in [1]_,
    and here it is defined as ``median(y) - medslope*median(x)``, which is
    given in [3]_. A confidence interval for the intercept is not given as
    this question is not addressed in [1]_.

    References
    ----------
    .. [1] P.K. Sen, "Estimates of the regression coefficient based on Kendall's tau",
           J. Am. Stat. Assoc., Vol. 63, pp. 1379-1389, 1968.
    .. [2] H. Theil, "A rank-invariant method of linear and polynomial
           regression analysis I, II and III", Nederl. Akad. Wetensch., Proc.
           53:, pp. 386-392, pp. 521-525, pp. 1397-1412, 1950.
    .. [3] W.L. Conover, "Practical nonparametric statistics", 2nd ed.,
           John Wiley and Sons, New York, pp. 493.
    """
    y = np.asarray(y).flatten()
    if x is None:
        x = np.arange(len(y), dtype=float)
    else:
        x = np.asarray(x, dtype=float).flatten()
        if len(x) != len(y):
            raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y), len(x)))

    # Compute sorted slopes only when deltax > 0
    deltax = x[:, np.newaxis] - x
    deltay = y[:, np.newaxis] - y
    slopes = deltay[deltax > 0] / deltax[deltax > 0]
    slopes.sort()
    medslope = np.median(slopes)
    medinter = np.median(y) - medslope * np.median(x)

    # Now compute confidence intervals
    if alpha > 0.5:
        alpha = 1. - alpha
    z = distributions.norm.ppf(alpha / 2.)

    # This implements (2.6) from Sen (1968), with tie corrections for
    # repeated x and repeated y values.
    _, nxreps = find_repeats(x)
    _, nyreps = find_repeats(y)
    nt = len(slopes)  # N in Sen (1968)
    ny = len(y)       # n in Sen (1968)
    # Use the builtin `sum` here: passing a generator to np.sum relied on a
    # deprecated fallback to the builtin and breaks on modern NumPy.
    sigsq = 1/18. * (ny * (ny-1) * (2*ny+5) -
                     sum(k * (k-1) * (2*k + 5) for k in nxreps) -
                     sum(k * (k-1) * (2*k + 5) for k in nyreps))

    # Find the confidence interval indices in `slopes`
    sigma = np.sqrt(sigsq)
    Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1)
    Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0)
    delta = slopes[[Rl, Ru]]
    return medslope, medinter, delta[0], delta[1]
#####################################
# INFERENTIAL STATISTICS #
#####################################
def ttest_1samp(a, popmean, axis=0):
    """
    Calculates the T-test for the mean of ONE group of scores.

    This is a two-sided test for the null hypothesis that the expected value
    (mean) of a sample of independent observations `a` is equal to the given
    population mean, `popmean`.

    Parameters
    ----------
    a : array_like
        sample observation
    popmean : float or array_like
        expected value in null hypothesis, if array_like than it must have the
        same shape as `a` excluding the axis dimension
    axis : int or None, optional
        Axis along which to compute test. If None, compute over the whole
        array `a`.

    Returns
    -------
    t : float or array
        t-statistic
    prob : float or array
        two-tailed p-value
    """
    a, axis = _chk_asarray(a, axis)
    n = a.shape[axis]
    # Deviation of the sample mean from the hypothesised mean, scaled by
    # the standard error of the mean (unbiased sample variance, ddof=1).
    diff = np.mean(a, axis) - popmean
    stderr = np.sqrt(np.var(a, axis, ddof=1) / float(n))
    return _ttest_finish(n - 1, np.divide(diff, stderr))
def _ttest_finish(df, t):
"""Common code between all 3 t-test functions."""
prob = distributions.t.sf(np.abs(t), df) * 2 # use np.abs to get upper tail
if t.ndim == 0:
t = t[()]
return t, prob
def _ttest_ind_from_stats(mean1, mean2, denom, df):
    # t statistic for the difference of two means, given a precomputed
    # denominator (standard error) and degrees of freedom.
    return _ttest_finish(df, np.divide(mean1 - mean2, denom))
def _unequal_var_ttest_denom(v1, n1, v2, n2):
vn1 = v1 / n1
vn2 = v2 / n2
df = ((vn1 + vn2)**2) / ((vn1**2) / (n1 - 1) + (vn2**2) / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = np.where(np.isnan(df), 1, df)
denom = np.sqrt(vn1 + vn2)
return df, denom
def _equal_var_ttest_denom(v1, n1, v2, n2):
df = n1 + n2 - 2
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / float(df)
denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
return df, denom
def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
                         equal_var=True):
    """
    T-test for means of two independent samples from descriptive statistics.

    This is a two-sided test for the null hypothesis that 2 independent
    samples have identical average (expected) values.

    Parameters
    ----------
    mean1 : array_like
        The mean(s) of sample 1.
    std1 : array_like
        The standard deviation(s) of sample 1.
    nobs1 : array_like
        The number(s) of observations of sample 1.
    mean2 : array_like
        The mean(s) of sample 2.
    std2 : array_like
        The standard deviations(s) of sample 2.
    nobs2 : array_like
        The number(s) of observations of sample 2.
    equal_var : bool, optional
        If True (default), perform a standard independent 2 sample test
        that assumes equal population variances [1]_.
        If False, perform Welch's t-test, which does not assume equal
        population variance [2]_.

    Returns
    -------
    t : float or array
        The calculated t-statistics
    prob : float or array
        The two-tailed p-value.

    See also
    --------
    scipy.stats.ttest_ind

    Notes
    -----
    .. versionadded:: 0.16.0

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
    """
    # Pick the pooled-variance or Welch denominator, then share the common
    # finishing code with the other t-test functions.
    denom_func = _equal_var_ttest_denom if equal_var else _unequal_var_ttest_denom
    df, denom = denom_func(std1**2, nobs1, std2**2, nobs2)
    return _ttest_ind_from_stats(mean1, mean2, denom, df)
def ttest_ind(a, b, axis=0, equal_var=True):
    """
    Calculates the T-test for the means of TWO INDEPENDENT samples of scores.

    This is a two-sided test for the null hypothesis that 2 independent
    samples have identical average (expected) values.  By default the
    populations are assumed to have identical variances.

    Parameters
    ----------
    a, b : array_like
        The arrays must have the same shape, except in the dimension
        corresponding to `axis` (the first, by default).
    axis : int or None, optional
        Axis along which to compute test. If None, compute over the whole
        arrays, `a`, and `b`.
    equal_var : bool, optional
        If True (default), perform a standard independent 2 sample test
        that assumes equal population variances [1]_.
        If False, perform Welch's t-test, which does not assume equal
        population variance [2]_.

        .. versionadded:: 0.11.0

    Returns
    -------
    t : float or array
        The calculated t-statistic.
    prob : float or array
        The two-tailed p-value.

    Notes
    -----
    We can use this test if we observe two independent samples from the
    same or different population, e.g. exam scores of boys and girls or
    of two ethnic groups.  The test measures whether the average
    (expected) value differs significantly across samples.  A large
    p-value (e.g. above 0.05 or 0.1) means we cannot reject the null
    hypothesis of identical average scores; a small one (e.g. below 1%,
    5% or 10%) means we reject it.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(12345678)
    >>> rvs1 = stats.norm.rvs(loc=5, scale=10, size=500)
    >>> rvs2 = stats.norm.rvs(loc=5, scale=10, size=500)
    >>> stats.ttest_ind(rvs1, rvs2)
    (0.26833823296239279, 0.78849443369564776)
    >>> stats.ttest_ind(rvs1, rvs2, equal_var=False)
    (0.26833823296239279, 0.78849452749500748)
    """
    a, b, axis = _chk2_asarray(a, b, axis)
    # An empty sample makes the statistic undefined.
    if min(a.size, b.size) == 0:
        return (np.nan, np.nan)
    n1, n2 = a.shape[axis], b.shape[axis]
    v1 = np.var(a, axis, ddof=1)
    v2 = np.var(b, axis, ddof=1)
    denom_func = _equal_var_ttest_denom if equal_var else _unequal_var_ttest_denom
    df, denom = denom_func(v1, n1, v2, n2)
    return _ttest_ind_from_stats(np.mean(a, axis),
                                 np.mean(b, axis),
                                 denom, df)
def ttest_rel(a, b, axis=0):
    """
    Calculates the T-test on TWO RELATED samples of scores, a and b.

    This is a two-sided test for the null hypothesis that 2 related or
    repeated samples have identical average (expected) values.

    Parameters
    ----------
    a, b : array_like
        The arrays must have the same shape.
    axis : int or None, optional
        Axis along which to compute test. If None, compute over the whole
        arrays, `a`, and `b`.

    Returns
    -------
    t : float or array
        t-statistic
    prob : float or array
        two-tailed p-value

    Notes
    -----
    Typical uses are scores of the same set of students in different
    exams, or repeated sampling from the same units.  The test measures
    whether the average score differs significantly across samples
    (e.g. exams).  A large p-value (e.g. above 0.05 or 0.1) means we
    cannot reject the null hypothesis of identical averages; a small one
    (e.g. below 1%, 5% or 10%) means we reject it.  Small p-values go
    with large t-statistics.

    References
    ----------
    http://en.wikipedia.org/wiki/T-test#Dependent_t-test

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(12345678)  # fix random seed to get same numbers
    >>> rvs1 = stats.norm.rvs(loc=5, scale=10, size=500)
    >>> rvs2 = (stats.norm.rvs(loc=5, scale=10, size=500) +
    ...         stats.norm.rvs(scale=0.2, size=500))
    >>> stats.ttest_rel(rvs1, rvs2)
    (0.24101764965300962, 0.80964043445811562)
    """
    a, b, axis = _chk2_asarray(a, b, axis)
    if a.shape[axis] != b.shape[axis]:
        raise ValueError('unequal length arrays')
    if a.size == 0 or b.size == 0:
        return np.nan, np.nan
    n = a.shape[axis]
    # Paired test: work with the per-pair differences.
    diffs = (a - b).astype(np.float64)
    denom = np.sqrt(np.var(diffs, axis, ddof=1) / float(n))
    t_stat = np.divide(np.mean(diffs, axis), denom)
    return _ttest_finish(float(n - 1), t_stat)
def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='approx'):
    """
    Perform the Kolmogorov-Smirnov test for goodness of fit.

    This performs a test of the distribution G(x) of an observed
    random variable against a given distribution F(x). Under the null
    hypothesis the two distributions are identical, G(x)=F(x). The
    alternative hypothesis can be either 'two-sided' (default), 'less'
    or 'greater'. The KS test is only valid for continuous distributions.

    Parameters
    ----------
    rvs : str, array or callable
        If a string, it should be the name of a distribution in `scipy.stats`.
        If an array, it should be a 1-D array of observations of random
        variables.
        If a callable, it should be a function to generate random variables;
        it is required to have a keyword argument `size`.
    cdf : str or callable
        If a string, it should be the name of a distribution in `scipy.stats`.
        If `rvs` is a string then `cdf` can be False or the same as `rvs`.
        If a callable, that callable is used to calculate the cdf.
    args : tuple, sequence, optional
        Distribution parameters, used if `rvs` or `cdf` are strings.
    N : int, optional
        Sample size if `rvs` is string or callable. Default is 20.
    alternative : {'two-sided', 'less','greater'}, optional
        Defines the alternative hypothesis (see explanation above).
        Default is 'two-sided'.
    mode : 'approx' (default) or 'asymp', optional
        Defines the distribution used for calculating the p-value.

          - 'approx' : use approximation to exact distribution of test statistic
          - 'asymp' : use asymptotic distribution of test statistic

    Returns
    -------
    D : float
        KS test statistic, either D, D+ or D-.
    p-value : float
        One-tailed or two-tailed p-value.

    Raises
    ------
    ValueError
        If `alternative` is not one of 'two-sided', 'less' or 'greater',
        or (for the two-sided test) `mode` is not 'approx' or 'asymp'.

    Notes
    -----
    In the one-sided test, the alternative is that the empirical
    cumulative distribution function of the random variable is "less"
    or "greater" than the cumulative distribution function F(x) of the
    hypothesis, ``G(x)<=F(x)``, resp. ``G(x)>=F(x)``.

    Examples
    --------
    >>> from scipy import stats

    >>> x = np.linspace(-15, 15, 9)
    >>> stats.kstest(x, 'norm')
    (0.44435602715924361, 0.038850142705171065)

    >>> np.random.seed(987654321) # set random seed to get the same result
    >>> stats.kstest('norm', False, N=100)
    (0.058352892479417884, 0.88531190944151261)

    The above lines are equivalent to:

    >>> np.random.seed(987654321)
    >>> stats.kstest(stats.norm.rvs(size=100), 'norm')
    (0.058352892479417884, 0.88531190944151261)

    *Test against one-sided alternative hypothesis*

    Shift distribution to larger values, so that ``cdf_dgp(x) < norm.cdf(x)``:

    >>> np.random.seed(987654321)
    >>> x = stats.norm.rvs(loc=0.2, size=100)
    >>> stats.kstest(x,'norm', alternative = 'less')
    (0.12464329735846891, 0.040989164077641749)

    Reject equal distribution against alternative hypothesis: less

    >>> stats.kstest(x,'norm', alternative = 'greater')
    (0.0072115233216311081, 0.98531158590396395)

    Don't reject equal distribution against alternative hypothesis: greater

    >>> stats.kstest(x,'norm', mode='asymp')
    (0.12464329735846891, 0.08944488871182088)

    *Testing t distributed random variables against normal distribution*

    With 100 degrees of freedom the t distribution looks close to the normal
    distribution, and the K-S test does not reject the hypothesis that the
    sample came from the normal distribution:

    >>> np.random.seed(987654321)
    >>> stats.kstest(stats.t.rvs(100,size=100),'norm')
    (0.072018929165471257, 0.67630062862479168)

    With 3 degrees of freedom the t distribution looks sufficiently different
    from the normal distribution, that we can reject the hypothesis that the
    sample came from the normal distribution at the 10% level:

    >>> np.random.seed(987654321)
    >>> stats.kstest(stats.t.rvs(3,size=100),'norm')
    (0.131016895759829, 0.058826222555312224)
    """
    # Resolve string distribution names into (rvs, cdf) callables.
    if isinstance(rvs, string_types):
        if (not cdf) or (cdf == rvs):
            cdf = getattr(distributions, rvs).cdf
            rvs = getattr(distributions, rvs).rvs
        else:
            raise AttributeError("if rvs is string, cdf has to be the "
                                 "same distribution")
    if isinstance(cdf, string_types):
        cdf = getattr(distributions, cdf).cdf
    if callable(rvs):
        kwds = {'size': N}
        vals = np.sort(rvs(*args, **kwds))
    else:
        vals = np.sort(rvs)
        N = len(vals)
    cdfvals = cdf(vals, *args)

    # to not break compatibility with existing code
    if alternative == 'two_sided':
        alternative = 'two-sided'
    # BUG FIX: an unrecognized `alternative` previously fell through every
    # branch and silently returned None; fail loudly instead.
    if alternative not in ('two-sided', 'less', 'greater'):
        raise ValueError("Unexpected alternative %r" % (alternative,))

    if alternative in ['two-sided', 'greater']:
        Dplus = (np.arange(1.0, N + 1)/N - cdfvals).max()
        if alternative == 'greater':
            return Dplus, distributions.ksone.sf(Dplus, N)
    if alternative in ['two-sided', 'less']:
        Dmin = (cdfvals - np.arange(0.0, N)/N).max()
        if alternative == 'less':
            return Dmin, distributions.ksone.sf(Dmin, N)

    # alternative == 'two-sided' from here on.
    D = np.max([Dplus, Dmin])
    if mode == 'asymp':
        return D, distributions.kstwobign.sf(D * np.sqrt(N))
    if mode == 'approx':
        pval_two = distributions.kstwobign.sf(D * np.sqrt(N))
        if N > 2666 or pval_two > 0.80 - N*0.3/1000:
            return D, distributions.kstwobign.sf(D * np.sqrt(N))
        else:
            return D, 2 * distributions.ksone.sf(D, N)
    # BUG FIX: an unrecognized `mode` previously returned None silently.
    raise ValueError("Unexpected mode %r" % (mode,))
# Map from names to lambda_ values used in power_divergence().
# Each value is the exponent of the Cressie-Read power divergence family;
# see power_divergence() for the statistic each one corresponds to.
_power_div_lambda_names = {
    "pearson": 1,               # Pearson's chi-squared statistic
    "log-likelihood": 0,        # log-likelihood ratio (G-test)
    "freeman-tukey": -0.5,      # Freeman-Tukey statistic
    "mod-log-likelihood": -1,   # modified log-likelihood ratio
    "neyman": -2,               # Neyman's statistic
    "cressie-read": 2/3,        # power recommended by Cressie & Read (1984)
}
def _count(a, axis=None):
"""
Count the number of non-masked elements of an array.
This function behaves like np.ma.count(), but is much faster
for ndarrays.
"""
if hasattr(a, 'count'):
num = a.count(axis=axis)
if isinstance(num, np.ndarray) and num.ndim == 0:
# In some cases, the `count` method returns a scalar array (e.g.
# np.array(3)), but we want a plain integer.
num = int(num)
else:
if axis is None:
num = a.size
else:
num = a.shape[axis]
return num
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
    """
    Cressie-Read power divergence statistic and goodness of fit test.

    This function tests the null hypothesis that the categorical data
    has the given frequencies, using the Cressie-Read power divergence
    statistic.

    Parameters
    ----------
    f_obs : array_like
        Observed frequencies in each category.
    f_exp : array_like, optional
        Expected frequencies in each category.  By default the categories are
        assumed to be equally likely.
    ddof : int, optional
        "Delta degrees of freedom": adjustment to the degrees of freedom
        for the p-value.  The p-value is computed using a chi-squared
        distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
        is the number of observed frequencies.  The default value of `ddof`
        is 0.
    axis : int or None, optional
        The axis of the broadcast result of `f_obs` and `f_exp` along which to
        apply the test.  If axis is None, all values in `f_obs` are treated
        as a single data set.  Default is 0.
    lambda_ : float or str, optional
        `lambda_` gives the power in the Cressie-Read power divergence
        statistic.  The default is 1.  For convenience, `lambda_` may be
        assigned one of the following strings, in which case the
        corresponding numerical value is used::

            String              Value   Description
            "pearson"             1     Pearson's chi-squared statistic.
                                        In this case, the function is
                                        equivalent to `stats.chisquare`.
            "log-likelihood"      0     Log-likelihood ratio. Also known as
                                        the G-test [3]_.
            "freeman-tukey"      -1/2   Freeman-Tukey statistic.
            "mod-log-likelihood" -1     Modified log-likelihood ratio.
            "neyman"             -2     Neyman's statistic.
            "cressie-read"        2/3   The power recommended in [5]_.

    Returns
    -------
    stat : float or ndarray
        The Cressie-Read power divergence test statistic.  The value is
        a float if `axis` is None or if `f_obs` and `f_exp` are 1-D.
    p : float or ndarray
        The p-value of the test.  The value is a float if `ddof` and the
        return value `stat` are scalars.

    See Also
    --------
    chisquare

    Notes
    -----
    This test is invalid when the observed or expected frequencies in each
    category are too small.  A typical rule is that all of the observed
    and expected frequencies should be at least 5.

    When `lambda_` is less than zero, the formula for the statistic involves
    dividing by `f_obs`, so a warning or error may be generated if any value
    in `f_obs` is 0.

    Similarly, a warning or error may be generated if any value in `f_exp` is
    zero when `lambda_` >= 0.

    The default degrees of freedom, k-1, are for the case when no parameters
    of the distribution are estimated.  If p parameters are estimated by
    efficient maximum likelihood then the correct degrees of freedom are
    k-1-p.  If the parameters are estimated in a different way, then the
    dof can be between k-1-p and k-1.  However, it is also possible that
    the asymptotic distribution is not a chisquare, in which case this
    test is not appropriate.

    This function handles masked arrays.  If an element of `f_obs` or `f_exp`
    is masked, then data at that position is ignored, and does not count
    towards the size of the data set.

    .. versionadded:: 0.13.0

    References
    ----------
    .. [1] Lowry, Richard.  "Concepts and Applications of Inferential
           Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
    .. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test
    .. [3] "G-test", http://en.wikipedia.org/wiki/G-test
    .. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and
           practice of statistics in biological research", New York: Freeman
           (1981)
    .. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
           Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
           pp. 440-464.

    Examples
    --------
    (See `chisquare` for more examples.)

    When just `f_obs` is given, it is assumed that the expected frequencies
    are uniform and given by the mean of the observed frequencies.  Here we
    perform a G-test (i.e. use the log-likelihood ratio statistic):

    >>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood')
    (2.006573162632538, 0.84823476779463769)

    The expected frequencies can be given with the `f_exp` argument:

    >>> power_divergence([16, 18, 16, 14, 12, 12],
    ...                  f_exp=[16, 16, 16, 16, 16, 8],
    ...                  lambda_='log-likelihood')
    (3.5, 0.62338762774958223)

    When `f_obs` is 2-D, by default the test is applied to each column.

    >>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
    >>> obs.shape
    (6, 2)
    >>> power_divergence(obs, lambda_="log-likelihood")
    (array([ 2.00657316,  6.77634498]), array([ 0.84823477,  0.23781225]))

    By setting ``axis=None``, the test is applied to all data in the array,
    which is equivalent to applying the test to the flattened array.

    >>> power_divergence(obs, axis=None)
    (23.31034482758621, 0.015975692534127565)
    >>> power_divergence(obs.ravel())
    (23.31034482758621, 0.015975692534127565)

    `ddof` is the change to make to the default degrees of freedom.

    >>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1)
    (2.0, 0.73575888234288467)

    The calculation of the p-values is done by broadcasting the
    test statistic with `ddof`.

    >>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
    (2.0, array([ 0.84914504,  0.73575888,  0.5724067 ]))

    `f_obs` and `f_exp` are also broadcast.  In the following, `f_obs` has
    shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
    `f_obs` and `f_exp` has shape (2, 6).  To compute the desired chi-squared
    statistics, we must use ``axis=1``:

    >>> power_divergence([16, 18, 16, 14, 12, 12],
    ...                  f_exp=[[16, 16, 16, 16, 16, 8],
    ...                         [8, 20, 20, 16, 12, 12]],
    ...                  axis=1)
    (array([ 3.5 ,  9.25]), array([ 0.62338763,  0.09949846]))

    """
    # Convert the input argument `lambda_` to a numerical value.
    if isinstance(lambda_, string_types):
        if lambda_ not in _power_div_lambda_names:
            names = repr(list(_power_div_lambda_names.keys()))[1:-1]
            raise ValueError("invalid string for lambda_: {0!r}. Valid strings "
                             "are {1}".format(lambda_, names))
        lambda_ = _power_div_lambda_names[lambda_]
    elif lambda_ is None:
        # Default is Pearson's chi-squared.
        lambda_ = 1

    f_obs = np.asanyarray(f_obs)

    if f_exp is not None:
        f_exp = np.atleast_1d(np.asanyarray(f_exp))
    else:
        # Compute the equivalent of
        #   f_exp = f_obs.mean(axis=axis, keepdims=True)
        # Older versions of numpy do not have the 'keepdims' argument, so
        # we have to do a little work to achieve the same result.
        # Ignore 'invalid' errors so the edge case of a data set with length 0
        # is handled without spurious warnings.
        with np.errstate(invalid='ignore'):
            f_exp = np.atleast_1d(f_obs.mean(axis=axis))
        if axis is not None:
            # Re-insert the reduced axis as length 1 so f_exp broadcasts
            # against f_obs (the keepdims behavior).
            reduced_shape = list(f_obs.shape)
            reduced_shape[axis] = 1
            f_exp.shape = reduced_shape

    # `terms` is the array of terms that are summed along `axis` to create
    # the test statistic.  We use some specialized code for a few special
    # cases of lambda_.
    if lambda_ == 1:
        # Pearson's chi-squared statistic
        terms = (f_obs - f_exp)**2 / f_exp
    elif lambda_ == 0:
        # Log-likelihood ratio (i.e. G-test)
        terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
    elif lambda_ == -1:
        # Modified log-likelihood ratio
        terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
    else:
        # General Cressie-Read power divergence.
        terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
        terms /= 0.5 * lambda_ * (lambda_ + 1)

    stat = terms.sum(axis=axis)

    # _count ignores masked elements, so masked data shrinks the dof.
    num_obs = _count(terms, axis=axis)
    ddof = asarray(ddof)
    p = chisqprob(stat, num_obs - 1 - ddof)

    return stat, p
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
    """
    Calculates a one-way chi square test.

    The chi square test tests the null hypothesis that the categorical
    data has the given frequencies.  It is the ``lambda_="pearson"``
    member of the Cressie-Read power divergence family, so this function
    simply delegates to `power_divergence`.

    Parameters
    ----------
    f_obs : array_like
        Observed frequencies in each category.
    f_exp : array_like, optional
        Expected frequencies in each category.  By default the categories
        are assumed to be equally likely.
    ddof : int, optional
        "Delta degrees of freedom": adjustment to the degrees of freedom
        for the p-value.  The p-value is computed using a chi-squared
        distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
        is the number of observed frequencies.  The default value of
        `ddof` is 0.
    axis : int or None, optional
        The axis of the broadcast result of `f_obs` and `f_exp` along
        which to apply the test.  If axis is None, all values in `f_obs`
        are treated as a single data set.  Default is 0.

    Returns
    -------
    chisq : float or ndarray
        The chi-squared test statistic.  The value is a float if `axis` is
        None or `f_obs` and `f_exp` are 1-D.
    p : float or ndarray
        The p-value of the test.  The value is a float if `ddof` and the
        return value `chisq` are scalars.

    See Also
    --------
    power_divergence
    mstats.chisquare

    Notes
    -----
    This test is invalid when the observed or expected frequencies in each
    category are too small.  A typical rule is that all of the observed
    and expected frequencies should be at least 5.

    The default degrees of freedom, k-1, are for the case when no
    parameters of the distribution are estimated.  If p parameters are
    estimated by efficient maximum likelihood then the correct degrees of
    freedom are k-1-p.  If the parameters are estimated in a different
    way, then the dof can be between k-1-p and k-1.  It is also possible
    that the asymptotic distribution is not a chisquare, in which case
    this test is not appropriate.

    References
    ----------
    .. [1] Lowry, Richard.  "Concepts and Applications of Inferential
           Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
    .. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test

    Examples
    --------
    >>> chisquare([16, 18, 16, 14, 12, 12])
    (2.0, 0.84914503608460956)
    >>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])
    (3.5, 0.62338762774958223)
    >>> chisquare([16, 18, 16, 14, 12, 12], ddof=1)
    (2.0, 0.73575888234288467)

    See `power_divergence` for further examples (2-D input, broadcasting
    of `f_obs`/`f_exp`/`ddof`, ``axis=None``).
    """
    # Pearson's statistic is lambda_ = 1 in the power divergence family.
    return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis,
                            lambda_="pearson")
def ks_2samp(data1, data2):
    """
    Computes the Kolmogorov-Smirnov statistic on 2 samples.

    This is a two-sided test for the null hypothesis that 2 independent
    samples are drawn from the same continuous distribution.

    Parameters
    ----------
    data1, data2 : sequence of 1-D ndarrays
        two arrays of sample observations assumed to be drawn from a
        continuous distribution, sample sizes can be different

    Returns
    -------
    D : float
        KS statistic
    p-value : float
        two-tailed p-value

    Notes
    -----
    This tests whether 2 samples are drawn from the same distribution. Note
    that, like in the case of the one-sample K-S test, the distribution is
    assumed to be continuous.

    This is the two-sided test, one-sided tests are not implemented.
    The test uses the two-sided asymptotic Kolmogorov-Smirnov distribution.

    If the K-S statistic is small or the p-value is high, then we cannot
    reject the hypothesis that the distributions of the two samples
    are the same.

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(12345678)  # fix random seed to get the same result
    >>> n1 = 200  # size of first sample
    >>> n2 = 300  # size of second sample

    For a different distribution, we can reject the null hypothesis since the
    pvalue is below 1%:

    >>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1)
    >>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5)
    >>> stats.ks_2samp(rvs1, rvs2)
    (0.20833333333333337, 4.6674975515806989e-005)

    For a slightly different distribution, we cannot reject the null
    hypothesis at a 10% or lower alpha since the p-value at 0.144 is
    higher than 10%

    >>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0)
    >>> stats.ks_2samp(rvs1, rvs3)
    (0.10333333333333333, 0.14498781825751686)

    For an identical distribution, we cannot reject the null hypothesis since
    the p-value is high, 41%:

    >>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0)
    >>> stats.ks_2samp(rvs1, rvs4)
    (0.07999999999999996, 0.41126949729859719)

    """
    data1 = np.sort(data1)
    data2 = np.sort(data2)
    n1 = data1.shape[0]
    n2 = data2.shape[0]
    data_all = np.concatenate([data1, data2])
    # Empirical CDFs of each sample evaluated at every observed point.
    cdf1 = np.searchsorted(data1, data_all, side='right') / (1.0*n1)
    cdf2 = np.searchsorted(data2, data_all, side='right') / (1.0*n2)
    d = np.max(np.absolute(cdf1 - cdf2))
    # Note: d absolute not signed distance
    en = np.sqrt(n1 * n2 / float(n1 + n2))
    try:
        prob = distributions.kstwobign.sf((en + 0.12 + 0.11 / en) * d)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.  Fall back to a p-value of 1
        # only for genuine numerical failures in the sf evaluation.
        prob = 1.0
    return d, prob
def mannwhitneyu(x, y, use_continuity=True):
    """
    Computes the Mann-Whitney rank test on samples x and y.

    Parameters
    ----------
    x, y : array_like
        Array of samples, should be one-dimensional.
    use_continuity : bool, optional
            Whether a continuity correction (1/2.) should be taken into
            account. Default is True.

    Returns
    -------
    u : float
        The Mann-Whitney statistics.
    prob : float
        One-sided p-value assuming a asymptotic normal distribution.

    Raises
    ------
    ValueError
        If all numbers in the combined sample are identical (the tie
        correction is zero and the statistic is undefined).

    Notes
    -----
    Use only when the number of observation in each sample is > 20 and
    you have 2 independent samples of ranks. Mann-Whitney U is
    significant if the u-obtained is LESS THAN or equal to the critical
    value of U.

    This test corrects for ties and by default uses a continuity correction.
    The reported p-value is for a one-sided hypothesis, to get the two-sided
    p-value multiply the returned p-value by 2.
    """
    x = asarray(x)
    y = asarray(y)
    n1 = len(x)
    n2 = len(y)
    ranked = rankdata(np.concatenate((x, y)))
    rankx = ranked[0:n1]  # get the x-ranks
    u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx, axis=0)  # calc U for x
    u2 = n1*n2 - u1  # remainder is U for y
    bigu = max(u1, u2)
    smallu = min(u1, u2)
    T = tiecorrect(ranked)
    if T == 0:
        # BUG FIX: message previously read "in amannwhitneyu" (stray 'a').
        raise ValueError('All numbers are identical in mannwhitneyu')
    sd = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0)

    if use_continuity:
        # normal approximation for prob calc with continuity correction
        z = abs((bigu - 0.5 - n1*n2/2.0) / sd)
    else:
        z = abs((bigu - n1*n2/2.0) / sd)  # normal approximation for prob calc

    return smallu, distributions.norm.sf(z)
def ranksums(x, y):
    """
    Compute the Wilcoxon rank-sum statistic for two samples.

    The Wilcoxon rank-sum test tests the null hypothesis that two sets
    of measurements are drawn from the same distribution.  The alternative
    hypothesis is that values in one sample are more likely to be
    larger than the values in the other sample.

    This test should be used to compare two samples from continuous
    distributions.  It does not handle ties between measurements
    in x and y.  For tie-handling and an optional continuity correction
    see `scipy.stats.mannwhitneyu`.

    Parameters
    ----------
    x,y : array_like
        The data from the two samples

    Returns
    -------
    z-statistic : float
        The test statistic under the large-sample approximation that the
        rank sum statistic is normally distributed
    p-value : float
        The two-sided p-value of the test

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
    """
    x, y = np.asarray(x), np.asarray(y)
    n1, n2 = len(x), len(y)
    # Rank the pooled data; the first n1 ranks belong to x.
    pooled_ranks = rankdata(np.concatenate((x, y)))
    rank_sum = np.sum(pooled_ranks[:n1], axis=0)
    expected = n1 * (n1 + n2 + 1) / 2.0
    z = (rank_sum - expected) / np.sqrt(n1 * n2 * (n1 + n2 + 1) / 12.0)
    return z, 2 * distributions.norm.sf(abs(z))
def kruskal(*args):
    """
    Compute the Kruskal-Wallis H-test for independent samples

    The Kruskal-Wallis H-test tests the null hypothesis that the population
    median of all of the groups are equal.  It is a non-parametric version of
    ANOVA.  The test works on 2 or more independent samples, which may have
    different sizes.  Note that rejecting the null hypothesis does not
    indicate which of the groups differs.  Post-hoc comparisons between
    groups are required to determine which groups are different.

    Parameters
    ----------
    sample1, sample2, ... : array_like
       Two or more arrays with the sample measurements can be given as
       arguments.

    Returns
    -------
    H-statistic : float
       The Kruskal-Wallis H statistic, corrected for ties
    p-value : float
       The p-value for the test using the assumption that H has a chi
       square distribution

    Notes
    -----
    Due to the assumption that H has a chi square distribution, the number
    of samples in each group must not be too small.  A typical rule is
    that each sample must have at least 5 measurements.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance
    """
    groups = [np.asarray(arg) for arg in args]
    num_groups = len(groups)
    if num_groups < 2:
        raise ValueError("Need at least two groups in stats.kruskal()")
    sizes = np.asarray([len(g) for g in groups])
    ranked = rankdata(np.concatenate(groups))  # rank the pooled data
    ties = tiecorrect(ranked)  # tie correction factor
    if ties == 0:
        raise ValueError('All numbers are identical in kruskal')

    # Sum of (rank-sum)**2 / n over the groups; `bounds` gives each
    # group's slice into the pooled rank array.
    bounds = np.insert(np.cumsum(sizes), 0, 0)
    ssbn = 0
    for k in range(num_groups):
        ssbn += square_of_sums(ranked[bounds[k]:bounds[k + 1]]) / float(sizes[k])

    total = np.sum(sizes)
    h = 12.0 / (total * (total + 1)) * ssbn - 3 * (total + 1)
    h /= ties
    return h, chisqprob(h, num_groups - 1)
def friedmanchisquare(*args):
    """
    Computes the Friedman test for repeated measurements

    The Friedman test tests the null hypothesis that repeated measurements of
    the same individuals have the same distribution.  It is often used
    to test for consistency among measurements obtained in different ways.
    For example, if two measurement techniques are used on the same set of
    individuals, the Friedman test can be used to determine if the two
    measurement techniques are consistent.

    Parameters
    ----------
    measurements1, measurements2, measurements3... : array_like
        Arrays of measurements.  All of the arrays must have the same number
        of elements.  At least 3 sets of measurements must be given.

    Returns
    -------
    friedman chi-square statistic : float
        the test statistic, correcting for ties
    p-value : float
        the associated p-value assuming that the test statistic has a chi
        squared distribution

    Notes
    -----
    Due to the assumption that the test statistic has a chi squared
    distribution, the p-value is only reliable for n > 10 and more than
    6 repeated measurements.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Friedman_test
    """
    k = len(args)
    if k < 3:
        raise ValueError('\nLess than 3 levels.  Friedman test not appropriate.\n')
    n = len(args[0])
    if any(len(measurements) != n for measurements in args[1:]):
        raise ValueError('Unequal N in friedmanchisquare.  Aborting.')

    # One row per subject, one column per condition; rank each subject's
    # measurements across the k conditions.
    data = np.vstack(args).T.astype(float)
    for row in range(len(data)):
        data[row] = rankdata(data[row])

    # Tie correction: accumulate t*(t**2 - 1) over every run of tied ranks.
    ties = 0
    for row in range(len(data)):
        replist, repnum = find_repeats(array(data[row]))
        for t in repnum:
            ties += t * (t * t - 1)
    c = 1 - ties / float(k * (k * k - 1) * n)

    ssbn = np.sum(data.sum(axis=0)**2)
    chisq = (12.0 / (k * n * (k + 1)) * ssbn - 3 * n * (k + 1)) / c
    return chisq, chisqprob(chisq, k - 1)
def combine_pvalues(pvalues, method='fisher', weights=None):
    """
    Methods for combining the p-values of independent tests bearing upon the
    same hypothesis.

    Parameters
    ----------
    pvalues : array_like, 1-D
        Array of p-values assumed to come from independent tests.
    method : {'fisher', 'stouffer'}, optional
        Name of method to use to combine p-values. The following methods are
        available:
        - "fisher": Fisher's method (Fisher's combined probability test),
          the default.
        - "stouffer": Stouffer's Z-score method.
    weights : array_like, 1-D, optional
        Optional array of weights used only for Stouffer's Z-score method.

    Returns
    -------
    statistic: float
        The statistic calculated by the specified method:
        - "fisher": The chi-squared statistic
        - "stouffer": The Z-score
    pval: float
        The combined p-value.

    Notes
    -----
    Fisher's method [1]_ combines ``-2*sum(log(p))``, which under the null
    follows a chi-squared distribution with ``2*len(pvalues)`` degrees of
    freedom. Stouffer's Z-score method [2]_ combines (optionally weighted)
    Z-scores instead; weighting can make it more powerful when the p-values
    come from studies of different size [3]_ [4]_.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Fisher%27s_method
    .. [2] http://en.wikipedia.org/wiki/Fisher's_method#Relation_to_Stouffer.27s_Z-score_method
    .. [3] Whitlock, M. C. "Combining probability from independent tests: the
           weighted Z-method is superior to Fisher's approach." Journal of
           Evolutionary Biology 18, no. 5 (2005): 1368-1373.
    .. [4] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
           for combining probabilities in meta-analysis." Journal of
           Evolutionary Biology 24, no. 8 (2011): 1836-1841.
    """
    pvalues = np.asarray(pvalues)
    if pvalues.ndim != 1:
        raise ValueError("pvalues is not 1-D")

    if method == 'fisher':
        # -2*sum(log(p)) ~ chi2 with 2k degrees of freedom under H0.
        Xsq = -2 * np.sum(np.log(pvalues))
        pval = distributions.chi2.sf(Xsq, 2 * len(pvalues))
        return (Xsq, pval)
    elif method == 'stouffer':
        if weights is None:
            weights = np.ones_like(pvalues)
        elif len(weights) != len(pvalues):
            raise ValueError("pvalues and weights must be of the same size.")

        weights = np.asarray(weights)
        if weights.ndim != 1:
            raise ValueError("weights is not 1-D")

        Zi = distributions.norm.isf(pvalues)
        # Dividing the weighted sum by ||w|| keeps Z standard normal under H0.
        Z = np.dot(weights, Zi) / np.linalg.norm(weights)
        pval = distributions.norm.sf(Z)
        return (Z, pval)
    else:
        # BUG FIX: the message and `method` were passed as two separate
        # ValueError arguments instead of being %-formatted, so callers saw
        # an args tuple rather than a readable message.
        raise ValueError(
            "Invalid method '%s'. Options are 'fisher' or 'stouffer'" % method)
#####################################
# PROBABILITY CALCULATIONS #
#####################################
def chisqprob(chisq, df):
    """
    Probability value (1-tail) for the Chi^2 probability distribution.

    Broadcasting rules apply.

    Parameters
    ----------
    chisq : array_like or float > 0
        Chi-squared statistic value(s).
    df : array_like or float, probably int >= 1
        Degrees of freedom.

    Returns
    -------
    chisqprob : ndarray
        Area under the Chi^2 density with `df` degrees of freedom,
        integrated from `chisq` to infinity (the survival function).
    """
    # chdtrc is the complemented chi-squared CDF from scipy.special.
    return special.chdtrc(df, chisq)
def betai(a, b, x):
    """
    Return the regularized incomplete beta function.

    I_x(a,b) = 1/B(a,b) * Integral(0,x) of t^(a-1)(1-t)^(b-1) dt
    with a,b > 0 and B(a,b) the beta function. Standard broadcasting
    rules apply to `a`, `b` and `x`.

    Parameters
    ----------
    a : array_like or float > 0
    b : array_like or float > 0
    x : array_like or float
        Clipped so that no value greater than 1.0 is evaluated.

    Returns
    -------
    betai : ndarray
        Incomplete beta function.
    """
    x = np.asarray(x)
    # Anything at or above the upper integration bound maps to 1.0
    # (np.where semantics deliberately preserved for non-finite input).
    return special.betainc(a, b, np.where(x < 1.0, x, 1.0))
#####################################
# ANOVA CALCULATIONS #
#####################################
def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):
    """Calculation of Wilks lambda F-statistic for multivarite data, per
    Maxwell & Delaney p.657.

    `ER`/`EF` are the restricted/full error matrices (scalars are promoted
    to 1x1 matrices). Note that `dfnum` and `dfden` are accepted but not
    used by this formula.
    """
    ER = array([[ER]]) if isinstance(ER, (int, float)) else ER
    EF = array([[EF]]) if isinstance(EF, (int, float)) else EF
    wilks = linalg.det(EF) / linalg.det(ER)
    a1sq = (a - 1) ** 2
    b1sq = (b - 1) ** 2
    # Rao's approximation: q degenerates to 1 when (a-1)^2 + (b-1)^2 == 5.
    if a1sq + b1sq == 5:
        q = 1
    else:
        q = np.sqrt((a1sq * b1sq - 2) / (a1sq + b1sq - 5))
    root = wilks ** (1.0 / q)
    numer = (1 - root) * (a - 1) * (b - 1)
    denom = root / (numer * q - 0.5 * (a - 1) * (b - 1) + 1)
    return numer / denom
def f_value(ER, EF, dfR, dfF):
    """
    Return an F-statistic comparing a restricted and an unrestricted model.

    Parameters
    ----------
    ER : float
        Sum of squared residuals for the restricted (null) model.
    EF : float
        Sum of squared residuals for the unrestricted (alternate) model.
    dfR : int
        Degrees of freedom of the restricted model.
    dfF : int
        Degrees of freedom of the unrestricted model.

    Returns
    -------
    F-statistic : float
    """
    mean_sq_diff = (ER - EF) / float(dfR - dfF)
    mean_sq_full = EF / float(dfF)
    return mean_sq_diff / mean_sq_full
def f_value_multivariate(ER, EF, dfnum, dfden):
    """
    Return a multivariate F-statistic from determinant ratios.

    Parameters
    ----------
    ER : ndarray or scalar
        Error matrix of the null hypothesis (Restricted model); scalars
        are promoted to 1x1 matrices.
    EF : ndarray or scalar
        Error matrix of the alternate hypothesis (Full model); scalars
        are promoted to 1x1 matrices.
    dfnum : int
        Degrees of freedom of the Restricted model.
    dfden : int
        Degrees of freedom associated with the Restricted model.

    Returns
    -------
    fstat : float
        The computed F-statistic.
    """
    ER = array([[ER]]) if isinstance(ER, (int, float)) else ER
    EF = array([[EF]]) if isinstance(EF, (int, float)) else EF
    restricted_err = linalg.det(ER)
    full_err = linalg.det(EF)
    return ((restricted_err - full_err) / float(dfnum)) / (full_err / float(dfden))
#####################################
# SUPPORT FUNCTIONS #
#####################################
def ss(a, axis=0):
    """
    Square each element of the input array and return the sum(s).

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to calculate; None flattens first. Default is 0.

    Returns
    -------
    ss : ndarray
        The sum of ``a**2`` along the given axis.

    See also
    --------
    square_of_sums : the square(s) of the sum(s) (the opposite of `ss`).
    """
    a, axis = _chk_asarray(a, axis)
    return np.sum(np.square(a), axis)
def square_of_sums(a, axis=0):
    """
    Sum the elements of the input array and return the square(s) of that sum.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to calculate; None flattens first. Default is 0.

    Returns
    -------
    square_of_sums : float or ndarray
        The squared sum over `axis`, always in floating point.

    See also
    --------
    ss : the sum of squares (the opposite of `square_of_sums`).
    """
    a, axis = _chk_asarray(a, axis)
    total = np.sum(a, axis)
    if np.isscalar(total):
        return float(total) * total
    return total.astype(float) * total
@np.deprecate(message="scipy.stats.fastsort is deprecated in scipy 0.16.0")
def fastsort(a):
    """
    Sort an array and provide the argsort.

    .. deprecated:: 0.16.0

    Parameters
    ----------
    a : array_like
        Input array.

    Returns
    -------
    sorted : ndarray
        `a` in ascending order.
    argsort : ndarray of type int
        Indices that sort the original array, i.e. ``a[argsort] == sorted``.
    """
    it = np.argsort(a)
    as_ = a[it]
    return as_, it
| 31.699851 | 95 | 0.597949 |
from __future__ import division, print_function, absolute_import
import warnings
import math
from collections import namedtuple
from scipy._lib.six import xrange
from scipy._lib.six import callable, string_types
from numpy import array, asarray, ma, zeros
import scipy.special as special
import scipy.linalg as linalg
import numpy as np
from . import futil
from . import distributions
from ._rank import rankdata, tiecorrect
__all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar',
'tmin', 'tmax', 'tstd', 'tsem', 'moment', 'variation',
'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
'normaltest', 'jarque_bera', 'itemfreq',
'scoreatpercentile', 'percentileofscore', 'histogram',
'histogram2', 'cumfreq', 'relfreq', 'obrientransform',
'signaltonoise', 'sem', 'zmap', 'zscore', 'threshold',
'sigmaclip', 'trimboth', 'trim1', 'trim_mean', 'f_oneway',
'pearsonr', 'fisher_exact', 'spearmanr', 'pointbiserialr',
'kendalltau', 'linregress', 'theilslopes', 'ttest_1samp',
'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', 'kstest',
'chisquare', 'power_divergence', 'ks_2samp', 'mannwhitneyu',
'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
'chisqprob', 'betai',
'f_value_wilks_lambda', 'f_value', 'f_value_multivariate',
'ss', 'square_of_sums', 'fastsort', 'rankdata', 'nanmean',
'nanstd', 'nanmedian', 'combine_pvalues', ]
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
return a, outaxis
def _chk2_asarray(a, b, axis):
if axis is None:
a = np.ravel(a)
b = np.ravel(b)
outaxis = 0
else:
a = np.asarray(a)
b = np.asarray(b)
outaxis = axis
return a, b, outaxis
def find_repeats(arr):
    """Find repeated values in `arr` via the Fortran helper `futil.dfreps`."""
    # NOTE(review): based on usage here, dfreps appears to return the
    # repeated values, their counts, and how many such values were found,
    # so both outputs are trimmed to length n -- confirm against futil.
    v1, v2, n = futil.dfreps(arr)
    return v1[:n], v2[:n]
s deprecated in scipy 0.15.0 "
"in favour of numpy.nanmean.")
def nanmean(x, axis=0):
x, axis = _chk_asarray(x, axis)
x = x.copy()
Norig = x.shape[axis]
mask = np.isnan(x)
factor = 1.0 - np.sum(mask, axis) / Norig
x[mask] = 0.0
return np.mean(x, axis) / factor
@np.deprecate(message="scipy.stats.nanstd is deprecated in scipy 0.15 "
"in favour of numpy.nanstd.\nNote that numpy.nanstd "
"has a different signature.")
def nanstd(x, axis=0, bias=False):
x, axis = _chk_asarray(x, axis)
x = x.copy()
Norig = x.shape[axis]
mask = np.isnan(x)
Nnan = np.sum(mask, axis) * 1.0
n = Norig - Nnan
x[mask] = 0.0
m1 = np.sum(x, axis) / n
if axis:
d = x - np.expand_dims(m1, axis)
else:
d = x - m1
d *= d
m2 = np.sum(d, axis) - m1 * m1 * Nnan
if bias:
m2c = m2 / n
else:
m2c = m2 / (n - 1.0)
return np.sqrt(m2c)
def _nanmedian(arr1d):
x = arr1d.copy()
c = np.isnan(x)
s = np.where(c)[0]
if s.size == x.size:
warnings.warn("All-NaN slice encountered", RuntimeWarning)
return np.nan
elif s.size != 0:
enonan = x[-s.size:][~c[-s.size:]]
x[s[:enonan.size]] = enonan
x = x[:-s.size]
return np.median(x, overwrite_input=True)
@np.deprecate(message="scipy.stats.nanmedian is deprecated in scipy 0.15 "
"in favour of numpy.nanmedian.")
def nanmedian(x, axis=0):
x, axis = _chk_asarray(x, axis)
if x.ndim == 0:
return float(x.item())
if hasattr(np, 'nanmedian'):
return np.nanmedian(x, axis)
x = np.apply_along_axis(_nanmedian, axis, x)
if x.ndim == 0:
x = float(x.item())
return x
ims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def mask_to_limits(a, limits, inclusive):
lower_limit, upper_limit = limits
lower_include, upper_include = inclusive
am = ma.MaskedArray(a)
if lower_limit is not None:
if lower_include:
am = ma.masked_less(am, lower_limit)
else:
am = ma.masked_less_equal(am, lower_limit)
if upper_limit is not None:
if upper_include:
am = ma.masked_greater(am, upper_limit)
else:
am = ma.masked_greater_equal(am, upper_limit)
if am.count() == 0:
raise ValueError("No array values within given limits")
return am
def tmean(a, limits=None, inclusive=(True, True)):
a = asarray(a)
if limits is None:
return np.mean(a, None)
am = mask_to_limits(a.ravel(), limits, inclusive)
return am.mean()
def masked_var(am):
    """Unbiased (ddof=1) variance of a masked array, ignoring masked entries."""
    m = am.mean()
    s = ma.add.reduce((am - m)**2)
    # count() excludes masked elements, so n is the effective sample size - 1.
    n = am.count() - 1.0
    return s / n
def tvar(a, limits=None, inclusive=(True, True)):
a = asarray(a)
a = a.astype(float).ravel()
if limits is None:
n = len(a)
return a.var() * n/(n-1.)
am = mask_to_limits(a, limits, inclusive)
return masked_var(am)
def tmin(a, lowerlimit=None, axis=0, inclusive=True):
a, axis = _chk_asarray(a, axis)
am = mask_to_limits(a, (lowerlimit, None), (inclusive, False))
return ma.minimum.reduce(am, axis)
def tmax(a, upperlimit=None, axis=0, inclusive=True):
a, axis = _chk_asarray(a, axis)
am = mask_to_limits(a, (None, upperlimit), (False, inclusive))
return ma.maximum.reduce(am, axis)
def tstd(a, limits=None, inclusive=(True, True)):
    """Trimmed sample standard deviation (ddof=1); see `tvar` for `limits`."""
    return np.sqrt(tvar(a, limits, inclusive))
def tsem(a, limits=None, inclusive=(True, True)):
    """Trimmed standard error of the mean, ignoring values outside `limits`."""
    a = np.asarray(a).ravel()
    if limits is None:
        # No trimming: plain SEM with the unbiased (ddof=1) estimator.
        return a.std(ddof=1) / np.sqrt(a.size)
    am = mask_to_limits(a, limits, inclusive)
    sd = np.sqrt(masked_var(am))
    return sd / np.sqrt(am.count())
ract(can_correct, m2)
m3 = np.extract(can_correct, m3)
nval = np.sqrt((n-1.0)*n) / (n-2.0) * m3/m2**1.5
np.place(vals, can_correct, nval)
if vals.ndim == 0:
return vals.item()
return vals
def kurtosis(a, axis=0, fisher=True, bias=True):
a, axis = _chk_asarray(a, axis)
n = a.shape[axis]
m2 = moment(a, 2, axis)
m4 = moment(a, 4, axis)
zero = (m2 == 0)
olderr = np.seterr(all='ignore')
try:
vals = np.where(zero, 0, m4 / m2**2.0)
finally:
np.seterr(**olderr)
if not bias:
can_correct = (n > 3) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m4 = np.extract(can_correct, m4)
nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
np.place(vals, can_correct, nval + 3.0)
if vals.ndim == 0:
vals = vals.item()
if fisher:
return vals - 3
else:
return vals
_DescribeResult = namedtuple('DescribeResult', ('nobs', 'minmax', 'mean',
'variance', 'skewness',
'kurtosis'))
def describe(a, axis=0, ddof=1):
a, axis = _chk_asarray(a, axis)
n = a.shape[axis]
mm = (np.min(a, axis=axis), np.max(a, axis=axis))
m = np.mean(a, axis=axis)
v = np.var(a, axis=axis, ddof=ddof)
sk = skew(a, axis)
kurt = kurtosis(a, axis)
return _DescribeResult(n, mm, m, v, sk, kurt)
(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
(n*(n-2)*(n-3)))
A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 - 2/(9.0*A)
denom = 1 + x*np.sqrt(2/(A-4.0))
denom = np.where(denom < 0, 99, denom)
term2 = np.where(denom < 0, term1, np.power((1-2.0/A)/denom, 1/3.0))
Z = (term1 - term2) / np.sqrt(2/(9.0*A))
Z = np.where(denom == 99, 0, Z)
if Z.ndim == 0:
Z = Z[()]
return Z, 2 * distributions.norm.sf(np.abs(Z))
def normaltest(a, axis=0):
a, axis = _chk_asarray(a, axis)
s, _ = skewtest(a, axis)
k, _ = kurtosistest(a, axis)
k2 = s*s + k*k
return k2, chisqprob(k2, 2)
def jarque_bera(x):
x = np.asarray(x)
n = float(x.size)
if n == 0:
raise ValueError('At least one observation is required.')
mu = x.mean()
diffx = x - mu
skewness = (1 / n * np.sum(diffx**3)) / (1 / n * np.sum(diffx**2))**(3 / 2.)
kurtosis = (1 / n * np.sum(diffx**4)) / (1 / n * np.sum(diffx**2))**2
jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4)
p = 1 - distributions.chi2.cdf(jb_value, 2)
return jb_value, p
if interpolation_method == 'lower':
idx = int(np.floor(idx))
elif interpolation_method == 'higher':
idx = int(np.ceil(idx))
elif interpolation_method == 'fraction':
pass
else:
raise ValueError("interpolation_method can only be 'fraction', "
"'lower' or 'higher'")
i = int(idx)
if i == idx:
indexer[axis] = slice(i, i + 1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i + 2)
j = i + 1
weights = array([(j - idx), (idx - i)], float)
wshape = [1] * sorted.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval
def percentileofscore(a, score, kind='rank'):
    """
    Percentile rank of a score relative to a list of scores.

    Parameters
    ----------
    a : array_like
        Array of scores to compare `score` against.
    score : int or float
        Score to compute the percentile for.
    kind : {'rank', 'strict', 'weak', 'mean'}, optional
        'rank' averages the ranks of all entries equal to `score`;
        'strict' counts entries strictly below; 'weak' counts entries
        at or below; 'mean' averages the strict and weak results.

    Returns
    -------
    pcos : float
        Percentile-position of `score` (0-100) relative to `a`.

    Raises
    ------
    ValueError
        If `kind` is not one of the recognized options.
    """
    a = np.array(a)
    n = len(a)

    if kind == 'rank':
        if not np.any(a == score):
            # Score absent: insert it so it picks up a rank of its own.
            a = np.append(a, score)
            a_len = np.array(list(range(len(a))))
        else:
            a_len = np.array(list(range(len(a)))) + 1.0

        a = np.sort(a)
        # BUG FIX: index with the boolean array directly; wrapping it in a
        # Python list is invalid fancy indexing in modern NumPy.
        idx = a == score
        pct = (np.mean(a_len[idx]) / n) * 100.0
        return pct
    elif kind == 'strict':
        return np.sum(a < score) / float(n) * 100
    elif kind == 'weak':
        return np.sum(a <= score) / float(n) * 100
    elif kind == 'mean':
        return (np.sum(a < score) + np.sum(a <= score)) * 50 / float(n)
    else:
        raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'")
def histogram2(a, bins):
    """Count elements of `a` falling between successive `bins` boundaries,
    with a final bucket running to the end of the sorted data."""
    positions = np.searchsorted(np.sort(a), bins)
    edges = np.concatenate([positions, [len(a)]])
    return edges[1:] - edges[:-1]
def histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
a = np.ravel(a)
if defaultlimits is None:
data_min = a.min()
data_max = a.max()
s = (data_max - data_min) / (2. * (numbins - 1.))
defaultlimits = (data_min - s, data_max + s)
hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,
weights=weights)
# hist are not always floats, convert to keep with old output
hist = np.array(hist, dtype=float)
# fixed width for bins is assumed, as numpy's histogram gives
binsize = bin_edges[1] - bin_edges[0]
extrapoints = len([v for v in a
if defaultlimits[0] > v or v > defaultlimits[1]])
if extrapoints > 0 and printextras:
warnings.warn("Points outside given histogram range = %s"
% extrapoints)
return hist, defaultlimits[0], binsize, extrapoints
def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
h, l, b, e = histogram(a, numbins, defaultreallimits, weights=weights)
cumhist = np.cumsum(h * 1, axis=0)
return cumhist, l, b, e
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
h, l, b, e = histogram(a, numbins, defaultreallimits, weights=weights)
h = np.array(h / float(np.array(a).shape[0]))
return h, l, b, e
np.sqrt(n)
return s
def zscore(a, axis=0, ddof=0):
    """Standardize `a` along `axis`: (a - mean) / std, with `ddof` passed
    to the standard-deviation calculation."""
    arr = np.asanyarray(a)
    mean = arr.mean(axis=axis)
    std = arr.std(axis=axis, ddof=ddof)
    # Re-insert the reduced axis so the statistics broadcast against `arr`.
    if axis and mean.ndim < arr.ndim:
        mean = np.expand_dims(mean, axis=axis)
        std = np.expand_dims(std, axis=axis)
    return (arr - mean) / std
def zmap(scores, compare, axis=0, ddof=0):
    """Z-scores of `scores` relative to the mean/std of `compare`
    (computed along `axis`, with `ddof` delta degrees of freedom)."""
    scores = np.asanyarray(scores)
    compare = np.asanyarray(compare)
    mean = compare.mean(axis=axis)
    std = compare.std(axis=axis, ddof=ddof)
    # Re-insert the reduced axis so the statistics broadcast against scores.
    if axis and mean.ndim < compare.ndim:
        mean = np.expand_dims(mean, axis=axis)
        std = np.expand_dims(std, axis=axis)
    return (scores - mean) / std
#####################################
# TRIMMING FUNCTIONS #
#####################################
def threshold(a, threshmin=None, threshmax=None, newval=0):
    """Replace entries of `a` below `threshmin` or above `threshmax` with
    `newval`. A bound set to None is not enforced; the input is copied."""
    out = asarray(a).copy()
    outside = zeros(out.shape, dtype=bool)
    if threshmin is not None:
        outside = outside | (out < threshmin)
    if threshmax is not None:
        outside = outside | (out > threshmax)
    out[outside] = newval
    return out
def sigmaclip(a, low=4., high=4.):
    """Iteratively remove values outside mean +/- (low|high)*std until no
    element is clipped; return (clipped array, lower bound, upper bound)."""
    kept = np.asarray(a).ravel()
    clipped_count = 1
    while clipped_count:
        spread = kept.std()
        center = kept.mean()
        before = kept.size
        critlower = center - spread * low
        critupper = center + spread * high
        kept = kept[(kept > critlower) & (kept < critupper)]
        clipped_count = before - kept.size
    return kept, critlower, critupper
def trimboth(a, proportiontocut, axis=0):
    """
    Slice off `proportiontocut` of the lowest- and highest-index entries
    of `a` along `axis`.

    Note that the data are not sorted here; callers wanting the extreme
    *values* removed must pass already-sorted data.

    Parameters
    ----------
    a : array_like
        Input array.
    proportiontocut : float
        Fraction (per end) to remove.
    axis : int or None, optional
        Axis along which to trim; None ravels first. Default 0.

    Returns
    -------
    out : ndarray
        Trimmed version of `a`.

    Raises
    ------
    ValueError
        If trimming would remove everything.
    """
    a = np.asarray(a)
    if axis is None:
        a = a.ravel()
        axis = 0
    nobs = a.shape[axis]
    lowercut = int(proportiontocut * nobs)
    uppercut = nobs - lowercut
    if (lowercut >= uppercut):
        raise ValueError("Proportion too big.")

    sl = [slice(None)] * a.ndim
    sl[axis] = slice(lowercut, uppercut)
    # BUG FIX: index with a tuple; indexing an ndarray with a *list* of
    # slices is deprecated/removed in modern NumPy.
    return a[tuple(sl)]
def trim1(a, proportiontocut, tail='right'):
    """
    Slice off `proportiontocut` of `a` from a single end.

    Note that the data are not sorted here; the slice is positional.

    Parameters
    ----------
    a : array_like
        Input array.
    proportiontocut : float
        Fraction to remove.
    tail : {'right', 'left'}, optional
        End to trim from (case-insensitive). Default 'right'.

    Returns
    -------
    trim1 : ndarray
        Trimmed version of `a`.

    Raises
    ------
    ValueError
        If `tail` is not 'right' or 'left'.
    """
    a = asarray(a)
    side = tail.lower()
    if side == 'right':
        lowercut = 0
        uppercut = len(a) - int(proportiontocut * len(a))
    elif side == 'left':
        lowercut = int(proportiontocut * len(a))
        uppercut = len(a)
    else:
        # BUG FIX: an unrecognized `tail` used to fall through and raise
        # NameError on the undefined cut indices; fail clearly instead.
        raise ValueError("tail must be 'right' or 'left'")

    return a[lowercut:uppercut]
def trim_mean(a, proportiontocut, axis=0):
a = np.asarray(a)
if axis is None:
nobs = a.size
else:
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut - 1
if (lowercut > uppercut):
raise ValueError("Proportion too big.")
try:
atmp = np.partition(a, (lowercut, uppercut), axis)
except AttributeError:
atmp = np.sort(a, axis)
newa = trimboth(atmp, proportiontocut, axis=axis)
return np.mean(newa, axis=axis)
def f_oneway(*args):
args = [np.asarray(arg, dtype=float) for arg in args]
na = len(args) # ANOVA on 'na' groups, each in it's own array
alldata = np.concatenate(args)
bign = len(alldata)
sstot = ss(alldata) - (square_of_sums(alldata) / float(bign))
ssbn = 0
for a in args:
ssbn += square_of_sums(a) / float(len(a))
ssbn -= (square_of_sums(alldata) / float(bign))
sswn = sstot - ssbn
dfbn = na - 1
dfwn = bign - na
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
f = msb / msw
prob = special.fdtrc(dfbn, dfwn, f)
return f, prob
def pearsonr(x, y):
x = np.asarray(x)
y = np.asarray(y)
n = len(x)
mx = x.mean()
my = y.mean()
xm, ym = x - mx, y - my
r_num = np.add.reduce(xm * ym)
r_den = np.sqrt(ss(xm) * ss(ym))
r = r_num / r_den
r = max(min(r, 1.0), -1.0)
df = n - 2
if abs(r) == 1.0:
prob = 0.0
else:
t_squared = r**2 * (df / ((1.0 - r) * (1.0 + r)))
prob = betai(0.5*df, 0.5, df/(df+t_squared))
return r, prob
def fisher_exact(table, alternative='two-sided'):
hypergeom = distributions.hypergeom
c = np.asarray(table, dtype=np.int64)
if not c.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(c < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
return np.nan, 1.0
if c[1,0] > 0 and c[0,1] > 0:
oddsratio = c[0,0] * c[1,1] / float(c[1,0] * c[0,1])
else:
oddsratio = np.inf
n1 = c[0,0] + c[0,1]
n2 = c[1,0] + c[1,1]
n = c[0,0] + c[1,0]
def binary_search(n, n1, n2, side):
if side == "upper":
minval = mode
maxval = n
else:
minval = 0
maxval = mode
guess = -1
while maxval - minval > 1:
if maxval == minval + 1 and guess == minval:
guess = maxval
else:
guess = (maxval + minval) // 2
pguess = hypergeom.pmf(guess, n1 + n2, n1, n)
if side == "upper":
ng = guess - 1
else:
ng = guess + 1
if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n):
break
elif pguess < pexact:
maxval = guess
else:
minval = guess
if guess == -1:
guess = minval
if side == "upper":
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess -= 1
while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess += 1
else:
while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess += 1
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess -= 1
return guess
if alternative == 'less':
pvalue = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
elif alternative == 'greater':
pvalue = hypergeom.cdf(c[0,1], n1 + n2, n1, c[0,1] + c[1,1])
elif alternative == 'two-sided':
mode = int(float((n + 1) * (n1 + 1)) / (n1 + n2 + 2))
pexact = hypergeom.pmf(c[0,0], n1 + n2, n1, n)
pmode = hypergeom.pmf(mode, n1 + n2, n1, n)
epsilon = 1 - 1e-4
if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
return oddsratio, 1.
elif c[0,0] < mode:
plower = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, plower
guess = binary_search(n, n1, n2, "upper")
pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n)
else:
pupper = hypergeom.sf(c[0,0] - 1, n1 + n2, n1, n)
if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, pupper
guess = binary_search(n, n1, n2, "lower")
pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
else:
msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
raise ValueError(msg)
if pvalue > 1.0:
pvalue = 1.0
return oddsratio, pvalue
def spearmanr(a, b=None, axis=0):
a, axisout = _chk_asarray(a, axis)
ar = np.apply_along_axis(rankdata, axisout, a)
br = None
if b is not None:
b, axisout = _chk_asarray(b, axis)
br = np.apply_along_axis(rankdata, axisout, b)
n = a.shape[axisout]
rs = np.corrcoef(ar, br, rowvar=axisout)
olderr = np.seterr(divide='ignore')
try:
t = rs * np.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))
finally:
np.seterr(**olderr)
prob = 2 * distributions.t.sf(np.abs(t), n-2)
if rs.shape == (2, 2):
return rs[1,0], prob[1,0]
else:
return rs, prob
def pointbiserialr(x, y):
x = np.asarray(x, dtype=bool)
y = np.asarray(y, dtype=float)
n = len(x)
phat = x.sum() / float(len(x))
y0 = y[~x]
y1 = y[x]
y0m = y0.mean()
y1m = y1.mean()
rpb = (y1m - y0m) * np.sqrt(phat - phat**2) / y.std()
df = n - 2
TINY = 1e-20
t = rpb * np.sqrt(df / ((1.0 - rpb + TINY)*(1.0 + rpb + TINY)))
prob = betai(0.5*df, 0.5, df/(df+t*t))
return rpb, prob
def kendalltau(x, y, initial_lexsort=True):
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if not x.size or not y.size:
return (np.nan, np.nan)
n = np.int64(len(x))
temp = list(range(n))
def mergesort(offs, length):
exchcnt = 0
if length == 1:
return 0
if length == 2:
if y[perm[offs]] <= y[perm[offs+1]]:
return 0
t = perm[offs]
perm[offs] = perm[offs+1]
perm[offs+1] = t
return 1
length0 = length // 2
length1 = length - length0
middle = offs + length0
exchcnt += mergesort(offs, length0)
exchcnt += mergesort(middle, length1)
if y[perm[middle - 1]] < y[perm[middle]]:
return exchcnt
i = j = k = 0
while j < length0 or k < length1:
if k >= length1 or (j < length0 and y[perm[offs + j]] <=
y[perm[middle + k]]):
temp[i] = perm[offs + j]
d = i - j
j += 1
else:
temp[i] = perm[middle + k]
d = (offs + i) - (middle + k)
k += 1
if d > 0:
exchcnt += d
i += 1
perm[offs:offs+length] = temp[0:length]
return exchcnt
if initial_lexsort:
perm = np.lexsort((y, x))
else:
perm = list(range(n))
perm.sort(key=lambda a: (x[a], y[a]))
first = 0
t = 0
for i in xrange(1, n):
if x[perm[first]] != x[perm[i]] or y[perm[first]] != y[perm[i]]:
t += ((i - first) * (i - first - 1)) // 2
first = i
t += ((n - first) * (n - first - 1)) // 2
first = 0
u = 0
for i in xrange(1, n):
if x[perm[first]] != x[perm[i]]:
u += ((i - first) * (i - first - 1)) // 2
first = i
u += ((n - first) * (n - first - 1)) // 2
exchanges = mergesort(0, n)
first = 0
v = 0
for i in xrange(1, n):
if y[perm[first]] != y[perm[i]]:
v += ((i - first) * (i - first - 1)) // 2
first = i
v += ((n - first) * (n - first - 1)) // 2
tot = (n * (n - 1)) // 2
if tot == u or tot == v:
return np.nan, np.nan
denom = np.exp(0.5 * (np.log(tot - u) + np.log(tot - v)))
tau = ((tot - (v + u - t)) - 2.0 * exchanges) / denom
# stats.kendalltau() in SciPy
svar = (4.0 * n + 10.0) / (9.0 * n * (n - 1))
z = tau / np.sqrt(svar)
prob = special.erfc(np.abs(z) / 1.4142136)
return tau, prob
def linregress(x, y=None):
TINY = 1.0e-20
if y is None: # x is a (2, N) or (N, 2) shaped array_like
x = asarray(x)
if x.shape[0] == 2:
x, y = x
elif x.shape[1] == 2:
x, y = x.T
else:
msg = ("If only `x` is given as input, it has to be of shape "
"(2, N) or (N, 2), provided shape was %s" % str(x.shape))
raise ValueError(msg)
else:
x = asarray(x)
y = asarray(y)
n = len(x)
xmean = np.mean(x, None)
ymean = np.mean(y, None)
# average sum of squares:
ssxm, ssxym, ssyxm, ssym = np.cov(x, y, bias=1).flat
r_num = ssxym
r_den = np.sqrt(ssxm * ssym)
if r_den == 0.0:
r = 0.0
else:
r = r_num / r_den
# test for numerical error propagation
if r > 1.0:
r = 1.0
elif r < -1.0:
r = -1.0
df = n - 2
t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY)))
prob = 2 * distributions.t.sf(np.abs(t), df)
slope = r_num / ssxm
intercept = ymean - slope*xmean
sterrest = np.sqrt((1 - r**2) * ssym / ssxm / df)
return slope, intercept, r, prob, sterrest
def theilslopes(y, x=None, alpha=0.95):
y = np.asarray(y).flatten()
if x is None:
x = np.arange(len(y), dtype=float)
else:
x = np.asarray(x, dtype=float).flatten()
if len(x) != len(y):
raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y), len(x)))
# Compute sorted slopes only when deltax > 0
deltax = x[:, np.newaxis] - x
deltay = y[:, np.newaxis] - y
slopes = deltay[deltax > 0] / deltax[deltax > 0]
slopes.sort()
medslope = np.median(slopes)
medinter = np.median(y) - medslope * np.median(x)
# Now compute confidence intervals
if alpha > 0.5:
alpha = 1. - alpha
z = distributions.norm.ppf(alpha / 2.)
# This implements (2.6) from Sen (1968)
_, nxreps = find_repeats(x)
_, nyreps = find_repeats(y)
nt = len(slopes) # N in Sen (1968)
ny = len(y) # n in Sen (1968)
# Equation 2.6 in Sen (1968):
sigsq = 1/18. * (ny * (ny-1) * (2*ny+5) -
np.sum(k * (k-1) * (2*k + 5) for k in nxreps) -
np.sum(k * (k-1) * (2*k + 5) for k in nyreps))
# Find the confidence interval indices in `slopes`
sigma = np.sqrt(sigsq)
Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1)
Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0)
delta = slopes[[Rl, Ru]]
return medslope, medinter, delta[0], delta[1]
#####################################
# INFERENTIAL STATISTICS #
#####################################
def ttest_1samp(a, popmean, axis=0):
a, axis = _chk_asarray(a, axis)
n = a.shape[axis]
df = n - 1
d = np.mean(a, axis) - popmean
v = np.var(a, axis, ddof=1)
denom = np.sqrt(v / float(n))
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return t, prob
def _ttest_finish(df, t):
    """Common code between all 3 t-tests: two-sided p-value from |t| and df."""
    prob = distributions.t.sf(np.abs(t), df) * 2  # use np.abs to get upper tail
    # Unpack a 0-d array into a plain scalar for a cleaner return value.
    if t.ndim == 0:
        t = t[()]
    return t, prob
def _ttest_ind_from_stats(mean1, mean2, denom, df):
d = mean1 - mean2
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return t, prob
def _unequal_var_ttest_denom(v1, n1, v2, n2):
vn1 = v1 / n1
vn2 = v2 / n2
df = ((vn1 + vn2)**2) / ((vn1**2) / (n1 - 1) + (vn2**2) / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = np.where(np.isnan(df), 1, df)
denom = np.sqrt(vn1 + vn2)
return df, denom
def _equal_var_ttest_denom(v1, n1, v2, n2):
df = n1 + n2 - 2
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / float(df)
denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
return df, denom
def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
equal_var=True):
if equal_var:
df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2)
else:
df, denom = _unequal_var_ttest_denom(std1**2, nobs1,
std2**2, nobs2)
return _ttest_ind_from_stats(mean1, mean2, denom, df)
def ttest_ind(a, b, axis=0, equal_var=True):
a, b, axis = _chk2_asarray(a, b, axis)
if a.size == 0 or b.size == 0:
return (np.nan, np.nan)
v1 = np.var(a, axis, ddof=1)
v2 = np.var(b, axis, ddof=1)
n1 = a.shape[axis]
n2 = b.shape[axis]
if equal_var:
df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)
else:
df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)
return _ttest_ind_from_stats(np.mean(a, axis),
np.mean(b, axis),
denom, df)
def ttest_rel(a, b, axis=0):
a, b, axis = _chk2_asarray(a, b, axis)
if a.shape[axis] != b.shape[axis]:
raise ValueError('unequal length arrays')
if a.size == 0 or b.size == 0:
return np.nan, np.nan
n = a.shape[axis]
df = float(n - 1)
d = (a - b).astype(np.float64)
v = np.var(d, axis, ddof=1)
dm = np.mean(d, axis)
denom = np.sqrt(v / float(n))
t = np.divide(dm, denom)
t, prob = _ttest_finish(df, t)
return t, prob
def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='approx'):
if isinstance(rvs, string_types):
if (not cdf) or (cdf == rvs):
cdf = getattr(distributions, rvs).cdf
rvs = getattr(distributions, rvs).rvs
else:
raise AttributeError("if rvs is string, cdf has to be the "
"same distribution")
if isinstance(cdf, string_types):
cdf = getattr(distributions, cdf).cdf
if callable(rvs):
kwds = {'size': N}
vals = np.sort(rvs(*args, **kwds))
else:
vals = np.sort(rvs)
N = len(vals)
cdfvals = cdf(vals, *args)
# to not break compatibility with existing code
if alternative == 'two_sided':
alternative = 'two-sided'
if alternative in ['two-sided', 'greater']:
Dplus = (np.arange(1.0, N + 1)/N - cdfvals).max()
if alternative == 'greater':
return Dplus, distributions.ksone.sf(Dplus, N)
if alternative in ['two-sided', 'less']:
Dmin = (cdfvals - np.arange(0.0, N)/N).max()
if alternative == 'less':
return Dmin, distributions.ksone.sf(Dmin, N)
if alternative == 'two-sided':
D = np.max([Dplus, Dmin])
if mode == 'asymp':
return D, distributions.kstwobign.sf(D * np.sqrt(N))
if mode == 'approx':
pval_two = distributions.kstwobign.sf(D * np.sqrt(N))
if N > 2666 or pval_two > 0.80 - N*0.3/1000:
return D, distributions.kstwobign.sf(D * np.sqrt(N))
else:
return D, 2 * distributions.ksone.sf(D, N)
# Map from names to lambda_ values used in power_divergence().
# lambda_ selects the member of the power-divergence family of statistics
# that power_divergence() computes.
_power_div_lambda_names = {
    "pearson": 1,
    "log-likelihood": 0,
    "freeman-tukey": -0.5,
    "mod-log-likelihood": -1,
    "neyman": -2,
    "cressie-read": 2/3,
}
def _count(a, axis=None):
if hasattr(a, 'count'):
num = a.count(axis=axis)
if isinstance(num, np.ndarray) and num.ndim == 0:
# In some cases, the `count` method returns a scalar array (e.g.
# np.array(3)), but we want a plain integer.
num = int(num)
else:
if axis is None:
num = a.size
else:
num = a.shape[axis]
return num
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
    """Cressie-Read power-divergence goodness-of-fit test.

    `lambda_` selects the family member, either by name (a key of
    `_power_div_lambda_names`) or as a number; the default (None) is 1,
    i.e. Pearson's chi-squared.  When `f_exp` is omitted, the expected
    frequencies are the mean of `f_obs` along `axis`.  Returns the test
    statistic and a chi-squared p-value with ``num_obs - 1 - ddof``
    degrees of freedom.
    """
    # Convert the input argument `lambda_` to a numerical value.
    if isinstance(lambda_, string_types):
        if lambda_ not in _power_div_lambda_names:
            names = repr(list(_power_div_lambda_names.keys()))[1:-1]
            raise ValueError("invalid string for lambda_: {0!r}. Valid strings "
                             "are {1}".format(lambda_, names))
        lambda_ = _power_div_lambda_names[lambda_]
    elif lambda_ is None:
        lambda_ = 1
    f_obs = np.asanyarray(f_obs)
    if f_exp is not None:
        f_exp = np.atleast_1d(np.asanyarray(f_exp))
    else:
        # Compute the equivalent of
        #   f_exp = f_obs.mean(axis=axis, keepdims=True)
        # Older versions of numpy do not have the 'keepdims' argument, so
        # we have to do a little work to achieve the same result.
        # Ignore 'invalid' errors so the edge case of a data set with length 0
        # is handled without spurious warnings.
        with np.errstate(invalid='ignore'):
            f_exp = np.atleast_1d(f_obs.mean(axis=axis))
        if axis is not None:
            reduced_shape = list(f_obs.shape)
            reduced_shape[axis] = 1
            f_exp.shape = reduced_shape
    # `terms` is the array of terms that are summed along `axis` to create
    # the test statistic.  We use some specialized code for a few special
    # cases of lambda_.
    if lambda_ == 1:
        # Pearson's chi-squared statistic
        terms = (f_obs - f_exp)**2 / f_exp
    elif lambda_ == 0:
        # Log-likelihood ratio (G-test); xlogy(0, 0) == 0 by convention.
        terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
    elif lambda_ == -1:
        # Modified log-likelihood ratio.
        terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
    else:
        # General Cressie-Read power divergence.
        terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
        terms /= 0.5 * lambda_ * (lambda_ + 1)
    stat = terms.sum(axis=axis)
    num_obs = _count(terms, axis=axis)
    ddof = asarray(ddof)
    p = chisqprob(stat, num_obs - 1 - ddof)
    return stat, p
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
    """Pearson chi-squared goodness-of-fit test.

    Thin wrapper around power_divergence() with the Pearson statistic
    (lambda_ = "pearson").
    """
    options = dict(f_exp=f_exp, ddof=ddof, axis=axis, lambda_="pearson")
    return power_divergence(f_obs, **options)
def ks_2samp(data1, data2):
    """Two-sample Kolmogorov-Smirnov test.

    Computes the KS statistic D (the maximum distance between the two
    empirical CDFs) and an approximate two-sided p-value based on the
    asymptotic Kolmogorov distribution with a small-sample correction.

    Parameters
    ----------
    data1, data2 : array_like
        The two (unpaired) samples; lengths may differ.

    Returns
    -------
    d : float
        KS statistic.
    prob : float
        Approximate two-sided p-value (1.0 if its evaluation fails).
    """
    data1 = np.sort(data1)
    data2 = np.sort(data2)
    n1 = data1.shape[0]
    n2 = data2.shape[0]
    data_all = np.concatenate([data1, data2])
    # Empirical CDFs of both samples evaluated at every observed point.
    cdf1 = np.searchsorted(data1, data_all, side='right') / (1.0*n1)
    cdf2 = np.searchsorted(data2, data_all, side='right') / (1.0*n2)
    d = np.max(np.absolute(cdf1 - cdf2))
    # Effective sample size; the 0.12 + 0.11/en term is the usual
    # small-sample correction for the asymptotic distribution.
    en = np.sqrt(n1 * n2 / float(n1 + n2))
    try:
        prob = distributions.kstwobign.sf((en + 0.12 + 0.11 / en) * d)
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. Only failures inside `sf`
        # should fall back to the conservative p-value of 1.0.
        prob = 1.0
    return d, prob
def mannwhitneyu(x, y, use_continuity=True):
    """Mann-Whitney U rank test on independent samples `x` and `y`.

    Parameters
    ----------
    x, y : array_like
        The two samples.
    use_continuity : bool
        Apply the 0.5 continuity correction in the normal approximation.

    Returns
    -------
    u : float
        The smaller of the two U statistics.
    prob : float
        One-sided p-value from the normal approximation.

    Raises
    ------
    ValueError
        When every pooled value is identical (tie correction is zero).
    """
    x = asarray(x)
    y = asarray(y)
    n1 = len(x)
    n2 = len(y)
    ranked = rankdata(np.concatenate((x, y)))
    rankx = ranked[0:n1]  # ranks belonging to sample x
    u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx, axis=0)
    u2 = n1*n2 - u1  # the two U statistics always sum to n1*n2
    bigu = max(u1, u2)
    smallu = min(u1, u2)
    T = tiecorrect(ranked)
    if T == 0:
        # Bug fix: message previously read 'amannwhitneyu' (typo).
        raise ValueError('All numbers are identical in mannwhitneyu')
    sd = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0)
    # Normal approximation, optionally with a 0.5 continuity correction.
    if use_continuity:
        z = abs((bigu - 0.5 - n1*n2/2.0) / sd)
    else:
        z = abs((bigu - n1*n2/2.0) / sd)
    return smallu, distributions.norm.sf(z)
def ranksums(x, y):
    """Wilcoxon rank-sum test for two independent samples.

    Returns the z statistic of the rank sum of `x` under the normal
    approximation, and its two-sided p-value.
    """
    x, y = map(np.asarray, (x, y))
    n1, n2 = len(x), len(y)
    pooled_ranks = rankdata(np.concatenate((x, y)))
    rank_sum = np.sum(pooled_ranks[:n1], axis=0)
    # Mean and standard deviation of the rank sum under H0.
    mean_h0 = n1 * (n1+n2+1) / 2.0
    sd_h0 = np.sqrt(n1*n2*(n1+n2+1)/12.0)
    z = (rank_sum - mean_h0) / sd_h0
    return z, 2 * distributions.norm.sf(abs(z))
def kruskal(*args):
    """Kruskal-Wallis H-test for independent samples.

    Each positional argument is one group.  Returns the tie-corrected H
    statistic and its p-value from the chi-squared approximation with
    ``len(args) - 1`` degrees of freedom.  Requires at least 2 groups
    and at least one non-tied value.
    """
    args = list(map(np.asarray, args))
    na = len(args)  # number of groups
    if na < 2:
        raise ValueError("Need at least two groups in stats.kruskal()")
    n = np.asarray(list(map(len, args)))  # group sizes
    alldata = np.concatenate(args)
    ranked = rankdata(alldata)  # Rank the data
    ties = tiecorrect(ranked)   # Correct for ties
    if ties == 0:
        raise ValueError('All numbers are identical in kruskal')
    # Compute sum^2/n for each group and sum
    # j holds the start offset of each group inside the pooled ranks.
    j = np.insert(np.cumsum(n), 0, 0)
    ssbn = 0
    for i in range(na):
        ssbn += square_of_sums(ranked[j[i]:j[i+1]]) / float(n[i])
    totaln = np.sum(n)
    h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
    df = na - 1
    h /= ties  # apply the tie correction
    return h, chisqprob(h, df)
def friedmanchisquare(*args):
    """Friedman chi-squared test for repeated measurements.

    Each positional argument is one treatment level; all must have the
    same length n.  Returns the tie-corrected chi-squared statistic and
    its p-value with k - 1 degrees of freedom (k = number of levels).
    Requires at least 3 levels.
    """
    k = len(args)
    if k < 3:
        raise ValueError('\nLess than 3 levels. Friedman test not appropriate.\n')
    n = len(args[0])
    for i in range(1, k):
        if len(args[i]) != n:
            raise ValueError('Unequal N in friedmanchisquare. Aborting.')
    # Rank data
    # Rows of `data` are measurement blocks; rank within each block.
    data = np.vstack(args).T
    data = data.astype(float)
    for i in range(len(data)):
        data[i] = rankdata(data[i])
    # Handle ties
    # Accumulate sum of t*(t^2-1) over every group of tied ranks.
    ties = 0
    for i in range(len(data)):
        replist, repnum = find_repeats(array(data[i]))
        for t in repnum:
            ties += t * (t*t - 1)
    # Tie-correction factor applied to the chi-squared statistic.
    c = 1 - ties / float(k*(k*k - 1)*n)
    ssbn = np.sum(data.sum(axis=0)**2)
    chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c
    return chisq, chisqprob(chisq, k - 1)
def combine_pvalues(pvalues, method='fisher', weights=None):
    """Combine p-values from independent tests into one statistic.

    Parameters
    ----------
    pvalues : array_like, 1-D
        The p-values to combine.
    method : {'fisher', 'stouffer'}
        Fisher's method (-2*sum(log p), chi-squared reference) or
        Stouffer's (optionally weighted) Z-score method.
    weights : array_like, 1-D, optional
        Weights for Stouffer's method; defaults to equal weights.

    Returns
    -------
    statistic, pval : float
        The combined statistic and its p-value.

    Raises
    ------
    ValueError
        If the inputs are not 1-D, the sizes mismatch, or `method` is
        not recognized.
    """
    pvalues = np.asarray(pvalues)
    if pvalues.ndim != 1:
        raise ValueError("pvalues is not 1-D")

    if method == 'fisher':
        Xsq = -2 * np.sum(np.log(pvalues))
        pval = distributions.chi2.sf(Xsq, 2 * len(pvalues))
        return (Xsq, pval)
    elif method == 'stouffer':
        if weights is None:
            weights = np.ones_like(pvalues)
        elif len(weights) != len(pvalues):
            raise ValueError("pvalues and weights must be of the same size.")

        weights = np.asarray(weights)
        if weights.ndim != 1:
            raise ValueError("weights is not 1-D")

        Zi = distributions.norm.isf(pvalues)
        Z = np.dot(weights, Zi) / np.linalg.norm(weights)
        pval = distributions.norm.sf(Z)
        return (Z, pval)
    else:
        # Bug fix: the message was passed as ValueError("...%s...", method),
        # i.e. two exception args with the placeholder never substituted.
        # Format the method name into the message explicitly.
        raise ValueError(
            "Invalid method '%s'. Options are 'fisher' or 'stouffer'" % method)
#####################################
# PROBABILITY CALCULATIONS #
#####################################
def chisqprob(chisq, df):
    """Survival function of the chi-squared distribution.

    Probability that a chi-squared variate with `df` degrees of freedom
    is at least `chisq`.
    """
    # chdtrc(df, x) is the complemented chi-squared CDF (1 - CDF at x).
    return special.chdtrc(df, chisq)
def betai(a, b, x):
    """Regularized incomplete beta function I_x(a, b).

    Any `x` greater than 1 is treated as exactly 1 (so the result is
    1.0 there).
    """
    x_arr = np.asarray(x)
    x_arr = np.where(x_arr < 1.0, x_arr, 1.0)  # treat x > 1 as 1.0
    return special.betainc(a, b, x_arr)
#####################################
# ANOVA CALCULATIONS #
#####################################
def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):
    """F-statistic derived from Wilks' lambda.

    `ER`/`EF` are the restricted/full error matrices (scalars are
    promoted to 1x1 matrices); `a` and `b` are factor level counts.
    `dfnum` and `dfden` are accepted for signature compatibility.
    """
    if isinstance(ER, (int, float)):
        ER = array([[ER]])
    if isinstance(EF, (int, float)):
        EF = array([[EF]])
    lmbda = linalg.det(EF) / linalg.det(ER)
    # q degenerates to 1 exactly when (a-1)^2 + (b-1)^2 == 5
    # (that case would divide by zero in the general formula).
    if (a - 1) ** 2 + (b - 1) ** 2 == 5:
        q = 1
    else:
        q = np.sqrt(((a - 1) ** 2 * (b - 1) ** 2 - 2) /
                    ((a - 1) ** 2 + (b - 1) ** 2 - 5))
    lam_root = lmbda ** (1.0 / q)
    numerator = (1 - lam_root) * (a - 1) * (b - 1)
    denominator = lam_root / (numerator * q - 0.5 * (a - 1) * (b - 1) + 1)
    return numerator / denominator
def f_value(ER, EF, dfR, dfF):
    """F-statistic for a restricted vs. full model comparison.

    ER/EF are error sums of squares of the restricted and full models,
    dfR/dfF the corresponding degrees of freedom.
    """
    mean_sq_diff = (ER - EF) / float(dfR - dfF)
    mean_sq_full = EF / float(dfF)
    return mean_sq_diff / mean_sq_full
def f_value_multivariate(ER, EF, dfnum, dfden):
    """F-statistic from determinants of error matrices.

    Scalars are promoted to 1x1 matrices before the determinants are
    taken.
    """
    if isinstance(ER, (int, float)):
        ER = array([[ER]])
    if isinstance(EF, (int, float)):
        EF = array([[EF]])
    numerator = (linalg.det(ER) - linalg.det(EF)) / float(dfnum)
    denominator = linalg.det(EF) / float(dfden)
    return numerator / denominator
#####################################
# SUPPORT FUNCTIONS #
#####################################
def ss(a, axis=0):
    """Sum of squares of the elements of `a` along `axis`."""
    a, axis = _chk_asarray(a, axis)
    return np.sum(np.square(a), axis)
def square_of_sums(a, axis=0):
    """Square of the sum of the elements of `a` along `axis`.

    The sum is cast to float before squaring; scalar results come back
    as plain floats, array results as float arrays.
    """
    a, axis = _chk_asarray(a, axis)
    total = np.sum(a, axis)
    if np.isscalar(total):
        return float(total) * total
    return total.astype(float) * total
@np.deprecate(message="scipy.stats.fastsort is deprecated in scipy 0.16.0")
def fastsort(a):
    """Sort `a` and also return the permutation that sorts it.

    Returns (sorted_array, argsort_indices).
    """
    order = np.argsort(a)
    return a[order], order
| true | true |
f724da7af2704b4ffec5878bcac55c4bb2e57d18 | 4,446 | py | Python | models/experimental/mnist_keras_ds/mnist.py | cs-gn/tpu | fadb409b8dae2385191050aa5c901d9084d8bb8c | [
"Apache-2.0"
] | 1 | 2020-08-27T18:52:09.000Z | 2020-08-27T18:52:09.000Z | models/experimental/mnist_keras_ds/mnist.py | omar16100/tpu | 4727594874e8587a60cb088627d46f73a1769823 | [
"Apache-2.0"
] | null | null | null | models/experimental/mnist_keras_ds/mnist.py | omar16100/tpu | 4727594874e8587a60cb088627d46f73a1769823 | [
"Apache-2.0"
] | 1 | 2019-03-25T07:50:04.000Z | 2019-03-25T07:50:04.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Experimental Keras MNIST Example.
To test on CPU:
python mnist.py --use_tpu=False [--fake_data=true]
To test on TPU:
python mnist.py --use_tpu=True [--tpu=$TPU_NAME]
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Standard Imports
from absl import app
from absl import flags
import numpy as np
import tensorflow as tf
# Command-line flags: hardware target, summary directory, fake-data mode.
flags.DEFINE_bool('use_tpu', True, 'Use TPU model instead of CPU.')
flags.DEFINE_string('tpu', None, 'Name of the TPU to use.')
flags.DEFINE_string(
    'model_dir', None,
    ('The directory where the model and training/evaluation summaries '
     'are stored. If unset, no summaries will be stored.'))
flags.DEFINE_bool('fake_data', False, 'Use fake data to test functionality.')
# Batch size should satisfy two properties to be able to run in cloud:
#   num_eval_samples % batch_size == 0
#   batch_size % 8 == 0
BATCH_SIZE = 200
NUM_CLASSES = 10
EPOCHS = 15
# input image dimensions (MNIST digits are 28x28)
IMG_ROWS, IMG_COLS = 28, 28
FLAGS = flags.FLAGS
def mnist_model(input_shape):
    """Build the small convnet used for MNIST classification."""
    layers = tf.keras.layers
    # Two conv layers, pooling, dropout, then a small dense classifier.
    model = tf.keras.models.Sequential([
        layers.Conv2D(32, kernel_size=(3, 3), activation='relu',
                      input_shape=input_shape),
        layers.Conv2D(64, (3, 3), activation='relu'),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Dropout(0.25),
        layers.Flatten(),
        layers.Dense(128, activation='relu'),
        layers.Dropout(0.5),
        layers.Dense(NUM_CLASSES, activation='softmax'),
    ])
    return model
def run():
    """Run the model training and return evaluation output.

    Trains for EPOCHS epochs on real MNIST data, or on one random batch
    when --fake_data is set, and returns the result of Keras
    ``evaluate`` on the test set.
    """
    use_tpu = FLAGS.use_tpu
    strategy = None
    if use_tpu:
        # Distribute training across TPU cores (TF 1.x contrib API).
        strategy = tf.contrib.distribute.TPUStrategy(
            tf.contrib.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu),
            steps_per_run=100)
    print('Mode:', 'TPU' if use_tpu else 'CPU')
    if FLAGS.fake_data:
        print('Using fake data')
        # One random batch, reused as both training and test data.
        x_train = np.random.random((BATCH_SIZE, IMG_ROWS, IMG_COLS))
        y_train = np.zeros([BATCH_SIZE, 1], dtype=np.int32)
        x_test, y_test = x_train, y_train
    else:
        # the data, split between train and test sets
        print('Using real data')
        (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    # Add the single channel dimension expected by Conv2D.
    x_train = x_train.reshape(x_train.shape[0], IMG_ROWS, IMG_COLS, 1)
    x_test = x_test.reshape(x_test.shape[0], IMG_ROWS, IMG_COLS, 1)
    input_shape = (IMG_ROWS, IMG_COLS, 1)
    # Normalize pixel values to [0, 1].
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')
    # convert class vectors to binary class matrices
    y_train = tf.keras.utils.to_categorical(y_train, NUM_CLASSES)
    y_test = tf.keras.utils.to_categorical(y_test, NUM_CLASSES)
    model = mnist_model(input_shape)
    model.compile(
        loss=tf.keras.losses.categorical_crossentropy,
        optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.05),
        metrics=['accuracy'],
        distribute=strategy)
    callbacks = []
    if FLAGS.model_dir:
        # Write TensorBoard summaries when a model_dir is provided.
        callbacks = [tf.keras.callbacks.TensorBoard(log_dir=FLAGS.model_dir)]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        callbacks=callbacks,
        epochs=EPOCHS,
        verbose=1,
        validation_data=(x_test, y_test))
    return model.evaluate(x_test, y_test, batch_size=BATCH_SIZE, verbose=1)
def main(unused_dev):
    """Entry point used by absl.app: train, evaluate, print metrics."""
    results = run()
    print('Loss for final step: %s;' % results[0])
    print('Accuracy: %s;' % results[1])
# Script entry point: enable INFO logging and hand control to absl.
if __name__ == '__main__':
    tf.logging.set_verbosity(tf.logging.INFO)
    app.run(main)
| 31.531915 | 80 | 0.703554 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import numpy as np
import tensorflow as tf
flags.DEFINE_bool('use_tpu', True, 'Use TPU model instead of CPU.')
flags.DEFINE_string('tpu', None, 'Name of the TPU to use.')
flags.DEFINE_string(
'model_dir', None,
('The directory where the model and training/evaluation summaries '
'are stored. If unset, no summaries will be stored.'))
flags.DEFINE_bool('fake_data', False, 'Use fake data to test functionality.')
BATCH_SIZE = 200
NUM_CLASSES = 10
EPOCHS = 15
IMG_ROWS, IMG_COLS = 28, 28
FLAGS = flags.FLAGS
def mnist_model(input_shape):
model = tf.keras.models.Sequential()
model.add(
tf.keras.layers.Conv2D(
32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Dropout(0.25))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(NUM_CLASSES, activation='softmax'))
return model
def run():
use_tpu = FLAGS.use_tpu
strategy = None
if use_tpu:
strategy = tf.contrib.distribute.TPUStrategy(
tf.contrib.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu),
steps_per_run=100)
print('Mode:', 'TPU' if use_tpu else 'CPU')
if FLAGS.fake_data:
print('Using fake data')
x_train = np.random.random((BATCH_SIZE, IMG_ROWS, IMG_COLS))
y_train = np.zeros([BATCH_SIZE, 1], dtype=np.int32)
x_test, y_test = x_train, y_train
else:
print('Using real data')
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], IMG_ROWS, IMG_COLS, 1)
x_test = x_test.reshape(x_test.shape[0], IMG_ROWS, IMG_COLS, 1)
input_shape = (IMG_ROWS, IMG_COLS, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
y_train = tf.keras.utils.to_categorical(y_train, NUM_CLASSES)
y_test = tf.keras.utils.to_categorical(y_test, NUM_CLASSES)
model = mnist_model(input_shape)
model.compile(
loss=tf.keras.losses.categorical_crossentropy,
optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.05),
metrics=['accuracy'],
distribute=strategy)
callbacks = []
if FLAGS.model_dir:
callbacks = [tf.keras.callbacks.TensorBoard(log_dir=FLAGS.model_dir)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
callbacks=callbacks,
epochs=EPOCHS,
verbose=1,
validation_data=(x_test, y_test))
return model.evaluate(x_test, y_test, batch_size=BATCH_SIZE, verbose=1)
def main(unused_dev):
score = run()
print('Loss for final step: %s;' % score[0])
print('Accuracy: %s;' % score[1])
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
| true | true |
f724dabdf285c5d14bf55e9bd7e21f067f7b0934 | 403 | py | Python | award_project/wsgi.py | Esther-Anyona/Developer-Awards | 64030da79cc1ed993b1bc4420725b2a996be84da | [
"MIT"
] | null | null | null | award_project/wsgi.py | Esther-Anyona/Developer-Awards | 64030da79cc1ed993b1bc4420725b2a996be84da | [
"MIT"
] | null | null | null | award_project/wsgi.py | Esther-Anyona/Developer-Awards | 64030da79cc1ed993b1bc4420725b2a996be84da | [
"MIT"
] | null | null | null | """
WSGI config for award_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before the app is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'award_project.settings')
# Module-level WSGI callable that application servers import and serve.
application = get_wsgi_application()
| 23.705882 | 78 | 0.791563 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'award_project.settings')
application = get_wsgi_application()
| true | true |
f724db24b2380e8d19e2fcdab914785ada4e9c4a | 854 | py | Python | buildlib/helpers/events.py | ForwardLine/backup-nanny | 67c687f43d732c60ab2e569e50bc40cc5e696b25 | [
"Apache-2.0"
] | 1 | 2019-11-13T04:15:41.000Z | 2019-11-13T04:15:41.000Z | buildlib/helpers/events.py | ForwardLine/backup-nanny | 67c687f43d732c60ab2e569e50bc40cc5e696b25 | [
"Apache-2.0"
] | null | null | null | buildlib/helpers/events.py | ForwardLine/backup-nanny | 67c687f43d732c60ab2e569e50bc40cc5e696b25 | [
"Apache-2.0"
] | 1 | 2019-10-25T21:24:20.000Z | 2019-10-25T21:24:20.000Z | import logging
from troposphere.events import Rule, Target
from buildlib.helpers.client_helper import ClientHelper
class EventsHelper(object):
    """Helper for adding CloudWatch Events resources to a troposphere
    template."""

    def __init__(self, template, project, session=None):
        """Bind the helper to a template/project and create an events client."""
        self.client = ClientHelper.get_client('events', session)
        self.project = project
        self.template = template

    def create_cron_rule(self, schedule_expression, targets, state='ENABLED', name_prefix='', **kwargs):
        """Add a scheduled events Rule to the template and return it."""
        rule = Rule(
            '{0}Rule'.format(name_prefix),
            State=state,
            Targets=targets,
            ScheduleExpression=schedule_expression,
            **kwargs
        )
        return self.template.add_resource(rule)

    def create_target(self, arn, target_id, name_prefix=''):
        """Build (but do not register) a Target for an events Rule."""
        return Target(
            '{0}Target'.format(name_prefix),
            Arn=arn,
            Id=target_id
        )
| 29.448276 | 104 | 0.637002 | import logging
from troposphere.events import Rule, Target
from buildlib.helpers.client_helper import ClientHelper
class EventsHelper(object):
def __init__(self, template, project, session=None):
self.client = ClientHelper.get_client('events', session)
self.project = project
self.template = template
def create_cron_rule(self, schedule_expression, targets, state='ENABLED', name_prefix='', **kwargs):
return self.template.add_resource(Rule(
'{0}Rule'.format(name_prefix),
State=state,
Targets=targets,
ScheduleExpression=schedule_expression,
**kwargs
))
def create_target(self, arn, target_id, name_prefix=''):
return Target(
'{0}Target'.format(name_prefix),
Arn=arn,
Id=target_id
)
| true | true |
f724db442a0f5748c892e969a3bc7eed6d4c5a14 | 16,050 | py | Python | pybvc/netconfdev/vrouter/interfaces.py | brocade/pybvc | 316e8cb79ecbeb3670276afd43286e57897bc8ba | [
"BSD-3-Clause"
] | 1 | 2015-11-22T15:53:00.000Z | 2015-11-22T15:53:00.000Z | pybvc/netconfdev/vrouter/interfaces.py | brocade/pybvc | 316e8cb79ecbeb3670276afd43286e57897bc8ba | [
"BSD-3-Clause"
] | null | null | null | pybvc/netconfdev/vrouter/interfaces.py | brocade/pybvc | 316e8cb79ecbeb3670276afd43286e57897bc8ba | [
"BSD-3-Clause"
] | null | null | null | """
Copyright (c) 2015
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES;LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
firewall.py: Firewall specific properties and access methods
"""
import json
from pybvc.common.utils import strip_none, remove_empty_from_dict, dict_keys_underscored_to_dashed
#-------------------------------------------------------------------------------
# Class 'DataPlaneInterface'
#-------------------------------------------------------------------------------
class DataPlaneInterface():
    """Configuration model for a vRouter dataplane interface."""

    def __init__(self, name):
        """Create the interface model with all options unset."""
        self.tagnode = name              # dataplane interface name
        self.description = None          # free-form description
        self.dhcpv6_options = None       # DHCPv6 options (container)
        self.ip = None                   # IPv4 parameters (container)
        self.ipv6 = None                 # IPv6 parameters (container)
        self.mtu = None                  # Maximum Transmission Unit
        self.disable = None              # "" when disabled, None otherwise
        self.vif = []                    # Virtual Interface (VIF) IDs
        self.sflow = None                # "" when sflow enabled, None otherwise
        self.address = []                # assigned IP addresses
        self.mac = None                  # Media Access Control address
        self.disable_link_detect = None  # "" to ignore link state changes
        self.bridge_group = None         # bridge group (container)

    def to_string(self):
        """Render this object's attributes as a string."""
        return str(vars(self))

    def to_json(self):
        """Render this object as pretty-printed JSON."""
        return json.dumps(self, default=lambda o: o.__dict__,
                          sort_keys=True, indent=4)

    def set_description(self, description):
        """Set the interface description."""
        self.description = description

    def set_dhcpv6_options(self, TBD):
        """Not implemented yet (placeholder)."""
        pass

    def set_ipv4_options(self, TBD):
        """Not implemented yet (placeholder)."""
        pass

    def set_ipv6_options(self, TBD):
        """Not implemented yet (placeholder)."""
        pass

    def set_mtu(self, mtu):
        """Set the Maximum Transmission Unit."""
        self.mtu = mtu

    def set_disable(self, value):
        """Disable (value == True) or re-enable the interface."""
        self.disable = "" if value == True else None

    def set_vif(self, vif_id):
        """Append a Virtual Interface (VIF) ID."""
        self.vif.append(vif_id)

    def set_sflow(self, value):
        """Enable (value == True) or disable sflow for the interface."""
        self.sflow = "" if value == True else None

    def set_address(self, address):
        """Append an IP address to the interface."""
        self.address.append(address)

    def set_mac(self, mac):
        """Set the MAC address."""
        self.mac = mac

    def set_disable_link_detect(self, value):
        """Ignore (value == True) or honor link state changes."""
        self.disable_link_detect = "" if value == True else None

    def set_bridge_group(self, TBD):
        """Not implemented yet (placeholder)."""
        pass
#-------------------------------------------------------------------------------
# Class 'OpenVpnInterface'
#-------------------------------------------------------------------------------
class OpenVpnInterface():
    """Configuration model for an OpenVPN tunnel interface."""

    # Module/container names used when building the REST payload.
    _mn1 = "vyatta-interfaces:interfaces"
    _mn2 = "vyatta-interfaces-openvpn:openvpn"

    def __init__(self, name):
        """Create the tunnel model with all options unset."""
        self.tagnode = name         # OpenVPN tunnel interface name
        self.description = None     # free-form description
        self.auth = None            # authentication method (container)
        self.hash = None            # hashing algorithm ('md5'...'sha512')
        self.disable = None         # interface disabled when set
        self.server = None          # server-mode options (container)
        self.device_type = None     # OpenVPN interface device-type
        self.shared_secret_key_file = None  # pre-shared secret key file
        self.encryption = None      # data encryption algorithm
        self.openvpn_option = []    # additional raw OpenVPN options
        self.local_address = None   # local IP or network address
        self.local_port = None      # local port (1..65535)
        self.local_host = None      # local IP to accept connections on
        self.remote_address = None  # IP of remote end of tunnel
        self.remote_port = None     # remote port to connect to
        self.remote_host = []       # remote hosts to connect to
        self.tls = TlsOptions()     # Transport Layer Security options
        self.mode = None            # 'site-to-site', 'client' or 'server'
        self.replace_default_route = None  # use tunnel as default route
        self.protocol = None        # 'udp', 'tcp-passive' or 'tcp-active'
        self.ip = None              # IPv4 parameters (container)
        self.ipv6 = None            # IPv6 parameters (container)

    def to_string(self):
        """Render this object's attributes as a string."""
        return str(vars(self))

    def to_json(self):
        """Render this object as pretty-printed JSON."""
        return json.dumps(self, default=lambda o: o.__dict__,
                          sort_keys=True, indent=4)

    def get_payload(self):
        """Build the JSON payload for an HTTP request.

        Drops unset/empty attributes and converts underscored keys to
        the dashed form used by the YANG model, then wraps the result
        in the interfaces/openvpn containers.
        """
        obj = json.loads(self.to_json())
        obj = strip_none(obj)
        obj = remove_empty_from_dict(obj)
        obj = dict_keys_underscored_to_dashed(obj)
        payload = {self._mn1: {self._mn2: [obj]}}
        return json.dumps(payload, default=lambda o: o.__dict__,
                          sort_keys=True, indent=4)

    def set_description(self, description):
        """Set the interface description."""
        self.description = description

    def set_mode(self, mode):
        """Set the OpenVPN mode of operation."""
        self.mode = mode

    def set_shared_secret_key_file(self, path):
        """Set the path of the shared secret key file."""
        self.shared_secret_key_file = path

    def set_local_address(self, addr):
        """Set the local IP or network address."""
        self.local_address = addr

    def set_remote_address(self, addr):
        """Set the IP address of the remote end of the tunnel."""
        self.remote_address = addr

    def set_remote_host(self, addr):
        """Append a remote host to connect to."""
        self.remote_host.append(addr)

    def set_tls_role(self, role):
        """Set the TLS negotiation role ('active' or 'passive')."""
        self.tls.set_role(role)

    def set_tls_dh_file(self, path):
        """Set the Diffie-Hellman parameters file (server only)."""
        self.tls.set_dh_file(path)

    def set_tls_ca_cert_file(self, path):
        """Set the Certificate Authority certificate file."""
        self.tls.set_ca_cert_file(path)

    def set_tls_cert_file(self, path):
        """Set this host's certificate file."""
        self.tls.set_cert_file(path)

    def set_tls_crl_file(self, path):
        """Set the certificate revocation list (CRL) file."""
        self.tls.set_crl_file(path)

    def set_tls_key_file(self, path):
        """Set this host's private key file."""
        self.tls.set_key_file(path)
#-------------------------------------------------------------------------------
# Class 'TlsOptions'
#-------------------------------------------------------------------------------
class TlsOptions():
    """Transport Layer Security (TLS) options.

    Helper class of the 'OpenVpnInterface' class.
    """

    def __init__(self):
        """Create the options holder with every field unset."""
        self.role = None          # 'active' or 'passive'
        self.dh_file = None       # Diffie-Hellman parameters file (server)
        self.ca_cert_file = None  # Certificate Authority certificate file
        self.cert_file = None     # this host's certificate file
        self.crl_file = None      # certificate revocation list file
        self.key_file = None      # this host's private key file

    def set_role(self, role):
        """Set the role in TLS negotiation ('active' or 'passive')."""
        self.role = role

    def set_dh_file(self, path):
        """Set the Diffie-Hellman parameters file path."""
        self.dh_file = path

    def set_ca_cert_file(self, path):
        """Set the CA certificate file path."""
        self.ca_cert_file = path

    def set_cert_file(self, path):
        """Set this host's certificate file path."""
        self.cert_file = path

    def set_crl_file(self, path):
        """Set the certificate revocation list file path."""
        self.crl_file = path

    def set_key_file(self, path):
        """Set this host's private key file path."""
        self.key_file = path
#-------------------------------------------------------------------------------
# Class 'VirtualTunnelInterface'
#-------------------------------------------------------------------------------
class VirtualTunnelInterface():
    """Configuration model for a virtual tunnel interface (VTI)."""

    def __init__(self, name):
        """Create the VTI model with all options unset."""
        self.tagnode = name      # virtual tunnel interface name
        self.description = None  # free-form description
        self.mtu = None          # MTU, range 68..9000
        self.disable = None      # "" when disabled, None otherwise
        self.address = []        # IPv4 or IPv6 prefixes
        self.ip = None           # IPv4 parameters
        self.ipv6 = None         # IPv6 parameters

    def to_string(self):
        """Render this object's attributes as a string."""
        return str(vars(self))

    def to_json(self):
        """Render this object as pretty-printed JSON."""
        return json.dumps(self, default=lambda o: o.__dict__,
                          sort_keys=True, indent=4)

    def set_description(self, description):
        """Set the interface description."""
        self.description = description

    def set_mtu(self, mtu):
        """Set the Maximum Transmission Unit (68..9000)."""
        self.mtu = mtu

    def set_disable(self, value):
        """Disable (value == True) or re-enable the interface."""
        self.disable = "" if value == True else None

    def set_address(self, address):
        """Append an IPv4/IPv6 prefix to the interface."""
        self.address.append(address)
| 35.666667 | 98 | 0.406417 |
import json
from pybvc.common.utils import strip_none, remove_empty_from_dict, dict_keys_underscored_to_dashed
class DataPlaneInterface():
def __init__(self, name):
self.tagnode = name
self.description = None
self.dhcpv6_options = None
self.ip = None
self.ipv6 = None
self.mtu = None
self.disable = None
self.vif = []
self.sflow = None
self.address = []
self.mac = None
self.disable_link_detect = None
self.bridge_group = None
def to_string(self):
return str(vars(self))
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
def set_description(self, description):
self.description = description
def set_dhcpv6_options(self, TBD):
pass
def set_ipv4_options(self, TBD):
pass
def set_ipv6_options(self, TBD):
pass
def set_mtu(self, mtu):
self.mtu = mtu
def set_disable(self, value):
if (value == True):
self.disable = ""
else:
self.disable = None
def set_vif(self, vif_id):
self.vif.append(vif_id)
def set_sflow(self, value):
if (value == True):
self.sflow = ""
else:
self.sflow = None
def set_address(self, address):
self.address.append(address)
def set_mac(self, mac):
self.mac = mac
def set_disable_link_detect(self, value):
if (value == True):
self.disable_link_detect = ""
else:
self.disable_link_detect = None
def set_bridge_group(self, TBD):
pass
class OpenVpnInterface():
_mn1 = "vyatta-interfaces:interfaces"
_mn2 = "vyatta-interfaces-openvpn:openvpn"
def __init__(self, name):
self.tagnode = name
self.description = None
self.auth = None
self.hash = None
self.disable = None
self.server = None
self.device_type = None
self.shared_secret_key_file = None
self.encryption = None
self.openvpn_option = []
self.local_address = None
self.local_port = None
self.local_host = None
self.remote_address = None
self.remote_port = None
self.remote_host = []
self.tls = TlsOptions()
self.mode = None
self.replace_default_route = None
self.protocol = None
self.ip = None
self.ipv6 = None
def to_string(self):
return str(vars(self))
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
def get_payload(self):
s = self.to_json()
obj = json.loads(s)
obj1 = strip_none(obj)
obj2 = remove_empty_from_dict(obj1)
obj3 = dict_keys_underscored_to_dashed(obj2)
payload = {self._mn1: {self._mn2:[obj3]}}
return json.dumps(payload, default=lambda o: o.__dict__, sort_keys=True, indent=4)
def set_description(self, description):
self.description = description
def set_mode(self, mode):
self.mode = mode
def set_shared_secret_key_file(self, path):
self.shared_secret_key_file = path
def set_local_address(self, addr):
self.local_address = addr
def set_remote_address(self, addr):
self.remote_address = addr
def set_remote_host(self, addr):
self.remote_host.append(addr)
def set_tls_role(self, role):
self.tls.set_role(role)
def set_tls_dh_file(self, path):
self.tls.set_dh_file(path)
def set_tls_ca_cert_file(self, path):
self.tls.set_ca_cert_file(path)
def set_tls_cert_file(self, path):
self.tls.set_cert_file(path)
def set_tls_crl_file(self, path):
self.tls.set_crl_file(path)
def set_tls_key_file(self, path):
self.tls.set_key_file(path)
class TlsOptions():
def __init__(self):
self.role = None
self.dh_file = None
self.ca_cert_file = None
self.cert_file = None
self.crl_file = None
self.key_file = None
def set_role(self, role):
self.role = role
def set_dh_file(self, path):
self.dh_file = path
def set_ca_cert_file(self, path):
self.ca_cert_file = path
def set_cert_file(self, path):
self.cert_file = path
def set_crl_file(self, path):
self.crl_file = path
def set_key_file(self, path):
self.key_file = path
class VirtualTunnelInterface():
def __init__(self, name):
self.tagnode = name
self.description = None
self.mtu = None
self.disable = None
self.address = []
self.ip = None
self.ipv6 = None
def to_string(self):
return str(vars(self))
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
def set_description(self, description):
self.description = description
def set_mtu(self, mtu):
self.mtu = mtu
def set_disable(self, value):
if (value == True):
self.disable = ""
else:
self.disable = None
def set_address(self, address):
self.address.append(address)
| true | true |
f724dbc632ab957d93fb0b05c7dd5db1e521ac4b | 1,048 | py | Python | RosViewer.py | MikeHallettUK/RosRobotics | 953486cfd042d6adec1edaf425243eac0f473571 | [
"CC0-1.0"
] | null | null | null | RosViewer.py | MikeHallettUK/RosRobotics | 953486cfd042d6adec1edaf425243eac0f473571 | [
"CC0-1.0"
] | null | null | null | RosViewer.py | MikeHallettUK/RosRobotics | 953486cfd042d6adec1edaf425243eac0f473571 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
# RosViewer.py = node that listens to a ROS image message topic,
# and displays the image using OpenCV.
import rospy
import cv2
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
class image_viewer: # "/camera/color/image_raw" or "/camera/color/video"
    """Subscribes to a ROS image topic and displays each frame with OpenCV.

    Pressing 'q' in the video window closes it and shuts the node down.
    """
    def __init__(self):
        # Converter between ROS sensor_msgs/Image and OpenCV images.
        self.bridge = CvBridge()
        # queue_size=1 drops stale frames instead of lagging behind;
        # buff_size is raised to 16 MiB so large frames are not truncated.
        self.image_sub = rospy.Subscriber("/camera/color/image_raw", Image, self.ros_cb, queue_size=1, buff_size=2 ** 24)
    def ros_cb(self,msg):
        """Image-topic callback: render the frame and handle the quit key."""
        cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8") # in msg.data as "rgb8" but is "bgr8" from RS camera ??
        cv2.imshow("Ros video", cv_image)
        key = cv2.waitKey(10) # in milliseconds
        if key == 113: # 113 is the letter 'q'
            cv2.destroyAllWindows()
            rospy.signal_shutdown("Quitting")
print("Starting Ros video image_viewer v1.2 ; press q to quit in video-window.")
rospy.init_node('image_viewer', anonymous=True)
iv = image_viewer()
rospy.spin()
print("Finished")
| 36.137931 | 117 | 0.676527 |
import rospy
import cv2
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
class image_viewer:
def __init__(self):
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("/camera/color/image_raw", Image, self.ros_cb, queue_size=1, buff_size=2 ** 24)
def ros_cb(self,msg):
cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
cv2.imshow("Ros video", cv_image)
key = cv2.waitKey(10)
if key == 113:
cv2.destroyAllWindows()
rospy.signal_shutdown("Quitting")
print("Starting Ros video image_viewer v1.2 ; press q to quit in video-window.")
rospy.init_node('image_viewer', anonymous=True)
iv = image_viewer()
rospy.spin()
print("Finished")
| true | true |
f724dd1a39a7f175e46aa6568a64a8dd26d6775b | 251 | py | Python | temboo/core/Library/OneLogin/Roles/__init__.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | 7 | 2016-03-07T02:07:21.000Z | 2022-01-21T02:22:41.000Z | temboo/core/Library/OneLogin/Roles/__init__.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | null | null | null | temboo/core/Library/OneLogin/Roles/__init__.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | 8 | 2016-06-14T06:01:11.000Z | 2020-04-22T09:21:44.000Z | from temboo.Library.OneLogin.Roles.ListAll import ListAll, ListAllInputSet, ListAllResultSet, ListAllChoreographyExecution
from temboo.Library.OneLogin.Roles.ShowRole import ShowRole, ShowRoleInputSet, ShowRoleResultSet, ShowRoleChoreographyExecution
| 83.666667 | 127 | 0.888446 | from temboo.Library.OneLogin.Roles.ListAll import ListAll, ListAllInputSet, ListAllResultSet, ListAllChoreographyExecution
from temboo.Library.OneLogin.Roles.ShowRole import ShowRole, ShowRoleInputSet, ShowRoleResultSet, ShowRoleChoreographyExecution
| true | true |
f724dd6c5a854504a4b01aac06593f75753a45b0 | 4,911 | py | Python | ttt.py | JotaGo/tic-tac-toe | 237288f84bf388c219f6b5cf6cbae6334bfddb26 | [
"MIT"
] | null | null | null | ttt.py | JotaGo/tic-tac-toe | 237288f84bf388c219f6b5cf6cbae6334bfddb26 | [
"MIT"
] | null | null | null | ttt.py | JotaGo/tic-tac-toe | 237288f84bf388c219f6b5cf6cbae6334bfddb26 | [
"MIT"
] | null | null | null | import random
#GLOBAL VARIABLE
ttt = [[1,2,3],[4,5,6],[7,8,9]]
#PRINTING THE BOARD FUNCTION
def printing():
    """Print the global board ``ttt`` to stdout as a 3x3 grid."""
    print()
    for row_number, row in enumerate(ttt):
        # Horizontal rule between rows, but not above the first one.
        if row_number > 0:
            print('---------')
        print(' | '.join(str(cell) for cell in row))
    print()
#RESET THE BOARD
## WITH THIS FUNCTION THE USER CAN RESET BOARD TO PLAY AGAIN
## THIS FUNCTION WORKS FILLING THE LIST IN ORDER FROM ONE TO NINE
def reset_board():
    """Refill the global board ``ttt`` with its initial cell numbers 1..9.

    In the original, the counter increment appears to sit inside the
    ``!= cnt`` guard, so a cell that already held its index value would stall
    the numbering for the rest of the board.  Renumbering every cell
    unconditionally is always correct and simpler.
    """
    counter = 1
    for row in range(3):
        for col in range(3):
            ttt[row][col] = counter
            counter += 1
def reset_game():
    """Ask the player whether to play another round.

    Returns True (after resetting the board) on 'y', False on 'n';
    any other input re-prompts.  Input is case-insensitive.
    """
    print()
    while True:
        user_o = input('Do you want to play again? (Y/n)\n')
        if user_o.lower() == 'y':
            # Fresh 1..9 board for the next round.
            reset_board()
            return True
        elif user_o.lower() == 'n':
            return False
        else:
            print()
            print('please enter a valid option')
#WINNING METHODS
##THIS FUNCTION WILL DETECT IF ARE A MATCH OF THREE X OR O IN A ROW
def winning_row():
    """Check each row of the global board ``ttt`` for three identical marks.

    Returns 'you win' for a row of 'x', 'you lose' for a row of 'o',
    and False when no row is complete.
    """
    for row in ttt:
        first = row[0]
        # A row of unplayed cells holds distinct numbers, so "all equal"
        # can only happen for three identical player marks.
        if all(cell == first for cell in row):
            if first == 'x':
                return 'you win'
            if first == 'o':
                return 'you lose'
    return False
##THIS FUNCTION WILL DETECT IF ARE A MATCH OF THREE X OR O IN A COLUMN
def winning_column():
    """Check each column of the global board ``ttt`` for three identical marks.

    Returns the result of win_declaretion() ('you win' / 'you lose') for the
    first fully matched column, or False when no column is complete.

    Bug fix: the original never reset its pair counter ``cnt`` after a column
    mismatch, so one matching pair in each of two different columns could be
    mis-reported as a three-in-a-row win.
    """
    for col in range(3):
        if ttt[0][col] == ttt[1][col] == ttt[2][col]:
            # All three cells equal; any row index identifies the mark.
            return win_declaretion(0, col)
    return False
##THIS FUNCTION WILL DETECT IF ARE A MATCH OF THREE X OR O IN A DIAGONAL
def winning_diagonal():
    """Check both diagonals of the global board ``ttt`` for three identical marks.

    Returns the result of win_declaretion() for a matched diagonal,
    or False when neither diagonal is complete.
    """
    # Main diagonal: (0,0) (1,1) (2,2)
    if ttt[0][0] == ttt[1][1] == ttt[2][2]:
        return win_declaretion(0, 0)
    # Anti-diagonal: (0,2) (1,1) (2,0)
    if ttt[0][2] == ttt[1][1] == ttt[2][0]:
        return win_declaretion(0, 2)
    return False
###THIS FUNCTION IS TO AVOID REPEATING THE SAME CONSULT IN ALL OF THE WINNING METHODS
def win_declaretion(nav1, nav2):
    """Translate the mark at ttt[nav1][nav2] into a game-result message.

    Returns 'you win' for the player's 'x', 'you lose' for the CPU's 'o',
    and None (implicitly) for anything else.  The misspelled name
    ("declaretion") is kept because the sibling win-check functions call it.
    """
    mark = ttt[nav1][nav2]
    if mark == 'x':
        return 'you win'
    if mark == 'o':
        return 'you lose'
#USER OPTION
def selection(opt):
    """Try to claim board cell number ``opt`` for the player.

    Writes 'x' into the cell of the global board ``ttt`` that still holds
    that number and returns True; returns False when no cell holds it
    (the spot is already taken, or ``opt`` is not a valid cell number).
    """
    for row in range(3):
        for col in range(3):
            if ttt[row][col] == opt:
                ttt[row][col] = 'x'
                return True
    return False
#THIS FUNCTION WILL SELECT RANDOMLY A OPTION FOR THE CPU
##WITHOUT THE METHODS OF WINNING IN THE MAIN FUNCTION THE GAME WILL CRASH
##BECAUSE AT THE END IT WILL ENTER IN A INFINITE LOOP LOOKING FOR A AVAILABLE SPOT
def cpu_option():
    """Claim a random free cell for the CPU.

    Draws cell numbers until one still present on the global board ``ttt``
    is found, replaces it with 'o' and returns True.

    NOTE: this spins forever on a full board, so the caller must check for
    a finished game before invoking it (the main loop's win checks do this).
    """
    while True:
        target = random.randint(1, 9)
        for row in range(3):
            for col in range(3):
                if ttt[row][col] == target:
                    ttt[row][col] = 'o'
                    return True
def end_game(final):
    """Print the closing message for a decided game.

    Returns True when ``final`` is a terminal result ('you win' or
    'you lose'); returns None (implicitly) for any other value.
    """
    closing_messages = {
        'you win': 'congratulations you win!',
        'you lose': 'how sad, you lose :(',
    }
    if final in closing_messages:
        print(closing_messages[final])
        return True
if __name__ == "__main__":
on = True
flag = False
while on:
printing()
option = int(input('Select a spot of the board: '))
while not selection(option):
print('that spot is occupied')
printing()
option = int(input('Select a spot of the board: '))
if not flag:
flag = winning_row()
if not flag:
flag = winning_column()
if not flag:
flag = winning_diagonal()
if flag:
printing()
end_game(flag)
on = reset_game()
if on:
flag = False
cpu_option()
if not flag:
flag = winning_row()
if not flag:
flag = winning_column()
if not flag:
flag = winning_diagonal()
if flag:
printing()
end_game(flag)
on = reset_game()
if on:
flag = False | 27.283333 | 85 | 0.476074 | import random
ttt = [[1,2,3],[4,5,6],[7,8,9]]
def printing():
print()
for i , j in enumerate(ttt):
if i > 0:
print('---------')
print(j[0],'|',j[1],'|',j[2])
print()
nav2] != cnt:
ttt[nav1][nav2] = cnt
cnt += 1
nav2 +=1
nav2 = 0
nav1 +=1
def reset_game():
print()
while True:
user_o = input('Do you want to play again? (Y/n)\n')
if user_o.lower() == 'y':
reset_board()
return True
elif user_o.lower() == 'n':
return False
else:
print()
print('please enter a valid option')
= i[0]
for j in i:
if aux == j:
cnt += 1
if cnt == 3 and aux == 'x':
return 'you win'
elif cnt == 3 and aux == 'o':
return 'you lose'
return False
v2 < 3:
while nav1 < 2:
if ttt[nav1][nav2] == ttt[nav1 + 1][nav2]:
nav1 += 1
cnt += 1
if cnt == 2:
return win_declaretion(nav1,nav2)
else:
nav1 = 0
break
nav2 += 1
return False
nd nav2 < 2:
if ttt[nav1][nav2] == ttt[nav1 + 1][nav2 + 1]:
cnt += 1
nav1 += 1
nav2 += 1
if cnt == 2:
return win_declaretion(nav1,nav2)
else:
cnt = 0
nav1 = 0
nav2 = len(ttt[nav1]) - 1
break
while True:
if ttt[nav1][nav2] == ttt[nav1 + 1][nav2 - 1]:
cnt += 1
nav1 += 1
nav2 -= 1
if cnt == 2:
return win_declaretion(nav1,nav2)
else:
break
return False
):
nav1 , nav2 = 0 , 0
while nav1 < 3:
while nav2 < 3:
if opt == ttt[nav1][nav2]:
ttt[nav1][nav2] = 'x'
find = True
return find
else:
find = False
nav2 += 1
nav2 = 0
nav1 += 1
return find
if cpu_opt == ttt[nav1][nav2]:
ttt[nav1][nav2] = 'o'
find = True
return find
nav2 += 1
nav2 = 0
nav1 += 1
def end_game(final):
if final == 'you win':
print('congratulations you win!')
return True
elif final == 'you lose':
print('how sad, you lose :(')
return True
if __name__ == "__main__":
on = True
flag = False
while on:
printing()
option = int(input('Select a spot of the board: '))
while not selection(option):
print('that spot is occupied')
printing()
option = int(input('Select a spot of the board: '))
if not flag:
flag = winning_row()
if not flag:
flag = winning_column()
if not flag:
flag = winning_diagonal()
if flag:
printing()
end_game(flag)
on = reset_game()
if on:
flag = False
cpu_option()
if not flag:
flag = winning_row()
if not flag:
flag = winning_column()
if not flag:
flag = winning_diagonal()
if flag:
printing()
end_game(flag)
on = reset_game()
if on:
flag = False | true | true |
f724dd876dd86bd7229b96394df79995ae66159a | 2,035 | py | Python | test/TestShellWithoutPipefail.py | chilicheech/ansible-lint | 57d4d3346179bb4142aeb7218dbf5f91befcab72 | [
"MIT"
] | null | null | null | test/TestShellWithoutPipefail.py | chilicheech/ansible-lint | 57d4d3346179bb4142aeb7218dbf5f91befcab72 | [
"MIT"
] | null | null | null | test/TestShellWithoutPipefail.py | chilicheech/ansible-lint | 57d4d3346179bb4142aeb7218dbf5f91befcab72 | [
"MIT"
] | null | null | null | # pylint: disable=preferred-module # FIXME: remove once migrated per GH-725
import unittest
from ansiblelint.rules import RulesCollection
from ansiblelint.rules.ShellWithoutPipefail import ShellWithoutPipefail
from ansiblelint.testing import RunFromText
FAIL_TASKS = '''
---
- hosts: localhost
become: no
tasks:
- name: pipeline without pipefail
shell: false | cat
- name: pipeline with or and pipe, no pipefail
shell: false || true | cat
- shell: |
df | grep '/dev'
'''
SUCCESS_TASKS = '''
---
- hosts: localhost
become: no
tasks:
- name: pipeline with pipefail
shell: set -o pipefail && false | cat
- name: pipeline with pipefail, multi-line
shell: |
set -o pipefail
false | cat
- name: pipeline with pipefail, complex set
shell: |
set -e -x -o pipefail
false | cat
- name: pipeline with pipefail, complex set
shell: |
set -e -x -o pipefail
false | cat
- name: pipeline with pipefail, complex set
shell: |
set -eo pipefail
false | cat
- name: pipeline without pipefail, ignoring errors
shell: false | cat
ignore_errors: true
- name: non-pipeline without pipefail
shell: "true"
- name: command without pipefail
command: "true"
- name: shell with or
shell:
false || true
- shell: |
set -o pipefail
df | grep '/dev'
- name: should not fail due to ignore_errors being true
shell: false | cat
ignore_errors: true
'''
class TestShellWithoutPipeFail(unittest.TestCase):
    """Exercises the ShellWithoutPipefail ansible-lint rule against the
    FAIL_TASKS / SUCCESS_TASKS playbook snippets defined above."""

    # Shared rule collection containing only the rule under test.
    collection = RulesCollection()
    collection.register(ShellWithoutPipefail())

    def setUp(self):
        # Fresh text-based playbook runner for each test.
        self.runner = RunFromText(self.collection)

    def test_fail(self):
        """Each piped shell task lacking pipefail must be flagged (3 expected)."""
        results = self.runner.run_playbook(FAIL_TASKS)
        self.assertEqual(3, len(results))

    def test_success(self):
        """Tasks with pipefail, ignore_errors, or no pipeline must not be flagged."""
        results = self.runner.run_playbook(SUCCESS_TASKS)
        self.assertEqual(0, len(results))
| 22.865169 | 76 | 0.633907 | import RulesCollection
from ansiblelint.rules.ShellWithoutPipefail import ShellWithoutPipefail
from ansiblelint.testing import RunFromText
FAIL_TASKS = '''
---
- hosts: localhost
become: no
tasks:
- name: pipeline without pipefail
shell: false | cat
- name: pipeline with or and pipe, no pipefail
shell: false || true | cat
- shell: |
df | grep '/dev'
'''
SUCCESS_TASKS = '''
---
- hosts: localhost
become: no
tasks:
- name: pipeline with pipefail
shell: set -o pipefail && false | cat
- name: pipeline with pipefail, multi-line
shell: |
set -o pipefail
false | cat
- name: pipeline with pipefail, complex set
shell: |
set -e -x -o pipefail
false | cat
- name: pipeline with pipefail, complex set
shell: |
set -e -x -o pipefail
false | cat
- name: pipeline with pipefail, complex set
shell: |
set -eo pipefail
false | cat
- name: pipeline without pipefail, ignoring errors
shell: false | cat
ignore_errors: true
- name: non-pipeline without pipefail
shell: "true"
- name: command without pipefail
command: "true"
- name: shell with or
shell:
false || true
- shell: |
set -o pipefail
df | grep '/dev'
- name: should not fail due to ignore_errors being true
shell: false | cat
ignore_errors: true
'''
class TestShellWithoutPipeFail(unittest.TestCase):
collection = RulesCollection()
collection.register(ShellWithoutPipefail())
def setUp(self):
self.runner = RunFromText(self.collection)
def test_fail(self):
results = self.runner.run_playbook(FAIL_TASKS)
self.assertEqual(3, len(results))
def test_success(self):
results = self.runner.run_playbook(SUCCESS_TASKS)
self.assertEqual(0, len(results))
| true | true |
f724de43aa9b83eb0afc55ae9f946720ab6db30a | 38,330 | py | Python | tests/system/robot/chromeTests.py | krzysz00/nvda | d34444242a529098499131165a3e60d5a05ac96f | [
"bzip2-1.0.6"
] | 1,592 | 2015-11-10T12:05:44.000Z | 2022-03-31T11:50:40.000Z | tests/system/robot/chromeTests.py | krzysz00/nvda | d34444242a529098499131165a3e60d5a05ac96f | [
"bzip2-1.0.6"
] | 9,479 | 2015-11-10T20:56:48.000Z | 2022-03-31T23:51:30.000Z | tests/system/robot/chromeTests.py | TheQuinbox/nvda | 9c7b763a2428b43802758a3859de8708cefcd4a0 | [
"bzip2-1.0.6"
] | 682 | 2015-11-10T11:19:23.000Z | 2022-03-31T07:51:29.000Z | # A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2020-2021 NV Access Limited, Leonard de Ruijter
# This file may be used under the terms of the GNU General Public License, version 2 or later.
# For more details see: https://www.gnu.org/licenses/gpl-2.0.html
"""Logic for NVDA + Google Chrome tests
"""
import os
from robot.libraries.BuiltIn import BuiltIn
# imported methods start with underscore (_) so they don't get imported into robot files as keywords
from SystemTestSpy import (
_getLib,
)
# Imported for type information
from ChromeLib import ChromeLib as _ChromeLib
from AssertsLib import AssertsLib as _AssertsLib
import NvdaLib as _NvdaLib
_builtIn: BuiltIn = BuiltIn()
_chrome: _ChromeLib = _getLib("ChromeLib")
_asserts: _AssertsLib = _getLib("AssertsLib")
#: Double space is used to separate semantics in speech output this typically
# adds a slight pause to the synthesizer.
SPEECH_SEP = " "
SPEECH_CALL_SEP = '\n'
#: single space is used to separate semantics in braille output.
BRAILLE_SEP = " "
ARIAExamplesDir = os.path.join(
_NvdaLib._locations.repoRoot, "include", "w3c-aria-practices", "examples"
)
def checkbox_labelled_by_inner_element():
    """Regression test: a checkbox whose accessible name comes from an
    aria-labelledby reference to one of its own children must not have
    that name spoken twice when it gains focus.
    """
    _chrome.prepareChrome(
        r"""
        <div tabindex="0" role="checkbox" aria-labelledby="inner-label">
            <div style="display:inline" id="inner-label">
                Simulate evil cat
            </div>
        </div>
        """
    )
    # The label text is also part of the content, so a naive implementation
    # spoke "Simulate evil cat Simulate evil cat check box not checked".
    expected = "Simulate evil cat check box not checked"
    actualSpeech = _chrome.getSpeechAfterTab()
    _asserts.strings_match(actualSpeech, expected)
def test_mark_aria_details():
_chrome.prepareChrome(
"""
<div>
<p>The word <mark aria-details="cat-details">cat</mark> has a comment tied to it.</p>
<div id="cat-details" role="comment">
Cats go woof BTW<br>—Jonathon Commentor
<div role="comment">
No they don't<br>—Zara
</div>
<div role="form">
<textarea cols="80" placeholder="Add reply..."></textarea>
<input type="submit">
</div>
</div>
</div>
"""
)
actualSpeech = _chrome.getSpeechAfterKey('downArrow')
_asserts.strings_match(
actualSpeech,
"The word highlighted has details cat out of highlighted has a comment tied to it."
)
# this word has no details attached
actualSpeech = _chrome.getSpeechAfterKey("control+rightArrow")
_asserts.strings_match(
actualSpeech,
"word"
)
# check that there is no summary reported
actualSpeech = _chrome.getSpeechAfterKey("NVDA+\\")
_asserts.strings_match(
actualSpeech,
"No additional details"
)
# this word has details attached to it
actualSpeech = _chrome.getSpeechAfterKey("control+rightArrow")
_asserts.strings_match(
actualSpeech,
"highlighted has details cat out of highlighted"
)
# read the details summary
actualSpeech = _chrome.getSpeechAfterKey("NVDA+\\")
_asserts.strings_match(
actualSpeech,
"Cats go woof BTW Jonathon Commentor No they don't Zara Submit"
)
def announce_list_item_when_moving_by_word_or_character():
_chrome.prepareChrome(
r"""
<div contenteditable="true">
<p>Before list</p>
<ul style="list-style-type:none">
<li>small cat</li>
<li>big dog</li>
</ul>
</div>
"""
)
# Force focus mode
actualSpeech = _chrome.getSpeechAfterKey("NVDA+space")
_asserts.strings_match(
actualSpeech,
"Focus mode"
)
# Tab into the contenteditable
actualSpeech = _chrome.getSpeechAfterKey("tab")
_asserts.strings_match(
actualSpeech,
"section multi line editable Before list"
)
# Ensure that moving into a list by line, "list item" is not reported.
actualSpeech = _chrome.getSpeechAfterKey("downArrow")
_asserts.strings_match(
actualSpeech,
"list small cat"
)
# Ensure that when moving by word (control+rightArrow)
# within the list item, "list item" is not announced.
actualSpeech = _chrome.getSpeechAfterKey("control+rightArrow")
_asserts.strings_match(
actualSpeech,
"cat"
)
# Ensure that when moving by character (rightArrow)
# within the list item, "list item" is not announced.
actualSpeech = _chrome.getSpeechAfterKey("rightArrow")
_asserts.strings_match(
actualSpeech,
"a"
)
# move to the end of the line (and therefore the list item)
actualSpeech = _chrome.getSpeechAfterKey("end")
_asserts.strings_match(
actualSpeech,
"blank"
)
# Ensure that when moving by character (rightArrow)
# onto the next list item, "list item" is reported.
actualSpeech = _chrome.getSpeechAfterKey("rightArrow")
_asserts.strings_match(
actualSpeech,
SPEECH_CALL_SEP.join([
"list item level 1",
"b"
])
)
# Ensure that when moving by character (leftArrow)
# onto the previous list item, "list item" is reported.
# Note this places us on the end-of-line insertion point of the previous list item.
actualSpeech = _chrome.getSpeechAfterKey("leftArrow")
_asserts.strings_match(
actualSpeech,
"list item level 1"
)
# Ensure that when moving by word (control+rightArrow)
# onto the next list item, "list item" is reported.
actualSpeech = _chrome.getSpeechAfterKey("control+rightArrow")
_asserts.strings_match(
actualSpeech,
"list item level 1 big"
)
# Ensure that when moving by word (control+leftArrow)
# onto the previous list item, "list item" is reported.
# Note this places us on the end-of-line insertion point of the previous list item.
actualSpeech = _chrome.getSpeechAfterKey("control+leftArrow")
_asserts.strings_match(
actualSpeech,
"list item level 1"
)
def test_i7562():
    """ List should not be announced on every line of a ul in a contenteditable """
    _chrome.prepareChrome(
        r"""
        <div contenteditable="true">
            <p>before</p>
            <ul>
                <li>frogs</li>
                <li>birds</li>
            </ul>
            <p>after</p>
        </div>
        """
    )
    # Force focus mode
    actualSpeech = _chrome.getSpeechAfterKey("NVDA+space")
    _asserts.strings_match(
        actualSpeech,
        "Focus mode"
    )
    # Tab into the contenteditable
    actualSpeech = _chrome.getSpeechAfterKey("tab")
    _asserts.strings_match(
        actualSpeech,
        "section multi line editable before"
    )
    # DownArrow into the list. 'list' should be announced when entering.
    actualSpeech = _chrome.getSpeechAfterKey("downArrow")
    _asserts.strings_match(
        actualSpeech,
        "list bullet frogs"
    )
    # DownArrow to the second list item. 'list' should not be announced again.
    actualSpeech = _chrome.getSpeechAfterKey("downArrow")
    _asserts.strings_match(
        actualSpeech,
        "bullet birds"
    )
    # DownArrow out of the list. 'out of list' should be announced.
    actualSpeech = _chrome.getSpeechAfterKey("downArrow")
    _asserts.strings_match(
        actualSpeech,
        "out of list after",
    )
def test_pr11606():
"""
Announce the correct line when placed at the end of a link at the end of a list item in a contenteditable
"""
_chrome.prepareChrome(
r"""
<div contenteditable="true">
<ul>
<li><a href="#">A</a> <a href="#">B</a></li>
<li>C D</li>
</ul>
</div>
"""
)
# Force focus mode
actualSpeech = _chrome.getSpeechAfterKey("NVDA+space")
_asserts.strings_match(
actualSpeech,
"Focus mode"
)
# Tab into the contenteditable
actualSpeech = _chrome.getSpeechAfterKey("tab")
_asserts.strings_match(
actualSpeech,
"section multi line editable list bullet link A link B"
)
# move past the end of the first link.
# This should not be affected due to pr #11606.
actualSpeech = _chrome.getSpeechAfterKey("rightArrow")
_asserts.strings_match(
actualSpeech,
SPEECH_CALL_SEP.join([
"out of link",
"space"
])
)
# Move to the end of the line (which is also the end of the second link)
# Before pr #11606 this would have announced the bullet on the next line.
actualSpeech = _chrome.getSpeechAfterKey("end")
_asserts.strings_match(
actualSpeech,
"link"
)
# Read the current line.
# Before pr #11606 the next line ("C D") would have been read.
actualSpeech = _chrome.getSpeechAfterKey("NVDA+upArrow")
_asserts.strings_match(
actualSpeech,
"bullet link A link B"
)
def test_ariaTreeGrid_browseMode():
"""
Ensure that ARIA treegrids are accessible as a standard table in browse mode.
"""
testFile = os.path.join(ARIAExamplesDir, "treegrid", "treegrid-1.html")
_chrome.prepareChrome(
f"""
<iframe src="{testFile}"></iframe>
"""
)
# Jump to the first heading in the iframe.
actualSpeech = _chrome.getSpeechAfterKey("h")
_asserts.strings_match(
actualSpeech,
"frame main landmark Treegrid Email Inbox Example heading level 1"
)
# Tab to the first link.
# This ensures that focus is totally within the iframe
# so as to not cause focus to hit the iframe's document
# when entering focus mode on the treegrid later.
actualSpeech = _chrome.getSpeechAfterKey("tab")
_asserts.strings_match(
actualSpeech,
"issue 790. link"
)
# Jump to the ARIA treegrid with the next table quicknav command.
# The browse mode caret will be inside the table on the caption before the first row.
actualSpeech = _chrome.getSpeechAfterKey("t")
_asserts.strings_match(
actualSpeech,
"Inbox table clickable with 5 rows and 3 columns Inbox"
)
# Move past the caption onto row 1 with downArrow
actualSpeech = _chrome.getSpeechAfterKey("downArrow")
_asserts.strings_match(
actualSpeech,
"row 1 column 1 Subject"
)
# Navigate to row 2 column 1 with NVDA table navigation command
actualSpeech = _chrome.getSpeechAfterKey("control+alt+downArrow")
_asserts.strings_match(
actualSpeech,
"expanded level 1 row 2 Treegrids are awesome"
)
# Press enter to activate NVDA focus mode and focus the current row
actualSpeech = _chrome.getSpeechAfterKey("enter")
_asserts.strings_match(
actualSpeech,
SPEECH_CALL_SEP.join([
# focus mode turns on
"Focus mode",
# Focus enters the ARIA treegrid (table)
"Inbox table",
# Focus lands on row 2
"level 1 Treegrids are awesome Want to learn how to use them? aaron at thegoogle dot rocks expanded",
])
)
def ARIAInvalid_spellingAndGrammar():
    """
    Tests ARIA invalid values of "spelling", "grammar" and "spelling, grammar".
    Please note that although IAccessible2 allows multiple values for invalid,
    multiple values to aria-invalid is not yet standard.
    And even if it were, they would be separated by space, not comma
    thus the html for this test would need to change,
    but the expected output shouldn't need to.
    """
    _chrome.prepareChrome(
        r"""
        <p>Big <span aria-invalid="spelling">caat</span> meos</p>
        <p>Small <span aria-invalid="grammar">a dog</span> woofs</p>
        <p>Fat <span aria-invalid="grammar, spelling">a ffrog</span> crokes</p>
        """
    )
    # One downArrow per paragraph; each error type must be reported inline.
    expectedLines = [
        "Big spelling error caat meos",
        "Small grammar error a dog woofs",
        "Fat spelling error grammar error a ffrog crokes",
    ]
    for expected in expectedLines:
        actualSpeech = _chrome.getSpeechAfterKey("downArrow")
        _asserts.strings_match(actualSpeech, expected)
def test_ariaCheckbox_browseMode():
"""
Navigate to an unchecked checkbox in reading mode.
"""
testFile = os.path.join(ARIAExamplesDir, "checkbox", "checkbox-1", "checkbox-1.html")
_chrome.prepareChrome(
f"""
<iframe src="{testFile}"></iframe>
"""
)
# Jump to the first heading in the iframe.
actualSpeech = _chrome.getSpeechAfterKey("h")
_asserts.strings_match(
actualSpeech,
"frame main landmark Checkbox Example (Two State) heading level 1"
)
# Navigate to the checkbox.
actualSpeech = _chrome.getSpeechAfterKey("x")
_asserts.strings_match(
actualSpeech,
"Sandwich Condiments grouping list with 4 items Lettuce check box not checked"
)
def test_i12147():
    """
    New focus target should be announced if the triggering element is removed when activated.
    """
    _chrome.prepareChrome(
        f"""
        <div>
          <button id='trigger0'>trigger 0</button>
          <h4 id='target0' tabindex='-1'>target 0</h4>
        </div>
        <script>
        let trigger0 = document.querySelector('#trigger0');
        trigger0.addEventListener('click', e => {{
          let focusTarget = document.querySelector('#target0');
          trigger0.remove();
          focusTarget.focus();
        }})
        </script>
        """
    )
    # Tab to the trigger button.
    _asserts.strings_match(
        _chrome.getSpeechAfterKey("tab"),
        "trigger 0 button"
    )
    # Activating it removes the button from the DOM and programmatically
    # focuses the heading; NVDA must announce the new focus target.
    _asserts.strings_match(
        _chrome.getSpeechAfterKey("enter"),
        "target 0 heading level 4"
    )
def test_tableInStyleDisplayTable():
"""
Chrome treats nodes with `style="display: table"` as tables.
When a HTML style table is positioned in such a node, NVDA was previously unable to announce
table row and column count as well as provide table navigation for the inner table.
"""
_chrome.prepareChrome(
"""
<p>Paragraph</p>
<div style="display:table">
<table>
<thead>
<tr>
<th>First heading</th>
<th>Second heading</th>
</tr>
</thead>
<tbody>
<tr>
<td>First content cell</td>
<td>Second content cell</td>
</tr>
</tbody>
</table>
</div>
"""
)
# Jump to the table
actualSpeech = _chrome.getSpeechAfterKey("t")
_asserts.strings_match(
actualSpeech,
"table with 2 rows and 2 columns row 1 column 1 First heading"
)
nextActualSpeech = _chrome.getSpeechAfterKey("control+alt+downArrow")
_asserts.strings_match(
nextActualSpeech,
"row 2 First content cell"
)
def test_ariaRoleDescription_focus():
    """
    NVDA should report the custom role (aria-roledescription) of an object on focus.
    """
    _chrome.prepareChrome(
        """
        <button aria-roledescription="pizza">Cheese</button><br />
        <button aria-roledescription="pizza">Meat</button>
        """
    )
    # First button is reached while still in browse mode.
    _asserts.strings_match(
        _chrome.getSpeechAfterKey("tab"),
        "Cheese pizza"
    )
    # Force focus mode so the second button is reported from a focus event.
    _asserts.strings_match(
        _chrome.getSpeechAfterKey("NVDA+space"),
        "Focus mode"
    )
    _asserts.strings_match(
        _chrome.getSpeechAfterKey("tab"),
        "Meat pizza"
    )
def test_ariaRoleDescription_inline_browseMode():
"""
NVDA should report the custom role for inline elements in browse mode.
"""
_chrome.prepareChrome(
"""
<p>Start
<img aria-roledescription="drawing" alt="Our logo" src="https://www.nvaccess.org/images/logo.png" />
End</p>
"""
)
# When reading the entire line,
# entering the custom role should be reported,
# but not exiting
actualSpeech = _chrome.getSpeechAfterKey("downArrow")
_asserts.strings_match(
actualSpeech,
"Start drawing Our logo End"
)
# When reading the line by word,
# Both entering and exiting the custom role should be reported.
actualSpeech = _chrome.getSpeechAfterKey("control+rightArrow")
_asserts.strings_match(
actualSpeech,
"drawing Our"
)
actualSpeech = _chrome.getSpeechAfterKey("control+rightArrow")
_asserts.strings_match(
actualSpeech,
"logo out of drawing"
)
actualSpeech = _chrome.getSpeechAfterKey("control+rightArrow")
_asserts.strings_match(
actualSpeech,
"End"
)
def test_ariaRoleDescription_block_browseMode():
    """The custom role of a block element should be reported at its start and end in browse mode."""
    _chrome.prepareChrome(
        """
        <aside aria-roledescription="warning">
            <p>Wet paint!</p>
            <p>Please be careful.</p>
        </aside>
        <p>End</p>
        """
    )
    # Line reading announces both entering and leaving the custom role.
    spoken = _chrome.getSpeechAfterKey("downArrow")
    _asserts.strings_match(spoken, "warning Wet paint!")
    spoken = _chrome.getSpeechAfterKey("downArrow")
    _asserts.strings_match(spoken, "Please be careful.")
    spoken = _chrome.getSpeechAfterKey("downArrow")
    _asserts.strings_match(spoken, "out of warning End")
def test_ariaRoleDescription_inline_contentEditable():
    """The custom role of an inline element should be reported inside a contenteditable."""
    _chrome.prepareChrome(
        """
        <div contenteditable="true">
            <p>Top line</p>
            <p>Start
            <img aria-roledescription="drawing" alt="Our logo" src="https://www.nvaccess.org/images/logo.png" />
            End</p>
        </div>
        """
    )
    # Force focus mode, then enter the editable.
    spoken = _chrome.getSpeechAfterKey("NVDA+space")
    _asserts.strings_match(spoken, "Focus mode")
    spoken = _chrome.getSpeechAfterKey("tab")
    _asserts.strings_match(spoken, "section multi line editable Top line")
    # Whole-line reading: entering the custom role is announced, exiting is not.
    spoken = _chrome.getSpeechAfterKey("downArrow")
    _asserts.strings_match(spoken, "Start drawing Our logo End")
    # Word-by-word reading: both entering and exiting are announced.
    spoken = _chrome.getSpeechAfterKey("control+rightArrow")
    _asserts.strings_match(spoken, "drawing Our logo out of drawing")
    spoken = _chrome.getSpeechAfterKey("control+rightArrow")
    _asserts.strings_match(spoken, "End")
def test_ariaRoleDescription_block_contentEditable():
    """The custom role of a block element should be reported at start and end inside a contenteditable."""
    _chrome.prepareChrome(
        """
        <div contenteditable="true">
            <p>Top line</p>
            <aside aria-roledescription="warning">
                <p>Wet paint!</p>
                <p>Please be careful.</p>
            </aside>
            <p>End</p>
        </div>
        """
    )
    # Force focus mode, then enter the editable.
    spoken = _chrome.getSpeechAfterKey("NVDA+space")
    _asserts.strings_match(spoken, "Focus mode")
    spoken = _chrome.getSpeechAfterKey("tab")
    _asserts.strings_match(spoken, "section multi line editable Top line")
    # Line reading announces both entering and leaving the custom role.
    spoken = _chrome.getSpeechAfterKey("downArrow")
    _asserts.strings_match(spoken, "warning Wet paint!")
    spoken = _chrome.getSpeechAfterKey("downArrow")
    _asserts.strings_match(spoken, "Please be careful.")
    spoken = _chrome.getSpeechAfterKey("downArrow")
    _asserts.strings_match(spoken, "out of warning End")
def _getAriaDescriptionSample() -> str:
annotation = "User nearby, Aaron"
linkDescription = "opens in a new tab"
# link title should be read in focus
linkTitle = "conduct a search"
linkContents = "to google's"
return f"""
<div>
<div
contenteditable=""
spellcheck="false"
role="textbox"
aria-multiline="true"
><p>This is a line with no annotation</p>
<p><span
aria-description="{annotation}"
>Here is a sentence that is being edited by someone else.</span>
<b>Multiple can edit this.</b></p>
<p>An element with a role, follow <a
href="www.google.com"
aria-description="{linkDescription}"
>{linkContents}</a
> website</p>
<p>Testing the title attribute, <a
href="www.google.com"
title="{linkTitle}"
>{linkContents}</a
> website</p>
</div>
</div>
"""
def test_ariaDescription_focusMode():
    """aria-description should be spoken in focus mode.

    Relevant settings (both default True): speech.reportObjectDescriptions,
    annotations.reportAriaDescription.
    """
    _chrome.prepareChrome(_getAriaDescriptionSample())
    # Tabbing into the contenteditable auto-switches to focus mode.
    spoken = _chrome.getSpeechAfterKey("tab")
    _asserts.strings_match(spoken, "edit multi line This is a line with no annotation\nFocus mode")
    # aria-description reporting requires Chrome canary 92.0.4479.0+ (description-from).
    spoken = _chrome.getSpeechAfterKey('downArrow')
    _asserts.strings_match(
        spoken,
        SPEECH_SEP.join([
            "User nearby, Aaron",  # annotation
            "Here is a sentence that is being edited by someone else.",
            "Multiple can edit this.",
        ])
    )
    spoken = _chrome.getSpeechAfterKey('downArrow')
    _asserts.strings_match(
        spoken,
        SPEECH_SEP.join([
            "An element with a role, follow",
            "link",
            "opens in a new tab",  # link description
            "to google's",  # link name
            "website",
        ])
    )
    # The link 'title' ("conduct a search") must NOT be announced: titles are
    # routinely authored without screen reader users in mind and are verbose.
    spoken = _chrome.getSpeechAfterKey('downArrow')
    _asserts.strings_match(
        spoken,
        SPEECH_SEP.join([
            "Testing the title attribute,",
            "link",
            "to google's",
            "website",
        ])
    )
def test_ariaDescription_browseMode():
    """aria-description should be spoken in browse mode.

    Relevant settings (both default True): speech.reportObjectDescriptions,
    annotations.reportAriaDescription.
    """
    _chrome.prepareChrome(_getAriaDescriptionSample())
    spoken = _chrome.getSpeechAfterKey("downArrow")
    _asserts.strings_match(spoken, "edit multi line This is a line with no annotation")
    # aria-description reporting requires Chrome canary 92.0.4479.0+ (description-from).
    spoken = _chrome.getSpeechAfterKey('downArrow')
    _asserts.strings_match(
        spoken,
        SPEECH_SEP.join([
            "User nearby, Aaron",  # annotation
            "Here is a sentence that is being edited by someone else.",
            "Multiple can edit this.",
        ])
    )
    spoken = _chrome.getSpeechAfterKey('downArrow')
    _asserts.strings_match(
        spoken,
        SPEECH_SEP.join([
            "An element with a role, follow",
            "link",
            "opens in a new tab",  # link description
            "to google's",  # link name
            "website",
        ])
    )
    # The link 'title' ("conduct a search") must NOT be announced.
    spoken = _chrome.getSpeechAfterKey('downArrow')
    _asserts.strings_match(
        spoken,
        SPEECH_SEP.join([
            "Testing the title attribute,",
            "link",
            "to google's",
            "website",
        ])
    )
def test_ariaDescription_sayAll():
    """aria-description should be included in say-all output.

    Historically description was not announced at all with browse-mode arrow
    navigation; annotations are now a special case. Relevant settings (both
    default True): speech.reportObjectDescriptions, annotations.reportAriaDescription.
    """
    _chrome.prepareChrome(_getAriaDescriptionSample())
    # aria-description reporting requires Chrome 92.0.4479.0+.
    spoken = _chrome.getSpeechAfterKey("NVDA+downArrow")
    _asserts.strings_match(
        spoken,
        SPEECH_CALL_SEP.join([
            "Test page load complete",
            "edit multi line This is a line with no annotation",
            SPEECH_SEP.join([
                "User nearby, Aaron",  # annotation
                "Here is a sentence that is being edited by someone else.",
                "Multiple can edit this.",
            ]),
            SPEECH_SEP.join([
                "An element with a role, follow",
                "link",
                "opens in a new tab",  # link description
                "to google's",  # link name
                "website",
            ]),
            # description sourced from the 'title' attribute is deliberately absent
            SPEECH_SEP.join([
                "Testing the title attribute,",
                "link",
                "to google's",
                "website",
                "out of edit",
            ]),
            "After Test Case Marker",
        ])
    )
def test_i10840():
    """Header cell names must be spoken once when table-navigating directly to them (issue #10840).

    Chrome self-references a header cell as its own header, which used to make
    the name announce twice.
    """
    _chrome.prepareChrome(
        f"""
        <table>
            <thead>
                <tr>
                    <th>Month</th>
                    <th>items</th>
                </tr>
            </thead>
            <tbody>
                <tr>
                    <td>January</td>
                    <td>100</td>
                </tr>
                <tr>
                    <td>February</td>
                    <td>80</td>
                </tr>
            </tbody>
            <tfoot>
                <tr>
                    <td>Sum</td>
                    <td>180</td>
                </tr>
            </tfoot>
        </table>
        """
    )
    # 't' quick nav jumps to the table; the header name appears exactly once.
    spoken = _chrome.getSpeechAfterKey("t")
    _asserts.strings_match(spoken, "table with 4 rows and 2 columns row 1 column 1 Month")
    spoken = _chrome.getSpeechAfterKey("control+alt+rightArrow")
    _asserts.strings_match(spoken, "column 2 items")
def test_mark_browse():
    """<mark> should be reported as 'highlighted' with explicit start and end in browse mode."""
    _chrome.prepareChrome(
        """
        <div>
            <p>The word <mark>Kangaroo</mark> is important.</p>
        </div>
        """
    )
    spoken = _chrome.getSpeechAfterKey('downArrow')
    _asserts.strings_match(spoken, "The word highlighted Kangaroo out of highlighted is important.")
    # Word-by-word navigation also announces the highlight boundaries.
    spoken = _chrome.getSpeechAfterKey("numpad6")
    _asserts.strings_match(spoken, "word")
    spoken = _chrome.getSpeechAfterKey("numpad6")
    _asserts.strings_match(spoken, "highlighted Kangaroo out of highlighted")
def test_mark_focus():
    """A link inside <mark> should report the highlight when it gains focus."""
    _chrome.prepareChrome(
        """
        <div>
            <p>The word <mark><a href="#">Kangaroo</a></mark> is important.</p>
        </div>
        """
    )
    # Force focus mode so the link is reported via a focus event.
    spoken = _chrome.getSpeechAfterKey("NVDA+space")
    _asserts.strings_match(spoken, "Focus mode")
    spoken = _chrome.getSpeechAfterKey('tab')
    _asserts.strings_match(spoken, "highlighted\nKangaroo link")
def test_preventDuplicateSpeechFromDescription_browse_tab():
    """A description identical to the name/content must not be spoken (no duplicates) in browse mode.

    Relevant setting: speech.reportObjectDescriptions (default True).
    """
    spy = _NvdaLib.getSpyLib()
    spy.set_configValue(["presentation", "reportObjectDescriptions"], True)
    _chrome.prepareChrome(
        """
        <a href="#" title="apple" style="display:block">apple</a>
        <a href="#" title="banana" aria-label="banana" style="display:block">contents</a>
        """
    )
    # title == content: spoken once
    spoken = _chrome.getSpeechAfterKey('tab')
    _asserts.strings_match(spoken, "apple link")
    # title == aria-label: spoken once
    spoken = _chrome.getSpeechAfterKey('tab')
    _asserts.strings_match(spoken, "banana link")
def preventDuplicateSpeechFromDescription_focus():
    """A description identical to the name/content must not be spoken (no duplicates) in focus mode.

    Relevant setting: speech.reportObjectDescriptions (default True).
    """
    spy = _NvdaLib.getSpyLib()
    spy.set_configValue(["presentation", "reportObjectDescriptions"], True)
    _chrome.prepareChrome(
        """
        <a href="#" title="apple" style="display:block">apple</a>
        <a href="#" title="banana" aria-label="banana" style="display:block">contents</a>
        """
    )
    # Force focus mode
    spoken = _chrome.getSpeechAfterKey("NVDA+space")
    _asserts.strings_match(spoken, "Focus mode")
    # title == content: spoken once
    spoken = _chrome.getSpeechAfterKey('tab')
    _asserts.strings_match(spoken, "apple link")
    # title == aria-label: spoken once
    spoken = _chrome.getSpeechAfterKey('tab')
    _asserts.strings_match(spoken, "banana link")
def test_ensureNoBrowseModeDescription():
    """speech.reportObjectDescriptions (default True) must not produce a description in browse mode,
    while focus mode still reports it when the setting is enabled."""
    REPORT_OBJ_DESC_KEY = ["presentation", "reportObjectDescriptions"]
    spy = _NvdaLib.getSpyLib()
    # 0 disables braille message timeouts so browse/focus mode messages don't interfere.
    spy.set_configValue(["braille", "messageTimeout"], 0)
    _chrome.prepareChrome(
        "\n".join([
            # NOTE(review): there is no comma after the button literal, so it
            # concatenates with the Apple link into a single join element.
            # Harmless in HTML, but confirm it is intentional.
            r'<button>something for focus</button>'
            r'<a href="#" style="display:block" title="Cat">Apple</a>',
            # second link to make testing second focus mode tab easier
            r'<a href="#" style="display:block" title="Fish">Banana</a>',
        ])
    )
    spoken = _NvdaLib.getSpeechAfterKey('tab')
    _builtIn.should_contain(spoken, "something for focus")
    # --- browse mode, reportObjectDescriptions=True: no description ---
    spy.set_configValue(REPORT_OBJ_DESC_KEY, True)
    spoken, brailled = _NvdaLib.getSpeechAndBrailleAfterKey('downArrow')
    _asserts.speech_matches(
        spoken,
        SPEECH_SEP.join(["link", "Apple"]),
        message="Test browse mode with reportObjectDescriptions=True"
    )
    _asserts.braille_matches(
        brailled,
        BRAILLE_SEP.join(["lnk", "Apple"]),
        message="Test browse mode with reportObjectDescriptions=True"
    )
    # reset the virtual cursor to the start position
    spoken = _NvdaLib.getSpeechAfterKey('upArrow')
    _builtIn.should_contain(spoken, "something for focus")
    # --- browse mode, reportObjectDescriptions=False: same output ---
    spy.set_configValue(REPORT_OBJ_DESC_KEY, False)
    spoken, brailled = _NvdaLib.getSpeechAndBrailleAfterKey('downArrow')
    _asserts.speech_matches(
        spoken,
        SPEECH_SEP.join(["link", "Apple"]),
        message="Test browse mode with reportObjectDescriptions=False"
    )
    _asserts.braille_matches(
        brailled,
        BRAILLE_SEP.join(["lnk", "Apple"]),
        message="Test browse mode with reportObjectDescriptions=False"
    )
    # reset the virtual cursor to the start position
    spoken = _NvdaLib.getSpeechAfterKey('upArrow')
    _builtIn.should_contain(spoken, "something for focus")
    spy.set_configValue(REPORT_OBJ_DESC_KEY, True)
    # --- focus mode, reportObjectDescriptions=True: title IS reported ---
    spoken = _NvdaLib.getSpeechAfterKey("nvda+space")
    _asserts.speech_matches(spoken, "Focus mode")
    spoken, brailled = _NvdaLib.getSpeechAndBrailleAfterKey("tab")
    _asserts.speech_matches(
        spoken,
        SPEECH_SEP.join(["Apple", "link", "Cat"]),
        message="Test focus mode with reportObjectDescriptions=True"
    )
    _asserts.braille_matches(
        brailled,
        BRAILLE_SEP.join(["Apple", "lnk", "Cat"]),
        message="Test focus mode with reportObjectDescriptions=True"
    )
    # Use the second link to test focus mode with the setting off.
    spy.set_configValue(REPORT_OBJ_DESC_KEY, False)
    spoken, brailled = _NvdaLib.getSpeechAndBrailleAfterKey("tab")
    _asserts.speech_matches(
        spoken,
        SPEECH_SEP.join(["Banana", "link"]),
        message="Test focus mode with reportObjectDescriptions=False"
    )
    _asserts.braille_matches(
        brailled,
        BRAILLE_SEP.join(["Banana", "lnk"]),
        message="Test focus mode with reportObjectDescriptions=False"
    )
def test_quickNavTargetReporting():
    """Quick nav should speak the target object first, then inner-before-outer context."""
    spy = _NvdaLib.getSpyLib()
    REPORT_ARTICLES = ["documentFormatting", "reportArticles"]
    spy.set_configValue(REPORT_ARTICLES, False)
    _chrome.prepareChrome(
        """
        <div
            aria-describedby="descId"
            aria-labelledby="labelId"
            role="article"
        >
            <h1>Quick Nav Target</h1>
            <div id="labelId">
                <div>Some name.</div>
            </div>
            <div id="descId">
                <span>A bunch of text.</span>
            </div>
        </div>
        """
    )
    # Articles off: only the heading itself is reported.
    spoken = _chrome.getSpeechAfterKey("h")
    _asserts.strings_match(spoken, SPEECH_SEP.join(["Quick Nav Target", "heading", "level 1"]))
    # Back to the top so the quick nav can be repeated.
    spoken = _chrome.getSpeechAfterKey("control+home")
    _asserts.strings_match(spoken, "Before Test Case Marker")
    # Articles on: ancestor article context follows the target.
    spy.set_configValue(REPORT_ARTICLES, True)
    spoken = _chrome.getSpeechAfterKey("h")
    _asserts.strings_match(
        spoken,
        SPEECH_SEP.join([
            "Quick Nav Target",  # quick nav target is spoken first
            "heading",
            "level 1",
            "article",  # ancestor role
            "A bunch of text.",  # ancestor description
        ])
    )
def test_focusTargetReporting():
    """Moving focus should speak the target object first, then inner-before-outer context."""
    spy = _NvdaLib.getSpyLib()
    REPORT_ARTICLES = ["documentFormatting", "reportArticles"]
    spy.set_configValue(REPORT_ARTICLES, False)
    _chrome.prepareChrome(
        """
        <a href="#">before Target</a>
        <div
            aria-describedby="descId"
            aria-labelledby="labelId"
            role="article"
        >
            <a href="#">Focus Target</a>
            <div id="labelId">
                <div>Some name.</div>
            </div>
            <div id="descId">
                <span>A bunch of text.</span>
            </div>
        </div>
        """
    )
    spoken = _chrome.getSpeechAfterKey("tab")
    _asserts.strings_match(spoken, SPEECH_SEP.join(["before Target", "link"]))
    # Browse mode, articles off: target only.
    spoken = _chrome.getSpeechAfterKey("tab")
    _asserts.strings_match(
        spoken,
        SPEECH_SEP.join(["Focus Target", "link"]),
        message="browse mode - focus with Report Articles disabled"
    )
    # Reset to the preceding link.
    spoken = _chrome.getSpeechAfterKey("shift+tab")
    _asserts.strings_match(spoken, SPEECH_SEP.join(["before Target", "link"]))
    # Browse mode, articles on: ancestor article context follows the target.
    spy.set_configValue(REPORT_ARTICLES, True)
    spoken = _chrome.getSpeechAfterKey("tab")
    _asserts.strings_match(
        spoken,
        SPEECH_SEP.join([
            "Focus Target",  # focus target is spoken first
            "link",
            "article",  # ancestor role
            "A bunch of text.",  # ancestor description
        ]),
        message="browse mode - focus with Report Articles enabled"
    )
    # Reset again before switching to focus mode.
    spoken = _chrome.getSpeechAfterKey("shift+tab")
    _asserts.strings_match(spoken, SPEECH_SEP.join(["before Target", "link"]))
    spoken = _chrome.getSpeechAfterKey("NVDA+space")
    _asserts.strings_match(spoken, "Focus mode")
    # Focus mode, articles off: the article container is still announced on entry.
    spy.set_configValue(REPORT_ARTICLES, False)
    spoken = _chrome.getSpeechAfterKey("tab")
    _asserts.strings_match(
        spoken,
        SPEECH_CALL_SEP.join([
            SPEECH_SEP.join(["Some name.", "article", "A bunch of text."]),
            SPEECH_SEP.join(["Focus Target", "link"]),
        ]),
        message="focus mode - focus with Report Articles disabled"
    )
    # Reset once more for the final variant.
    spoken = _chrome.getSpeechAfterKey("shift+tab")
    _asserts.strings_match(spoken, SPEECH_SEP.join(["before Target", "link"]))
    # Focus mode, articles on: identical output.
    spy.set_configValue(REPORT_ARTICLES, True)
    spoken = _chrome.getSpeechAfterKey("tab")
    _asserts.strings_match(
        spoken,
        SPEECH_CALL_SEP.join([
            SPEECH_SEP.join(["Some name.", "article", "A bunch of text."]),
            SPEECH_SEP.join(["Focus Target", "link"]),
        ]),
        message="focus mode - focus with Report Articles enabled"
    )
| 28.776276 | 109 | 0.688469 |
import os
from robot.libraries.BuiltIn import BuiltIn
from SystemTestSpy import (
_getLib,
)
# Imported for type information
from ChromeLib import ChromeLib as _ChromeLib
from AssertsLib import AssertsLib as _AssertsLib
import NvdaLib as _NvdaLib
# Library singletons resolved from the running Robot Framework context.
_builtIn: BuiltIn = BuiltIn()
_chrome: _ChromeLib = _getLib("ChromeLib")
_asserts: _AssertsLib = _getLib("AssertsLib")
#: Double space is used to separate semantics in speech output this typically
# adds a slight pause to the synthesizer.
# Fixed: the value was a single space, contradicting the doc comment above and
# the two-space separator that the SPEECH_SEP.join expectations in this module rely on.
SPEECH_SEP = "  "
#: Separator between separate speech calls (one utterance per line of output).
SPEECH_CALL_SEP = '\n'
#: single space is used to separate semantics in braille output.
BRAILLE_SEP = " "
#: Local checkout of the W3C ARIA practices examples, used by iframe-based tests.
ARIAExamplesDir = os.path.join(
    _NvdaLib._locations.repoRoot, "include", "w3c-aria-practices", "examples"
)
def checkbox_labelled_by_inner_element():
    """A checkbox labelled (aria-labelledby) by its own inner content should read its name once."""
    _chrome.prepareChrome(
        r"""
        <div tabindex="0" role="checkbox" aria-labelledby="inner-label">
            <div style="display:inline" id="inner-label">
                Simulate evil cat
            </div>
        </div>
        """
    )
    spoken = _chrome.getSpeechAfterTab()
    # The label element is also the content; the expected output contains the
    # name exactly once.
    _asserts.strings_match(spoken, "Simulate evil cat check box not checked")
def test_mark_aria_details():
    """<mark> with aria-details: the highlight and details presence should be reported,
    and the NVDA summary command should read the details subtree."""
    _chrome.prepareChrome(
        """
        <div>
            <p>The word <mark aria-details="cat-details">cat</mark> has a comment tied to it.</p>
            <div id="cat-details" role="comment">
                Cats go woof BTW<br>—Jonathon Commentor
                <div role="comment">
                    No they don't<br>—Zara
                </div>
                <div role="form">
                    <textarea cols="80" placeholder="Add reply..."></textarea>
                    <input type="submit">
                </div>
            </div>
        </div>
        """
    )
    spoken = _chrome.getSpeechAfterKey('downArrow')
    _asserts.strings_match(
        spoken,
        "The word highlighted has details cat out of highlighted has a comment tied to it."
    )
    # "word" carries no details...
    spoken = _chrome.getSpeechAfterKey("control+rightArrow")
    _asserts.strings_match(spoken, "word")
    # ...so the summary command reports nothing.
    spoken = _chrome.getSpeechAfterKey("NVDA+\\")
    _asserts.strings_match(spoken, "No additional details")
    # The marked word does have details attached.
    spoken = _chrome.getSpeechAfterKey("control+rightArrow")
    _asserts.strings_match(spoken, "highlighted has details cat out of highlighted")
    # The summary command reads the whole details subtree.
    spoken = _chrome.getSpeechAfterKey("NVDA+\\")
    _asserts.strings_match(spoken, "Cats go woof BTW Jonathon Commentor No they don't Zara Submit")
def announce_list_item_when_moving_by_word_or_character():
    """List item boundaries should be announced when caret-navigating a contenteditable list."""
    _chrome.prepareChrome(
        r"""
        <div contenteditable="true">
            <p>Before list</p>
            <ul style="list-style-type:none">
                <li>small cat</li>
                <li>big dog</li>
            </ul>
        </div>
        """
    )
    spoken = _chrome.getSpeechAfterKey("NVDA+space")
    _asserts.strings_match(spoken, "Focus mode")
    spoken = _chrome.getSpeechAfterKey("tab")
    _asserts.strings_match(spoken, "section multi line editable Before list")
    spoken = _chrome.getSpeechAfterKey("downArrow")
    _asserts.strings_match(spoken, "list small cat")
    spoken = _chrome.getSpeechAfterKey("control+rightArrow")
    _asserts.strings_match(spoken, "cat")
    spoken = _chrome.getSpeechAfterKey("rightArrow")
    _asserts.strings_match(spoken, "a")
    spoken = _chrome.getSpeechAfterKey("end")
    _asserts.strings_match(spoken, "blank")
    # Crossing onto the next item by character announces the item first.
    spoken = _chrome.getSpeechAfterKey("rightArrow")
    _asserts.strings_match(spoken, SPEECH_CALL_SEP.join(["list item level 1", "b"]))
    spoken = _chrome.getSpeechAfterKey("leftArrow")
    _asserts.strings_match(spoken, "list item level 1")
    # And likewise when crossing by word.
    spoken = _chrome.getSpeechAfterKey("control+rightArrow")
    _asserts.strings_match(spoken, "list item level 1 big")
    spoken = _chrome.getSpeechAfterKey("control+leftArrow")
    _asserts.strings_match(spoken, "list item level 1")
def test_i7562():
    """List entry and exit should be announced while caret-navigating a contenteditable (issue #7562)."""
    _chrome.prepareChrome(
        r"""
        <div contenteditable="true">
            <p>before</p>
            <ul>
                <li>frogs</li>
                <li>birds</li>
            </ul>
            <p>after</p>
        </div>
        """
    )
    spoken = _chrome.getSpeechAfterKey("NVDA+space")
    _asserts.strings_match(spoken, "Focus mode")
    spoken = _chrome.getSpeechAfterKey("tab")
    _asserts.strings_match(spoken, "section multi line editable before")
    # Entering the list is announced once.
    spoken = _chrome.getSpeechAfterKey("downArrow")
    _asserts.strings_match(spoken, "list bullet frogs")
    spoken = _chrome.getSpeechAfterKey("downArrow")
    _asserts.strings_match(spoken, "bullet birds")
    # Leaving the list is announced as well.
    spoken = _chrome.getSpeechAfterKey("downArrow")
    _asserts.strings_match(spoken, "out of list after")
def test_pr11606():
    """Regression test for PR #11606.

    In a contenteditable list item containing two links separated by a space,
    moving the caret by character across the space should announce leaving the
    first link and the space — not re-announce the whole list item — and
    reading the current line should still report the full item.

    NOTE(review): several statements in this function were corrupted (truncated
    fragments such as ``Speech = ...``, ``trings_match(`` and ``rrow")``).
    The sequence below restores syntactically valid code consistent with the
    surviving fragments and expected-speech strings; confirm against upstream
    history before relying on the exact key presses.
    """
    _chrome.prepareChrome(
        r"""
        <div contenteditable="true">
            <ul>
                <li><a href="#">A</a> <a href="#">B</a></li>
                <li>C D</li>
            </ul>
        </div>
        """
    )
    # Force focus mode
    actualSpeech = _chrome.getSpeechAfterKey("NVDA+space")
    _asserts.strings_match(actualSpeech, "Focus mode")
    actualSpeech = _chrome.getSpeechAfterKey("tab")
    _asserts.strings_match(
        actualSpeech,
        "section multi line editable list bullet link A link B"
    )
    # Caret starts on "A"; move onto the space between the links.
    actualSpeech = _chrome.getSpeechAfterKey("rightArrow")
    _asserts.strings_match(
        actualSpeech,
        SPEECH_CALL_SEP.join([
            "out of link",
            "space"
        ])
    )
    # Move onto link B; only the link role should be announced.
    actualSpeech = _chrome.getSpeechAfterKey("rightArrow")
    _asserts.strings_match(
        actualSpeech,
        "link"
    )
    # Read the current line: the full list item content is reported.
    # NOTE(review): reconstructed key — original fragment ended in `rrow")`.
    actualSpeech = _chrome.getSpeechAfterKey("NVDA+upArrow")
    _asserts.strings_match(
        actualSpeech,
        "bullet link A link B"
    )
def test_ariaTreeGrid_browseMode():
    """Browse-mode interaction with the ARIA treegrid example: table quick nav,
    table navigation commands, and enter to focus a row."""
    testFile = os.path.join(ARIAExamplesDir, "treegrid", "treegrid-1.html")
    _chrome.prepareChrome(
        f"""
        <iframe src="{testFile}"></iframe>
        """
    )
    spoken = _chrome.getSpeechAfterKey("h")
    _asserts.strings_match(
        spoken,
        "frame main landmark Treegrid Email Inbox Example heading level 1"
    )
    # Tab to a link first; this matters when entering focus mode on the treegrid later.
    spoken = _chrome.getSpeechAfterKey("tab")
    _asserts.strings_match(spoken, "issue 790. link")
    # 't' (next table) lands on the caption, before the first row.
    spoken = _chrome.getSpeechAfterKey("t")
    _asserts.strings_match(spoken, "Inbox table clickable with 5 rows and 3 columns Inbox")
    # Move past the caption onto row 1.
    spoken = _chrome.getSpeechAfterKey("downArrow")
    _asserts.strings_match(spoken, "row 1 column 1 Subject")
    # Table navigation command down to row 2, column 1.
    spoken = _chrome.getSpeechAfterKey("control+alt+downArrow")
    _asserts.strings_match(spoken, "expanded level 1 row 2 Treegrids are awesome")
    # Enter activates the row: focus mode turns on and focus lands on row 2.
    spoken = _chrome.getSpeechAfterKey("enter")
    _asserts.strings_match(
        spoken,
        SPEECH_CALL_SEP.join([
            "Focus mode",
            "Inbox table",
            "level 1 Treegrids are awesome Want to learn how to use them? aaron at thegoogle dot rocks expanded",
        ])
    )
def ARIAInvalid_spellingAndGrammar():
    """aria-invalid spelling/grammar values should be reported while reading lines."""
    _chrome.prepareChrome(
        r"""
        <p>Big <span aria-invalid="spelling">caat</span> meos</p>
        <p>Small <span aria-invalid="grammar">a dog</span> woofs</p>
        <p>Fat <span aria-invalid="grammar, spelling">a ffrog</span> crokes</p>
        """
    )
    spoken = _chrome.getSpeechAfterKey("downArrow")
    _asserts.strings_match(spoken, "Big spelling error caat meos")
    spoken = _chrome.getSpeechAfterKey("downArrow")
    _asserts.strings_match(spoken, "Small grammar error a dog woofs")
    # Both error kinds are reported when combined in one token list.
    spoken = _chrome.getSpeechAfterKey("downArrow")
    _asserts.strings_match(spoken, "Fat spelling error grammar error a ffrog crokes")
def test_ariaCheckbox_browseMode():
    """Browse-mode reading of the ARIA two-state checkbox example."""
    testFile = os.path.join(ARIAExamplesDir, "checkbox", "checkbox-1", "checkbox-1.html")
    _chrome.prepareChrome(
        f"""
        <iframe src="{testFile}"></iframe>
        """
    )
    # Jump to the first heading inside the iframe.
    spoken = _chrome.getSpeechAfterKey("h")
    _asserts.strings_match(spoken, "frame main landmark Checkbox Example (Two State) heading level 1")
    # 'x' quick nav jumps to the checkbox, including its grouping context.
    spoken = _chrome.getSpeechAfterKey("x")
    _asserts.strings_match(
        spoken,
        "Sandwich Condiments grouping list with 4 items Lettuce check box not checked"
    )
def test_i12147():
    """When a button removes itself and moves focus, the new target should be announced (issue #12147)."""
    _chrome.prepareChrome(
        f"""
        <div>
            <button id='trigger0'>trigger 0</button>
            <h4 id='target0' tabindex='-1'>target 0</h4>
        </div>
        <script>
            let trigger0 = document.querySelector('#trigger0');
            trigger0.addEventListener('click', e => {{
                let focusTarget = document.querySelector('#target0');
                trigger0.remove();
                focusTarget.focus();
            }})
        </script>
        """
    )
    spoken = _chrome.getSpeechAfterKey("tab")
    _asserts.strings_match(spoken, "trigger 0 button")
    # Activating removes the button from the DOM and focuses the heading.
    spoken = _chrome.getSpeechAfterKey("enter")
    _asserts.strings_match(spoken, "target 0 heading level 4")
def test_tableInStyleDisplayTable():
    """Table navigation should work for a <table> nested inside a display:table element."""
    _chrome.prepareChrome(
        """
        <p>Paragraph</p>
        <div style="display:table">
            <table>
                <thead>
                    <tr>
                        <th>First heading</th>
                        <th>Second heading</th>
                    </tr>
                </thead>
                <tbody>
                    <tr>
                        <td>First content cell</td>
                        <td>Second content cell</td>
                    </tr>
                </tbody>
            </table>
        </div>
        """
    )
    # 't' quick nav finds the inner table, not the styled wrapper.
    spoken = _chrome.getSpeechAfterKey("t")
    _asserts.strings_match(spoken, "table with 2 rows and 2 columns row 1 column 1 First heading")
    spoken = _chrome.getSpeechAfterKey("control+alt+downArrow")
    _asserts.strings_match(spoken, "row 2 First content cell")
def test_ariaRoleDescription_focus():
    """The custom role (aria-roledescription) should replace the native role when a control gains focus."""
    _chrome.prepareChrome(
        """
        <button aria-roledescription="pizza">Cheese</button><br />
        <button aria-roledescription="pizza">Meat</button>
        """
    )
    spoken = _chrome.getSpeechAfterKey("tab")
    _asserts.strings_match(spoken, "Cheese pizza")
    # Switch to focus mode so the second control is reported via a real focus event.
    spoken = _chrome.getSpeechAfterKey("NVDA+space")
    _asserts.strings_match(spoken, "Focus mode")
    spoken = _chrome.getSpeechAfterKey("tab")
    _asserts.strings_match(spoken, "Meat pizza")
def test_ariaRoleDescription_inline_browseMode():
    """The custom role of an inline element should be reported in browse mode."""
    _chrome.prepareChrome(
        """
        <p>Start
        <img aria-roledescription="drawing" alt="Our logo" src="https://www.nvaccess.org/images/logo.png" />
        End</p>
        """
    )
    # Whole-line reading: entering the custom role is announced, exiting is not.
    spoken = _chrome.getSpeechAfterKey("downArrow")
    _asserts.strings_match(spoken, "Start drawing Our logo End")
    # Word-by-word reading: both entering and exiting are announced.
    spoken = _chrome.getSpeechAfterKey("control+rightArrow")
    _asserts.strings_match(spoken, "drawing Our")
    spoken = _chrome.getSpeechAfterKey("control+rightArrow")
    _asserts.strings_match(spoken, "logo out of drawing")
    spoken = _chrome.getSpeechAfterKey("control+rightArrow")
    _asserts.strings_match(spoken, "End")
def test_ariaRoleDescription_block_browseMode():
    """The custom role of a block element should be reported at its start and end in browse mode."""
    _chrome.prepareChrome(
        """
        <aside aria-roledescription="warning">
            <p>Wet paint!</p>
            <p>Please be careful.</p>
        </aside>
        <p>End</p>
        """
    )
    # Line reading announces both entering and leaving the custom role.
    spoken = _chrome.getSpeechAfterKey("downArrow")
    _asserts.strings_match(spoken, "warning Wet paint!")
    spoken = _chrome.getSpeechAfterKey("downArrow")
    _asserts.strings_match(spoken, "Please be careful.")
    spoken = _chrome.getSpeechAfterKey("downArrow")
    _asserts.strings_match(spoken, "out of warning End")
def test_ariaRoleDescription_inline_contentEditable():
    """The custom role of an inline element should be reported inside a contenteditable."""
    _chrome.prepareChrome(
        """
        <div contenteditable="true">
            <p>Top line</p>
            <p>Start
            <img aria-roledescription="drawing" alt="Our logo" src="https://www.nvaccess.org/images/logo.png" />
            End</p>
        </div>
        """
    )
    # Force focus mode, then enter the editable.
    spoken = _chrome.getSpeechAfterKey("NVDA+space")
    _asserts.strings_match(spoken, "Focus mode")
    spoken = _chrome.getSpeechAfterKey("tab")
    _asserts.strings_match(spoken, "section multi line editable Top line")
    # Whole-line reading: entering the custom role is announced, exiting is not.
    spoken = _chrome.getSpeechAfterKey("downArrow")
    _asserts.strings_match(spoken, "Start drawing Our logo End")
    # Word-by-word reading: both entering and exiting are announced.
    spoken = _chrome.getSpeechAfterKey("control+rightArrow")
    _asserts.strings_match(spoken, "drawing Our logo out of drawing")
    spoken = _chrome.getSpeechAfterKey("control+rightArrow")
    _asserts.strings_match(spoken, "End")
def test_ariaRoleDescription_block_contentEditable():
	"""Custom ARIA role description on a block element inside a contenteditable (focus mode)."""
	_chrome.prepareChrome(
		"""
		<div contenteditable="true">
		<p>Top line</p>
		<aside aria-roledescription="warning">
		<p>Wet paint!</p>
		<p>Please be careful.</p>
		</aside>
		<p>End</p>
		</div>
		"""
	)
	# Force focus mode before interacting with the editable region.
	spoken = _chrome.getSpeechAfterKey("NVDA+space")
	_asserts.strings_match(spoken, "Focus mode")
	spoken = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(spoken, "section multi line editable Top line")
	# Line navigation must report both the start and the end of the custom role.
	for expected in (
		"warning Wet paint!",
		"Please be careful.",
		"out of warning End",
	):
		spoken = _chrome.getSpeechAfterKey("downArrow")
		_asserts.strings_match(spoken, expected)
def _getAriaDescriptionSample() -> str:
	"""Build the HTML snippet shared by the aria-description tests.

	Contains an editable textbox with: an annotated span, a link with an
	aria-description, and a link with only a 'title' attribute (which should
	only ever be read on focus).
	"""
	annotationText = "User nearby, Aaron"
	newTabDescription = "opens in a new tab"
	# link title should be read in focus
	searchLinkTitle = "conduct a search"
	linkText = "to google's"
	return f"""
	<div>
		<div
			contenteditable=""
			spellcheck="false"
			role="textbox"
			aria-multiline="true"
		><p>This is a line with no annotation</p>
		<p><span
			aria-description="{annotationText}"
		>Here is a sentence that is being edited by someone else.</span>
		<b>Multiple can edit this.</b></p>
		<p>An element with a role, follow <a
			href="www.google.com"
			aria-description="{newTabDescription}"
		>{linkText}</a
		> website</p>
		<p>Testing the title attribute, <a
			href="www.google.com"
			title="{searchLinkTitle}"
		>{linkText}</a
		> website</p>
	</div>
	</div>
	"""
def test_ariaDescription_focusMode():
	"""Arrowing through the sample textbox in focus mode reports aria-description.

	The link 'title' attribute ("conduct a search") must NOT be announced:
	title is too often used without screen reader users in mind and tends to
	be overly verbose.
	"""
	_chrome.prepareChrome(_getAriaDescriptionSample())
	# Tabbing into the multi-line textbox also switches to focus mode.
	actualSpeech = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(
		actualSpeech,
		"edit multi line This is a line with no annotation\nFocus mode"
	)
	actualSpeech = _chrome.getSpeechAfterKey('downArrow')
	# reporting aria-description only supported in Chrome canary 92.0.4479.0+
	_asserts.strings_match(
		actualSpeech,
		SPEECH_SEP.join([
			"User nearby, Aaron",  # annotation
			"Here is a sentence that is being edited by someone else.",  # span text
			"Multiple can edit this.",  # bold paragraph text
		])
	)
	actualSpeech = _chrome.getSpeechAfterKey('downArrow')
	# description-from hasn't reached Chrome stable yet.
	_asserts.strings_match(
		actualSpeech,
		SPEECH_SEP.join([
			"An element with a role, follow",
			"link",
			"opens in a new tab",
			"to google's",  # link contents (name)
			"website"  # paragraph text
		])
	)
	# 'title' attribute for link ("conduct a search") should not be announced.
	# too often title is used without screen reader users in mind, and is overly verbose.
	actualSpeech = _chrome.getSpeechAfterKey('downArrow')
	_asserts.strings_match(
		actualSpeech,
		SPEECH_SEP.join([
			"Testing the title attribute,",  # paragraph text
			"link",  # link role
			"to google's",
			"website"
		])
	)
def test_ariaDescription_browseMode():
	"""Arrowing through the sample in browse mode reports aria-description.

	Mirrors test_ariaDescription_focusMode but stays in browse mode; again
	the link 'title' attribute must NOT be announced.
	"""
	_chrome.prepareChrome(_getAriaDescriptionSample())
	actualSpeech = _chrome.getSpeechAfterKey("downArrow")
	_asserts.strings_match(
		actualSpeech,
		"edit multi line This is a line with no annotation"
	)
	actualSpeech = _chrome.getSpeechAfterKey('downArrow')
	# reporting aria-description only supported in Chrome canary 92.0.4479.0+
	_asserts.strings_match(
		actualSpeech,
		SPEECH_SEP.join([
			"User nearby, Aaron",  # annotation
			"Here is a sentence that is being edited by someone else.",  # span text
			"Multiple can edit this.",  # bold paragraph text
		])
	)
	actualSpeech = _chrome.getSpeechAfterKey('downArrow')
	# description-from hasn't reached Chrome stable yet.
	_asserts.strings_match(
		actualSpeech,
		SPEECH_SEP.join([
			"An element with a role, follow",
			"link",
			"opens in a new tab",
			"to google's",  # link contents (name)
			"website"  # paragraph text
		])
	)
	# 'title' attribute for link ("conduct a search") should not be announced.
	# too often title is used without screen reader users in mind, and is overly verbose.
	actualSpeech = _chrome.getSpeechAfterKey('downArrow')
	_asserts.strings_match(
		actualSpeech,
		SPEECH_SEP.join([
			"Testing the title attribute,",  # paragraph text
			"link",  # link role
			"to google's",
			"website"
		])
	)
def test_ariaDescription_sayAll():
	"""Say-all (NVDA+downArrow) over the sample reports aria-description inline.

	The whole expected output is a single utterance sequence: each speech
	call is joined with SPEECH_CALL_SEP, and items within one call with
	SPEECH_SEP.
	"""
	_chrome.prepareChrome(_getAriaDescriptionSample())
	actualSpeech = _chrome.getSpeechAfterKey("NVDA+downArrow")
	_asserts.strings_match(
		actualSpeech,
		SPEECH_CALL_SEP.join([
			"Test page load complete",
			"edit multi line This is a line with no annotation",
			SPEECH_SEP.join([
				"User nearby, Aaron",
				"Here is a sentence that is being edited by someone else.",
				"Multiple can edit this.",
			]),
			SPEECH_SEP.join([
				"An element with a role, follow",
				"link",
				"opens in a new tab",
				"to google's",  # link contents (name)
				"website",  # paragraph text
			]),
			# 'title' attribute for link ("conduct a search") should not be announced.
			# too often title is used without screen reader users in mind, and is overly verbose.
			SPEECH_SEP.join([
				"Testing the title attribute,",  # paragraph text
				"link",  # link role
				# note description missing when sourced from title attribute
				"to google's",
				"website",
				"out of edit"
			]),
			"After Test Case Marker"
		])
	)
def test_i10840():
	"""Row/column counts are reported when quick-navigating to a table (issue #10840)."""
	_chrome.prepareChrome(
		"""
		<table>
			<thead>
				<tr>
					<th>Month</th>
					<th>items</th>
				</tr>
			</thead>
			<tbody>
				<tr>
					<td>January</td>
					<td>100</td>
				</tr>
				<tr>
					<td>February</td>
					<td>80</td>
				</tr>
			</tbody>
			<tfoot>
				<tr>
					<td>Sum</td>
					<td>180</td>
				</tr>
			</tfoot>
		</table>
		"""
	)
	# Quick nav key "t" jumps to the table and announces its dimensions.
	spoken = _chrome.getSpeechAfterKey("t")
	_asserts.strings_match(spoken, "table with 4 rows and 2 columns row 1 column 1 Month")
	# Table navigation to the next column announces the new cell.
	spoken = _chrome.getSpeechAfterKey("control+alt+rightArrow")
	_asserts.strings_match(spoken, "column 2 items")
def test_mark_browse():
	"""The <mark> element is reported as "highlighted" when read in browse mode."""
	_chrome.prepareChrome(
		"""
		<div>
			<p>The word <mark>Kangaroo</mark> is important.</p>
		</div>
		"""
	)
	# Whole line: both the start and end of the highlight are reported.
	spoken = _chrome.getSpeechAfterKey('downArrow')
	_asserts.strings_match(spoken, "The word highlighted Kangaroo out of highlighted is important.")
	# Review cursor by word (numpad6).
	spoken = _chrome.getSpeechAfterKey("numpad6")
	_asserts.strings_match(spoken, "word")
	spoken = _chrome.getSpeechAfterKey("numpad6")
	_asserts.strings_match(spoken, "highlighted Kangaroo out of highlighted")
def test_mark_focus():
	"""Tabbing to a link inside <mark> reports the highlight in focus mode."""
	_chrome.prepareChrome(
		"""
		<div>
			<p>The word <mark><a href="#">Kangaroo</a></mark> is important.</p>
		</div>
		"""
	)
	# Switch to focus mode first, then tab onto the highlighted link.
	spoken = _chrome.getSpeechAfterKey("NVDA+space")
	_asserts.strings_match(spoken, "Focus mode")
	spoken = _chrome.getSpeechAfterKey('tab')
	_asserts.strings_match(spoken, "highlighted\nKangaroo link")
def test_preventDuplicateSpeechFromDescription_browse_tab():
	"""A description repeating the link name must not be spoken twice when tabbing in browse mode."""
	spy = _NvdaLib.getSpyLib()
	# Turn on object description reporting so a duplicate would be audible.
	spy.set_configValue(["presentation", "reportObjectDescriptions"], True)
	_chrome.prepareChrome(
		"""
		<a href="#" title="apple" style="display:block">apple</a>
		<a href="#" title="banana" aria-label="banana" style="display:block">contents</a>
		"""
	)
	# Each link name should be spoken once only, without the matching description.
	spoken = _chrome.getSpeechAfterKey('tab')
	_asserts.strings_match(spoken, "apple link")
	spoken = _chrome.getSpeechAfterKey('tab')
	_asserts.strings_match(spoken, "banana link")
def test_preventDuplicateSpeechFromDescription_focus():
	"""A description repeating the link name must not be spoken twice when tabbing in focus mode.

	Bug fix: this function previously lacked the ``test_`` prefix used by
	every sibling test (cf. test_preventDuplicateSpeechFromDescription_browse_tab),
	so it was never picked up by the test runner. A module-level alias with
	the old name is kept for backwards compatibility.
	"""
	spy = _NvdaLib.getSpyLib()
	REPORT_OBJ_DESC_KEY = ["presentation", "reportObjectDescriptions"]
	spy.set_configValue(REPORT_OBJ_DESC_KEY, True)
	_chrome.prepareChrome(
		"""
		<a href="#" title="apple" style="display:block">apple</a>
		<a href="#" title="banana" aria-label="banana" style="display:block">contents</a>
		"""
	)
	# Switch to focus mode before tabbing through the links.
	actualSpeech = _chrome.getSpeechAfterKey("NVDA+space")
	_asserts.strings_match(
		actualSpeech,
		"Focus mode"
	)
	actualSpeech = _chrome.getSpeechAfterKey('tab')
	_asserts.strings_match(
		actualSpeech,
		"apple link"
	)
	actualSpeech = _chrome.getSpeechAfterKey('tab')
	_asserts.strings_match(
		actualSpeech,
		"banana link"
	)


# Backwards-compatible alias for the previous (un-prefixed) name.
preventDuplicateSpeechFromDescription_focus = test_preventDuplicateSpeechFromDescription_focus
def test_ensureNoBrowseModeDescription():
	"""A link description sourced from 'title' is reported on focus but never in browse mode.

	Checks both speech and braille, in browse mode and focus mode, with
	reportObjectDescriptions on and off.
	"""
	REPORT_OBJ_DESC_KEY = ["presentation", "reportObjectDescriptions"]
	spy = _NvdaLib.getSpyLib()
	spy.set_configValue(["braille", "messageTimeout"], 0)
	_chrome.prepareChrome(
		"\n".join([
			# Bug fix: a missing comma after the button literal caused implicit
			# string concatenation, merging the button and the first link into
			# one join element, contrary to the one-element-per-line intent.
			r'<button>something for focus</button>',
			r'<a href="#" style="display:block" title="Cat">Apple</a>',
			# second link to make testing second focus mode tab easier
			r'<a href="#" style="display:block" title="Fish">Banana</a>',
		])
	)
	actualSpeech = _NvdaLib.getSpeechAfterKey('tab')
	_builtIn.should_contain(actualSpeech, "something for focus")
	# Test Browse mode
	spy.set_configValue(REPORT_OBJ_DESC_KEY, True)
	actualSpeech, actualBraille = _NvdaLib.getSpeechAndBrailleAfterKey('downArrow')
	_asserts.speech_matches(
		actualSpeech,
		SPEECH_SEP.join([
			"link",  # role description
			# No link description (from title)
			"Apple",  # link name / contents
		]),
		message="Test browse mode with reportObjectDescriptions=True"
	)
	_asserts.braille_matches(
		actualBraille,
		BRAILLE_SEP.join([
			"lnk",  # role description
			# No link description (from title)
			"Apple",  # link name / contents
		]),
		message="Test browse mode with reportObjectDescriptions=True"
	)
	# move virtual cursor back up to reset to start position
	actualSpeech = _NvdaLib.getSpeechAfterKey('upArrow')
	_builtIn.should_contain(actualSpeech, "something for focus")
	spy.set_configValue(REPORT_OBJ_DESC_KEY, False)
	actualSpeech, actualBraille = _NvdaLib.getSpeechAndBrailleAfterKey('downArrow')
	_asserts.speech_matches(
		actualSpeech,
		SPEECH_SEP.join([
			"link",  # role description
			# No link description (from title)
			"Apple",  # link name / contents
		]),
		message="Test browse mode with reportObjectDescriptions=False"
	)
	_asserts.braille_matches(
		actualBraille,
		BRAILLE_SEP.join([
			"lnk",  # role description
			# No link description (from title)
			"Apple",  # link name / contents
		]),
		message="Test browse mode with reportObjectDescriptions=False"
	)
	# move virtual cursor back up to reset to start position
	actualSpeech = _NvdaLib.getSpeechAfterKey('upArrow')
	_builtIn.should_contain(actualSpeech, "something for focus")
	spy.set_configValue(REPORT_OBJ_DESC_KEY, True)
	# Test focus mode
	actualSpeech = _NvdaLib.getSpeechAfterKey("nvda+space")
	_asserts.speech_matches(actualSpeech, "Focus mode")
	actualSpeech, actualBraille = _NvdaLib.getSpeechAndBrailleAfterKey("tab")
	_asserts.speech_matches(
		actualSpeech,
		SPEECH_SEP.join([
			"Apple",  # link name / contents
			"link",  # role description
			"Cat",  # link description (from title)
		]),
		message="Test focus mode with reportObjectDescriptions=True"
	)
	_asserts.braille_matches(
		actualBraille,
		BRAILLE_SEP.join([
			"Apple",  # link name / contents
			"lnk",  # role description
			"Cat",  # link description (from title)
		]),
		message="Test focus mode with reportObjectDescriptions=True"
	)
	# Use second link to test focus mode when 'reportObjectDescriptions' is off.
	spy.set_configValue(REPORT_OBJ_DESC_KEY, False)
	actualSpeech, actualBraille = _NvdaLib.getSpeechAndBrailleAfterKey("tab")
	_asserts.speech_matches(
		actualSpeech,
		SPEECH_SEP.join([
			"Banana",  # link name / contents
			"link",  # role description
			# No link description (from title)
		]),
		message="Test focus mode with reportObjectDescriptions=False"
	)
	_asserts.braille_matches(
		actualBraille,
		BRAILLE_SEP.join([
			"Banana",  # link name / contents
			"lnk",  # role description
			# No link description (from title)
		]),
		message="Test focus mode with reportObjectDescriptions=False"
	)
def test_quickNavTargetReporting():
	"""Quick-navigating to a heading reads the target first, then its article ancestor (when enabled)."""
	spy = _NvdaLib.getSpyLib()
	REPORT_ARTICLES = ["documentFormatting", "reportArticles"]
	spy.set_configValue(REPORT_ARTICLES, False)
	_chrome.prepareChrome(
		"""
		<div
			aria-describedby="descId"
			aria-labelledby="labelId"
			role="article"
		>
			<h1>Quick Nav Target</h1>
			<div id="labelId">
				<div>Some name.</div>
			</div>
			<div id="descId">
				<span>A bunch of text.</span>
			</div>
		</div>
		"""
	)
	# With article reporting off, only the heading itself is announced.
	spoken = _chrome.getSpeechAfterKey("h")
	_asserts.strings_match(
		spoken,
		SPEECH_SEP.join([
			"Quick Nav Target",  # heading content (quick nav target), read first
			"heading",  # heading role
			"level 1",  # heading level
		])
	)
	# Return to the top of the document so quick nav can be repeated.
	spoken = _chrome.getSpeechAfterKey("control+home")
	_asserts.strings_match(spoken, "Before Test Case Marker")
	# With article reporting on, the ancestor article and its description follow.
	spy.set_configValue(REPORT_ARTICLES, True)
	spoken = _chrome.getSpeechAfterKey("h")
	_asserts.strings_match(
		spoken,
		SPEECH_SEP.join([
			"Quick Nav Target",  # heading content (quick nav target), read first
			"heading",  # heading role
			"level 1",  # heading level
			"article",  # article role, enabled via report article
			"A bunch of text.",  # article (ancestor) description
		])
	)
def test_focusTargetReporting():
	"""Focusing a link inside an article reads the target first (browse) or after the article (focus mode).

	Exercises four combinations: browse/focus mode x reportArticles off/on.
	Between each attempt focus is reset via shift+tab to the "before Target"
	link so the article is re-entered fresh.
	"""
	spy = _NvdaLib.getSpyLib()
	REPORT_ARTICLES = ["documentFormatting", "reportArticles"]
	spy.set_configValue(REPORT_ARTICLES, False)
	_chrome.prepareChrome(
		"""
		<a href="#">before Target</a>
		<div
			aria-describedby="descId"
			aria-labelledby="labelId"
			role="article"
		>
			<a href="#">Focus Target</a>
			<div id="labelId">
				<div>Some name.</div>
			</div>
			<div id="descId">
				<span>A bunch of text.</span>
			</div>
		</div>
		"""
	)
	# Set focus
	actualSpeech = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(
		actualSpeech,
		SPEECH_SEP.join([
			"before Target",
			"link",
		])
	)
	# Focus the link
	actualSpeech = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(
		actualSpeech,
		SPEECH_SEP.join([
			"Focus Target",  # link content (focus target), should read first
			"link",  # link role
		]),
		message="browse mode - focus with Report Articles disabled"
	)
	# Reset to allow trying again with report articles enabled
	actualSpeech = _chrome.getSpeechAfterKey("shift+tab")
	_asserts.strings_match(
		actualSpeech,
		SPEECH_SEP.join([
			"before Target",
			"link",
		])
	)
	# Focus the link with report articles enabled
	spy.set_configValue(REPORT_ARTICLES, True)
	actualSpeech = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(
		actualSpeech,
		SPEECH_SEP.join([
			"Focus Target",  # link content (focus target), should read first
			"link",  # link role
			"article",  # article role, enabled via report article
			"A bunch of text.",  # article (ancestor) description
		]),
		message="browse mode - focus with Report Articles enabled"
	)
	# Reset to allow trying again in focus mode
	actualSpeech = _chrome.getSpeechAfterKey("shift+tab")
	_asserts.strings_match(
		actualSpeech,
		SPEECH_SEP.join([
			"before Target",
			"link",
		])
	)
	# Force focus mode
	actualSpeech = _chrome.getSpeechAfterKey("NVDA+space")
	_asserts.strings_match(
		actualSpeech,
		"Focus mode"
	)
	spy.set_configValue(REPORT_ARTICLES, False)
	# Focus the link
	# In focus mode, entering the article (name + role + description) is
	# reported as a separate speech call before the link itself.
	actualSpeech = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(
		actualSpeech,
		SPEECH_CALL_SEP.join([
			SPEECH_SEP.join([
				"Some name.",  # name for article
				"article",  # article role, enabled via report article
				"A bunch of text.",  # description for article
			]),
			SPEECH_SEP.join([
				"Focus Target",  # link content (focus target), should read first
				"link",  # link role
			]),
		]),
		message="focus mode - focus with Report Articles disabled"
	)
	# Reset to allow trying again with report articles enabled
	actualSpeech = _chrome.getSpeechAfterKey("shift+tab")
	_asserts.strings_match(
		actualSpeech,
		SPEECH_SEP.join([
			"before Target",
			"link",
		])
	)
	# Focus the link with report articles enabled
	spy.set_configValue(REPORT_ARTICLES, True)
	actualSpeech = _chrome.getSpeechAfterKey("tab")
	_asserts.strings_match(
		actualSpeech,
		SPEECH_CALL_SEP.join([
			SPEECH_SEP.join([
				"Some name.",  # name for article
				"article",  # article role, enabled via report article
				"A bunch of text.",  # description for article
			]),
			SPEECH_SEP.join([
				"Focus Target",  # link content (focus target), should read first
				"link",  # link role
			]),
		]),
		message="focus mode - focus with Report Articles enabled"
	)
| true | true |
f724de73bfb07fa9766f490a464f1f8eb216b233 | 738 | py | Python | tags/migrations/0002_auto_20160704_1112.py | making3/summonerqa | 7ab8472b2d24236ba1e6919fa0f00881f4a3e633 | [
"MIT"
] | null | null | null | tags/migrations/0002_auto_20160704_1112.py | making3/summonerqa | 7ab8472b2d24236ba1e6919fa0f00881f4a3e633 | [
"MIT"
] | null | null | null | tags/migrations/0002_auto_20160704_1112.py | making3/summonerqa | 7ab8472b2d24236ba1e6919fa0f00881f4a3e633 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-04 16:12
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a ``Category`` foreign key and a required ``regex`` field to the ``tag`` model."""
    dependencies = [
        ('tags', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='tag',
            # NOTE(review): capitalised field name 'Category' is unconventional for
            # Django fields — presumably intentional; renaming would need a follow-up
            # migration, so confirm before changing.
            name='Category',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='tags.Category'),
        ),
        migrations.AddField(
            model_name='tag',
            name='regex',
            field=models.CharField(default=None, max_length=100),
            # default=None only backfills existing rows; it is not kept on the model.
            preserve_default=False,
        ),
    ]
| 26.357143 | 124 | 0.611111 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tags', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='tag',
name='Category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='tags.Category'),
),
migrations.AddField(
model_name='tag',
name='regex',
field=models.CharField(default=None, max_length=100),
preserve_default=False,
),
]
| true | true |
f724ded074f8fa3a1a1d5041388c8593fb112856 | 924 | py | Python | cogs/slashes.py | mallusrgreatv2/PyHDISCORD | e414976441cbdb3a57b2c545ab164810bebe2e4b | [
"MIT"
] | 2 | 2021-07-05T12:00:39.000Z | 2021-07-05T12:00:49.000Z | cogs/slashes.py | mallusrgreatv2/PyHDISCORD | e414976441cbdb3a57b2c545ab164810bebe2e4b | [
"MIT"
] | null | null | null | cogs/slashes.py | mallusrgreatv2/PyHDISCORD | e414976441cbdb3a57b2c545ab164810bebe2e4b | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
from discord.ext.commands import cog
import discord_slash
from discord_slash import cog_ext
class Slashes(commands.Cog):
def __init__(self, client) -> None:
self.client: commands.Bot = client
@commands.Cog.listener()
async def on_ready(self):
print(f"[ {self.__class__.__name__} Cog Loaded ]")
@cog_ext.cog_slash(name = "ping", guild_ids=[853316413649190912], description="Bot's latency")
async def ping(self, ctx):
await ctx.send("Pong! {}".format(str(round(self.client.latency))+"ms"))
@cog_ext.cog_slash(name="say", description="say something with the bot", guild_ids=[853316413649190912])
async def say(ctx: discord_slash.SlashContext, *, text: str):
if '@' in text:
await ctx.send("no")
return
await ctx.send(text)
def setup(client):
client.add_cog(Slashes(client)) | 35.538462 | 108 | 0.676407 | import discord
from discord.ext import commands
from discord.ext.commands import cog
import discord_slash
from discord_slash import cog_ext
class Slashes(commands.Cog):
def __init__(self, client) -> None:
self.client: commands.Bot = client
@commands.Cog.listener()
async def on_ready(self):
print(f"[ {self.__class__.__name__} Cog Loaded ]")
@cog_ext.cog_slash(name = "ping", guild_ids=[853316413649190912], description="Bot's latency")
async def ping(self, ctx):
await ctx.send("Pong! {}".format(str(round(self.client.latency))+"ms"))
@cog_ext.cog_slash(name="say", description="say something with the bot", guild_ids=[853316413649190912])
async def say(ctx: discord_slash.SlashContext, *, text: str):
if '@' in text:
await ctx.send("no")
return
await ctx.send(text)
def setup(client):
client.add_cog(Slashes(client)) | true | true |
f724dee757778c7059a8bbb1f08fe86a9affccc9 | 652 | py | Python | manage_index.py | jdoiro3/myPersonalSite | a61245cfdc497a864c58fd0d9eee27a35f0b52f3 | [
"MIT"
] | 2 | 2021-10-05T03:03:34.000Z | 2022-03-15T12:38:07.000Z | manage_index.py | jdoiro3/myPersonalSite | a61245cfdc497a864c58fd0d9eee27a35f0b52f3 | [
"MIT"
] | null | null | null | manage_index.py | jdoiro3/myPersonalSite | a61245cfdc497a864c58fd0d9eee27a35f0b52f3 | [
"MIT"
] | null | null | null | from modules import index
import argparse
commands = ["cleanup", "re-index"]
parser = argparse.ArgumentParser(description='Manager for the Inverted Index.')
parser.add_argument('command', choices=commands, help='Command to perform on index.')
parser.add_argument('--in_s3', action='store_true', help='If passed, the index will be loaded from the S3 bucket')
parser.add_argument('--file_path', nargs='?', const='index.json', help='The file path for the index.')
args = parser.parse_args()
inv_index = index.InvertedIndex(from_file=True, in_s3=args.in_s3, file_path=args.file_path or 'index.json')
if args.command == "cleanup":
inv_index.cleanup() | 46.571429 | 114 | 0.753067 | from modules import index
import argparse
commands = ["cleanup", "re-index"]
parser = argparse.ArgumentParser(description='Manager for the Inverted Index.')
parser.add_argument('command', choices=commands, help='Command to perform on index.')
parser.add_argument('--in_s3', action='store_true', help='If passed, the index will be loaded from the S3 bucket')
parser.add_argument('--file_path', nargs='?', const='index.json', help='The file path for the index.')
args = parser.parse_args()
inv_index = index.InvertedIndex(from_file=True, in_s3=args.in_s3, file_path=args.file_path or 'index.json')
if args.command == "cleanup":
inv_index.cleanup() | true | true |
f724df091556b7dbed963d14802c99783e73424c | 4,049 | py | Python | pajbot/modules/top.py | Troy-Bot/pajbot | 11b7e86ca270d57d4f35226effc3eb16250e2dfc | [
"MIT"
] | null | null | null | pajbot/modules/top.py | Troy-Bot/pajbot | 11b7e86ca270d57d4f35226effc3eb16250e2dfc | [
"MIT"
] | null | null | null | pajbot/modules/top.py | Troy-Bot/pajbot | 11b7e86ca270d57d4f35226effc3eb16250e2dfc | [
"MIT"
] | null | null | null | import logging
from pajbot.managers.db import DBManager
from pajbot.models.command import Command
from pajbot.models.user import User
from pajbot.modules import BaseModule
from pajbot.modules import ModuleSetting
from pajbot.utils import time_since
log = logging.getLogger(__name__)
class TopModule(BaseModule):
    """Chat commands listing the top X users by messages, watch time, offline time or points.

    The four ``top_*`` commands previously duplicated the same query/announce
    logic; it is now factored into the private ``_say_top`` helper.
    """

    ID = __name__.split(".")[-1]
    NAME = "Top commands"
    DESCRIPTION = "Commands that show the top X users of something"
    CATEGORY = "Feature"
    SETTINGS = [
        ModuleSetting(
            key="num_top",
            label="How many people we should list",
            type="number",
            required=True,
            placeholder="min 1, max 5",
            default=3,
            constraints={"min_value": 1, "max_value": 5},
        ),
        ModuleSetting(
            key="enable_topchatters",
            label="Enable the !topchatters command (most messages)",
            type="boolean",
            required=True,
            default=False,
        ),
        ModuleSetting(
            key="enable_topwatchers",
            label="Enable the !topwatchers command (most time spent watching the stream)",
            type="boolean",
            required=True,
            default=False,
        ),
        ModuleSetting(
            key="enable_topoffline",
            label="Enable the !topoffline command (most time spent in offline chat)",
            type="boolean",
            required=True,
            default=False,
        ),
        ModuleSetting(
            key="enable_toppoints",
            label="Enable the !toppoints command (most points)",
            type="boolean",
            required=True,
            default=False,
        ),
    ]

    def _say_top(self, bot, order_column, format_user, label):
        """Announce the top users ranked by ``order_column`` (descending).

        order_column -- User column to rank by, e.g. ``User.points``
        format_user  -- callable rendering one User row into display text
        label        -- noun used in the announcement, e.g. "chatters"
        """
        data = []
        with DBManager.create_session_scope() as db_session:
            top_users = (
                db_session.query(User)
                .filter_by(ignored=False)
                .order_by(order_column.desc())
                .limit(self.settings["num_top"])
            )
            for user in top_users:
                data.append(format_user(user))
        bot.say(f"Top {self.settings['num_top']} {label}: {', '.join(data)}")

    def top_chatters(self, bot, **rest):
        """!topchatters - users with the most chat messages."""
        self._say_top(bot, User.num_lines, lambda user: f"{user} ({user.num_lines})", "chatters")

    def top_watchers(self, bot, **rest):
        """!topwatchers - users with the most time watching the stream online."""
        self._say_top(
            bot,
            User.time_in_chat_online,
            lambda user: f"{user} ({time_since(user.time_in_chat_online.total_seconds(), 0, time_format='short')})",
            "watchers",
        )

    def top_offline(self, bot, **rest):
        """!topoffline - users with the most time spent in offline chat."""
        self._say_top(
            bot,
            User.time_in_chat_offline,
            lambda user: f"{user} ({time_since(user.time_in_chat_offline.total_seconds(), 0, time_format='short')})",
            "offline chatters",
        )

    def top_points(self, bot, **rest):
        """!toppoints - users with the most points."""
        self._say_top(bot, User.points, lambda user: f"{user} ({user.points})", "banks")

    def load_commands(self, **options):
        """Register only the commands that are enabled in the module settings."""
        if self.settings["enable_topchatters"]:
            self.commands["topchatters"] = Command.raw_command(self.top_chatters)
        if self.settings["enable_topwatchers"]:
            self.commands["topwatchers"] = Command.raw_command(self.top_watchers)
        if self.settings["enable_topoffline"]:
            self.commands["topoffline"] = Command.raw_command(self.top_offline)
        if self.settings["enable_toppoints"]:
            self.commands["toppoints"] = Command.raw_command(self.top_points)
| 37.841121 | 138 | 0.605829 | import logging
from pajbot.managers.db import DBManager
from pajbot.models.command import Command
from pajbot.models.user import User
from pajbot.modules import BaseModule
from pajbot.modules import ModuleSetting
from pajbot.utils import time_since
log = logging.getLogger(__name__)
class TopModule(BaseModule):
ID = __name__.split(".")[-1]
NAME = "Top commands"
DESCRIPTION = "Commands that show the top X users of something"
CATEGORY = "Feature"
SETTINGS = [
ModuleSetting(
key="num_top",
label="How many people we should list",
type="number",
required=True,
placeholder="min 1, max 5",
default=3,
constraints={"min_value": 1, "max_value": 5},
),
ModuleSetting(
key="enable_topchatters",
label="Enable the !topchatters command (most messages)",
type="boolean",
required=True,
default=False,
),
ModuleSetting(
key="enable_topwatchers",
label="Enable the !topwatchers command (most time spent watching the stream)",
type="boolean",
required=True,
default=False,
),
ModuleSetting(
key="enable_topoffline",
label="Enable the !topoffline command (most time spent in offline chat)",
type="boolean",
required=True,
default=False,
),
ModuleSetting(
key="enable_toppoints",
label="Enable the !toppoints command (most points)",
type="boolean",
required=True,
default=False,
),
]
def top_chatters(self, bot, **rest):
data = []
with DBManager.create_session_scope() as db_session:
for user in db_session.query(User).filter_by(ignored=False).order_by(User.num_lines.desc()).limit(self.settings["num_top"]):
data.append(f"{user} ({user.num_lines})")
bot.say(f"Top {self.settings['num_top']} chatters: {', '.join(data)}")
def top_watchers(self, bot, **rest):
data = []
with DBManager.create_session_scope() as db_session:
for user in (
db_session.query(User).filter_by(ignored=False).order_by(User.time_in_chat_online.desc()).limit(self.settings["num_top"])
):
data.append(f"{user} ({time_since(user.time_in_chat_online.total_seconds(), 0, time_format='short')})")
bot.say(f"Top {self.settings['num_top']} watchers: {', '.join(data)}")
def top_offline(self, bot, **rest):
data = []
with DBManager.create_session_scope() as db_session:
for user in (
db_session.query(User).filter_by(ignored=False).order_by(User.time_in_chat_offline.desc()).limit(self.settings["num_top"])
):
data.append(f"{user} ({time_since(user.time_in_chat_offline.total_seconds(), 0, time_format='short')})")
bot.say(f"Top {self.settings['num_top']} offline chatters: {', '.join(data)}")
def top_points(self, bot, **rest):
data = []
with DBManager.create_session_scope() as db_session:
for user in db_session.query(User).filter_by(ignored=False).order_by(User.points.desc()).limit(self.settings["num_top"]):
data.append(f"{user} ({user.points})")
bot.say(f"Top {self.settings['num_top']} banks: {', '.join(data)}")
def load_commands(self, **options):
if self.settings["enable_topchatters"]:
self.commands["topchatters"] = Command.raw_command(self.top_chatters)
if self.settings["enable_topwatchers"]:
self.commands["topwatchers"] = Command.raw_command(self.top_watchers)
if self.settings["enable_topoffline"]:
self.commands["topoffline"] = Command.raw_command(self.top_offline)
if self.settings["enable_toppoints"]:
self.commands["toppoints"] = Command.raw_command(self.top_points)
| true | true |
f724df327e8441ac179a887f4d64a5bd5eb292a3 | 3,207 | py | Python | jigs/hpcc/source/lysozyme_we.py | gitter-badger/wepy-1 | 9bc619aeae178ad5d10f658fae2abfd2c7aeb18a | [
"MIT"
] | 35 | 2017-08-22T15:39:06.000Z | 2022-03-20T15:17:52.000Z | jigs/hpcc/source/lysozyme_we.py | gitter-badger/wepy-1 | 9bc619aeae178ad5d10f658fae2abfd2c7aeb18a | [
"MIT"
] | 33 | 2017-10-02T22:04:45.000Z | 2022-03-02T22:19:08.000Z | jigs/hpcc/source/lysozyme_we.py | stxinsite/wepy | 352d4c1316b20e839aae8824eedd66f0f2d0b456 | [
"MIT"
] | 17 | 2018-07-14T15:33:30.000Z | 2022-01-18T16:30:55.000Z | from pympler.asizeof import asizeof
def get_size(obj):
    """Return the deep size of ``obj`` in megabytes (via pympler's asizeof)."""
    size_in_bytes = asizeof(obj)
    return size_in_bytes / 1000000
if __name__ == "__main__":
    # Run a weighted-ensemble simulation of lysozyme in implicit solvent,
    # fully parameterized from the command line (see usage string below).

    import os
    import shutil
    import sys
    import logging

    from pathlib import Path

    from wepy_tools.monitoring.prometheus import SimMonitor
    from wepy_tools.sim_makers.openmm.lysozyme import LysozymeImplicitOpenMMSimMaker

    logging.getLogger().setLevel(logging.DEBUG)

    # Robustness fix: previously ``sys.argv[1]`` raised IndexError when the
    # script was run without arguments; now any wrong arity prints usage.
    if len(sys.argv) != 9 or sys.argv[1] in ("-h", "--help"):
        print("arguments: n_cycles, n_steps, n_walkers, n_workers, platform, resampler, work_mapper, tag")
        sys.exit()
    else:
        n_cycles = int(sys.argv[1])
        n_steps = int(sys.argv[2])
        n_walkers = int(sys.argv[3])
        n_workers = int(sys.argv[4])
        platform = sys.argv[5]
        resampler = sys.argv[6]
        work_mapper = sys.argv[7]
        tag = sys.argv[8]

    print("Number of steps: {}".format(n_steps))
    print("Number of cycles: {}".format(n_cycles))

    output_dir = Path('_output')
    result_dir = output_dir / 'we_lysozyme'

    # make the results directory if not already made (wiping any old results)
    try:
        shutil.rmtree(result_dir)
    except FileNotFoundError:
        pass

    os.makedirs(result_dir, exist_ok=True)

    sim_maker = LysozymeImplicitOpenMMSimMaker()

    apparatus = sim_maker.make_apparatus(
        integrator='LangevinIntegrator',
        resampler=resampler,
        bc='UnbindingBC',
        platform=platform,
    )

    # One worker per device id; the mapper is selected by name.
    work_mapper_spec = work_mapper
    work_mapper_class = None
    work_mapper_params = {
        'platform' : platform,
        'device_ids' : [str(i) for i in range(n_workers)],
    }

    monitor_class = SimMonitor
    monitor_params = {
        'tag' : tag,
        'port' : 9001,
    }

    config = sim_maker.make_configuration(apparatus,
                                          work_mapper_class=work_mapper_class,
                                          work_mapper_spec=work_mapper_spec,
                                          work_mapper_params=work_mapper_params,
                                          platform=platform,
                                          work_dir=str(result_dir),
                                          monitor_class=monitor_class,
                                          monitor_params=monitor_params,
                                          )

    # Bug fix: removed a leftover ``breakpoint()`` here which dropped every
    # run into the interactive debugger.

    ## set up profiling and initial stats
    print("Orchestration objects")
    print("----------------------------------------")
    print(f"Sim maker size: {get_size(sim_maker)} Mb")
    print(f"Apparatus size: {get_size(apparatus)} Mb")
    print(f"Configuration size: {get_size(config)} Mb")
    print("----------------------------------------\n")

    sim_manager = sim_maker.make_sim_manager(n_walkers, apparatus, config)

    print("Starting run")
    print("----------------------------------------")
    sim_manager.run_simulation(n_cycles, n_steps,
                               num_workers=n_workers)
    print("----------------------------------------")
    print("Finished run")
def get_size(obj):
return asizeof(obj) / 1000000
if __name__ == "__main__":

    import os
    import shutil
    import sys
    import logging

    from pathlib import Path

    from wepy_tools.monitoring.prometheus import SimMonitor
    from wepy_tools.sim_makers.openmm.lysozyme import LysozymeImplicitOpenMMSimMaker

    logging.getLogger().setLevel(logging.DEBUG)

    # Show usage for -h/--help, and also when no arguments were given at
    # all (previously sys.argv[1] was indexed unconditionally, raising
    # IndexError before the help check could run).
    if len(sys.argv) < 2 or sys.argv[1] in ("-h", "--help"):
        print("arguments: n_cycles, n_steps, n_walkers, n_workers, platform, resampler, work_mapper, tag")
        sys.exit()

    if len(sys.argv) < 9:
        sys.exit("expected 8 arguments; run with -h for usage")

    # Positional CLI arguments.
    n_cycles = int(sys.argv[1])
    n_steps = int(sys.argv[2])
    n_walkers = int(sys.argv[3])
    n_workers = int(sys.argv[4])
    platform = sys.argv[5]
    resampler = sys.argv[6]
    work_mapper = sys.argv[7]
    tag = sys.argv[8]

    print("Number of steps: {}".format(n_steps))
    print("Number of cycles: {}".format(n_cycles))

    # Start from a clean results directory on every run.
    output_dir = Path('_output')
    result_dir = output_dir / 'we_lysozyme'
    try:
        shutil.rmtree(result_dir)
    except FileNotFoundError:
        pass
    os.makedirs(result_dir, exist_ok=True)

    sim_maker = LysozymeImplicitOpenMMSimMaker()

    apparatus = sim_maker.make_apparatus(
        integrator='LangevinIntegrator',
        resampler=resampler,
        bc='UnbindingBC',
        platform=platform,
    )

    # Work mapper: resolved from its string spec; one device id per worker.
    work_mapper_spec = work_mapper
    work_mapper_class = None
    work_mapper_params = {
        'platform': platform,
        'device_ids': [str(i) for i in range(n_workers)],
    }

    # Prometheus-based monitoring of the simulation.
    monitor_class = SimMonitor
    monitor_params = {
        'tag': tag,
        'port': 9001,
    }

    config = sim_maker.make_configuration(
        apparatus,
        work_mapper_class=work_mapper_class,
        work_mapper_spec=work_mapper_spec,
        work_mapper_params=work_mapper_params,
        platform=platform,
        work_dir=str(result_dir),
        monitor_class=monitor_class,
        monitor_params=monitor_params,
    )

    # BUG FIX: a leftover `breakpoint()` debugger trap was removed here;
    # it dropped every run into pdb at this point.

    # Report rough in-memory sizes of the orchestration objects.
    print("----------------------------------------")
    print(f"Sim maker size: {get_size(sim_maker)} Mb")
    print(f"Apparatus size: {get_size(apparatus)} Mb")
    print(f"Configuration size: {get_size(config)} Mb")
    print("----------------------------------------\n")

    sim_manager = sim_maker.make_sim_manager(n_walkers, apparatus, config)

    print("Starting run")
    print("----------------------------------------")
    sim_manager.run_simulation(n_cycles, n_steps,
                               num_workers=n_workers)
    print("----------------------------------------")
    print("Finished run")
| true | true |
f724e052a84d5bf01809f05e2ce2708627528d63 | 6,634 | py | Python | sendSMSSkillLambda/package/ask_sdk_model/session_ended_request.py | shneydor/aws-alexa-lambda-workshop | 0fa6b7067b04fc85c46b9ce1c2cc04554ed5baf4 | [
"Apache-2.0"
] | null | null | null | sendSMSSkillLambda/package/ask_sdk_model/session_ended_request.py | shneydor/aws-alexa-lambda-workshop | 0fa6b7067b04fc85c46b9ce1c2cc04554ed5baf4 | [
"Apache-2.0"
] | null | null | null | sendSMSSkillLambda/package/ask_sdk_model/session_ended_request.py | shneydor/aws-alexa-lambda-workshop | 0fa6b7067b04fc85c46b9ce1c2cc04554ed5baf4 | [
"Apache-2.0"
] | 1 | 2019-10-11T17:15:20.000Z | 2019-10-11T17:15:20.000Z | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
from ask_sdk_model.request import Request
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
from ask_sdk_model.session_ended_error import SessionEndedError
from ask_sdk_model.session_ended_reason import SessionEndedReason
class SessionEndedRequest(Request):
    """Request notifying a skill that a currently open session was closed.

    Alexa sends a SessionEndedRequest when the user says "exit", when the
    user does not respond (or says something matching no defined intent)
    while the device is listening, or when an error occurs.

    :param request_id: Represents the unique identifier for the specific request.
    :type request_id: (optional) str
    :param timestamp: Provides the date and time when Alexa sent the request
        as an ISO 8601 formatted string. Used to verify the request when
        hosting your skill as a web service.
    :type timestamp: (optional) datetime
    :param locale: A string indicating the user's locale. For example: en-US.
    :type locale: (optional) str
    :param reason: Describes why the session ended.
    :type reason: (optional) ask_sdk_model.session_ended_reason.SessionEndedReason
    :param error: An error object providing more information about the error that occurred.
    :type error: (optional) ask_sdk_model.session_ended_error.SessionEndedError
    """

    # Attribute name -> declared type, consumed by the SDK (de)serializer.
    deserialized_types = {
        'object_type': 'str',
        'request_id': 'str',
        'timestamp': 'datetime',
        'locale': 'str',
        'reason': 'ask_sdk_model.session_ended_reason.SessionEndedReason',
        'error': 'ask_sdk_model.session_ended_error.SessionEndedError'
    }  # type: Dict

    # Python attribute name -> wire (JSON) key.
    attribute_map = {
        'object_type': 'type',
        'request_id': 'requestId',
        'timestamp': 'timestamp',
        'locale': 'locale',
        'reason': 'reason',
        'error': 'error'
    }  # type: Dict

    def __init__(self, request_id=None, timestamp=None, locale=None, reason=None, error=None):
        # type: (Optional[str], Optional[datetime], Optional[str], Optional[SessionEndedReason], Optional[SessionEndedError]) -> None
        """Initialize the request, forwarding the shared fields to Request."""
        self.__discriminator_value = "SessionEndedRequest"  # type: str

        self.object_type = self.__discriminator_value
        super(SessionEndedRequest, self).__init__(
            object_type=self.__discriminator_value,
            request_id=request_id,
            timestamp=timestamp,
            locale=locale)
        self.reason = reason
        self.error = error

    def to_dict(self):
        # type: () -> Dict[str, object]
        """Return the model properties as a dict.

        Nested models serialize through their own ``to_dict``, enum members
        through ``.value``; lists and dicts are converted element-wise.
        """
        def _convert(item):
            # Element-level serializer for list and dict members.
            if hasattr(item, "to_dict"):
                return item.to_dict()
            if isinstance(item, Enum):
                return item.value
            return item

        result = {}  # type: Dict
        for attr in self.deserialized_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [_convert(element) for element in value]
            elif isinstance(value, Enum):
                result[attr] = value.value
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: _convert(val) for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        # type: () -> str
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        # type: () -> str
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        # type: (object) -> bool
        """Return True when both objects are SessionEndedRequests with equal state."""
        return isinstance(other, SessionEndedRequest) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        # type: (object) -> bool
        """Return True when both objects are not equal."""
        return not self == other
| 47.385714 | 527 | 0.668074 |
import pprint
import re
import six
import typing
from enum import Enum
from ask_sdk_model.request import Request
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
from ask_sdk_model.session_ended_error import SessionEndedError
from ask_sdk_model.session_ended_reason import SessionEndedReason
class SessionEndedRequest(Request):
    """Request telling a skill that an open session was closed (user said
    "exit", no matching response while listening, or an error occurred)."""

    # Attribute name -> declared type, consumed by the SDK (de)serializer.
    deserialized_types = {
        'object_type': 'str',
        'request_id': 'str',
        'timestamp': 'datetime',
        'locale': 'str',
        'reason': 'ask_sdk_model.session_ended_reason.SessionEndedReason',
        'error': 'ask_sdk_model.session_ended_error.SessionEndedError'
    }

    # Python attribute name -> wire (JSON) key.
    attribute_map = {
        'object_type': 'type',
        'request_id': 'requestId',
        'timestamp': 'timestamp',
        'locale': 'locale',
        'reason': 'reason',
        'error': 'error'
    }

    def __init__(self, request_id=None, timestamp=None, locale=None, reason=None, error=None):
        """Store the discriminator and forward shared fields to Request."""
        self.__discriminator_value = "SessionEndedRequest"
        self.object_type = self.__discriminator_value
        super(SessionEndedRequest, self).__init__(object_type=self.__discriminator_value, request_id=request_id, timestamp=timestamp, locale=locale)
        self.reason = reason
        self.error = error

    def to_dict(self):
        """Return the model properties as a dict, serializing nested models
        via to_dict, enums via .value, and lists/dicts element-wise."""
        result = {}
        for attr, _ in six.iteritems(self.deserialized_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else
                    x.value if isinstance(x, Enum) else x,
                    value
                ))
            elif isinstance(value, Enum):
                result[attr] = value.value
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Items that are neither models nor enums pass through as-is.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else
                    (item[0], item[1].value)
                    if isinstance(item[1], Enum) else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string form of to_dict()."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True when both objects are SessionEndedRequests with equal state."""
        if not isinstance(other, SessionEndedRequest):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| true | true |
f724e0dae8457b34df64dc725e37573bd868d2fc | 1,108 | py | Python | gym-unity/setup.py | alexcercos/ML-Agents | c096c36b0348e3673b687499e17891cd35168939 | [
"Apache-2.0"
] | 1 | 2019-12-29T13:40:16.000Z | 2019-12-29T13:40:16.000Z | gym-unity/setup.py | alexcercos/ML-Agents | c096c36b0348e3673b687499e17891cd35168939 | [
"Apache-2.0"
] | null | null | null | gym-unity/setup.py | alexcercos/ML-Agents | c096c36b0348e3673b687499e17891cd35168939 | [
"Apache-2.0"
] | 2 | 2020-08-16T14:18:16.000Z | 2022-03-18T12:22:54.000Z | #!/usr/bin/env python
import os
import sys
from setuptools import setup, find_packages
from setuptools.command.install import install
VERSION = "0.11.0"
class VerifyVersionCommand(install):
    """Custom ``install`` command verifying that the CI git tag matches VERSION.

    See https://circleci.com/blog/continuously-deploying-python-packages-to-pypi-with-circleci/
    """
    description = "verify that the git tag matches our version"

    def run(self):
        # CIRCLE_TAG is set by CircleCI on tagged builds; abort the build
        # when it disagrees with the package version.
        circle_tag = os.getenv("CIRCLE_TAG")
        if circle_tag != VERSION:
            sys.exit(
                "Git tag: {0} does not match the version of this app: {1}".format(
                    circle_tag, VERSION))
setup(
    name="gym_unity",
    version=VERSION,
    description="Unity Machine Learning Agents Gym Interface",
    license="Apache License 2.0",
    author="Unity Technologies",
    author_email="ML-Agents@unity3d.com",
    url="https://github.com/Unity-Technologies/ml-agents",
    packages=find_packages(),
    # Pin mlagents_envs to this exact release so the gym wrapper and the
    # envs package stay in lockstep.
    install_requires=["gym", "mlagents_envs=={}".format(VERSION)],
    # `python setup.py verify` fails the build when CIRCLE_TAG != VERSION.
    cmdclass={"verify": VerifyVersionCommand},
)
| 27.02439 | 95 | 0.666968 |
import os
import sys
from setuptools import setup, find_packages
from setuptools.command.install import install
VERSION = "0.11.0"
class VerifyVersionCommand(install):
    """setup.py `verify` command: abort when the CircleCI git tag does not
    match VERSION."""
    description = "verify that the git tag matches our version"

    def run(self):
        # CIRCLE_TAG is set by CircleCI on tagged builds.
        tag = os.getenv("CIRCLE_TAG")
        if tag != VERSION:
            info = "Git tag: {0} does not match the version of this app: {1}".format(
                tag, VERSION
            )
            sys.exit(info)


setup(
    name="gym_unity",
    version=VERSION,
    description="Unity Machine Learning Agents Gym Interface",
    license="Apache License 2.0",
    author="Unity Technologies",
    author_email="ML-Agents@unity3d.com",
    url="https://github.com/Unity-Technologies/ml-agents",
    packages=find_packages(),
    # Pin mlagents_envs to this exact release.
    install_requires=["gym", "mlagents_envs=={}".format(VERSION)],
    cmdclass={"verify": VerifyVersionCommand},
)
| true | true |
f724e1095b8e197a2c35d40a6c7744239f4d58e6 | 2,426 | py | Python | webapp/web_app.py | baishalidutta/Comments-Toxicity-Detection | c56cd2eb02983c418cbe91fc4a2a257067cdcb89 | [
"Apache-2.0"
] | 7 | 2021-01-11T05:57:18.000Z | 2022-01-14T21:51:54.000Z | webapp/web_app.py | baishalidutta/Comments-Toxicity-Detection | c56cd2eb02983c418cbe91fc4a2a257067cdcb89 | [
"Apache-2.0"
] | 1 | 2021-04-09T17:00:57.000Z | 2021-04-09T17:00:57.000Z | webapp/web_app.py | baishalidutta/Comments-Toxicity-Detection | c56cd2eb02983c418cbe91fc4a2a257067cdcb89 | [
"Apache-2.0"
] | 1 | 2021-02-20T23:47:26.000Z | 2021-02-20T23:47:26.000Z | __author__ = "Baishali Dutta"
__copyright__ = "Copyright (C) 2021 Baishali Dutta"
__license__ = "Apache License 2.0"
__version__ = "0.1"
# -------------------------------------------------------------------------
# Import Libraries
# -------------------------------------------------------------------------
import pickle
import gradio as gr
from keras.models import load_model
from keras.preprocessing.sequence import pad_sequences
from source.config import *
from source.data_cleaning import clean_text
# -------------------------------------------------------------------------
# Load Existing Model and Tokenizer
# -------------------------------------------------------------------------
# load the trained model
# NOTE: loaded once at import time; a missing/corrupt model file raises here,
# before the web app starts.
rnn_model = load_model(MODEL_LOC)

# load the tokenizer (pickled Keras Tokenizer fitted during training)
with open(TOKENIZER_LOC, 'rb') as handle:
    tokenizer = pickle.load(handle)
# -------------------------------------------------------------------------
# Main Application
# -------------------------------------------------------------------------
def make_prediction(input_comment):
    """Score a raw comment against the seven toxicity categories.

    :param input_comment: free-text comment to classify
    :return: dict mapping each category label to its predicted probability,
        rendered as a string (the format consumed by the gradio Label output)
    """
    words = clean_text(input_comment).split(" ")
    # Tokenize word-by-word, then flatten the per-word token lists into a
    # single sequence forming a batch of one.
    token_lists = tokenizer.texts_to_sequences(words)
    flat_sequence = [token for tokens in token_lists for token in tokens]
    model_input = pad_sequences([flat_sequence], maxlen=MAX_SEQUENCE_LENGTH)
    scores = rnn_model.predict(model_input, len(model_input), verbose=1)[0]
    labels = ("Toxic", "Very Toxic", "Obscene", "Threat", "Insult", "Hate", "Neutral")
    return {label: str(score) for label, score in zip(labels, scores)}
# Gradio UI: one multi-line textbox in, a label widget out.
comment = gr.inputs.Textbox(lines=17, placeholder="Enter your comment here")
title = "Comments Toxicity Detection"
description = "This application uses a Bidirectional Long Short-Term Memory (LSTM) Recurrent Neural Network (RNN) " \
              "model to predict the inappropriateness of a comment"

# Launching at import time starts the web server when this module is run.
gr.Interface(fn=make_prediction,
             inputs=comment,
             outputs="label",
             title=title,
             description=description) \
    .launch()
| 33.694444 | 117 | 0.528854 | __author__ = "Baishali Dutta"
__copyright__ = "Copyright (C) 2021 Baishali Dutta"
__license__ = "Apache License 2.0"
__version__ = "0.1"
import pickle
import gradio as gr
from keras.models import load_model
from keras.preprocessing.sequence import pad_sequences
from source.config import *
from source.data_cleaning import clean_text
# Model and tokenizer are loaded once at import time.
rnn_model = load_model(MODEL_LOC)

with open(TOKENIZER_LOC, 'rb') as handle:
    tokenizer = pickle.load(handle)


def make_prediction(input_comment):
    """Return per-category toxicity probabilities (as strings) for a comment."""
    input_comment = clean_text(input_comment)
    input_comment = input_comment.split(" ")
    sequences = tokenizer.texts_to_sequences(input_comment)
    # Flatten the per-word token lists into one sequence (batch of one).
    sequences = [[item for sublist in sequences for item in sublist]]
    padded_data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
    result = rnn_model.predict(padded_data, len(padded_data), verbose=1)
    return \
        {
            "Toxic": str(result[0][0]),
            "Very Toxic": str(result[0][1]),
            "Obscene": str(result[0][2]),
            "Threat": str(result[0][3]),
            "Insult": str(result[0][4]),
            "Hate": str(result[0][5]),
            "Neutral": str(result[0][6])
        }


# Gradio UI wiring; launches the web server at import time.
comment = gr.inputs.Textbox(lines=17, placeholder="Enter your comment here")
title = "Comments Toxicity Detection"
description = "This application uses a Bidirectional Long Short-Term Memory (LSTM) Recurrent Neural Network (RNN) " \
              "model to predict the inappropriateness of a comment"
gr.Interface(fn=make_prediction,
             inputs=comment,
             outputs="label",
             title=title,
             description=description) \
    .launch()
| true | true |
f724e1cf06b432b67c696656847168d974deac36 | 2,657 | py | Python | test/programytest/parser/template/graph_tests/test_eval.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | 5 | 2018-08-21T00:13:45.000Z | 2018-09-01T20:00:55.000Z | test/programytest/parser/template/graph_tests/test_eval.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | 1 | 2018-09-12T18:30:17.000Z | 2018-09-12T18:30:17.000Z | test/programytest/parser/template/graph_tests/test_eval.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | 5 | 2018-08-21T00:08:36.000Z | 2018-09-23T06:11:04.000Z | import xml.etree.ElementTree as ET
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.word import TemplateWordNode
from programy.parser.template.nodes.get import TemplateGetNode
from programy.parser.template.nodes.eval import TemplateEvalNode
from programytest.parser.template.graph_tests.graph_test_client import TemplateGraphTestClient
class TemplateGraphEvalTests(TemplateGraphTestClient):
    """Graph-parsing tests for the <eval> template node."""

    def test_eval_node_from_xml_single_word(self):
        """A single-word <eval> parses into one TemplateEvalNode child."""
        template = ET.fromstring("""
            <template>
                <eval>Text</eval>
            </template>
            """)
        root = self._graph.parse_template_expression(template)
        self.assertIsNotNone(root)
        self.assertIsInstance(root, TemplateNode)
        self.assertIsNotNone(root.children)
        self.assertEqual(len(root.children), 1)
        node = root.children[0]
        self.assertIsNotNone(node)
        self.assertIsInstance(node, TemplateEvalNode)

    def test_eval_node_from_xml_multi_words(self):
        """A multi-word <eval> parses into word children in order."""
        template = ET.fromstring("""
            <template>
                <eval>Some Text</eval>
            </template>
            """)
        root = self._graph.parse_template_expression(template)
        self.assertIsNotNone(root)
        self.assertIsInstance(root, TemplateNode)
        self.assertIsNotNone(root.children)
        self.assertEqual(len(root.children), 1)
        node = root.children[0]
        self.assertIsNotNone(node)
        self.assertIsInstance(node, TemplateEvalNode)
        self.assertEqual(len(node.children), 2)
        self.assertIsInstance(node.children[0], TemplateWordNode)
        self.assertEqual(node.children[0].word, "Some")
        self.assertIsInstance(node.children[1], TemplateWordNode)
        self.assertEqual(node.children[1].word, "Text")

    def test_eval_node_from_xml_words_and_get(self):
        """<eval> mixing words and a <get> child parses all three children.

        BUG FIX: this test previously reused the name
        test_eval_node_from_xml_multi_words, which shadowed the
        words-only test above so it was never collected or run.
        """
        template = ET.fromstring("""
            <template>
                <eval>Some <get name="SomeGet" /> Text</eval>
            </template>
            """)
        root = self._graph.parse_template_expression(template)
        self.assertIsNotNone(root)
        self.assertIsInstance(root, TemplateNode)
        self.assertIsNotNone(root.children)
        self.assertEqual(len(root.children), 1)
        node = root.children[0]
        self.assertIsNotNone(node)
        self.assertIsInstance(node, TemplateEvalNode)
        self.assertEqual(len(node.children), 3)
        self.assertIsInstance(node.children[0], TemplateWordNode)
        self.assertEqual(node.children[0].word, "Some")
        self.assertIsInstance(node.children[1], TemplateGetNode)
        self.assertIsInstance(node.children[2], TemplateWordNode)
        self.assertEqual(node.children[2].word, "Text")
| 36.39726 | 94 | 0.705307 | import xml.etree.ElementTree as ET
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.word import TemplateWordNode
from programy.parser.template.nodes.get import TemplateGetNode
from programy.parser.template.nodes.eval import TemplateEvalNode
from programytest.parser.template.graph_tests.graph_test_client import TemplateGraphTestClient
class TemplateGraphEvalTests(TemplateGraphTestClient):
    """Graph-parsing tests for the <eval> template node."""
    # NOTE(review): the last two test methods share the name
    # test_eval_node_from_xml_multi_words, so the second definition shadows
    # the first and the words-only variant never runs. Renaming one would
    # restore it — flagged only, code left untouched here.

    def test_eval_node_from_xml_single_word(self):
        # <eval>Text</eval> should parse to one TemplateEvalNode child.
        template = ET.fromstring("""
            <template>
                <eval>Text</eval>
            </template>
            """)
        root = self._graph.parse_template_expression(template)
        self.assertIsNotNone(root)
        self.assertIsInstance(root, TemplateNode)
        self.assertIsNotNone(root.children)
        self.assertEqual(len(root.children), 1)
        node = root.children[0]
        self.assertIsNotNone(node)
        self.assertIsInstance(node, TemplateEvalNode)

    def test_eval_node_from_xml_multi_words(self):
        # Shadowed by the method of the same name below (see NOTE above).
        template = ET.fromstring("""
            <template>
                <eval>Some Text</eval>
            </template>
            """)
        root = self._graph.parse_template_expression(template)
        self.assertIsNotNone(root)
        self.assertIsInstance(root, TemplateNode)
        self.assertIsNotNone(root.children)
        self.assertEqual(len(root.children), 1)
        node = root.children[0]
        self.assertIsNotNone(node)
        self.assertIsInstance(node, TemplateEvalNode)
        self.assertEqual(len(node.children), 2)
        self.assertIsInstance(node.children[0], TemplateWordNode)
        self.assertEqual(node.children[0].word, "Some")
        self.assertIsInstance(node.children[1], TemplateWordNode)
        self.assertEqual(node.children[1].word, "Text")

    def test_eval_node_from_xml_multi_words(self):
        # Words mixed with a <get> child: expects three children in order.
        template = ET.fromstring("""
            <template>
                <eval>Some <get name="SomeGet" /> Text</eval>
            </template>
            """)
        root = self._graph.parse_template_expression(template)
        self.assertIsNotNone(root)
        self.assertIsInstance(root, TemplateNode)
        self.assertIsNotNone(root.children)
        self.assertEqual(len(root.children), 1)
        node = root.children[0]
        self.assertIsNotNone(node)
        self.assertIsInstance(node, TemplateEvalNode)
        self.assertEqual(len(node.children), 3)
        self.assertIsInstance(node.children[0], TemplateWordNode)
        self.assertEqual(node.children[0].word, "Some")
        self.assertIsInstance(node.children[1], TemplateGetNode)
        self.assertIsInstance(node.children[2], TemplateWordNode)
        self.assertEqual(node.children[2].word, "Text")
| true | true |
f724e27067df0d8b936028ff1d33b38c5cfba530 | 462 | py | Python | djangoapp_cloudedbats_bat_activity/urls.py | cloudedbats/cloudedbats_web_archive | 39e571aa88efd149fd07b4ecc33207af44276c9b | [
"MIT"
] | null | null | null | djangoapp_cloudedbats_bat_activity/urls.py | cloudedbats/cloudedbats_web_archive | 39e571aa88efd149fd07b4ecc33207af44276c9b | [
"MIT"
] | null | null | null | djangoapp_cloudedbats_bat_activity/urls.py | cloudedbats/cloudedbats_web_archive | 39e571aa88efd149fd07b4ecc33207af44276c9b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Project: http://cloudedbats.org
# Copyright (c) 2016 Arnold Andreasson
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
from django.conf.urls import url, include
# import cloudedbats_django.djangoapp_cloudedbats_species.views as species_views
import djangoapp_cloudedbats_bat_activity.views as bat_activity_views
urlpatterns = [
    # Catch-all: every path under this app is routed to the bat_activity view.
    # NOTE(review): django.conf.urls.url is removed in Django 4+;
    # re_path/path would be the modern equivalent — depends on the project's
    # Django version, confirm before changing.
    url(r'^', bat_activity_views.bat_activity),
]
| 28.875 | 80 | 0.772727 |
from django.conf.urls import url, include
import djangoapp_cloudedbats_bat_activity.views as bat_activity_views
urlpatterns = [
    # Catch-all route to the bat_activity view.
    url(r'^', bat_activity_views.bat_activity),
]
| true | true |
f724e28c80153996114878fb2122ab04143fb7c4 | 5,426 | py | Python | tests/opentracer/core/test_span.py | brettlangdon/dd-trace-py | 95e2641d734669719ca07841de58e233cb0f49e9 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/opentracer/core/test_span.py | brettlangdon/dd-trace-py | 95e2641d734669719ca07841de58e233cb0f49e9 | [
"Apache-2.0",
"BSD-3-Clause"
] | 3 | 2021-10-07T02:22:59.000Z | 2021-12-15T02:15:48.000Z | tests/opentracer/core/test_span.py | depop/dd-trace-py | 95e2641d734669719ca07841de58e233cb0f49e9 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2020-09-28T06:20:53.000Z | 2020-09-28T06:20:53.000Z | import pytest
from ddtrace.opentracer.span import Span
from tests.utils import DummyTracer
@pytest.fixture
def nop_tracer():
    """OpenTracing tracer whose backing tracer is a no-op DummyTracer."""
    from ddtrace.opentracer import Tracer

    ot_tracer = Tracer(service_name="mysvc", config={})
    # Swap in the dummy tracer shared by the primary test suite so no
    # spans are actually submitted anywhere.
    ot_tracer._tracer = DummyTracer()
    return ot_tracer
@pytest.fixture
def nop_span_ctx():
    """Bare span context pre-marked as sampled (AUTO_KEEP priority)."""
    from ddtrace.constants import AUTO_KEEP
    from ddtrace.opentracer.span_context import SpanContext

    return SpanContext(sampling_priority=AUTO_KEEP)
@pytest.fixture
def nop_span(nop_tracer, nop_span_ctx):
    """A fresh OpenTracing Span built from the two fixtures above."""
    return Span(nop_tracer, nop_span_ctx, "my_op_name")
class TestSpan(object):
    """Test the Datadog OpenTracing Span implementation."""

    def test_init(self, nop_tracer, nop_span_ctx):
        """Very basic test for skeleton code"""
        span = Span(nop_tracer, nop_span_ctx, "my_op_name")
        assert not span.finished

    def test_tags(self, nop_span):
        """Set a tag and get it back."""
        # Numeric tags surface as metrics on the wrapped dd span.
        nop_span.set_tag("test", 23)
        assert nop_span._get_metric("test") == 23

    def test_set_baggage(self, nop_span):
        """Test setting baggage."""
        # set_baggage_item is fluent: it returns the span itself.
        r = nop_span.set_baggage_item("test", 23)
        assert r is nop_span
        r = nop_span.set_baggage_item("1", 1).set_baggage_item("2", 2)
        assert r is nop_span

    def test_get_baggage(self, nop_span):
        """Test setting and getting baggage."""
        # test a single item
        nop_span.set_baggage_item("test", 23)
        assert int(nop_span.get_baggage_item("test")) == 23

        # test multiple items
        nop_span.set_baggage_item("1", "1").set_baggage_item("2", 2)
        assert int(nop_span.get_baggage_item("test")) == 23
        assert nop_span.get_baggage_item("1") == "1"
        assert int(nop_span.get_baggage_item("2")) == 2

    def test_log_kv(self, nop_span):
        """Ensure logging values doesn't break anything."""
        # just log a bunch of values
        nop_span.log_kv({"myval": 2})
        nop_span.log_kv({"myval2": 3})
        nop_span.log_kv({"myval3": 5})
        nop_span.log_kv({"myval": 2})

    def test_log_dd_kv(self, nop_span):
        """Ensure keys that can be handled by our impl. are indeed handled."""
        import traceback

        from ddtrace.ext import errors

        stack_trace = str(traceback.format_stack())
        nop_span.log_kv(
            {
                "event": "error",
                "error": 3,
                "message": "my error message",
                "stack": stack_trace,
            }
        )

        # Ensure error flag is set...
        assert nop_span._dd_span.error
        # ...and that error tags are set with the correct key
        assert nop_span._get_tag(errors.ERROR_STACK) == stack_trace
        assert nop_span._get_tag(errors.ERROR_MSG) == "my error message"
        assert nop_span._get_metric(errors.ERROR_TYPE) == 3

    def test_operation_name(self, nop_span):
        """Sanity check for setting the operation name."""
        # just try setting the operation name
        nop_span.set_operation_name("new_op_name")
        assert nop_span._dd_span.name == "new_op_name"

    def test_context_manager(self, nop_span):
        """Test the span context manager."""
        import time

        assert not nop_span.finished

        # run the context manager but since the span has not been added
        # to the span context, we will not get any traces
        with nop_span:
            time.sleep(0.005)

        # span should be finished when the context manager exits
        assert nop_span.finished

        # there should be no traces (see above comment)
        spans = nop_span.tracer._tracer.pop()
        assert len(spans) == 0

    def test_immutable_span_context(self, nop_span):
        """Ensure span contexts are immutable."""
        before_ctx = nop_span._context
        nop_span.set_baggage_item("key", "value")
        after_ctx = nop_span._context
        # should be different contexts
        assert before_ctx is not after_ctx
class TestSpanCompatibility(object):
    """Ensure our opentracer spans features correspond to datadog span features."""

    def test_set_tag(self, nop_span):
        # Plain numeric tags land as metrics on the underlying dd span.
        nop_span.set_tag("test", 2)
        assert nop_span._get_metric("test") == 2

    def test_tag_resource_name(self, nop_span):
        # The "resource.name" tag maps onto the dd span resource.
        nop_span.set_tag("resource.name", "myresource")
        assert nop_span._dd_span.resource == "myresource"

    def test_tag_span_type(self, nop_span):
        # The "span.type" tag maps onto the dd span type.
        nop_span.set_tag("span.type", "db")
        assert nop_span._dd_span.span_type == "db"

    def test_tag_service_name(self, nop_span):
        # The "service.name" tag maps onto the dd span service.
        nop_span.set_tag("service.name", "mysvc234")
        assert nop_span._dd_span.service == "mysvc234"

    def test_tag_db_statement(self, nop_span):
        # The "db.statement" tag maps onto the dd span resource.
        nop_span.set_tag("db.statement", "SELECT * FROM USERS")
        assert nop_span._dd_span.resource == "SELECT * FROM USERS"

    def test_tag_peer_hostname(self, nop_span):
        # "peer.hostname" maps onto the "out.host" dd tag.
        nop_span.set_tag("peer.hostname", "peername")
        assert nop_span._dd_span.get_tag("out.host") == "peername"

    def test_tag_peer_port(self, nop_span):
        # "peer.port" maps onto the "out.port" dd metric.
        nop_span.set_tag("peer.port", 55555)
        assert nop_span._get_metric("out.port") == 55555

    def test_tag_sampling_priority(self, nop_span):
        # "sampling.priority" maps onto the span context sampling priority.
        nop_span.set_tag("sampling.priority", "2")
        assert nop_span._dd_span.context.sampling_priority == "2"
| 33.9125 | 83 | 0.653336 | import pytest
from ddtrace.opentracer.span import Span
from tests.utils import DummyTracer
@pytest.fixture
def nop_tracer():
    """OpenTracing tracer backed by a no-op DummyTracer."""
    from ddtrace.opentracer import Tracer

    tracer = Tracer(service_name="mysvc", config={})
    tracer._tracer = DummyTracer()
    return tracer


@pytest.fixture
def nop_span_ctx():
    """Span context pre-marked as sampled (AUTO_KEEP)."""
    from ddtrace.constants import AUTO_KEEP
    from ddtrace.opentracer.span_context import SpanContext

    return SpanContext(sampling_priority=AUTO_KEEP)


@pytest.fixture
def nop_span(nop_tracer, nop_span_ctx):
    """A fresh OpenTracing Span built from the fixtures above."""
    return Span(nop_tracer, nop_span_ctx, "my_op_name")


class TestSpan(object):
    """Behavioral tests for the Datadog OpenTracing Span."""

    def test_init(self, nop_tracer, nop_span_ctx):
        span = Span(nop_tracer, nop_span_ctx, "my_op_name")
        assert not span.finished

    def test_tags(self, nop_span):
        # Numeric tags surface as metrics.
        nop_span.set_tag("test", 23)
        assert nop_span._get_metric("test") == 23

    def test_set_baggage(self, nop_span):
        # set_baggage_item is fluent (returns the span).
        r = nop_span.set_baggage_item("test", 23)
        assert r is nop_span
        r = nop_span.set_baggage_item("1", 1).set_baggage_item("2", 2)
        assert r is nop_span

    def test_get_baggage(self, nop_span):
        nop_span.set_baggage_item("test", 23)
        assert int(nop_span.get_baggage_item("test")) == 23
        nop_span.set_baggage_item("1", "1").set_baggage_item("2", 2)
        assert int(nop_span.get_baggage_item("test")) == 23
        assert nop_span.get_baggage_item("1") == "1"
        assert int(nop_span.get_baggage_item("2")) == 2

    def test_log_kv(self, nop_span):
        # Logging arbitrary values must not raise.
        nop_span.log_kv({"myval": 2})
        nop_span.log_kv({"myval2": 3})
        nop_span.log_kv({"myval3": 5})
        nop_span.log_kv({"myval": 2})

    def test_log_dd_kv(self, nop_span):
        # Error-shaped log entries should map to dd error tags.
        import traceback

        from ddtrace.ext import errors

        stack_trace = str(traceback.format_stack())
        nop_span.log_kv(
            {
                "event": "error",
                "error": 3,
                "message": "my error message",
                "stack": stack_trace,
            }
        )
        assert nop_span._dd_span.error
        assert nop_span._get_tag(errors.ERROR_STACK) == stack_trace
        assert nop_span._get_tag(errors.ERROR_MSG) == "my error message"
        assert nop_span._get_metric(errors.ERROR_TYPE) == 3

    def test_operation_name(self, nop_span):
        nop_span.set_operation_name("new_op_name")
        assert nop_span._dd_span.name == "new_op_name"

    def test_context_manager(self, nop_span):
        import time

        assert not nop_span.finished
        # The span was never added to the span context, so no traces result.
        with nop_span:
            time.sleep(0.005)
        assert nop_span.finished
        spans = nop_span.tracer._tracer.pop()
        assert len(spans) == 0

    def test_immutable_span_context(self, nop_span):
        # Mutating baggage should produce a new context object.
        before_ctx = nop_span._context
        nop_span.set_baggage_item("key", "value")
        after_ctx = nop_span._context
        assert before_ctx is not after_ctx


class TestSpanCompatibility(object):
    """Ensure opentracer span features map onto datadog span features."""

    def test_set_tag(self, nop_span):
        nop_span.set_tag("test", 2)
        assert nop_span._get_metric("test") == 2

    def test_tag_resource_name(self, nop_span):
        nop_span.set_tag("resource.name", "myresource")
        assert nop_span._dd_span.resource == "myresource"

    def test_tag_span_type(self, nop_span):
        nop_span.set_tag("span.type", "db")
        assert nop_span._dd_span.span_type == "db"

    def test_tag_service_name(self, nop_span):
        nop_span.set_tag("service.name", "mysvc234")
        assert nop_span._dd_span.service == "mysvc234"

    def test_tag_db_statement(self, nop_span):
        nop_span.set_tag("db.statement", "SELECT * FROM USERS")
        assert nop_span._dd_span.resource == "SELECT * FROM USERS"

    def test_tag_peer_hostname(self, nop_span):
        nop_span.set_tag("peer.hostname", "peername")
        assert nop_span._dd_span.get_tag("out.host") == "peername"

    def test_tag_peer_port(self, nop_span):
        nop_span.set_tag("peer.port", 55555)
        assert nop_span._get_metric("out.port") == 55555

    def test_tag_sampling_priority(self, nop_span):
        nop_span.set_tag("sampling.priority", "2")
        assert nop_span._dd_span.context.sampling_priority == "2"
| true | true |
f724e2e16afd314dfd71391ec47943c9a4b364d9 | 7,749 | py | Python | src/shimoku_api_python/configuration.py | shimoku-tech/shimoku-api-python | de26e7d80631647e68794277b15397403336f252 | [
"MIT"
] | 4 | 2021-12-23T15:51:21.000Z | 2022-01-25T08:55:31.000Z | src/shimoku_api_python/configuration.py | shimoku-tech/shimoku-api-python | de26e7d80631647e68794277b15397403336f252 | [
"MIT"
] | null | null | null | src/shimoku_api_python/configuration.py | shimoku-tech/shimoku-api-python | de26e7d80631647e68794277b15397403336f252 | [
"MIT"
] | 1 | 2022-03-02T01:13:04.000Z | 2022-03-02T01:13:04.000Z | """"""
import copy
import logging
import multiprocessing
import sys
import urllib3
class Configuration(object):
    """Runtime configuration for the generated API client.

    NOTE: originally auto generated by the swagger code generator program.
    Ref: https://github.com/swagger-api/swagger-codegen

    BUG FIX: the original referenced ``six.iteritems`` and ``httplib``
    without importing either, so constructing the object raised NameError
    (assigning ``self.debug`` / ``self.logger_file`` triggers the property
    setters below).  Replaced with ``dict.items()`` and ``http.client``.
    """

    _default = None

    def __init__(self):
        """Constructor: copy the class-wide default if one was installed."""
        if self._default:
            for key in self._default.__dict__.keys():
                self.__dict__[key] = copy.copy(self._default.__dict__[key])
            return

        # Default Base url
        self.host = "https://server.api.mailchimp.com/3.0"
        # Temp file folder for downloading files
        self.temp_folder_path = None

        # Authentication Settings
        # dict to store API key(s)
        self.api_key = {}
        # dict to store API prefix (e.g. Bearer)
        self.api_key_prefix = {}
        # function to refresh API key if expired
        self.refresh_api_key_hook = None
        # Username for HTTP basic authentication
        self.username = ""
        # Password for HTTP basic authentication
        self.password = ""

        # Logging Settings
        self.logger = {}
        self.logger["package_logger"] = logging.getLogger("mailchimp_marketing")
        self.logger["urllib3_logger"] = logging.getLogger("urllib3")
        # Log format (setting it builds self.logger_formatter via the property)
        self.logger_format = '%(asctime)s %(levelname)s %(message)s'
        # Log stream handler
        self.logger_stream_handler = None
        # Log file handler
        self.logger_file_handler = None
        # Debug file location (setter wires up the handlers)
        self.logger_file = None
        # Debug switch
        self.debug = False

        # SSL/TLS verification
        # Set this to false to skip verifying SSL certificate when calling API
        # from https server.
        self.verify_ssl = True
        # Set this to customize the certificate file to verify the peer.
        self.ssl_ca_cert = None
        # client certificate file
        self.cert_file = None
        # client key file
        self.key_file = None
        # Set this to True/False to enable/disable SSL hostname verification.
        self.assert_hostname = None

        # urllib3 connection pool's maximum number of connections saved
        # per pool.  urllib3 uses 1 connection as default value, but this is
        # not the best value when making many possibly parallel requests to
        # the same host; cpu_count * 5 is used to increase performance.
        self.connection_pool_maxsize = multiprocessing.cpu_count() * 5

        # Proxy URL
        self.proxy = None
        # Safe chars for path_param
        self.safe_chars_for_path_param = ''

    @classmethod
    def set_default(cls, default):
        """Install *default* as the template copied by future instances."""
        cls._default = default

    @property
    def logger_file(self):
        """The logger file.

        If the logger_file is None, then add stream handler and remove file
        handler. Otherwise, add file handler and remove stream handler.

        :param value: The logger_file path.
        :type: str
        """
        return self.__logger_file

    @logger_file.setter
    def logger_file(self, value):
        """The logger file.

        If the logger_file is None, then add stream handler and remove file
        handler. Otherwise, add file handler and remove stream handler.

        :param value: The logger_file path.
        :type: str
        """
        self.__logger_file = value
        if self.__logger_file:
            # If set logging file,
            # then add file handler and remove stream handler.
            self.logger_file_handler = logging.FileHandler(self.__logger_file)
            self.logger_file_handler.setFormatter(self.logger_formatter)
            for _, logger in self.logger.items():  # was six.iteritems (unimported)
                logger.addHandler(self.logger_file_handler)
                if self.logger_stream_handler:
                    logger.removeHandler(self.logger_stream_handler)
        else:
            # If not set logging file,
            # then add stream handler and remove file handler.
            self.logger_stream_handler = logging.StreamHandler()
            self.logger_stream_handler.setFormatter(self.logger_formatter)
            for _, logger in self.logger.items():
                logger.addHandler(self.logger_stream_handler)
                if self.logger_file_handler:
                    logger.removeHandler(self.logger_file_handler)

    @property
    def debug(self):
        """Debug status

        :param value: The debug status, True or False.
        :type: bool
        """
        return self.__debug

    @debug.setter
    def debug(self, value):
        """Debug status

        :param value: The debug status, True or False.
        :type: bool
        """
        # Local import: the original referenced the py2-only `httplib` name
        # without any import; `http.client` is its Python 3 equivalent.
        import http.client

        self.__debug = value
        if self.__debug:
            # if debug status is True, turn on debug logging
            for _, logger in self.logger.items():
                logger.setLevel(logging.DEBUG)
            # turn on http.client debug
            http.client.HTTPConnection.debuglevel = 1
        else:
            # if debug status is False, turn off debug logging,
            # setting log level to default `logging.WARNING`
            for _, logger in self.logger.items():
                logger.setLevel(logging.WARNING)
            # turn off http.client debug
            http.client.HTTPConnection.debuglevel = 0

    @property
    def logger_format(self):
        """The logger format.

        The logger_formatter will be updated when sets logger_format.

        :param value: The format string.
        :type: str
        """
        return self.__logger_format

    @logger_format.setter
    def logger_format(self, value):
        """The logger format.

        The logger_formatter will be updated when sets logger_format.

        :param value: The format string.
        :type: str
        """
        self.__logger_format = value
        self.logger_formatter = logging.Formatter(self.__logger_format)

    def get_api_key_with_prefix(self, identifier):
        """Gets API key (with prefix if set).

        :param identifier: The identifier of apiKey.
        :return: The token for api key authentication, or None if unknown.
        """
        if self.refresh_api_key_hook:
            self.refresh_api_key_hook(self)

        key = self.api_key.get(identifier)
        if key:
            prefix = self.api_key_prefix.get(identifier)
            if prefix:
                return "%s %s" % (prefix, key)
            else:
                return key

    def get_basic_auth_token(self):
        """Gets HTTP basic authentication header (string).

        :return: The token for basic HTTP authentication.
        """
        return urllib3.util.make_headers(
            basic_auth=self.username + ':' + self.password
        ).get('authorization')

    def auth_settings(self):
        """Gets Auth Settings dict for api client.

        :return: The Auth Settings information dict.
        """
        return {
            'basicAuth':
                {
                    'type': 'basic',
                    'in': 'header',
                    'key': 'Authorization',
                    'value': self.get_basic_auth_token()
                },
        }

    def to_debug_report(self):
        """Gets the essential information for debugging.

        :return: The report for debugging.
        """
        return "Python SDK Debug Report:\n"\
               "OS: {env}\n"\
               "Python Version: {pyversion}\n"\
               "Version of the API: 3.0.70\n"\
               "SDK Package Version: 3.0.70".\
               format(env=sys.platform, pyversion=sys.version)
| 34.748879 | 80 | 0.599303 |
import copy
import logging
import multiprocessing
import sys
import urllib3
class Configuration(object):
_default = None
def __init__(self):
if self._default:
for key in self._default.__dict__.keys():
self.__dict__[key] = copy.copy(self._default.__dict__[key])
return
self.host = "https://server.api.mailchimp.com/3.0"
self.temp_folder_path = None
self.api_key = {}
self.api_key_prefix = {}
self.refresh_api_key_hook = None
self.username = ""
self.password = ""
self.logger = {}
self.logger["package_logger"] = logging.getLogger("mailchimp_marketing")
self.logger["urllib3_logger"] = logging.getLogger("urllib3")
self.logger_format = '%(asctime)s %(levelname)s %(message)s'
self.logger_stream_handler = None
self.logger_file_handler = None
self.logger_file = None
self.debug = False
self.verify_ssl = True
self.ssl_ca_cert = None
self.cert_file = None
self.key_file = None
self.assert_hostname = None
# per pool. urllib3 uses 1 connection as default value, but this is
# not the best value when you are making a lot of possibly parallel
# requests to the same host, which is often the case here.
# cpu_count * 5 is used as default value to increase performance.
self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
# Proxy URL
self.proxy = None
# Safe chars for path_param
self.safe_chars_for_path_param = ''
@classmethod
def set_default(cls, default):
cls._default = default
@property
def logger_file(self):
return self.__logger_file
@logger_file.setter
def logger_file(self, value):
self.__logger_file = value
if self.__logger_file:
# If set logging file,
# then add file handler and remove stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_file_handler)
if self.logger_stream_handler:
logger.removeHandler(self.logger_stream_handler)
else:
# If not set logging file,
# then add stream handler and remove file handler.
self.logger_stream_handler = logging.StreamHandler()
self.logger_stream_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_stream_handler)
if self.logger_file_handler:
logger.removeHandler(self.logger_file_handler)
@property
def debug(self):
return self.__debug
@debug.setter
def debug(self, value):
self.__debug = value
if self.__debug:
# if debug status is True, turn on debug logging
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.DEBUG)
# turn on httplib debug
httplib.HTTPConnection.debuglevel = 1
else:
# if debug status is False, turn off debug logging,
# setting log level to default `logging.WARNING`
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.WARNING)
# turn off httplib debug
httplib.HTTPConnection.debuglevel = 0
@property
def logger_format(self):
return self.__logger_format
@logger_format.setter
def logger_format(self, value):
self.__logger_format = value
self.logger_formatter = logging.Formatter(self.__logger_format)
def get_api_key_with_prefix(self, identifier):
if self.refresh_api_key_hook:
self.refresh_api_key_hook(self)
key = self.api_key.get(identifier)
if key:
prefix = self.api_key_prefix.get(identifier)
if prefix:
return "%s %s" % (prefix, key)
else:
return key
def get_basic_auth_token(self):
return urllib3.util.make_headers(
basic_auth=self.username + ':' + self.password
).get('authorization')
def auth_settings(self):
return {
'basicAuth':
{
'type': 'basic',
'in': 'header',
'key': 'Authorization',
'value': self.get_basic_auth_token()
},
}
def to_debug_report(self):
return "Python SDK Debug Report:\n"\
"OS: {env}\n"\
"Python Version: {pyversion}\n"\
"Version of the API: 3.0.70\n"\
"SDK Package Version: 3.0.70".\
format(env=sys.platform, pyversion=sys.version)
| true | true |
f724e4bc2f3b9f7eafff6e16166f0dbe55dce02c | 1,138 | py | Python | send_msg.py | nocholasrift/wordlebot | c97576697c47a1f5a35722af78a6758ba9a325bf | [
"MIT"
] | null | null | null | send_msg.py | nocholasrift/wordlebot | c97576697c47a1f5a35722af78a6758ba9a325bf | [
"MIT"
] | null | null | null | send_msg.py | nocholasrift/wordlebot | c97576697c47a1f5a35722af78a6758ba9a325bf | [
"MIT"
] | null | null | null | import os
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
client = WebClient(token="xoxb-435046985394-3004455722741-fpIQQHskeFNILHcT3hGoPIF7");
channel_id="wordle"
is_solved = True
guesses = []
with open("tmp", "r") as f:
for line in f:
line = line.strip()
if line == "IMPOSSIBLE":
is_solved = False
continue
if line == "DONE":
continue
print(line)
guesses.append(line)
map_ = {'x': ':black_large_square:', 'y': ':large_blue_square:', 'g': ":large_orange_square:"}
text=f'Wordle 220 {len(guesses)}/6\n\n'
for guess in guesses:
for cell in guess:
text+=map_[cell]
text+="\n"
print(guesses)
try:
# Call the conversations.list method using the WebClient
result = client.chat_postMessage(
username="wordlebot",
icon_emoji=":large_green_square",
channel=channel_id,
text=text
# You could also use a blocks[] array to send richer content
)
# Print result, which includes information about the message (like TS)
print(result)
except SlackApiError as e:
print(f"Error: {e}")
| 22.313725 | 95 | 0.655536 | import os
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
client = WebClient(token="xoxb-435046985394-3004455722741-fpIQQHskeFNILHcT3hGoPIF7");
channel_id="wordle"
is_solved = True
guesses = []
with open("tmp", "r") as f:
for line in f:
line = line.strip()
if line == "IMPOSSIBLE":
is_solved = False
continue
if line == "DONE":
continue
print(line)
guesses.append(line)
map_ = {'x': ':black_large_square:', 'y': ':large_blue_square:', 'g': ":large_orange_square:"}
text=f'Wordle 220 {len(guesses)}/6\n\n'
for guess in guesses:
for cell in guess:
text+=map_[cell]
text+="\n"
print(guesses)
try:
result = client.chat_postMessage(
username="wordlebot",
icon_emoji=":large_green_square",
channel=channel_id,
text=text
)
print(result)
except SlackApiError as e:
print(f"Error: {e}")
| true | true |
f724e6dae565bcc5a26d05bdb7f2553473458a1f | 1,242 | py | Python | tests/test_project_setups.py | insspb/python3-boilerplate | 7d70cd8a7bbbe2805ae5f4cb538996a30b96c736 | [
"MIT"
] | 3 | 2020-04-22T04:09:18.000Z | 2021-12-20T08:44:44.000Z | tests/test_project_setups.py | insspb/python3-boilerplate | 7d70cd8a7bbbe2805ae5f4cb538996a30b96c736 | [
"MIT"
] | 11 | 2019-08-31T08:37:40.000Z | 2019-08-31T11:25:29.000Z | tests/test_project_setups.py | insspb/python3-boilerplate | 7d70cd8a7bbbe2805ae5f4cb538996a30b96c736 | [
"MIT"
] | 1 | 2020-11-24T11:18:50.000Z | 2020-11-24T11:18:50.000Z | import pytest
"""
This file include several configuration of answers to setup file.
Each configuration should be completed without errors to pass this tests.
"""
@pytest.mark.skip
def test_all_python_versions_deploy():
"""Test setup.py format correct for all Python versions support."""
pass
@pytest.mark.skip
def test_2x_only_python_version_deploy():
"""Test setup.py format correct for Python 2.7 only versions support."""
pass
@pytest.mark.skip
def test_3x_only_python_versions_deploy():
"""Test setup.py format correct for all Python 3.x versions supported."""
pass
@pytest.mark.skip
def test_markdown_documentation():
pass
@pytest.mark.skip
def test_rst_documentation():
pass
@pytest.mark.skip
def test_install_github_issues_templates():
pass
@pytest.mark.skip
def test_install_gitlab_issues_templates():
pass
@pytest.mark.skip
def test_mit_license_deploy():
pass
@pytest.mark.skip
def test_bsd_license_deploy():
pass
@pytest.mark.skip
def test_gnu_license_deploy():
pass
@pytest.mark.skip
def test_apache_license_deploy():
pass
@pytest.mark.skip
def test_unlicensed_license_deploy():
pass
@pytest.mark.skip
def test_none_license_deploy():
pass
| 16.342105 | 77 | 0.750403 | import pytest
@pytest.mark.skip
def test_all_python_versions_deploy():
pass
@pytest.mark.skip
def test_2x_only_python_version_deploy():
pass
@pytest.mark.skip
def test_3x_only_python_versions_deploy():
pass
@pytest.mark.skip
def test_markdown_documentation():
pass
@pytest.mark.skip
def test_rst_documentation():
pass
@pytest.mark.skip
def test_install_github_issues_templates():
pass
@pytest.mark.skip
def test_install_gitlab_issues_templates():
pass
@pytest.mark.skip
def test_mit_license_deploy():
pass
@pytest.mark.skip
def test_bsd_license_deploy():
pass
@pytest.mark.skip
def test_gnu_license_deploy():
pass
@pytest.mark.skip
def test_apache_license_deploy():
pass
@pytest.mark.skip
def test_unlicensed_license_deploy():
pass
@pytest.mark.skip
def test_none_license_deploy():
pass
| true | true |
f724e874d78e8be3faac5983bcecca02a0597e59 | 4,291 | py | Python | benchmark/startQiskit_QC2348.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_QC2348.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_QC2348.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=4
# total number=36
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """XOR two equal-length bit strings.

    The result is returned reversed (least-significant position first),
    matching the convention of the original implementation.
    """
    n = len(s)
    xored = [str(int(s[i]) ^ int(t[i])) for i in range(n)]
    return ''.join(xored[::-1])
def bitwise_dot(s: str, t: str) -> str:
    """Dot product of two bit strings modulo 2, returned as '0' or '1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the oracle circuit O_f on *n* control qubits plus one target.

    For every n-bit input where f(...) == "1" the target qubit is flipped
    via a multi-controlled Toffoli ('noancilla' mode); X gates temporarily
    map the 0-bits of the pattern onto the controls so the MCT fires only
    for that exact bit pattern.
    """
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        pattern = np.binary_repr(i, n)
        if f(pattern) != "1":
            continue
        zero_positions = [j for j in range(n) if pattern[j] == "0"]
        for j in zero_positions:
            oracle.x(controls[j])
        oracle.mct(controls, target[0], None, mode='noancilla')
        for j in zero_positions:
            oracle.x(controls[j])
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the n-qubit benchmark circuit around the oracle for *f*.

    The gate sequence (and its `# number=NN` bookkeeping comments) is
    machine-generated; the exact order of gates is significant, so it is
    documented but deliberately left untouched.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    # Pre-oracle gate sequence on qubit 3 (H-CZ-H pairs are generated CX
    # decompositions) followed by Hadamards on all qubits.
    prog.h(input_qubit[3]) # number=15
    prog.cz(input_qubit[0],input_qubit[3]) # number=16
    prog.h(input_qubit[3]) # number=17
    prog.x(input_qubit[3]) # number=13
    prog.h(input_qubit[3]) # number=20
    prog.cz(input_qubit[0],input_qubit[3]) # number=21
    prog.h(input_qubit[3]) # number=22
    prog.h(input_qubit[1])  # number=2
    prog.h(input_qubit[2])  # number=3
    prog.h(input_qubit[3])  # number=4
    prog.h(input_qubit[0])  # number=5
    prog.cx(input_qubit[0],input_qubit[3]) # number=33
    prog.x(input_qubit[3]) # number=34
    prog.cx(input_qubit[0],input_qubit[3]) # number=35

    # The oracle acts on the first n-1 qubits with the last as target.
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])

    # Post-oracle gate sequence.
    prog.h(input_qubit[1])  # number=6
    prog.h(input_qubit[1]) # number=29
    prog.h(input_qubit[2])  # number=7
    prog.h(input_qubit[3])  # number=8
    prog.h(input_qubit[0])  # number=9

    prog.h(input_qubit[0]) # number=23
    prog.cz(input_qubit[2],input_qubit[0]) # number=24
    prog.h(input_qubit[0]) # number=25
    prog.y(input_qubit[2]) # number=30
    prog.cx(input_qubit[2],input_qubit[0]) # number=11
    prog.cx(input_qubit[2],input_qubit[0]) # number=18
    prog.h(input_qubit[0]) # number=26
    prog.x(input_qubit[2]) # number=31
    prog.cz(input_qubit[2],input_qubit[0]) # number=27
    prog.h(input_qubit[0]) # number=28
    # circuit end

    # Measure every qubit into its classical register slot.
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])

    return prog
if __name__ == '__main__':
    # Oracle function: f(rep) = <a, rep> XOR b  (inner product with a, offset b).
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    # Run on the least-busy real IBMQ device with at least 2 qubits.
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    provider.backends()
    backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))

    sample_shot =8000
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # Transpile against a mocked device only to report circuit statistics.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)

    # Write counts and the transpiled circuit to the benchmark CSV.
    writefile = open("../data/startQiskit_QC2348.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| 35.172131 | 165 | 0.655558 |
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3])
prog.cz(input_qubit[0],input_qubit[3])
prog.h(input_qubit[3])
prog.x(input_qubit[3])
prog.h(input_qubit[3])
prog.cz(input_qubit[0],input_qubit[3])
prog.h(input_qubit[3])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
prog.h(input_qubit[0])
prog.cx(input_qubit[0],input_qubit[3])
prog.x(input_qubit[3])
prog.cx(input_qubit[0],input_qubit[3])
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
prog.h(input_qubit[0])
prog.h(input_qubit[0])
prog.cz(input_qubit[2],input_qubit[0])
prog.h(input_qubit[0])
prog.y(input_qubit[2])
prog.cx(input_qubit[2],input_qubit[0])
prog.cx(input_qubit[2],input_qubit[0])
prog.h(input_qubit[0])
prog.x(input_qubit[2])
prog.cz(input_qubit[2],input_qubit[0])
prog.h(input_qubit[0])
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_QC2348.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| true | true |
f724e8a3de033be4e0d6edde1760bbeabfca72f8 | 1,538 | py | Python | clients/python-legacy/generated/test/test_queue_item_impl.py | cliffano/jenkins-api-clients-generator | 522d02b3a130a29471df5ec1d3d22c822b3d0813 | [
"MIT"
] | null | null | null | clients/python-legacy/generated/test/test_queue_item_impl.py | cliffano/jenkins-api-clients-generator | 522d02b3a130a29471df5ec1d3d22c822b3d0813 | [
"MIT"
] | null | null | null | clients/python-legacy/generated/test/test_queue_item_impl.py | cliffano/jenkins-api-clients-generator | 522d02b3a130a29471df5ec1d3d22c822b3d0813 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
The version of the OpenAPI document: 1.1.2-pre.0
Contact: blah@cliffano.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.queue_item_impl import QueueItemImpl # noqa: E501
from openapi_client.rest import ApiException
class TestQueueItemImpl(unittest.TestCase):
    """Unit test stubs for the QueueItemImpl model."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Build a QueueItemImpl instance.

        When *include_optional* is False only required parameters are set;
        when True both required and optional parameters are set.
        """
        if include_optional:
            return QueueItemImpl(
                _class='',
                expected_build_number=56,
                id='',
                pipeline='',
                queued_time=56,
            )
        return QueueItemImpl()

    def testQueueItemImpl(self):
        """Constructing QueueItemImpl both ways must not raise."""
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| 26.982456 | 85 | 0.641743 |
from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.queue_item_impl import QueueItemImpl
from openapi_client.rest import ApiException
class TestQueueItemImpl(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
include_optional :
return QueueItemImpl(
_class = '',
expected_build_number = 56,
id = '',
pipeline = '',
queued_time = 56
)
else :
return QueueItemImpl(
)
def testQueueItemImpl(self):
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| true | true |
f724e8d11edae12200491270eda330db309592bd | 927 | py | Python | test/test_create_records.py | Borye/vika.py | 7b4ac29d308e00e2bbfc37dbcaa3f6c7a4a2236f | [
"MIT"
] | 39 | 2020-10-27T13:17:37.000Z | 2022-03-17T11:04:39.000Z | test/test_create_records.py | Borye/vika.py | 7b4ac29d308e00e2bbfc37dbcaa3f6c7a4a2236f | [
"MIT"
] | 9 | 2020-10-27T14:44:48.000Z | 2022-01-19T04:46:58.000Z | test/test_create_records.py | Borye/vika.py | 7b4ac29d308e00e2bbfc37dbcaa3f6c7a4a2236f | [
"MIT"
] | 8 | 2020-10-27T15:12:34.000Z | 2022-01-19T14:23:15.000Z | import unittest
import time
from vika import Vika
from . import TEST_TABLE, TEST_API_BASE, TEST_API_TOKEN
class TestCreateRecords(unittest.TestCase):
def setUp(self):
vika = Vika(TEST_API_TOKEN)
vika.set_api_base(TEST_API_BASE)
self.dst = vika.datasheet(TEST_TABLE)
def test_record_create(self):
time.sleep(1)
record = self.dst.records.create({
"title": "高等数学"
})
time.sleep(1)
self.assertIsNotNone(record._id)
records = self.dst.records.bulk_create([
{
"title": "离散数学"
},
{
"title": "线性代数"
}
])
self.created_records = records + [record]
for rec in records:
self.assertIsNotNone(rec._id)
def tearDown(self):
self.dst.delete_records(self.created_records)
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| 24.394737 | 55 | 0.577131 | import unittest
import time
from vika import Vika
from . import TEST_TABLE, TEST_API_BASE, TEST_API_TOKEN
class TestCreateRecords(unittest.TestCase):
def setUp(self):
vika = Vika(TEST_API_TOKEN)
vika.set_api_base(TEST_API_BASE)
self.dst = vika.datasheet(TEST_TABLE)
def test_record_create(self):
time.sleep(1)
record = self.dst.records.create({
"title": "高等数学"
})
time.sleep(1)
self.assertIsNotNone(record._id)
records = self.dst.records.bulk_create([
{
"title": "离散数学"
},
{
"title": "线性代数"
}
])
self.created_records = records + [record]
for rec in records:
self.assertIsNotNone(rec._id)
def tearDown(self):
self.dst.delete_records(self.created_records)
if __name__ == '__main__':
unittest.main()
| true | true |
f724e8e9d1a8e16562e5a832d20c371877bd9ce1 | 17,324 | py | Python | openmaptiles/mbtile_tools.py | smellman/openmaptiles-tools | c310d1a57d60477c0452575c5b1983bce3fffac2 | [
"MIT"
] | 3 | 2021-02-02T10:16:43.000Z | 2021-06-14T20:00:06.000Z | openmaptiles/mbtile_tools.py | smellman/openmaptiles-tools | c310d1a57d60477c0452575c5b1983bce3fffac2 | [
"MIT"
] | 1 | 2021-02-23T17:02:14.000Z | 2021-02-23T17:02:14.000Z | openmaptiles/mbtile_tools.py | isabella232/openmaptiles-tools | 84e76e7dd5e7118de8dd11f1945607de04d3ea0e | [
"MIT"
] | 1 | 2020-08-13T09:01:10.000Z | 2020-08-13T09:01:10.000Z | import json
import os
import sqlite3
from datetime import datetime
from pathlib import Path
import asyncpg
from tabulate import tabulate
from typing import Dict
from openmaptiles.pgutils import get_postgis_version, get_vector_layers
from openmaptiles.sqlite_utils import query
from openmaptiles.sqltomvt import MvtGenerator
from openmaptiles.tileset import Tileset
from openmaptiles.utils import print_err, Bbox, print_tile, shorten_str
class KeyFinder:
    """Search an mbtiles file for tile blobs that are duplicated frequently.

    Tiles whose content (identified by ``tile_id``) appears at least
    ``min_dup_count`` times in the ``map`` table are reported, optionally
    restricted to one zoom level.  The list of duplicate tile_ids, ordered
    by descending duplication count, is returned from :meth:`run` and can
    also be written to *outfile* (``'-'`` means stdout).
    """

    def __init__(self,
                 mbtiles,
                 show_size=None,
                 show_examples=None,
                 outfile: str = None,
                 zoom=None,
                 min_dup_count=None,
                 verbose=False) -> None:
        self.mbtiles = mbtiles
        if min_dup_count is not None:
            min_dup_count = int(min_dup_count)
            if min_dup_count < 2:
                raise ValueError("min_dup_count must be an integer ≥ 2")
            self.min_dup_count = min_dup_count
        else:
            # Higher zooms contain far more tiles, so require more repeats
            # before a tile counts as "frequently duplicated".
            self.min_dup_count = 50 if zoom and zoom > 12 else 20
        self.use_stdout = outfile == '-'
        self.zoom = zoom
        self.verbose = verbose
        if outfile:
            self.outfile = True if self.use_stdout else Path(outfile)
        else:
            self.outfile = None
        # Size/example reporting defaults to the verbosity flag.
        self.show_size = self.verbose if show_size is None else show_size
        self.show_examples = self.verbose if show_examples is None else show_examples

    def run(self):
        """Find duplicate tiles and return their tile_ids, most frequent first."""
        if self.outfile and not self.use_stdout:
            with self.outfile.open("w"):
                pass  # create or truncate file, but don't write anything to it yet
        with sqlite3.connect(self.mbtiles) as conn:
            results = []
            if self.show_size:
                # BUG FIX: the zoom filter must live inside the subquery --
                # the outer SELECT only sees the `dups` derived table and
                # `images`, neither of which has a zoom_level column, so the
                # old outer "WHERE zoom_level=?" raised an SQL error.
                inner = "SELECT tile_id, COUNT(*) AS cnt FROM map"
                sql_opts = []
                if self.zoom:
                    inner += " WHERE zoom_level=?"
                    sql_opts.append(self.zoom)
                inner += " GROUP BY tile_id HAVING cnt >= ?"
                sql_opts.append(self.min_dup_count)
                sql = f"SELECT cnt, dups.tile_id, LENGTH(tile_data) FROM ({inner}) dups " \
                      "JOIN images ON images.tile_id = dups.tile_id"
            else:
                sql_opts = []
                sql = "SELECT COUNT(*) cnt, tile_id FROM map"
                if self.zoom:
                    sql += " WHERE zoom_level=?"
                    sql_opts.append(self.zoom)
                sql += " GROUP BY tile_id HAVING cnt >= ?"
                sql_opts.append(self.min_dup_count)
            for vals in query(conn, sql, sql_opts):
                results.append(vals)
            # Rows start with the count, so this orders most-duplicated first.
            results.sort(reverse=True)
            size = None
            examples = None
            for vals in results:
                if len(vals) == 3:
                    count, tile_id, size = vals
                else:
                    count, tile_id = vals
                if self.show_examples:
                    example_sql = "SELECT zoom_level, tile_column, tile_row FROM map " \
                                  "WHERE tile_id = ? LIMIT 5"
                    examples = [f'{z}/{x}/{y}' for z, x, y in
                                query(conn, example_sql, [tile_id])]
                if self.verbose:
                    res = f"{tile_id} x {count:,}"
                    if self.show_size:
                        res += f', {size:,} bytes'
                    if self.show_examples:
                        res += ', examples: ' + ', '.join(examples)
                    print_err(res)

        # Reduce to the bare tile_ids for the caller / output file.
        results = [v[1] for v in results]
        if self.use_stdout:
            for v in results:
                print(v)
        elif self.outfile:
            with self.outfile.open("a") as f:
                f.writelines([str(v) + '\n' for v in results])
        return results
class Imputer:
    """Propagate known duplicate-tile keys from zoom N-1 down to zoom N.

    Every tile at zoom ``zoom - 1`` whose tile_id is one of *keys*
    (typically the frequently-duplicated "empty" tiles found by KeyFinder)
    is assumed to produce four identical children at zoom ``zoom``; those
    children are inserted straight into the ``map`` table.  Parent tiles
    that do not match a key are emitted as a "z/x/y" list of tiles that
    still need to be generated (written to *outfile*, or stdout for '-').
    """

    def __init__(self, mbtiles, keys, zoom, outfile: str = None,
                 verbose=False) -> None:
        self.mbtiles = mbtiles
        # Per-key counter of how many child tiles were imputed from it.
        self.keys = {k: 0 for k in keys}
        self.zoom = zoom
        self.use_stdout = outfile == '-'
        # Verbose stats are forced on unless the tile list itself is being
        # streamed to stdout, where extra output would corrupt it.
        self.verbose = verbose or not self.use_stdout
        if outfile:
            self.outfile = True if self.use_stdout else Path(outfile)
        else:
            self.outfile = None

    def run(self):
        """Impute child tiles for known keys; record the rest for generation."""
        with sqlite3.connect(self.mbtiles) as conn:
            # Without an output file only key-matching parents are scanned;
            # otherwise every parent is scanned so non-matches can be listed.
            limit_to_keys = not self.outfile
            if self.outfile and not self.use_stdout:
                with self.outfile.open("w"):
                    pass  # create or truncate file, but don't write anything to it yet
            keyed_tiles = 0
            nokey_tiles = 0
            cursor = conn.cursor()
            key_stats = self.keys
            for with_key, without_key in self.tile_batches(conn, limit_to_keys):
                without_key.sort()
                if with_key:
                    with_key.sort()
                    for val in with_key:
                        # val is (zoom, x, y, tile_id); count per tile_id.
                        key_stats[val[3]] += 1
                    cursor.executemany(
                        'INSERT OR IGNORE INTO map'
                        '(zoom_level, tile_column, tile_row, tile_id)'
                        ' VALUES(?,?,?,?)',
                        with_key)
                    keyed_tiles += cursor.rowcount
                    conn.commit()
                if without_key:
                    if self.use_stdout:
                        for v in without_key:
                            print(v, end='')
                    else:
                        with self.outfile.open("a") as f:
                            f.writelines(without_key)
                    nokey_tiles += len(without_key)

            if self.verbose:
                for k, c in key_stats.items():
                    print_err(f"{k} - added {c:,}")
                print_err(f'Total imputed tiles: {keyed_tiles:,}')
                if nokey_tiles:
                    print_err(f'Total tiles need to be generated: {nokey_tiles:,}')

    def tile_batches(self, conn: sqlite3.Connection, limit_to_keys=False):
        """Generate batches of tiles to be processed for the new zoom,
        based on the previous zoom level. Each yield contains two batches:
        one with "empty" tiles (those that match known keys),
        and another with non-empty tiles (only if limit_to_keys is False).
        The first batch can be inserted into mbtiles db as is.
        The second batch will be used as a list of tiles to be generated.
        """
        batch_size = 1000000
        zoom = self.zoom
        search_zoom = zoom - 1
        sql = f"SELECT tile_column, tile_row, tile_id FROM map WHERE zoom_level=?"
        sql_args = [search_zoom]
        if limit_to_keys:
            sql += f" and tile_id IN ({','.join(('?' * len(self.keys)))})"
            sql_args += self.keys
        with_key = []
        without_key = []
        # Highest row index at the parent zoom; used to flip the Y axis below.
        max_y = 2 ** search_zoom - 1
        for x, y, key in query(conn, sql, sql_args):
            if limit_to_keys or key in self.keys:
                # Each parent tile maps onto a 2x2 block of children.
                with_key.append((zoom, x * 2, y * 2, key))
                with_key.append((zoom, x * 2 + 1, y * 2, key))
                with_key.append((zoom, x * 2, y * 2 + 1, key))
                with_key.append((zoom, x * 2 + 1, y * 2 + 1, key))
            else:
                # mbtiles uses inverted Y (starts at the bottom)
                ry = max_y - y
                without_key.append(f"{zoom}/{x * 2}/{ry * 2}\n")
                without_key.append(f"{zoom}/{x * 2 + 1}/{ry * 2}\n")
                without_key.append(f"{zoom}/{x * 2}/{ry * 2 + 1}\n")
                without_key.append(f"{zoom}/{x * 2 + 1}/{ry * 2 + 1}\n")
            if len(with_key) > batch_size or len(without_key) > batch_size:
                yield with_key, without_key
                with_key = []
                without_key = []
        if with_key or without_key:
            yield with_key, without_key
class Metadata:
    """Read, validate, generate, and copy the ``metadata`` table of an
    mbtiles (sqlite) file, following the mbtiles 1.3 specification."""

    def __init__(self, mbtiles: str, show_json: bool = False,
                 show_ranges: bool = False) -> None:
        # mbtiles: path to the mbtiles file.
        # show_json: print the raw "json" metadata value instead of a summary.
        # show_ranges: additionally print per-zoom tile counts and x/y ranges.
        self.mbtiles = mbtiles
        self.show_json = show_json
        self.show_ranges = show_ranges

    def print_all(self, file: str = None):
        """Print every metadata name/value pair of ``file`` (defaults to
        ``self.mbtiles``), validating each value as it is printed."""
        file = file or self.mbtiles
        data = self._get_metadata(file)
        if data:
            width = max((len(v) for v in data.keys()))
            # Sort keys alphabetically, forcing the bulky 'json' key last.
            for name, value in sorted(data.items(),
                                      key=lambda v: v[0] if v[0] != 'json' else 'zz'):
                print(f"{name:{width}} {self.validate(name, value)[0]}")
        else:
            print(f"There are no values present in {file} metadata table")
        if self.show_ranges:
            with sqlite3.connect(file) as conn:
                sql = """\
SELECT zoom_level, COUNT(*) AS count,
MIN(tile_column) AS min_column, MAX(tile_column) AS max_column,
MIN(tile_row) AS min_row, MAX(tile_row) AS max_row
FROM map
GROUP BY zoom_level
"""
                res = []
                for z, cnt, min_x, max_x, min_y, max_y in sorted(query(conn, sql, [])):
                    res.append({
                        "Zoom": z,
                        "Tile count": f"{cnt:,}",
                        "Found tile ranges": f"{min_x},{min_y} x {max_x},{max_y}",
                    })
                print("\n" + tabulate(res, headers="keys"))

    def get_value(self, name):
        """Print one metadata value; exit with status 1 if the key is absent."""
        with sqlite3.connect(self.mbtiles) as conn:
            cursor = conn.cursor()
            cursor.execute("SELECT value FROM metadata WHERE name=?", [name])
            row = cursor.fetchone()
            if row is None:
                print_err(f"Metadata field '{name}' is not found")
                exit(1)
            print(row[0])

    def set_value(self, name, value):
        """Set (or, when ``value`` is None, delete) a single metadata entry,
        validating the new value first."""
        if value is not None:
            _, is_valid = self.validate(name, value)
            if not is_valid:
                raise ValueError(f"Invalid {name}={value}")
        with sqlite3.connect(self.mbtiles) as conn:
            cursor = conn.cursor()
            if value is None:
                cursor.execute("DELETE FROM metadata WHERE name=?;", [name])
            else:
                cursor.execute(
                    "INSERT OR REPLACE INTO metadata(name, value) VALUES (?, ?);",
                    [name, value])

    async def generate(self, tileset, reset, auto_minmax,
                       pghost, pgport, dbname, user, password):
        """Build metadata from a tileset definition plus a live PostgreSQL
        instance (for the vector_layers JSON) and store it in the mbtiles."""
        ts = Tileset.parse(tileset)
        print(
            f'Connecting to PostgreSQL at {pghost}:{pgport}, db={dbname}, user={user}...')
        try:
            # A single pooled connection suffices: it is only used to probe
            # the PostGIS version and to introspect the vector layers.
            async with asyncpg.create_pool(
                database=dbname, host=pghost, port=pgport, user=user,
                password=password, min_size=1, max_size=1,
            ) as pool:
                async with pool.acquire() as conn:
                    mvt = MvtGenerator(
                        ts,
                        postgis_ver=await get_postgis_version(conn),
                        zoom='$1', x='$2', y='$3',
                    )
                    json_data = dict(vector_layers=await get_vector_layers(conn, mvt))
        except ConnectionError as err:
            print(f"Unable to connect to Postgres database: {err}")
            raise err
        # Convert tileset to the metadata object according to mbtiles 1.3 spec
        # https://github.com/mapbox/mbtiles-spec/blob/master/1.3/spec.md#content
        metadata = dict(
            # MUST
            name=ts.name,
            format="pbf",
            json=json.dumps(json_data, ensure_ascii=False, separators=(',', ':')),
            # SHOULD
            bounds=",".join((str(v) for v in ts.bounds)),
            center=",".join((str(v) for v in ts.center)),
            minzoom=str(ts.minzoom),
            maxzoom=str(ts.maxzoom),
            # MAY
            attribution=ts.attribution,
            description=ts.description,
            version=ts.version,
            # EXTRAS
            id=ts.id,
        )
        self._update_metadata(metadata, auto_minmax, reset, self.mbtiles,
                              ts.center[2])

    def copy(self, target_mbtiles, reset, auto_minmax):
        """Copy all metadata from ``self.mbtiles`` into another mbtiles file."""
        metadata = self._get_metadata(self.mbtiles)
        self._update_metadata(metadata, auto_minmax, reset, target_mbtiles)

    def show_tile(self, zoom, x, y, show_names, summary):
        """Print the content of a single tile, or a not-found message."""
        with sqlite3.connect(self.mbtiles) as conn:
            sql = "SELECT tile_data FROM tiles " \
                  "WHERE zoom_level=? AND tile_column=? AND tile_row=?"
            for row in query(conn, sql, [zoom, x, y]):
                print_tile(row[0], show_names, summary, f"{zoom}/{x}/{y}")
                break
            else:
                # for/else: the loop body never ran, i.e. no such tile exists.
                print(f"Tile {zoom}/{x}/{y} not found")

    def _update_metadata(self, metadata, auto_minmax, reset, file, center_zoom=None):
        """Apply env-var overrides and computed fields to ``metadata``, then
        persist it into the metadata table of ``file``."""

        def update_from_env(param, env_var):
            # Environment variables override values coming from the tileset.
            val = os.environ.get(env_var)
            if val is not None:
                metadata[param] = val

        update_from_env('name', 'METADATA_NAME')
        update_from_env('minzoom', 'MIN_ZOOM')
        update_from_env('maxzoom', 'MAX_ZOOM')
        update_from_env('attribution', 'METADATA_ATTRIBUTION')
        update_from_env('description', 'METADATA_DESCRIPTION')
        update_from_env('version', 'METADATA_VERSION')
        metadata['filesize'] = os.path.getsize(file)
        bbox_str = os.environ.get('BBOX')
        if bbox_str:
            bbox = Bbox(bbox=bbox_str,
                        center_zoom=os.environ.get('CENTER_ZOOM', center_zoom))
            metadata["bounds"] = bbox.bounds_str()
            metadata["center"] = bbox.center_str()
        with sqlite3.connect(file) as conn:
            cursor = conn.cursor()
            if auto_minmax:
                cursor.execute("SELECT MIN(zoom_level), MAX(zoom_level) FROM map")
                min_z, max_z = cursor.fetchone()
                if min_z is None:
                    raise ValueError("Unable to get min/max zoom - tile data is empty")
                metadata["minzoom"] = min_z
                metadata["maxzoom"] = max_z
            self._update_metadata_db(cursor, metadata, reset)
            conn.commit()
        print(f"New metadata values in {file}")
        self.print_all(file=file)

    @staticmethod
    def _get_metadata(file) -> Dict[str, str]:
        """Return the whole metadata table of ``file`` as a name->value dict."""
        with sqlite3.connect(file) as conn:
            return {k: v for k, v in
                    query(conn, "SELECT name, value FROM metadata", [])}

    def _update_metadata_db(self, cursor, metadata, reset):
        """Write ``metadata`` through ``cursor``; with ``reset`` the table is
        emptied first.  Raises ValueError on any invalid value."""
        if reset:
            # noinspection SqlWithoutWhere
            cursor.execute("DELETE FROM metadata;")
        for name, value in metadata.items():
            _, is_valid = self.validate(name, value)
            if not is_valid:
                raise ValueError(f"Invalid {name}={value}")
            cursor.execute(
                "INSERT OR REPLACE INTO metadata(name, value) VALUES (?, ?);",
                [name, value])

    def validate(self, name, value):
        """Return (display_value, is_valid) for one metadata entry.

        The display value may be annotated (parsed timestamp, thousands
        separators, or a table summarizing the layers of the 'json' value).
        """
        is_valid = True
        if name == 'mtime':
            try:
                val = datetime.fromtimestamp(int(value) / 1000.0)
                value = f'{value} ({val.isoformat()})'
            except ValueError:
                is_valid = False
                value = f'{value} (invalid)'
        elif name in ('filesize', 'maskLevel', 'minzoom', 'maxzoom'):
            try:
                value = f'{int(value):,}'
            except ValueError:
                is_valid = False
                value = f'{value} (invalid)'
        elif name == 'json':
            try:
                val = json.loads(value)
                if self.show_json:
                    value = f'(valid JSON value)'
                else:
                    value = '(The value is a valid JSON, use --show-json for raw dump)'
                # Summarize each vector layer as one table row.
                # NOTE(review): a KeyError here (e.g. missing 'vector_layers')
                # would NOT be caught by the ValueError handler below.
                res = []
                for v in val["vector_layers"]:
                    desc = ""
                    if "description" in v:
                        desc = shorten_str(v["description"], 40)
                    fields = []
                    names = []
                    for fld in v["fields"].keys():
                        if fld.startswith("name:"):
                            names.append(fld[5:])  # collapse name:* translations
                        else:
                            fields.append(fld)
                    fields_str = ", ".join(v for v in fields)
                    if names:
                        fields_str += f", name:* ({shorten_str(','.join(names), 20)})"
                    res.append({
                        "layer": v["id"],
                        "minZ": v["minzoom"],
                        "maxZ": v["maxzoom"],
                        "fields": fields_str,
                        "description": desc
                    })
                value += "\n\n" + tabulate(res, headers="keys")
                if self.show_json:
                    value += "\n\n"
                    value += json.dumps(val, ensure_ascii=False, indent=2)
            except ValueError:
                is_valid = False
                if self.show_json:
                    value = f'(invalid JSON value)\n{value}'
                else:
                    value = f'(invalid JSON value, use --show-json to see it)'
        return value, is_valid
| 40.571429 | 90 | 0.510102 | import json
import os
import sqlite3
from datetime import datetime
from pathlib import Path
import asyncpg
from tabulate import tabulate
from typing import Dict
from openmaptiles.pgutils import get_postgis_version, get_vector_layers
from openmaptiles.sqlite_utils import query
from openmaptiles.sqltomvt import MvtGenerator
from openmaptiles.tileset import Tileset
from openmaptiles.utils import print_err, Bbox, print_tile, shorten_str
class KeyFinder:
    """Find tile_ids that occur many times in an mbtiles file.

    Heavily duplicated tiles are typically "empty" (sea, uniform land, ...);
    their ids can be fed to the Imputer.  Results are returned and, when
    requested, printed to stdout or appended to ``outfile``.
    """

    def __init__(self,
                 mbtiles,
                 show_size=None,
                 show_examples=None,
                 outfile: str = None,
                 zoom=None,
                 min_dup_count=None,
                 verbose=False) -> None:
        # mbtiles: path to the mbtiles (sqlite) file.
        # outfile: '-' for stdout, a path for a file, or None.
        # min_dup_count: minimum number of occurrences to report a tile_id.
        self.mbtiles = mbtiles
        if min_dup_count is not None:
            min_dup_count = int(min_dup_count)
            if min_dup_count < 2:
                raise ValueError(f"min_dup_count must be an integer ≥ 2")
            self.min_dup_count = min_dup_count
        else:
            # Higher zoom levels contain far more tiles, so require more
            # duplicates before a tile_id is considered "empty".
            self.min_dup_count = 50 if zoom and zoom > 12 else 20
        self.use_stdout = outfile == '-'
        self.zoom = zoom
        self.verbose = verbose
        if outfile:
            self.outfile = True if self.use_stdout else Path(outfile)
        else:
            self.outfile = None
        # Default both display toggles to the verbosity setting.
        self.show_size = self.verbose if show_size is None else show_size
        self.show_examples = self.verbose if show_examples is None else show_examples

    def run(self):
        """Return duplicated tile_ids, most frequent first; optionally write them."""
        if self.outfile and not self.use_stdout:
            with self.outfile.open("w"):
                pass  # create or truncate file, but don't write anything to it yet
        with sqlite3.connect(self.mbtiles) as conn:
            results = []
            if self.show_size:
                # FIX: the zoom filter must live inside the inner sub-query.
                # Previously "WHERE zoom_level=?" was appended after the
                # "dups JOIN images" outer query, which exposes no zoom_level
                # column (dups has tile_id/cnt only), and it would also have
                # counted duplicates across all zooms before filtering —
                # unlike the non-size branch below, where WHERE precedes
                # GROUP BY.
                sql = "SELECT cnt, dups.tile_id, LENGTH(tile_data) FROM (" \
                      " SELECT tile_id, COUNT(*) AS cnt FROM map "
                sql_opts = []
                if self.zoom:
                    sql += " WHERE zoom_level=? "
                    sql_opts.append(self.zoom)
                sql += " GROUP BY tile_id HAVING cnt >= ?" \
                       ") dups JOIN images ON images.tile_id = dups.tile_id"
                sql_opts.append(self.min_dup_count)
            else:
                sql_opts = []
                sql = "SELECT COUNT(*) cnt, tile_id FROM map"
                if self.zoom:
                    sql += f" WHERE zoom_level=?"
                    sql_opts.append(self.zoom)
                sql += " GROUP BY tile_id HAVING cnt >= ?"
                sql_opts.append(self.min_dup_count)
            for vals in query(conn, sql, sql_opts):
                results.append(vals)
            # Tuples start with the count, so this orders most-frequent first.
            results.sort(reverse=True)
            size = None
            examples = None
            for vals in results:
                if len(vals) == 3:
                    count, tile_id, size = vals
                else:
                    count, tile_id = vals
                if self.show_examples:
                    example_sql = "SELECT zoom_level, tile_column, tile_row FROM map " \
                                  "WHERE tile_id = ? LIMIT 5"
                    examples = [f'{z}/{x}/{y}' for z, x, y in
                                query(conn, example_sql, [tile_id])]
                if self.verbose:
                    res = f"{tile_id} x {count:,}"
                    if self.show_size:
                        res += f', {size:,} bytes'
                    if self.show_examples:
                        res += ', examples: ' + ', '.join(examples)
                    print_err(res)
            # Keep only the tile_ids for the return value / output.
            results = [v[1] for v in results]
            if self.use_stdout:
                for v in results:
                    print(v)
            elif self.outfile:
                with self.outfile.open("a") as f:
                    f.writelines([str(v) + '\n' for v in results])
            return results
class Imputer:
    """Copy parent-zoom tiles whose tile_id is a known "empty" key down into
    the next zoom level, and record every other child tile as still needing
    to be generated (written to stdout or to ``outfile``)."""

    def __init__(self, mbtiles, keys, zoom, outfile: str = None,
                 verbose=False) -> None:
        # The key dict's values act as per-key insertion counters for run().
        self.mbtiles = mbtiles
        self.keys = dict.fromkeys(keys, 0)
        self.zoom = zoom
        self.use_stdout = outfile == '-'
        # Progress output is mandatory unless tiles are streamed to stdout.
        self.verbose = verbose or not self.use_stdout
        self.outfile = (True if self.use_stdout else Path(outfile)) if outfile else None

    def run(self):
        """Impute every child tile for ``self.zoom`` and report statistics."""
        with sqlite3.connect(self.mbtiles) as conn:
            # No outfile means unmatched tiles cannot be reported, so the
            # scan may be restricted to the known keys.
            only_known_keys = not self.outfile
            if self.outfile and not self.use_stdout:
                # Truncate (or create) the output file before appending.
                with self.outfile.open("w"):
                    pass
            imputed_total = 0
            pending_total = 0
            cur = conn.cursor()
            per_key_counts = self.keys
            for matched, unmatched in self.tile_batches(conn, only_known_keys):
                unmatched.sort()
                if matched:
                    matched.sort()
                    for row in matched:
                        per_key_counts[row[3]] += 1  # row[3] == tile_id
                    cur.executemany(
                        'INSERT OR IGNORE INTO map'
                        '(zoom_level, tile_column, tile_row, tile_id)'
                        ' VALUES(?,?,?,?)',
                        matched)
                    imputed_total += cur.rowcount
                    conn.commit()  # per-batch commit keeps transactions small
                if unmatched:
                    if self.use_stdout:
                        # Entries carry their own trailing newlines.
                        print(''.join(unmatched), end='')
                    else:
                        with self.outfile.open("a") as out:
                            out.writelines(unmatched)
                    pending_total += len(unmatched)
            if self.verbose:
                for key, added in per_key_counts.items():
                    print_err(f"{key} - added {added:,}")
            print_err(f'Total imputed tiles: {imputed_total:,}')
            if pending_total:
                print_err(f'Total tiles need to be generated: {pending_total:,}')

    def tile_batches(self, conn: sqlite3.Connection, limit_to_keys=False):
        """Yield ``(matched, unmatched)`` batches of child tiles derived from
        the parent zoom level.  ``matched`` rows can be inserted into the map
        table verbatim; ``unmatched`` entries are "z/x/y" lines of tiles that
        still have to be rendered (produced only when ``limit_to_keys`` is
        False)."""
        batch_size = 1000000
        zoom = self.zoom
        parent_zoom = zoom - 1
        sql = f"SELECT tile_column, tile_row, tile_id FROM map WHERE zoom_level=?"
        args = [parent_zoom]
        if limit_to_keys:
            placeholders = ','.join('?' * len(self.keys))
            sql += f" and tile_id IN ({placeholders})"
            args += self.keys
        matched, unmatched = [], []
        max_y = 2 ** parent_zoom - 1
        # Each parent expands into four children; offsets listed in the same
        # order the tuples/lines are emitted.
        child_offsets = ((0, 0), (1, 0), (0, 1), (1, 1))
        for x, y, key in query(conn, sql, args):
            if limit_to_keys or key in self.keys:
                for dx, dy in child_offsets:
                    matched.append((zoom, x * 2 + dx, y * 2 + dy, key))
            else:
                # mbtiles stores rows bottom-up, so flip Y for z/x/y output.
                fy = max_y - y
                for dx, dy in child_offsets:
                    unmatched.append(f"{zoom}/{x * 2 + dx}/{fy * 2 + dy}\n")
            if len(matched) > batch_size or len(unmatched) > batch_size:
                yield matched, unmatched
                matched, unmatched = [], []
        if matched or unmatched:
            yield matched, unmatched
class Metadata:
    """Read, validate, generate, and copy the ``metadata`` table of an
    mbtiles (sqlite) file, following the mbtiles 1.3 specification."""

    def __init__(self, mbtiles: str, show_json: bool = False,
                 show_ranges: bool = False) -> None:
        # mbtiles: path to the mbtiles file.
        # show_json: print the raw "json" metadata value instead of a summary.
        # show_ranges: additionally print per-zoom tile counts and x/y ranges.
        self.mbtiles = mbtiles
        self.show_json = show_json
        self.show_ranges = show_ranges

    def print_all(self, file: str = None):
        """Print every metadata name/value pair of ``file`` (defaults to
        ``self.mbtiles``), validating each value as it is printed."""
        file = file or self.mbtiles
        data = self._get_metadata(file)
        if data:
            width = max((len(v) for v in data.keys()))
            # Sort keys alphabetically, forcing the bulky 'json' key last.
            for name, value in sorted(data.items(),
                                      key=lambda v: v[0] if v[0] != 'json' else 'zz'):
                print(f"{name:{width}} {self.validate(name, value)[0]}")
        else:
            print(f"There are no values present in {file} metadata table")
        if self.show_ranges:
            with sqlite3.connect(file) as conn:
                sql = """\
SELECT zoom_level, COUNT(*) AS count,
MIN(tile_column) AS min_column, MAX(tile_column) AS max_column,
MIN(tile_row) AS min_row, MAX(tile_row) AS max_row
FROM map
GROUP BY zoom_level
"""
                res = []
                for z, cnt, min_x, max_x, min_y, max_y in sorted(query(conn, sql, [])):
                    res.append({
                        "Zoom": z,
                        "Tile count": f"{cnt:,}",
                        "Found tile ranges": f"{min_x},{min_y} x {max_x},{max_y}",
                    })
                print("\n" + tabulate(res, headers="keys"))

    def get_value(self, name):
        """Print one metadata value; exit with status 1 if the key is absent."""
        with sqlite3.connect(self.mbtiles) as conn:
            cursor = conn.cursor()
            cursor.execute("SELECT value FROM metadata WHERE name=?", [name])
            row = cursor.fetchone()
            if row is None:
                print_err(f"Metadata field '{name}' is not found")
                exit(1)
            print(row[0])

    def set_value(self, name, value):
        """Set (or, when ``value`` is None, delete) a single metadata entry,
        validating the new value first."""
        if value is not None:
            _, is_valid = self.validate(name, value)
            if not is_valid:
                raise ValueError(f"Invalid {name}={value}")
        with sqlite3.connect(self.mbtiles) as conn:
            cursor = conn.cursor()
            if value is None:
                cursor.execute("DELETE FROM metadata WHERE name=?;", [name])
            else:
                cursor.execute(
                    "INSERT OR REPLACE INTO metadata(name, value) VALUES (?, ?);",
                    [name, value])

    async def generate(self, tileset, reset, auto_minmax,
                       pghost, pgport, dbname, user, password):
        """Build metadata from a tileset definition plus a live PostgreSQL
        instance (for the vector_layers JSON) and store it in the mbtiles."""
        ts = Tileset.parse(tileset)
        print(
            f'Connecting to PostgreSQL at {pghost}:{pgport}, db={dbname}, user={user}...')
        try:
            # A single pooled connection suffices: it is only used to probe
            # the PostGIS version and to introspect the vector layers.
            async with asyncpg.create_pool(
                database=dbname, host=pghost, port=pgport, user=user,
                password=password, min_size=1, max_size=1,
            ) as pool:
                async with pool.acquire() as conn:
                    mvt = MvtGenerator(
                        ts,
                        postgis_ver=await get_postgis_version(conn),
                        zoom='$1', x='$2', y='$3',
                    )
                    json_data = dict(vector_layers=await get_vector_layers(conn, mvt))
        except ConnectionError as err:
            print(f"Unable to connect to Postgres database: {err}")
            raise err
        # Convert tileset to the metadata object according to mbtiles 1.3 spec
        # https://github.com/mapbox/mbtiles-spec/blob/master/1.3/spec.md#content
        metadata = dict(
            # MUST
            name=ts.name,
            format="pbf",
            json=json.dumps(json_data, ensure_ascii=False, separators=(',', ':')),
            # SHOULD
            bounds=",".join((str(v) for v in ts.bounds)),
            center=",".join((str(v) for v in ts.center)),
            minzoom=str(ts.minzoom),
            maxzoom=str(ts.maxzoom),
            # MAY
            attribution=ts.attribution,
            description=ts.description,
            version=ts.version,
            # EXTRAS
            id=ts.id,
        )
        self._update_metadata(metadata, auto_minmax, reset, self.mbtiles,
                              ts.center[2])

    def copy(self, target_mbtiles, reset, auto_minmax):
        """Copy all metadata from ``self.mbtiles`` into another mbtiles file."""
        metadata = self._get_metadata(self.mbtiles)
        self._update_metadata(metadata, auto_minmax, reset, target_mbtiles)

    def show_tile(self, zoom, x, y, show_names, summary):
        """Print the content of a single tile, or a not-found message."""
        with sqlite3.connect(self.mbtiles) as conn:
            sql = "SELECT tile_data FROM tiles " \
                  "WHERE zoom_level=? AND tile_column=? AND tile_row=?"
            for row in query(conn, sql, [zoom, x, y]):
                print_tile(row[0], show_names, summary, f"{zoom}/{x}/{y}")
                break
            else:
                # for/else: the loop body never ran, i.e. no such tile exists.
                print(f"Tile {zoom}/{x}/{y} not found")

    def _update_metadata(self, metadata, auto_minmax, reset, file, center_zoom=None):
        """Apply env-var overrides and computed fields to ``metadata``, then
        persist it into the metadata table of ``file``."""

        def update_from_env(param, env_var):
            # Environment variables override values coming from the tileset.
            val = os.environ.get(env_var)
            if val is not None:
                metadata[param] = val

        update_from_env('name', 'METADATA_NAME')
        update_from_env('minzoom', 'MIN_ZOOM')
        update_from_env('maxzoom', 'MAX_ZOOM')
        update_from_env('attribution', 'METADATA_ATTRIBUTION')
        update_from_env('description', 'METADATA_DESCRIPTION')
        update_from_env('version', 'METADATA_VERSION')
        metadata['filesize'] = os.path.getsize(file)
        bbox_str = os.environ.get('BBOX')
        if bbox_str:
            bbox = Bbox(bbox=bbox_str,
                        center_zoom=os.environ.get('CENTER_ZOOM', center_zoom))
            metadata["bounds"] = bbox.bounds_str()
            metadata["center"] = bbox.center_str()
        with sqlite3.connect(file) as conn:
            cursor = conn.cursor()
            if auto_minmax:
                cursor.execute("SELECT MIN(zoom_level), MAX(zoom_level) FROM map")
                min_z, max_z = cursor.fetchone()
                if min_z is None:
                    raise ValueError("Unable to get min/max zoom - tile data is empty")
                metadata["minzoom"] = min_z
                metadata["maxzoom"] = max_z
            self._update_metadata_db(cursor, metadata, reset)
            conn.commit()
        print(f"New metadata values in {file}")
        self.print_all(file=file)

    @staticmethod
    def _get_metadata(file) -> Dict[str, str]:
        """Return the whole metadata table of ``file`` as a name->value dict."""
        with sqlite3.connect(file) as conn:
            return {k: v for k, v in
                    query(conn, "SELECT name, value FROM metadata", [])}

    def _update_metadata_db(self, cursor, metadata, reset):
        """Write ``metadata`` through ``cursor``; with ``reset`` the table is
        emptied first.  Raises ValueError on any invalid value."""
        if reset:
            # noinspection SqlWithoutWhere
            cursor.execute("DELETE FROM metadata;")
        for name, value in metadata.items():
            _, is_valid = self.validate(name, value)
            if not is_valid:
                raise ValueError(f"Invalid {name}={value}")
            cursor.execute(
                "INSERT OR REPLACE INTO metadata(name, value) VALUES (?, ?);",
                [name, value])

    def validate(self, name, value):
        """Return (display_value, is_valid) for one metadata entry.

        The display value may be annotated (parsed timestamp, thousands
        separators, or a table summarizing the layers of the 'json' value).
        """
        is_valid = True
        if name == 'mtime':
            try:
                val = datetime.fromtimestamp(int(value) / 1000.0)
                value = f'{value} ({val.isoformat()})'
            except ValueError:
                is_valid = False
                value = f'{value} (invalid)'
        elif name in ('filesize', 'maskLevel', 'minzoom', 'maxzoom'):
            try:
                value = f'{int(value):,}'
            except ValueError:
                is_valid = False
                value = f'{value} (invalid)'
        elif name == 'json':
            try:
                val = json.loads(value)
                if self.show_json:
                    value = f'(valid JSON value)'
                else:
                    value = '(The value is a valid JSON, use --show-json for raw dump)'
                # Summarize each vector layer as one table row.
                # NOTE(review): a KeyError here (e.g. missing 'vector_layers')
                # would NOT be caught by the ValueError handler below.
                res = []
                for v in val["vector_layers"]:
                    desc = ""
                    if "description" in v:
                        desc = shorten_str(v["description"], 40)
                    fields = []
                    names = []
                    for fld in v["fields"].keys():
                        if fld.startswith("name:"):
                            names.append(fld[5:])  # collapse name:* translations
                        else:
                            fields.append(fld)
                    fields_str = ", ".join(v for v in fields)
                    if names:
                        fields_str += f", name:* ({shorten_str(','.join(names), 20)})"
                    res.append({
                        "layer": v["id"],
                        "minZ": v["minzoom"],
                        "maxZ": v["maxzoom"],
                        "fields": fields_str,
                        "description": desc
                    })
                value += "\n\n" + tabulate(res, headers="keys")
                if self.show_json:
                    value += "\n\n"
                    value += json.dumps(val, ensure_ascii=False, indent=2)
            except ValueError:
                is_valid = False
                if self.show_json:
                    value = f'(invalid JSON value)\n{value}'
                else:
                    value = f'(invalid JSON value, use --show-json to see it)'
        return value, is_valid
| true | true |
f724e92c199fe24cf4485298fdf880c51432d6c6 | 6,498 | py | Python | onnx_chainer/functions/__init__.py | blakexu/chainer | f3c2948af2796bb5096f628220fd7321120e1a75 | [
"MIT"
] | 1 | 2019-10-30T06:43:45.000Z | 2019-10-30T06:43:45.000Z | onnx_chainer/functions/__init__.py | blakexu/chainer | f3c2948af2796bb5096f628220fd7321120e1a75 | [
"MIT"
] | null | null | null | onnx_chainer/functions/__init__.py | blakexu/chainer | f3c2948af2796bb5096f628220fd7321120e1a75 | [
"MIT"
] | null | null | null | from onnx_chainer.functions.activation import convert_ClippedReLU # NOQA
from onnx_chainer.functions.activation import convert_ELU # NOQA
from onnx_chainer.functions.activation import convert_HardSigmoid # NOQA
from onnx_chainer.functions.activation import convert_LeakyReLU # NOQA
from onnx_chainer.functions.activation import convert_LogSoftmax # NOQA
from onnx_chainer.functions.activation import convert_PReLUFunction # NOQA
from onnx_chainer.functions.activation import convert_ReLU # NOQA
from onnx_chainer.functions.activation import convert_Selu # NOQA
from onnx_chainer.functions.activation import convert_Sigmoid # NOQA
from onnx_chainer.functions.activation import convert_Softmax # NOQA
from onnx_chainer.functions.activation import convert_Softplus # NOQA
from onnx_chainer.functions.activation import convert_Tanh # NOQA
from onnx_chainer.functions.array import convert_Cast # NOQA
from onnx_chainer.functions.array import convert_Concat # NOQA
from onnx_chainer.functions.array import convert_Copy # NOQA
from onnx_chainer.functions.array import convert_Depth2Space # NOQA
from onnx_chainer.functions.array import convert_Dstack # NOQA
from onnx_chainer.functions.array import convert_ExpandDims # NOQA
from onnx_chainer.functions.array import convert_GetItem # NOQA
from onnx_chainer.functions.array import convert_Hstack # NOQA
from onnx_chainer.functions.array import convert_Moveaxis # NOQA
from onnx_chainer.functions.array import convert_Pad # NOQA
from onnx_chainer.functions.array import convert_Repeat # NOQA
from onnx_chainer.functions.array import convert_Reshape # NOQA
from onnx_chainer.functions.array import convert_ResizeImages # NOQA
from onnx_chainer.functions.array import convert_Separate # NOQA
from onnx_chainer.functions.array import convert_Shape # NOQA
from onnx_chainer.functions.array import convert_Space2Depth # NOQA
from onnx_chainer.functions.array import convert_SplitAxis # NOQA
from onnx_chainer.functions.array import convert_Squeeze # NOQA
from onnx_chainer.functions.array import convert_Stack # NOQA
from onnx_chainer.functions.array import convert_Swapaxes # NOQA
from onnx_chainer.functions.array import convert_Tile # NOQA
from onnx_chainer.functions.array import convert_Transpose # NOQA
from onnx_chainer.functions.array import convert_Vstack # NOQA
from onnx_chainer.functions.array import convert_Where # NOQA
from onnx_chainer.functions.connection import convert_Convolution2DFunction # NOQA
from onnx_chainer.functions.connection import convert_ConvolutionND # NOQA
from onnx_chainer.functions.connection import convert_Deconvolution2DFunction # NOQA
from onnx_chainer.functions.connection import convert_DeconvolutionND # NOQA
from onnx_chainer.functions.connection import convert_EmbedIDFunction # NOQA
from onnx_chainer.functions.connection import convert_LinearFunction # NOQA
from onnx_chainer.functions.loss import convert_SoftmaxCrossEntropy # NOQA
from onnx_chainer.functions.math import convert_Absolute # NOQA
from onnx_chainer.functions.math import convert_Add # NOQA
from onnx_chainer.functions.math import convert_AddConstant # NOQA
from onnx_chainer.functions.math import convert_Arccos # NOQA
from onnx_chainer.functions.math import convert_Arcsin # NOQA
from onnx_chainer.functions.math import convert_Arctan # NOQA
from onnx_chainer.functions.math import convert_ArgMax # NOQA
from onnx_chainer.functions.math import convert_ArgMin # NOQA
from onnx_chainer.functions.math import convert_BroadcastTo # NOQA
from onnx_chainer.functions.math import convert_Clip # NOQA
from onnx_chainer.functions.math import convert_Cos # NOQA
from onnx_chainer.functions.math import convert_Cosh # NOQA
from onnx_chainer.functions.math import convert_Div # NOQA
from onnx_chainer.functions.math import convert_DivFromConstant # NOQA
from onnx_chainer.functions.math import convert_Exp # NOQA
from onnx_chainer.functions.math import convert_Identity # NOQA
from onnx_chainer.functions.math import convert_LinearInterpolate # NOQA
from onnx_chainer.functions.math import convert_Log # NOQA
from onnx_chainer.functions.math import convert_LogSumExp # NOQA
from onnx_chainer.functions.math import convert_MatMul # NOQA
from onnx_chainer.functions.math import convert_Max # NOQA
from onnx_chainer.functions.math import convert_Maximum # NOQA
from onnx_chainer.functions.math import convert_Mean # NOQA
from onnx_chainer.functions.math import convert_Min # NOQA
from onnx_chainer.functions.math import convert_Minimum # NOQA
from onnx_chainer.functions.math import convert_Mul # NOQA
from onnx_chainer.functions.math import convert_MulConstant # NOQA
from onnx_chainer.functions.math import convert_Neg # NOQA
from onnx_chainer.functions.math import convert_PowConstVar # NOQA
from onnx_chainer.functions.math import convert_PowVarConst # NOQA
from onnx_chainer.functions.math import convert_PowVarVar # NOQA
from onnx_chainer.functions.math import convert_Prod # NOQA
from onnx_chainer.functions.math import convert_RsqrtGPU # NOQA
from onnx_chainer.functions.math import convert_Sin # NOQA
from onnx_chainer.functions.math import convert_Sinh # NOQA
from onnx_chainer.functions.math import convert_Sqrt # NOQA
from onnx_chainer.functions.math import convert_Square # NOQA
from onnx_chainer.functions.math import convert_Sub # NOQA
from onnx_chainer.functions.math import convert_SubFromConstant # NOQA
from onnx_chainer.functions.math import convert_Sum # NOQA
from onnx_chainer.functions.math import convert_Tan # NOQA
from onnx_chainer.functions.noise import convert_Dropout # NOQA
from onnx_chainer.functions.normalization import convert_BatchNormalization # NOQA
from onnx_chainer.functions.normalization import convert_FixedBatchNormalization # NOQA
from onnx_chainer.functions.normalization import convert_GroupNormalization # NOQA
from onnx_chainer.functions.normalization import convert_LocalResponseNormalization # NOQA
from onnx_chainer.functions.normalization import convert_NormalizeL2 # NOQA
from onnx_chainer.functions.pooling import convert_AveragePooling2D # NOQA
from onnx_chainer.functions.pooling import convert_AveragePoolingND # NOQA
from onnx_chainer.functions.pooling import convert_MaxPooling2D # NOQA
from onnx_chainer.functions.pooling import convert_MaxPoolingND # NOQA
from onnx_chainer.functions.pooling import convert_ROIPooling2D # NOQA
from onnx_chainer.functions.pooling import convert_Unpooling2D # NOQA
| 62.480769 | 91 | 0.851185 | from onnx_chainer.functions.activation import convert_ClippedReLU
from onnx_chainer.functions.activation import convert_ELU
from onnx_chainer.functions.activation import convert_HardSigmoid
from onnx_chainer.functions.activation import convert_LeakyReLU
from onnx_chainer.functions.activation import convert_LogSoftmax
from onnx_chainer.functions.activation import convert_PReLUFunction
from onnx_chainer.functions.activation import convert_ReLU
from onnx_chainer.functions.activation import convert_Selu
from onnx_chainer.functions.activation import convert_Sigmoid
from onnx_chainer.functions.activation import convert_Softmax
from onnx_chainer.functions.activation import convert_Softplus
from onnx_chainer.functions.activation import convert_Tanh
from onnx_chainer.functions.array import convert_Cast
from onnx_chainer.functions.array import convert_Concat
from onnx_chainer.functions.array import convert_Copy
from onnx_chainer.functions.array import convert_Depth2Space
from onnx_chainer.functions.array import convert_Dstack
from onnx_chainer.functions.array import convert_ExpandDims
from onnx_chainer.functions.array import convert_GetItem
from onnx_chainer.functions.array import convert_Hstack
from onnx_chainer.functions.array import convert_Moveaxis
from onnx_chainer.functions.array import convert_Pad
from onnx_chainer.functions.array import convert_Repeat
from onnx_chainer.functions.array import convert_Reshape
from onnx_chainer.functions.array import convert_ResizeImages
from onnx_chainer.functions.array import convert_Separate
from onnx_chainer.functions.array import convert_Shape
from onnx_chainer.functions.array import convert_Space2Depth
from onnx_chainer.functions.array import convert_SplitAxis
from onnx_chainer.functions.array import convert_Squeeze
from onnx_chainer.functions.array import convert_Stack
from onnx_chainer.functions.array import convert_Swapaxes
from onnx_chainer.functions.array import convert_Tile
from onnx_chainer.functions.array import convert_Transpose
from onnx_chainer.functions.array import convert_Vstack
from onnx_chainer.functions.array import convert_Where
from onnx_chainer.functions.connection import convert_Convolution2DFunction
from onnx_chainer.functions.connection import convert_ConvolutionND
from onnx_chainer.functions.connection import convert_Deconvolution2DFunction
from onnx_chainer.functions.connection import convert_DeconvolutionND
from onnx_chainer.functions.connection import convert_EmbedIDFunction
from onnx_chainer.functions.connection import convert_LinearFunction
from onnx_chainer.functions.loss import convert_SoftmaxCrossEntropy
from onnx_chainer.functions.math import convert_Absolute
from onnx_chainer.functions.math import convert_Add
from onnx_chainer.functions.math import convert_AddConstant
from onnx_chainer.functions.math import convert_Arccos
from onnx_chainer.functions.math import convert_Arcsin
from onnx_chainer.functions.math import convert_Arctan
from onnx_chainer.functions.math import convert_ArgMax
from onnx_chainer.functions.math import convert_ArgMin
from onnx_chainer.functions.math import convert_BroadcastTo
from onnx_chainer.functions.math import convert_Clip
from onnx_chainer.functions.math import convert_Cos
from onnx_chainer.functions.math import convert_Cosh
from onnx_chainer.functions.math import convert_Div
from onnx_chainer.functions.math import convert_DivFromConstant
from onnx_chainer.functions.math import convert_Exp
from onnx_chainer.functions.math import convert_Identity
from onnx_chainer.functions.math import convert_LinearInterpolate
from onnx_chainer.functions.math import convert_Log
from onnx_chainer.functions.math import convert_LogSumExp
from onnx_chainer.functions.math import convert_MatMul
from onnx_chainer.functions.math import convert_Max
from onnx_chainer.functions.math import convert_Maximum
from onnx_chainer.functions.math import convert_Mean
from onnx_chainer.functions.math import convert_Min
from onnx_chainer.functions.math import convert_Minimum
from onnx_chainer.functions.math import convert_Mul
from onnx_chainer.functions.math import convert_MulConstant
from onnx_chainer.functions.math import convert_Neg
from onnx_chainer.functions.math import convert_PowConstVar
from onnx_chainer.functions.math import convert_PowVarConst
from onnx_chainer.functions.math import convert_PowVarVar
from onnx_chainer.functions.math import convert_Prod
from onnx_chainer.functions.math import convert_RsqrtGPU
from onnx_chainer.functions.math import convert_Sin
from onnx_chainer.functions.math import convert_Sinh
from onnx_chainer.functions.math import convert_Sqrt
from onnx_chainer.functions.math import convert_Square
from onnx_chainer.functions.math import convert_Sub
from onnx_chainer.functions.math import convert_SubFromConstant
from onnx_chainer.functions.math import convert_Sum
from onnx_chainer.functions.math import convert_Tan
from onnx_chainer.functions.noise import convert_Dropout
from onnx_chainer.functions.normalization import convert_BatchNormalization
from onnx_chainer.functions.normalization import convert_FixedBatchNormalization
from onnx_chainer.functions.normalization import convert_GroupNormalization
from onnx_chainer.functions.normalization import convert_LocalResponseNormalization
from onnx_chainer.functions.normalization import convert_NormalizeL2
from onnx_chainer.functions.pooling import convert_AveragePooling2D
from onnx_chainer.functions.pooling import convert_AveragePoolingND
from onnx_chainer.functions.pooling import convert_MaxPooling2D
from onnx_chainer.functions.pooling import convert_MaxPoolingND
from onnx_chainer.functions.pooling import convert_ROIPooling2D
from onnx_chainer.functions.pooling import convert_Unpooling2D
| true | true |
f724eb2bf2b936eabc0cf6b12314246ce61bb4cc | 244 | py | Python | crypten/common/__init__.py | vreis/CrypTen-2 | 839a751277a901e4edd9166a720fb3a29deac641 | [
"MIT"
] | 2 | 2020-03-23T18:32:13.000Z | 2020-12-11T10:54:08.000Z | crypten/common/__init__.py | vreis/CrypTen-2 | 839a751277a901e4edd9166a720fb3a29deac641 | [
"MIT"
] | null | null | null | crypten/common/__init__.py | vreis/CrypTen-2 | 839a751277a901e4edd9166a720fb3a29deac641 | [
"MIT"
] | 2 | 2020-04-15T19:28:02.000Z | 2020-04-16T01:59:30.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
__all__ = ["rng", "tensor_types", "util"]
| 27.111111 | 65 | 0.721311 |
__all__ = ["rng", "tensor_types", "util"]
| true | true |
f724ebf9502cb921a15388d5af77f9d5423ced5c | 104,651 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_firewall_policy6.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_firewall_policy6.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_firewall_policy6.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
# Ansible plugin metadata: this module is community-supported and in
# "preview" status (interface may still change between releases).
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_policy6
short_description: Configure IPv6 policies in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall feature and policy6 category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.0
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Hongbin Lu (@fgtdev-hblu)
- Frank Shen (@frankshen01)
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Legacy fortiosapi has been deprecated, httpapi is the preferred way to run playbooks
requirements:
- ansible>=2.9.0
options:
access_token:
description:
- Token-based authentication.
Generated from GUI of Fortigate.
type: str
required: false
enable_log:
description:
- Enable/Disable logging for task.
type: bool
required: false
default: false
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
member_path:
type: str
description:
- Member attribute path to operate on.
- Delimited by a slash character if there are more than one attribute.
- Parameter marked with member_path is legitimate for doing member operation.
member_state:
type: str
description:
- Add or delete a member under specified attribute path.
- When member_state is specified, the state option is ignored.
choices:
- present
- absent
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
firewall_policy6:
description:
- Configure IPv6 policies.
default: null
type: dict
suboptions:
action:
description:
- Policy action (allow/deny/ipsec).
type: str
choices:
- accept
- deny
- ipsec
anti_replay:
description:
- Enable/disable anti-replay check.
type: str
choices:
- enable
- disable
app_category:
description:
- Application category ID list.
type: list
suboptions:
id:
description:
- Category IDs.
required: true
type: int
app_group:
description:
- Application group names.
type: list
suboptions:
name:
description:
- Application group names. Source application.group.name.
required: true
type: str
application:
description:
- Application ID list.
type: list
suboptions:
id:
description:
- Application IDs.
required: true
type: int
application_list:
description:
- Name of an existing Application list. Source application.list.name.
type: str
auto_asic_offload:
description:
- Enable/disable policy traffic ASIC offloading.
type: str
choices:
- enable
- disable
av_profile:
description:
- Name of an existing Antivirus profile. Source antivirus.profile.name.
type: str
cifs_profile:
description:
- Name of an existing CIFS profile. Source cifs.profile.name.
type: str
comments:
description:
- Comment.
type: str
custom_log_fields:
description:
- Log field index numbers to append custom log fields to log messages for this policy.
type: list
suboptions:
field_id:
description:
- Custom log field. Source log.custom-field.id.
type: str
devices:
description:
- Names of devices or device groups that can be matched by the policy.
type: list
suboptions:
name:
description:
- Device or group name. Source user.device.alias user.device-group.name user.device-category.name.
required: true
type: str
diffserv_forward:
description:
- Enable to change packet"s DiffServ values to the specified diffservcode-forward value.
type: str
choices:
- enable
- disable
diffserv_reverse:
description:
- Enable to change packet"s reverse (reply) DiffServ values to the specified diffservcode-rev value.
type: str
choices:
- enable
- disable
diffservcode_forward:
description:
- Change packet"s DiffServ to this value.
type: str
diffservcode_rev:
description:
- Change packet"s reverse (reply) DiffServ to this value.
type: str
dlp_sensor:
description:
- Name of an existing DLP sensor. Source dlp.sensor.name.
type: str
dnsfilter_profile:
description:
- Name of an existing DNS filter profile. Source dnsfilter.profile.name.
type: str
dscp_match:
description:
- Enable DSCP check.
type: str
choices:
- enable
- disable
dscp_negate:
description:
- Enable negated DSCP match.
type: str
choices:
- enable
- disable
dscp_value:
description:
- DSCP value.
type: str
dsri:
description:
- Enable DSRI to ignore HTTP server responses.
type: str
choices:
- enable
- disable
dstaddr:
description:
- Destination address and address group names.
type: list
suboptions:
name:
description:
- Address name. Source firewall.address6.name firewall.addrgrp6.name firewall.vip6.name firewall.vipgrp6.name.
required: true
type: str
dstaddr_negate:
description:
- When enabled dstaddr specifies what the destination address must NOT be.
type: str
choices:
- enable
- disable
dstintf:
description:
- Outgoing (egress) interface.
type: list
suboptions:
name:
description:
- Interface name. Source system.interface.name system.zone.name.
required: true
type: str
emailfilter_profile:
description:
- Name of an existing email filter profile. Source emailfilter.profile.name.
type: str
firewall_session_dirty:
description:
- How to handle sessions if the configuration of this firewall policy changes.
type: str
choices:
- check-all
- check-new
fixedport:
description:
- Enable to prevent source NAT from changing a session"s source port.
type: str
choices:
- enable
- disable
fsso_groups:
description:
- Names of FSSO groups.
type: list
suboptions:
name:
description:
- Names of FSSO groups. Source user.adgrp.name.
required: true
type: str
global_label:
description:
- Label for the policy that appears when the GUI is in Global View mode.
type: str
groups:
description:
- Names of user groups that can authenticate with this policy.
type: list
suboptions:
name:
description:
- Group name. Source user.group.name.
required: true
type: str
http_policy_redirect:
description:
- Redirect HTTP(S) traffic to matching transparent web proxy policy.
type: str
choices:
- enable
- disable
icap_profile:
description:
- Name of an existing ICAP profile. Source icap.profile.name.
type: str
inbound:
description:
- 'Policy-based IPsec VPN: only traffic from the remote network can initiate a VPN.'
type: str
choices:
- enable
- disable
inspection_mode:
description:
- Policy inspection mode (Flow/proxy). Default is Flow mode.
type: str
choices:
- proxy
- flow
ippool:
description:
- Enable to use IP Pools for source NAT.
type: str
choices:
- enable
- disable
ips_sensor:
description:
- Name of an existing IPS sensor. Source ips.sensor.name.
type: str
label:
description:
- Label for the policy that appears when the GUI is in Section View mode.
type: str
logtraffic:
description:
- Enable or disable logging. Log all sessions or security profile sessions.
type: str
choices:
- all
- utm
- disable
logtraffic_start:
description:
- Record logs when a session starts and ends.
type: str
choices:
- enable
- disable
mms_profile:
description:
- Name of an existing MMS profile. Source firewall.mms-profile.name.
type: str
name:
description:
- Policy name.
type: str
nat:
description:
- Enable/disable source NAT.
type: str
choices:
- enable
- disable
natinbound:
description:
- 'Policy-based IPsec VPN: apply destination NAT to inbound traffic.'
type: str
choices:
- enable
- disable
natoutbound:
description:
- 'Policy-based IPsec VPN: apply source NAT to outbound traffic.'
type: str
choices:
- enable
- disable
np_acceleration:
description:
- Enable/disable UTM Network Processor acceleration.
type: str
choices:
- enable
- disable
outbound:
description:
- 'Policy-based IPsec VPN: only traffic from the internal network can initiate a VPN.'
type: str
choices:
- enable
- disable
per_ip_shaper:
description:
- Per-IP traffic shaper. Source firewall.shaper.per-ip-shaper.name.
type: str
policyid:
description:
- Policy ID.
required: true
type: int
poolname:
description:
- IP Pool names.
type: list
suboptions:
name:
description:
- IP pool name. Source firewall.ippool6.name.
required: true
type: str
profile_group:
description:
- Name of profile group. Source firewall.profile-group.name.
type: str
profile_protocol_options:
description:
- Name of an existing Protocol options profile. Source firewall.profile-protocol-options.name.
type: str
profile_type:
description:
- Determine whether the firewall policy allows security profile groups or single profiles only.
type: str
choices:
- single
- group
replacemsg_override_group:
description:
- Override the default replacement message group for this policy. Source system.replacemsg-group.name.
type: str
rsso:
description:
- Enable/disable RADIUS single sign-on (RSSO).
type: str
choices:
- enable
- disable
schedule:
description:
- Schedule name. Source firewall.schedule.onetime.name firewall.schedule.recurring.name firewall.schedule.group.name.
type: str
send_deny_packet:
description:
- Enable/disable return of deny-packet.
type: str
choices:
- enable
- disable
service:
description:
- Service and service group names.
type: list
suboptions:
name:
description:
- Address name. Source firewall.service.custom.name firewall.service.group.name.
required: true
type: str
service_negate:
description:
- When enabled service specifies what the service must NOT be.
type: str
choices:
- enable
- disable
session_ttl:
description:
- Session TTL in seconds for sessions accepted by this policy. 0 means use the system default session TTL.
type: int
spamfilter_profile:
description:
- Name of an existing Spam filter profile. Source spamfilter.profile.name.
type: str
srcaddr:
description:
- Source address and address group names.
type: list
suboptions:
name:
description:
- Address name. Source firewall.address6.name firewall.addrgrp6.name.
required: true
type: str
srcaddr_negate:
description:
- When enabled srcaddr specifies what the source address must NOT be.
type: str
choices:
- enable
- disable
srcintf:
description:
- Incoming (ingress) interface.
type: list
suboptions:
name:
description:
- Interface name. Source system.zone.name system.interface.name.
required: true
type: str
ssh_filter_profile:
description:
- Name of an existing SSH filter profile. Source ssh-filter.profile.name.
type: str
ssh_policy_redirect:
description:
- Redirect SSH traffic to matching transparent proxy policy.
type: str
choices:
- enable
- disable
ssl_mirror:
description:
- Enable to copy decrypted SSL traffic to a FortiGate interface (called SSL mirroring).
type: str
choices:
- enable
- disable
ssl_mirror_intf:
description:
- SSL mirror interface name.
type: list
suboptions:
name:
description:
- Interface name. Source system.zone.name system.interface.name.
required: true
type: str
ssl_ssh_profile:
description:
- Name of an existing SSL SSH profile. Source firewall.ssl-ssh-profile.name.
type: str
status:
description:
- Enable or disable this policy.
type: str
choices:
- enable
- disable
tcp_mss_receiver:
description:
- Receiver TCP maximum segment size (MSS).
type: int
tcp_mss_sender:
description:
- Sender TCP maximum segment size (MSS).
type: int
tcp_session_without_syn:
description:
- Enable/disable creation of TCP session without SYN flag.
type: str
choices:
- all
- data-only
- disable
timeout_send_rst:
description:
- Enable/disable sending RST packets when TCP sessions expire.
type: str
choices:
- enable
- disable
tos:
description:
- ToS (Type of Service) value used for comparison.
type: str
tos_mask:
description:
- Non-zero bit positions are used for comparison while zero bit positions are ignored.
type: str
tos_negate:
description:
- Enable negated TOS match.
type: str
choices:
- enable
- disable
traffic_shaper:
description:
- Reverse traffic shaper. Source firewall.shaper.traffic-shaper.name.
type: str
traffic_shaper_reverse:
description:
- Reverse traffic shaper. Source firewall.shaper.traffic-shaper.name.
type: str
url_category:
description:
- URL category ID list.
type: list
suboptions:
id:
description:
- URL category ID.
required: true
type: int
users:
description:
- Names of individual users that can authenticate with this policy.
type: list
suboptions:
name:
description:
- Names of individual users that can authenticate with this policy. Source user.local.name.
required: true
type: str
utm_status:
description:
- Enable AV/web/ips protection profile.
type: str
choices:
- enable
- disable
uuid:
description:
- Universally Unique Identifier (UUID; automatically assigned but can be manually reset).
type: str
vlan_cos_fwd:
description:
- 'VLAN forward direction user priority: 255 passthrough, 0 lowest, 7 highest'
type: int
vlan_cos_rev:
description:
- 'VLAN reverse direction user priority: 255 passthrough, 0 lowest, 7 highest'
type: int
vlan_filter:
description:
- Set VLAN filters.
type: str
voip_profile:
description:
- Name of an existing VoIP profile. Source voip.profile.name.
type: str
vpntunnel:
description:
- 'Policy-based IPsec VPN: name of the IPsec VPN Phase 1. Source vpn.ipsec.phase1.name vpn.ipsec.manualkey.name.'
type: str
waf_profile:
description:
- Name of an existing Web application firewall profile. Source waf.profile.name.
type: str
webcache:
description:
- Enable/disable web cache.
type: str
choices:
- enable
- disable
webcache_https:
description:
- Enable/disable web cache for HTTPS.
type: str
choices:
- disable
- enable
webfilter_profile:
description:
- Name of an existing Web filter profile. Source webfilter.profile.name.
type: str
webproxy_forward_server:
description:
- Web proxy forward server name. Source web-proxy.forward-server.name web-proxy.forward-server-group.name.
type: str
webproxy_profile:
description:
- Webproxy profile name. Source web-proxy.profile.name.
type: str
'''
EXAMPLES = '''
- collections:
- fortinet.fortios
connection: httpapi
hosts: fortigate01
vars:
ansible_httpapi_port: 443
ansible_httpapi_use_ssl: true
ansible_httpapi_validate_certs: false
vdom: root
tasks:
- name: fortios_firewall_policy6
fortios_firewall_policy6:
vdom: root
state: present
firewall_policy6:
action: deny
anti_replay: enable
auto_asic_offload: enable
diffserv_forward: disable
diffserv_reverse: disable
diffservcode_forward: '000000'
diffservcode_rev: '000000'
dsri: disable
dstaddr:
- name: all
dstaddr_negate: disable
dstintf:
- name: port3
firewall_session_dirty: check-all
fixedport: disable
http_policy_redirect: disable
inbound: disable
inspection_mode: flow
ippool: disable
logtraffic: disable
logtraffic_start: disable
name: policy6p1
nat: disable
natinbound: disable
natoutbound: disable
outbound: disable
policyid: 1
profile_type: single
rsso: disable
schedule: always
send_deny_packet: disable
service:
- name: ALL
service_negate: disable
srcaddr:
- name: all
srcaddr_negate: disable
srcintf:
- name: port4
ssh_policy_redirect: disable
ssl_mirror: disable
status: enable
tcp_mss_receiver: 0
tcp_mss_sender: 0
tcp_session_without_syn: disable
timeout_send_rst: disable
tos: '0x00'
tos_mask: '0x00'
tos_negate: disable
utm_status: disable
vlan_cos_fwd: 0
vlan_cos_rev: 0
webcache: disable
webcache_https: disable
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import schema_to_module_spec
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_schema_versioning
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.comparison import is_same_comparison
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.comparison import serialize
def filter_firewall_policy6_data(json):
    """Return a copy of *json* restricted to the attribute names the
    firewall policy6 endpoint understands.

    Keys that are absent or whose value is None are dropped, so only
    explicitly supplied task parameters reach the FortiOS API.
    """
    option_list = [
        'action', 'anti_replay', 'app_category', 'app_group', 'application',
        'application_list', 'auto_asic_offload', 'av_profile', 'cifs_profile',
        'comments', 'custom_log_fields', 'devices', 'diffserv_forward',
        'diffserv_reverse', 'diffservcode_forward', 'diffservcode_rev',
        'dlp_sensor', 'dnsfilter_profile', 'dscp_match', 'dscp_negate',
        'dscp_value', 'dsri', 'dstaddr', 'dstaddr_negate', 'dstintf',
        'emailfilter_profile', 'firewall_session_dirty', 'fixedport',
        'fsso_groups', 'global_label', 'groups', 'http_policy_redirect',
        'icap_profile', 'inbound', 'inspection_mode', 'ippool', 'ips_sensor',
        'label', 'logtraffic', 'logtraffic_start', 'mms_profile', 'name',
        'nat', 'natinbound', 'natoutbound', 'np_acceleration', 'outbound',
        'per_ip_shaper', 'policyid', 'poolname', 'profile_group',
        'profile_protocol_options', 'profile_type',
        'replacemsg_override_group', 'rsso', 'schedule', 'send_deny_packet',
        'service', 'service_negate', 'session_ttl', 'spamfilter_profile',
        'srcaddr', 'srcaddr_negate', 'srcintf', 'ssh_filter_profile',
        'ssh_policy_redirect', 'ssl_mirror', 'ssl_mirror_intf',
        'ssl_ssh_profile', 'status', 'tcp_mss_receiver', 'tcp_mss_sender',
        'tcp_session_without_syn', 'timeout_send_rst', 'tos', 'tos_mask',
        'tos_negate', 'traffic_shaper', 'traffic_shaper_reverse',
        'url_category', 'users', 'utm_status', 'uuid', 'vlan_cos_fwd',
        'vlan_cos_rev', 'vlan_filter', 'voip_profile', 'vpntunnel',
        'waf_profile', 'webcache', 'webcache_https', 'webfilter_profile',
        'webproxy_forward_server', 'webproxy_profile',
    ]
    # Preserve option_list order so the resulting dict matches the
    # insertion order the original loop produced.
    return {attribute: json[attribute]
            for attribute in option_list
            if attribute in json and json[attribute] is not None}
def underscore_to_hyphen(data):
    """Recursively convert dict keys from underscore to hyphen form.

    The FortiOS REST API uses hyphenated attribute names while Ansible
    arguments use underscores.  Dicts are rebuilt with converted keys;
    lists are converted element-by-element in place (the same list
    object is returned); every other value is passed through untouched.
    """
    if isinstance(data, dict):
        return {key.replace('_', '-'): underscore_to_hyphen(value)
                for key, value in data.items()}
    if isinstance(data, list):
        # Slice-assign so the caller's list object is mutated, matching
        # the in-place behavior relied on elsewhere.
        data[:] = [underscore_to_hyphen(item) for item in data]
        return data
    return data
def firewall_policy6(data, fos, check_mode=False):
    """Create, update or delete an IPv6 firewall policy on the device.

    data: task arguments; must contain 'vdom', 'state' and the
        'firewall_policy6' parameter dict.
    fos: connection handler (FortiOSHandler) used for the API calls.
    check_mode: when True no changes are made; instead an
        (error, changed, data) triple describing what would happen is
        returned.  Otherwise the raw API response dict is returned.
    """
    vdom = data['vdom']
    state = data['state']
    firewall_policy6_data = data['firewall_policy6']
    # Drop unknown/None attributes and convert key names to the
    # hyphenated form the FortiOS REST API expects.
    filtered_data = underscore_to_hyphen(filter_firewall_policy6_data(firewall_policy6_data))
    # check_mode starts from here
    if check_mode:
        # Derive the object's master key and fetch any existing record.
        mkey = fos.get_mkey('firewall', 'policy6', filtered_data, vdom=vdom)
        current_data = fos.get('firewall', 'policy6', vdom=vdom, mkey=mkey)
        is_existed = current_data and current_data.get('http_status') == 200 \
            and isinstance(current_data.get('results'), list) \
            and len(current_data['results']) > 0
        # 2. if it exists and the state is 'present' then compare current settings with desired
        if state == 'present' or state is True:
            if mkey is None:
                # No master key could be derived: treat as a new object.
                return False, True, filtered_data
            # if mkey exists then compare each other
            # record exits and they're matched or not
            if is_existed:
                is_same = is_same_comparison(
                    serialize(current_data['results'][0]), serialize(filtered_data))
                # "changed" only when current and desired settings differ
                return False, not is_same, filtered_data
            # record does not exist
            return False, True, filtered_data
        if state == 'absent':
            if mkey is None:
                # Nothing identifiable to delete -> no change.
                return False, False, filtered_data
            if is_existed:
                return False, True, filtered_data
            return False, False, filtered_data
        # Unrecognized state value: report an error triple.
        return True, False, {'reason: ': 'Must provide state parameter'}
    if state == "present" or state is True:
        return fos.set('firewall',
                       'policy6',
                       data=filtered_data,
                       vdom=vdom)
    elif state == "absent":
        return fos.delete('firewall',
                          'policy6',
                          mkey=filtered_data['policyid'],
                          vdom=vdom)
    else:
        fos._module.fail_json(msg='state must be present or absent!')
def is_successful_status(resp):
    """Return True when the FortiOS API response indicates success.

    A response is successful when its 'status' field is 'success', its
    HTTP status is 200, or it is a DELETE that returned 404 (the object
    was already gone).
    """
    if 'status' in resp and resp['status'] == 'success':
        return True
    if 'http_status' in resp and resp['http_status'] == 200:
        return True
    # A DELETE of a nonexistent object counts as success too.
    return 'http_method' in resp and resp['http_method'] == "DELETE" and resp['http_status'] == 404
def fortios_firewall(data, fos, check_mode):
    """Dispatch the firewall_policy6 task and normalize the result.

    Performs any requested member add/delete first, then runs the
    policy operation.  Returns a (failed, changed, response) triple for
    normal runs; in check mode the triple built by firewall_policy6()
    is passed through unchanged.
    """
    fos.do_member_operation('firewall_policy6')
    if data['firewall_policy6']:
        resp = firewall_policy6(data, fos, check_mode)
    else:
        fos._module.fail_json(msg='missing task body: %s' % ('firewall_policy6'))
    if check_mode:
        return resp
    # changed defaults to True when the API omits 'revision_changed'.
    return not is_successful_status(resp), \
        is_successful_status(resp) and \
        (resp['revision_changed'] if 'revision_changed' in resp else True), \
        resp
versioned_schema = {
"type": "list",
"children": {
"per_ip_shaper": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"webproxy_forward_server": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"dscp_match": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": False,
"v6.2.3": False,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"diffserv_reverse": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"traffic_shaper_reverse": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"uuid": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"vpntunnel": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dlp_sensor": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"custom_log_fields": {
"type": "list",
"children": {
"field_id": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"voip_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"np_acceleration": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"fsso_groups": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
},
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"emailfilter_profile": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"natoutbound": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"logtraffic": {
"type": "string",
"options": [
{
"value": "all",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "utm",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"spamfilter_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": False,
"v6.2.3": False,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"ssh_filter_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"vlan_cos_rev": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"tcp_session_without_syn": {
"type": "string",
"options": [
{
"value": "all",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "data-only",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"url_category": {
"type": "list",
"children": {
"id": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"session_ttl": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"mms_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"poolname": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ssl_ssh_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"comments": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"label": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": False,
"v6.2.3": True,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"app_category": {
"type": "list",
"children": {
"id": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"profile_type": {
"type": "string",
"options": [
{
"value": "single",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "group",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"schedule": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"diffservcode_rev": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"tcp_mss_sender": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dstintf": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"srcaddr_negate": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ssl_mirror_intf": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dscp_negate": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": False,
"v6.2.3": False,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"auto_asic_offload": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"vlan_filter": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"srcintf": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ssh_policy_redirect": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "disable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"anti_replay": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "disable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"action": {
"type": "string",
"options": [
{
"value": "accept",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "deny",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "ipsec",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"tos_negate": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "disable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"replacemsg_override_group": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"http_policy_redirect": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "disable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"groups": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"icap_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"application_list": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"service_negate": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"send_deny_packet": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"webcache": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "disable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"ippool": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"service": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"webproxy_profile": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"tos": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"dnsfilter_profile": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"profile_group": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"tcp_mss_receiver": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"global_label": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": False,
"v6.2.3": True,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"inbound": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"srcaddr": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"tos_mask": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"logtraffic_start": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"webcache_https": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "enable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"ips_sensor": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"devices": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.2.3": True,
"v6.0.5": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": False,
"v6.2.3": True,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"rsso": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"traffic_shaper": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"diffserv_forward": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"natinbound": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"cifs_profile": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"vlan_cos_fwd": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"fixedport": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dsri": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"outbound": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"application": {
"type": "list",
"children": {
"id": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ssl_mirror": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"nat": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"timeout_send_rst": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"status": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"diffservcode_forward": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"users": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dscp_value": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": False,
"v6.2.3": False,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"utm_status": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"waf_profile": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"policyid": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"firewall_session_dirty": {
"type": "string",
"options": [
{
"value": "check-all",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "check-new",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"webfilter_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dstaddr_negate": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"av_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dstaddr": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"app_group": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"profile_protocol_options": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"inspection_mode": {
"type": "string",
"options": [
{
"value": "proxy",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "flow",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
def main():
    """Ansible module entry point for the firewall_policy6 configuration module.

    Builds the module argument spec from the versioned schema, connects to the
    FortiGate over the persistent httpapi socket, checks schema/version
    compatibility, applies the requested firewall policy6 configuration, and
    exits the module with the result (or fails with diagnostics).
    """
    module_spec = schema_to_module_spec(versioned_schema)
    # 'policyid' is the mkey (primary key) that uniquely addresses a policy object.
    mkeyname = 'policyid'
    fields = {
        "access_token": {"required": False, "type": "str", "no_log": True},
        # Bug fix: Ansible's argument_spec expects the type NAME as a string.
        # Passing the builtin `bool` made AnsibleModule coerce via bool(),
        # which mis-handles string inputs such as "false" (truthy non-empty str).
        "enable_log": {"required": False, "type": "bool"},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "member_path": {"required": False, "type": "str"},
        "member_state": {
            "type": "str",
            "required": False,
            "choices": ["present", "absent"]
        },
        "state": {"required": True, "type": "str",
                  "choices": ["present", "absent"]},
        "firewall_policy6": {
            "required": False, "type": "dict", "default": None,
            "options": {
            }
        }
    }
    # Copy every schema-derived option into the nested spec; the mkey attribute
    # is marked required so each task unambiguously identifies one policy.
    for attribute_name in module_spec['options']:
        fields["firewall_policy6"]['options'][attribute_name] = module_spec['options'][attribute_name]
        if mkeyname and mkeyname == attribute_name:
            fields["firewall_policy6"]['options'][attribute_name]['required'] = True

    check_legacy_fortiosapi()
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=True)

    versions_check_result = None
    if module._socket_path:
        connection = Connection(module._socket_path)
        if 'access_token' in module.params:
            connection.set_option('access_token', module.params['access_token'])

        # Logging defaults to off unless the caller explicitly opted in.
        if 'enable_log' in module.params:
            connection.set_option('enable_log', module.params['enable_log'])
        else:
            connection.set_option('enable_log', False)
        fos = FortiOSHandler(connection, module, mkeyname)
        versions_check_result = check_schema_versioning(fos, versioned_schema, "firewall_policy6")

        is_error, has_changed, result = fortios_firewall(module.params, fos, module.check_mode)
    else:
        # No persistent connection socket: the module cannot reach FortiOS.
        module.fail_json(**FAIL_SOCKET_MSG)

    if versions_check_result and versions_check_result['matched'] is False:
        # Typo fix in the user-facing warning: "FortOS" -> "FortiOS".
        module.warn("Ansible has detected version mismatch between FortiOS system and your playbook, see more details by specifying option -vvv")

    if not is_error:
        if versions_check_result and versions_check_result['matched'] is False:
            module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result)
        else:
            module.exit_json(changed=has_changed, meta=result)
    else:
        if versions_check_result and versions_check_result['matched'] is False:
            module.fail_json(msg="Error in repo", version_check_warning=versions_check_result, meta=result)
        else:
            module.fail_json(msg="Error in repo", meta=result)
# Standard script guard: run the Ansible module entry point only when this
# file is executed directly (as Ansible does), not when it is imported.
if __name__ == '__main__':
    main()
| 32.230059 | 144 | 0.32553 |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Standard Ansible module metadata: maturity level ('preview'), support
# channel ('community'), and the metadata format version.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_policy6
short_description: Configure IPv6 policies in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall feature and policy6 category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.0
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Hongbin Lu (@fgtdev-hblu)
- Frank Shen (@frankshen01)
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Legacy fortiosapi has been deprecated, httpapi is the preferred way to run playbooks
requirements:
- ansible>=2.9.0
options:
access_token:
description:
- Token-based authentication.
Generated from GUI of Fortigate.
type: str
required: false
enable_log:
description:
- Enable/Disable logging for task.
type: bool
required: false
default: false
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
member_path:
type: str
description:
- Member attribute path to operate on.
- Delimited by a slash character if there are more than one attribute.
- Parameter marked with member_path is legitimate for doing member operation.
member_state:
type: str
description:
- Add or delete a member under specified attribute path.
- When member_state is specified, the state option is ignored.
choices:
- present
- absent
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
firewall_policy6:
description:
- Configure IPv6 policies.
default: null
type: dict
suboptions:
action:
description:
- Policy action (allow/deny/ipsec).
type: str
choices:
- accept
- deny
- ipsec
anti_replay:
description:
- Enable/disable anti-replay check.
type: str
choices:
- enable
- disable
app_category:
description:
- Application category ID list.
type: list
suboptions:
id:
description:
- Category IDs.
required: true
type: int
app_group:
description:
- Application group names.
type: list
suboptions:
name:
description:
- Application group names. Source application.group.name.
required: true
type: str
application:
description:
- Application ID list.
type: list
suboptions:
id:
description:
- Application IDs.
required: true
type: int
application_list:
description:
- Name of an existing Application list. Source application.list.name.
type: str
auto_asic_offload:
description:
- Enable/disable policy traffic ASIC offloading.
type: str
choices:
- enable
- disable
av_profile:
description:
- Name of an existing Antivirus profile. Source antivirus.profile.name.
type: str
cifs_profile:
description:
- Name of an existing CIFS profile. Source cifs.profile.name.
type: str
comments:
description:
- Comment.
type: str
custom_log_fields:
description:
- Log field index numbers to append custom log fields to log messages for this policy.
type: list
suboptions:
field_id:
description:
- Custom log field. Source log.custom-field.id.
type: str
devices:
description:
- Names of devices or device groups that can be matched by the policy.
type: list
suboptions:
name:
description:
- Device or group name. Source user.device.alias user.device-group.name user.device-category.name.
required: true
type: str
diffserv_forward:
description:
- Enable to change packet"s DiffServ values to the specified diffservcode-forward value.
type: str
choices:
- enable
- disable
diffserv_reverse:
description:
- Enable to change packet"s reverse (reply) DiffServ values to the specified diffservcode-rev value.
type: str
choices:
- enable
- disable
diffservcode_forward:
description:
- Change packet"s DiffServ to this value.
type: str
diffservcode_rev:
description:
- Change packet"s reverse (reply) DiffServ to this value.
type: str
dlp_sensor:
description:
- Name of an existing DLP sensor. Source dlp.sensor.name.
type: str
dnsfilter_profile:
description:
- Name of an existing DNS filter profile. Source dnsfilter.profile.name.
type: str
dscp_match:
description:
- Enable DSCP check.
type: str
choices:
- enable
- disable
dscp_negate:
description:
- Enable negated DSCP match.
type: str
choices:
- enable
- disable
dscp_value:
description:
- DSCP value.
type: str
dsri:
description:
- Enable DSRI to ignore HTTP server responses.
type: str
choices:
- enable
- disable
dstaddr:
description:
- Destination address and address group names.
type: list
suboptions:
name:
description:
- Address name. Source firewall.address6.name firewall.addrgrp6.name firewall.vip6.name firewall.vipgrp6.name.
required: true
type: str
dstaddr_negate:
description:
- When enabled dstaddr specifies what the destination address must NOT be.
type: str
choices:
- enable
- disable
dstintf:
description:
- Outgoing (egress) interface.
type: list
suboptions:
name:
description:
- Interface name. Source system.interface.name system.zone.name.
required: true
type: str
emailfilter_profile:
description:
- Name of an existing email filter profile. Source emailfilter.profile.name.
type: str
firewall_session_dirty:
description:
- How to handle sessions if the configuration of this firewall policy changes.
type: str
choices:
- check-all
- check-new
fixedport:
description:
- Enable to prevent source NAT from changing a session"s source port.
type: str
choices:
- enable
- disable
fsso_groups:
description:
- Names of FSSO groups.
type: list
suboptions:
name:
description:
- Names of FSSO groups. Source user.adgrp.name.
required: true
type: str
global_label:
description:
- Label for the policy that appears when the GUI is in Global View mode.
type: str
groups:
description:
- Names of user groups that can authenticate with this policy.
type: list
suboptions:
name:
description:
- Group name. Source user.group.name.
required: true
type: str
http_policy_redirect:
description:
- Redirect HTTP(S) traffic to matching transparent web proxy policy.
type: str
choices:
- enable
- disable
icap_profile:
description:
- Name of an existing ICAP profile. Source icap.profile.name.
type: str
inbound:
description:
- 'Policy-based IPsec VPN: only traffic from the remote network can initiate a VPN.'
type: str
choices:
- enable
- disable
inspection_mode:
description:
- Policy inspection mode (Flow/proxy). Default is Flow mode.
type: str
choices:
- proxy
- flow
ippool:
description:
- Enable to use IP Pools for source NAT.
type: str
choices:
- enable
- disable
ips_sensor:
description:
- Name of an existing IPS sensor. Source ips.sensor.name.
type: str
label:
description:
- Label for the policy that appears when the GUI is in Section View mode.
type: str
logtraffic:
description:
- Enable or disable logging. Log all sessions or security profile sessions.
type: str
choices:
- all
- utm
- disable
logtraffic_start:
description:
- Record logs when a session starts and ends.
type: str
choices:
- enable
- disable
mms_profile:
description:
- Name of an existing MMS profile. Source firewall.mms-profile.name.
type: str
name:
description:
- Policy name.
type: str
nat:
description:
- Enable/disable source NAT.
type: str
choices:
- enable
- disable
natinbound:
description:
- 'Policy-based IPsec VPN: apply destination NAT to inbound traffic.'
type: str
choices:
- enable
- disable
natoutbound:
description:
- 'Policy-based IPsec VPN: apply source NAT to outbound traffic.'
type: str
choices:
- enable
- disable
np_acceleration:
description:
- Enable/disable UTM Network Processor acceleration.
type: str
choices:
- enable
- disable
outbound:
description:
- 'Policy-based IPsec VPN: only traffic from the internal network can initiate a VPN.'
type: str
choices:
- enable
- disable
per_ip_shaper:
description:
- Per-IP traffic shaper. Source firewall.shaper.per-ip-shaper.name.
type: str
policyid:
description:
- Policy ID.
required: true
type: int
poolname:
description:
- IP Pool names.
type: list
suboptions:
name:
description:
- IP pool name. Source firewall.ippool6.name.
required: true
type: str
profile_group:
description:
- Name of profile group. Source firewall.profile-group.name.
type: str
profile_protocol_options:
description:
- Name of an existing Protocol options profile. Source firewall.profile-protocol-options.name.
type: str
profile_type:
description:
- Determine whether the firewall policy allows security profile groups or single profiles only.
type: str
choices:
- single
- group
replacemsg_override_group:
description:
- Override the default replacement message group for this policy. Source system.replacemsg-group.name.
type: str
rsso:
description:
- Enable/disable RADIUS single sign-on (RSSO).
type: str
choices:
- enable
- disable
schedule:
description:
- Schedule name. Source firewall.schedule.onetime.name firewall.schedule.recurring.name firewall.schedule.group.name.
type: str
send_deny_packet:
description:
- Enable/disable return of deny-packet.
type: str
choices:
- enable
- disable
service:
description:
- Service and service group names.
type: list
suboptions:
name:
description:
                - Service name. Source firewall.service.custom.name firewall.service.group.name.
required: true
type: str
service_negate:
description:
- When enabled service specifies what the service must NOT be.
type: str
choices:
- enable
- disable
session_ttl:
description:
- Session TTL in seconds for sessions accepted by this policy. 0 means use the system default session TTL.
type: int
spamfilter_profile:
description:
- Name of an existing Spam filter profile. Source spamfilter.profile.name.
type: str
srcaddr:
description:
- Source address and address group names.
type: list
suboptions:
name:
description:
- Address name. Source firewall.address6.name firewall.addrgrp6.name.
required: true
type: str
srcaddr_negate:
description:
- When enabled srcaddr specifies what the source address must NOT be.
type: str
choices:
- enable
- disable
srcintf:
description:
- Incoming (ingress) interface.
type: list
suboptions:
name:
description:
- Interface name. Source system.zone.name system.interface.name.
required: true
type: str
ssh_filter_profile:
description:
- Name of an existing SSH filter profile. Source ssh-filter.profile.name.
type: str
ssh_policy_redirect:
description:
- Redirect SSH traffic to matching transparent proxy policy.
type: str
choices:
- enable
- disable
ssl_mirror:
description:
- Enable to copy decrypted SSL traffic to a FortiGate interface (called SSL mirroring).
type: str
choices:
- enable
- disable
ssl_mirror_intf:
description:
- SSL mirror interface name.
type: list
suboptions:
name:
description:
- Interface name. Source system.zone.name system.interface.name.
required: true
type: str
ssl_ssh_profile:
description:
- Name of an existing SSL SSH profile. Source firewall.ssl-ssh-profile.name.
type: str
status:
description:
- Enable or disable this policy.
type: str
choices:
- enable
- disable
tcp_mss_receiver:
description:
- Receiver TCP maximum segment size (MSS).
type: int
tcp_mss_sender:
description:
- Sender TCP maximum segment size (MSS).
type: int
tcp_session_without_syn:
description:
- Enable/disable creation of TCP session without SYN flag.
type: str
choices:
- all
- data-only
- disable
timeout_send_rst:
description:
- Enable/disable sending RST packets when TCP sessions expire.
type: str
choices:
- enable
- disable
tos:
description:
- ToS (Type of Service) value used for comparison.
type: str
tos_mask:
description:
- Non-zero bit positions are used for comparison while zero bit positions are ignored.
type: str
tos_negate:
description:
- Enable negated TOS match.
type: str
choices:
- enable
- disable
traffic_shaper:
description:
            - Traffic shaper. Source firewall.shaper.traffic-shaper.name.
type: str
traffic_shaper_reverse:
description:
- Reverse traffic shaper. Source firewall.shaper.traffic-shaper.name.
type: str
url_category:
description:
- URL category ID list.
type: list
suboptions:
id:
description:
- URL category ID.
required: true
type: int
users:
description:
- Names of individual users that can authenticate with this policy.
type: list
suboptions:
name:
description:
- Names of individual users that can authenticate with this policy. Source user.local.name.
required: true
type: str
utm_status:
description:
- Enable AV/web/ips protection profile.
type: str
choices:
- enable
- disable
uuid:
description:
- Universally Unique Identifier (UUID; automatically assigned but can be manually reset).
type: str
vlan_cos_fwd:
description:
- 'VLAN forward direction user priority: 255 passthrough, 0 lowest, 7 highest'
type: int
vlan_cos_rev:
description:
- 'VLAN reverse direction user priority: 255 passthrough, 0 lowest, 7 highest'
type: int
vlan_filter:
description:
- Set VLAN filters.
type: str
voip_profile:
description:
- Name of an existing VoIP profile. Source voip.profile.name.
type: str
vpntunnel:
description:
- 'Policy-based IPsec VPN: name of the IPsec VPN Phase 1. Source vpn.ipsec.phase1.name vpn.ipsec.manualkey.name.'
type: str
waf_profile:
description:
- Name of an existing Web application firewall profile. Source waf.profile.name.
type: str
webcache:
description:
- Enable/disable web cache.
type: str
choices:
- enable
- disable
webcache_https:
description:
- Enable/disable web cache for HTTPS.
type: str
choices:
- disable
- enable
webfilter_profile:
description:
- Name of an existing Web filter profile. Source webfilter.profile.name.
type: str
webproxy_forward_server:
description:
- Web proxy forward server name. Source web-proxy.forward-server.name web-proxy.forward-server-group.name.
type: str
webproxy_profile:
description:
- Webproxy profile name. Source web-proxy.profile.name.
type: str
'''
EXAMPLES = '''
- collections:
- fortinet.fortios
connection: httpapi
hosts: fortigate01
vars:
ansible_httpapi_port: 443
ansible_httpapi_use_ssl: true
ansible_httpapi_validate_certs: false
vdom: root
tasks:
- name: fortios_firewall_policy6
fortios_firewall_policy6:
vdom: root
state: present
firewall_policy6:
action: deny
anti_replay: enable
auto_asic_offload: enable
diffserv_forward: disable
diffserv_reverse: disable
diffservcode_forward: '000000'
diffservcode_rev: '000000'
dsri: disable
dstaddr:
- name: all
dstaddr_negate: disable
dstintf:
- name: port3
firewall_session_dirty: check-all
fixedport: disable
http_policy_redirect: disable
inbound: disable
inspection_mode: flow
ippool: disable
logtraffic: disable
logtraffic_start: disable
name: policy6p1
nat: disable
natinbound: disable
natoutbound: disable
outbound: disable
policyid: 1
profile_type: single
rsso: disable
schedule: always
send_deny_packet: disable
service:
- name: ALL
service_negate: disable
srcaddr:
- name: all
srcaddr_negate: disable
srcintf:
- name: port4
ssh_policy_redirect: disable
ssl_mirror: disable
status: enable
tcp_mss_receiver: 0
tcp_mss_sender: 0
tcp_session_without_syn: disable
timeout_send_rst: disable
tos: '0x00'
tos_mask: '0x00'
tos_negate: disable
utm_status: disable
vlan_cos_fwd: 0
vlan_cos_rev: 0
webcache: disable
webcache_https: disable
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import schema_to_module_spec
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_schema_versioning
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.comparison import is_same_comparison
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.comparison import serialize
def filter_firewall_policy6_data(json):
    """Return a copy of *json* restricted to the supported policy6 options.

    Keys that are not in the module's option list, and keys whose value is
    None (i.e. parameters the user did not set), are dropped so that only
    explicitly provided settings are sent to the device.

    :param json: dict of raw task parameters (name kept for upstream
        compatibility even though it shadows the stdlib module).
    :returns: new dict containing only the recognized, non-None options.
    """
    option_list = ['action', 'anti_replay', 'app_category',
                   'app_group', 'application', 'application_list',
                   'auto_asic_offload', 'av_profile', 'cifs_profile',
                   'comments', 'custom_log_fields', 'devices',
                   'diffserv_forward', 'diffserv_reverse', 'diffservcode_forward',
                   'diffservcode_rev', 'dlp_sensor', 'dnsfilter_profile',
                   'dscp_match', 'dscp_negate', 'dscp_value',
                   'dsri', 'dstaddr', 'dstaddr_negate',
                   'dstintf', 'emailfilter_profile', 'firewall_session_dirty',
                   'fixedport', 'fsso_groups', 'global_label',
                   'groups', 'http_policy_redirect', 'icap_profile',
                   'inbound', 'inspection_mode', 'ippool',
                   'ips_sensor', 'label', 'logtraffic',
                   'logtraffic_start', 'mms_profile', 'name',
                   'nat', 'natinbound', 'natoutbound',
                   'np_acceleration', 'outbound', 'per_ip_shaper',
                   'policyid', 'poolname', 'profile_group',
                   'profile_protocol_options', 'profile_type', 'replacemsg_override_group',
                   'rsso', 'schedule', 'send_deny_packet',
                   'service', 'service_negate', 'session_ttl',
                   'spamfilter_profile', 'srcaddr', 'srcaddr_negate',
                   'srcintf', 'ssh_filter_profile', 'ssh_policy_redirect',
                   'ssl_mirror', 'ssl_mirror_intf', 'ssl_ssh_profile',
                   'status', 'tcp_mss_receiver', 'tcp_mss_sender',
                   'tcp_session_without_syn', 'timeout_send_rst', 'tos',
                   'tos_mask', 'tos_negate', 'traffic_shaper',
                   'traffic_shaper_reverse', 'url_category', 'users',
                   'utm_status', 'uuid', 'vlan_cos_fwd',
                   'vlan_cos_rev', 'vlan_filter', 'voip_profile',
                   'vpntunnel', 'waf_profile', 'webcache',
                   'webcache_https', 'webfilter_profile', 'webproxy_forward_server',
                   'webproxy_profile']
    # Keep only known options the user actually set (non-None values).
    return {attribute: json[attribute]
            for attribute in option_list
            if attribute in json and json[attribute] is not None}
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys from snake_case to hyphen-case.

    Lists are converted element-by-element in place; dicts are rebuilt with
    hyphenated keys; any other value (including strings) is returned as-is.
    """
    if isinstance(data, list):
        for index in range(len(data)):
            data[index] = underscore_to_hyphen(data[index])
        return data
    if isinstance(data, dict):
        return {key.replace('_', '-'): underscore_to_hyphen(value)
                for key, value in data.items()}
    return data
def firewall_policy6(data, fos, check_mode=False):
    """Create, update or delete an IPv6 firewall policy on the FortiGate.

    :param data: task arguments; must contain 'vdom', 'state' and the
        'firewall_policy6' option dict.
    :param fos: FortiOSHandler used to talk to the FortiGate REST API.
    :param check_mode: when True (Ansible --check), only report whether a
        change WOULD be made; nothing is pushed to the device.
    :returns: in check_mode a (failed, changed, diff) tuple; otherwise the
        raw API response dict from fos.set()/fos.delete().
    """
    vdom = data['vdom']
    state = data['state']
    firewall_policy6_data = data['firewall_policy6']
    # Strip unset options, then convert snake_case keys to the API's hyphen-case.
    filtered_data = underscore_to_hyphen(filter_firewall_policy6_data(firewall_policy6_data))
    # check_mode starts from here: diff desired vs. current config, no writes
    if check_mode:
        mkey = fos.get_mkey('firewall', 'policy6', filtered_data, vdom=vdom)
        current_data = fos.get('firewall', 'policy6', vdom=vdom, mkey=mkey)
        # The object exists only if the GET succeeded and returned at least one result.
        is_existed = current_data and current_data.get('http_status') == 200 \
            and isinstance(current_data.get('results'), list) \
            and len(current_data['results']) > 0
        # 2. if it exists and the state is 'present' then compare current settings with desired
        if state == 'present' or state is True:
            if mkey is None:
                # No primary key derivable -> treat as creating a new object.
                return False, True, filtered_data
            # if mkey exists then compare each other
            # record exists: changed only if serialized configs differ
            if is_existed:
                is_same = is_same_comparison(
                    serialize(current_data['results'][0]), serialize(filtered_data))
                return False, not is_same, filtered_data
            # record does not exist -> creating it would be a change
            return False, True, filtered_data
        if state == 'absent':
            if mkey is None:
                # Nothing identifiable to delete.
                return False, False, filtered_data
            # Deleting an existing record would be a change.
            if is_existed:
                return False, True, filtered_data
            return False, False, filtered_data
        # NOTE(review): the key literal 'reason: ' (with embedded colon) looks
        # like a typo but is kept as-is to match upstream behavior.
        return True, False, {'reason: ': 'Must provide state parameter'}
    if state == "present" or state is True:
        return fos.set('firewall',
                       'policy6',
                       data=filtered_data,
                       vdom=vdom)
    elif state == "absent":
        return fos.delete('firewall',
                          'policy6',
                          mkey=filtered_data['policyid'],
                          vdom=vdom)
    else:
        fos._module.fail_json(msg='state must be present or absent!')
def is_successful_status(resp):
    """Return True when a FortiGate API response indicates success.

    Success means an explicit 'success' status, an HTTP 200, or a DELETE
    that came back 404 (the object was already gone).
    """
    if 'status' in resp and resp['status'] == 'success':
        return True
    if 'http_status' in resp and resp['http_status'] == 200:
        return True
    return ('http_method' in resp
            and resp['http_method'] == "DELETE"
            and resp['http_status'] == 404)
def fortios_firewall(data, fos, check_mode):
    """Dispatch the firewall_policy6 task and normalize the result.

    In check mode, returns the (failed, changed, diff) tuple from
    firewall_policy6() unchanged; otherwise returns a
    (failed, changed, response) tuple derived from the raw API reply.
    """
    fos.do_member_operation('firewall_policy6')
    if data['firewall_policy6']:
        resp = firewall_policy6(data, fos, check_mode)
    else:
        fos._module.fail_json(msg='missing task body: %s' % ('firewall_policy6'))
    if check_mode:
        return resp
    succeeded = is_successful_status(resp)
    # 'revision_changed' absent means we must assume the config changed.
    changed = succeeded and (resp['revision_changed'] if 'revision_changed' in resp else True)
    return not succeeded, changed, resp
versioned_schema = {
"type": "list",
"children": {
"per_ip_shaper": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"webproxy_forward_server": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"dscp_match": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": False,
"v6.2.3": False,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"diffserv_reverse": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"traffic_shaper_reverse": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"uuid": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"vpntunnel": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dlp_sensor": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"custom_log_fields": {
"type": "list",
"children": {
"field_id": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"voip_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"np_acceleration": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"fsso_groups": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
},
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"emailfilter_profile": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"natoutbound": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"logtraffic": {
"type": "string",
"options": [
{
"value": "all",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "utm",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"spamfilter_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": False,
"v6.2.3": False,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"ssh_filter_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"vlan_cos_rev": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"tcp_session_without_syn": {
"type": "string",
"options": [
{
"value": "all",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "data-only",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"url_category": {
"type": "list",
"children": {
"id": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"session_ttl": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"mms_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"poolname": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ssl_ssh_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"comments": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"label": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": False,
"v6.2.3": True,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"app_category": {
"type": "list",
"children": {
"id": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"profile_type": {
"type": "string",
"options": [
{
"value": "single",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "group",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"schedule": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"diffservcode_rev": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"tcp_mss_sender": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dstintf": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"srcaddr_negate": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ssl_mirror_intf": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dscp_negate": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": False,
"v6.2.3": False,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"auto_asic_offload": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"vlan_filter": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"srcintf": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ssh_policy_redirect": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "disable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"anti_replay": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "disable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"action": {
"type": "string",
"options": [
{
"value": "accept",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "deny",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "ipsec",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"tos_negate": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "disable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"replacemsg_override_group": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"http_policy_redirect": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "disable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"groups": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"icap_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"application_list": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"service_negate": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"send_deny_packet": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"webcache": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "disable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"ippool": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"service": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"webproxy_profile": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"tos": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"dnsfilter_profile": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"profile_group": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"tcp_mss_receiver": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"global_label": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": False,
"v6.2.3": True,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"inbound": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"srcaddr": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"tos_mask": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"logtraffic_start": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"webcache_https": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "enable",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"ips_sensor": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"devices": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.2.3": True,
"v6.0.5": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": False,
"v6.2.3": True,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"rsso": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"traffic_shaper": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"diffserv_forward": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"natinbound": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"cifs_profile": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"vlan_cos_fwd": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"fixedport": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dsri": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"outbound": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"application": {
"type": "list",
"children": {
"id": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ssl_mirror": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"nat": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"timeout_send_rst": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"status": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"diffservcode_forward": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"users": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dscp_value": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": False,
"v6.2.3": False,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"utm_status": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"waf_profile": {
"type": "string",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"policyid": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"firewall_session_dirty": {
"type": "string",
"options": [
{
"value": "check-all",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "check-new",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"webfilter_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dstaddr_negate": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"av_profile": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dstaddr": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"app_group": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"profile_protocol_options": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"inspection_mode": {
"type": "string",
"options": [
{
"value": "proxy",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "flow",
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
},
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
def main():
    """Module entry point for the FortiOS ``firewall_policy6`` Ansible module.

    Builds the Ansible argument spec from the versioned schema, talks to the
    FortiOS device over the persistent httpapi connection, and exits with the
    result of applying (or checking) the requested configuration.
    """
    module_spec = schema_to_module_spec(versioned_schema)
    mkeyname = 'policyid'
    fields = {
        "access_token": {"required": False, "type": "str", "no_log": True},
        # BUG FIX: the type must be the *string* "bool" so Ansible applies its
        # boolean coercion rules. Passing the ``bool`` builtin made any
        # non-empty string truthy, so enable_log: "false" became True.
        "enable_log": {"required": False, "type": "bool"},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "member_path": {"required": False, "type": "str"},
        "member_state": {
            "type": "str",
            "required": False,
            "choices": ["present", "absent"]
        },
        "state": {"required": True, "type": "str",
                  "choices": ["present", "absent"]},
        "firewall_policy6": {
            "required": False, "type": "dict", "default": None,
            "options": {
            }
        }
    }
    # Copy every attribute of the generated schema into the module options;
    # the mkey (primary key) attribute must always be supplied by the user.
    for attribute_name in module_spec['options']:
        fields["firewall_policy6"]['options'][attribute_name] = module_spec['options'][attribute_name]
        if mkeyname and mkeyname == attribute_name:
            fields["firewall_policy6"]['options'][attribute_name]['required'] = True

    check_legacy_fortiosapi()
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=True)

    versions_check_result = None
    if module._socket_path:
        connection = Connection(module._socket_path)
        # Forward connection-level options before creating the handler.
        if 'access_token' in module.params:
            connection.set_option('access_token', module.params['access_token'])
        if 'enable_log' in module.params:
            connection.set_option('enable_log', module.params['enable_log'])
        else:
            connection.set_option('enable_log', False)
        fos = FortiOSHandler(connection, module, mkeyname)
        versions_check_result = check_schema_versioning(fos, versioned_schema, "firewall_policy6")
        is_error, has_changed, result = fortios_firewall(module.params, fos, module.check_mode)
    else:
        # No persistent connection available - the module cannot reach the device.
        module.fail_json(**FAIL_SOCKET_MSG)

    if versions_check_result and versions_check_result['matched'] is False:
        # Typo fix in the user-facing warning: "FortiOS", not "FortOS".
        module.warn("Ansible has detected version mismatch between FortiOS system and your playbook, see more details by specifying option -vvv")

    if not is_error:
        if versions_check_result and versions_check_result['matched'] is False:
            module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result)
        else:
            module.exit_json(changed=has_changed, meta=result)
    else:
        if versions_check_result and versions_check_result['matched'] is False:
            module.fail_json(msg="Error in repo", version_check_warning=versions_check_result, meta=result)
        else:
            module.fail_json(msg="Error in repo", meta=result)


if __name__ == '__main__':
    main()
| true | true |
f724ef7c58e53166da599152abac034e13800121 | 368 | py | Python | copyspecial/my_test.py | rayedbar/google_python_exercises | 9b0903ab9acd91ca82d9568725139cfbb43edae6 | [
"Apache-2.0"
] | null | null | null | copyspecial/my_test.py | rayedbar/google_python_exercises | 9b0903ab9acd91ca82d9568725139cfbb43edae6 | [
"Apache-2.0"
] | null | null | null | copyspecial/my_test.py | rayedbar/google_python_exercises | 9b0903ab9acd91ca82d9568725139cfbb43edae6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import sys
import os
import subprocess
filename = 'haha.txt'
try:
f = open(filename, 'rU')
text = f.read()
f.close()
except IOError:
## Control jumps directly to here if any of the above lines throws IOError.
sys.stderr.write('problem reading:' + filename)
## In any case, the code then continues with the line after the try/except
| 21.647059 | 79 | 0.701087 |
import sys
import os
import subprocess
filename = 'haha.txt'
try:
f = open(filename, 'rU')
text = f.read()
f.close()
except IOError:
| true | true |
f724efb25c23769a08c939a3c661b1d41864648b | 7,801 | py | Python | laser-chess-backend/app/core/routers/users.py | tojatos/laser-tactics | 538bef7ab03bf35c0ef27e195001f6f7f12c1ba4 | [
"MIT"
] | 2 | 2021-12-12T03:45:18.000Z | 2021-12-21T03:53:23.000Z | laser-chess-backend/app/core/routers/users.py | tojatos/laser-tactics | 538bef7ab03bf35c0ef27e195001f6f7f12c1ba4 | [
"MIT"
] | 1 | 2022-03-26T15:13:29.000Z | 2022-03-26T15:13:29.000Z | laser-chess-backend/app/core/routers/users.py | tojatos/laser-tactics | 538bef7ab03bf35c0ef27e195001f6f7f12c1ba4 | [
"MIT"
] | null | null | null | from fastapi import Depends, HTTPException
from fastapi import status, APIRouter
from jose import JWTError, jwt
from sqlalchemy.orm import Session
from app.core.dependecies import get_db, SECRET_KEY, ALGORITHM, TokenPurpose, get_current_active_user, get_current_user, \
verify_password
from app.core.internal import schemas, crud
from app.game_engine.models import *
# All endpoints in this module are mounted under the /users prefix and share
# the 404/422 response envelopes declared here.
router = APIRouter(
    prefix="/users",
    tags=["users"],
    responses={404: {"error": "Not found"}, 422: {"error": "Invalid input data"}},
)
# TODO: test
@router.post("/verify/{token}")
def verify_user(token: str, db: Session = Depends(get_db)):
    """Confirm a newly registered account from an e-mailed JWT link.

    The token must decode with the server key and carry the user's e-mail in
    ``sub``, the ACCOUNT_VERIFICATION purpose, and a ``hash`` claim. Any
    decode/claim failure yields the same generic 400 message.
    """
    try:
        payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
        email: str = payload.get("sub")
        purpose = payload.get("purpose")
        is_verifed = payload.get("hash")
        if email is None or purpose != TokenPurpose.ACCOUNT_VERIFICATION:
            raise HTTPException(status_code=400, detail="The verification link is invalid or has expired.")
        token_data = schemas.VerificationTokenData(email=email, purpose=purpose, hash=is_verifed)
    except JWTError:
        raise HTTPException(status_code=400, detail="The verification link is invalid or has expired.")
    user = crud.get_user_by_email(db, token_data.email)
    if user is None:
        # Same generic message as the decode failures above.
        raise HTTPException(status_code=400, detail="The verification link is invalid or has expired.")
    if user.is_verified:
        # NOTE(review): raising HTTPException with a 200 status is unusual;
        # a plain response would express "already confirmed" more clearly -
        # confirm clients rely on this shape before changing it.
        raise HTTPException(status_code=200, detail='Account already confirmed. Please login.')
    else:
        crud.verify_user(user=user, db=db)
        return {"detail": "Account verified successfully"}
# TODO: test
@router.post("/change_password")
def change_password(change_password_schema: schemas.EmergencyChangePasswordSchema, db: Session = Depends(get_db)):
    """Reset a forgotten password from an e-mailed JWT.

    The token's ``hash`` claim must still equal the user's current password
    hash, so a reset link is invalidated as soon as the password changes.
    NOTE(review): this module-level name is shadowed by the
    /me/change_password handler defined later in this file; the route still
    works because it is registered by the decorator at import time.
    """
    try:
        token = change_password_schema.token
        payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
        username: str = payload.get("sub")
        purpose = payload.get("purpose")
        # NOTE: 'hash' shadows the builtin within this function body.
        hash = payload.get("hash")
        if username is None or purpose != TokenPurpose.CHANGE_PASSWORD:
            raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Could not validate credentials")
        token_data = schemas.TokenData(username=username, purpose=purpose, hash=hash)
    except JWTError:
        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Could not validate credentials")
    user = crud.get_user(db, token_data.username)
    if user is None:
        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Could not validate credentials")
    if user.hashed_password != hash:
        # Stale link: the password changed after the token was issued.
        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Could not validate credentials")
    return crud.change_password(user, change_password_schema.newPassword, db)
@router.post("", response_model=schemas.User)
def create_user(user: schemas.UserCreate, db: Session = Depends(get_db)):
db_user = crud.get_user_by_email(db, email=user.email)
if db_user:
raise HTTPException(status_code=400, detail="Email already registered")
db_user_1 = crud.get_user(db, username=user.username)
if db_user_1:
raise HTTPException(status_code=400, detail="This name is taken")
return crud.create_user(db=db, user=user)
@router.get("", response_model=List[schemas.UserGet])
def read_users(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
users = crud.get_users(db, skip=skip, limit=limit)
return users
@router.get("/{username}", response_model=schemas.UserGet)
def read_user(username: str, db: Session = Depends(get_db)):
db_user = crud.get_user(db, username=username)
if db_user is None:
raise HTTPException(status_code=404, detail="User not found")
return db_user
@router.get("/me/blocked", response_model=List[str])
async def get_users_blocked(current_user: schemas.User = Depends(get_current_active_user),
db: Session = Depends(get_db)):
return crud.get_blocked_users(user=current_user, db=db)
@router.post("/me/block", response_model=schemas.BlockedUsers)
async def block_user(usernameSchema: schemas.Username, current_user: schemas.User = Depends(get_current_active_user),
db: Session = Depends(get_db)):
username = usernameSchema.username
user_to_block = crud.get_user(username=username, db=db)
if not user_to_block:
raise HTTPException(status_code=404, detail="User not found")
blocked = crud.get_blocked_users(current_user, db)
if user_to_block.username == current_user.username:
raise HTTPException(status_code=403, detail="Cannot block yourself")
if username in blocked:
raise HTTPException(status_code=403, detail="User already blocked")
return crud.create_block_record(user=current_user, user_to_block=user_to_block, db=db)
# TODO: test
@router.delete("/me/unblock")
async def unblock_user(usernameSchema: schemas.Username, current_user: schemas.User = Depends(get_current_active_user),
                       db: Session = Depends(get_db)):
    """Remove an existing block placed by the authenticated user.

    404 if the target does not exist, 403 if the target is not blocked.
    """
    target_name = usernameSchema.username
    target = crud.get_user(username=target_name, db=db)
    blocked_names = crud.get_blocked_users(user=current_user, db=db)
    if not target:
        raise HTTPException(status_code=404, detail="User not found")
    if target.username not in blocked_names:
        raise HTTPException(status_code=403, detail="User not blocked")
    return crud.remove_block_record(user=current_user, blocked_user=target, db=db)
@router.get("/me/info", response_model=schemas.User)
async def read_users_me(current_user: schemas.User = Depends(get_current_active_user)):
return current_user
@router.post("/me/change_password")
def change_password(change_password_schema: schemas.ChangePasswordSchema,
current_user: schemas.User = Depends(get_current_user), db: Session = Depends(get_db)):
db_user = crud.get_user(db=db, username=current_user.username)
if not verify_password(change_password_schema.oldPassword, db_user.hashed_password):
raise HTTPException(status_code=401, detail="Invalid old password")
return crud.change_password(user=current_user, new_password=change_password_schema.newPassword, db=db)
@router.get("/{username}/history", response_model=List[schemas.GameHistoryEntry])
def get_users_game_history(username: str, db: Session = Depends(get_db)):
db_user = crud.get_user(db, username=username)
if db_user is None:
raise HTTPException(status_code=404, detail="User not found")
history = crud.get_last_20_matches(db=db, user=db_user)
return history
@router.get("/{username}/stats", response_model=schemas.Stats)
def get_stats(username: str, db: Session = Depends(get_db)):
db_user = crud.get_user(db, username=username)
if db_user is None:
raise HTTPException(status_code=404, detail="User not found")
return crud.get_stats(db=db, user=db_user)
@router.get("/me/settings", response_model=schemas.Settings)
def get_settings(current_user: schemas.User = Depends(get_current_active_user), db: Session = Depends(get_db)):
return crud.get_settings(db=db, user=current_user)
@router.patch("/me/settings", response_model=schemas.Settings)
def update_settings(settings: schemas.Settings, current_user: schemas.User = Depends(get_current_active_user),
db: Session = Depends(get_db)):
return crud.update_settings(settings=settings, db=db, user=current_user)
@router.get("/ranking/top", response_model=List[schemas.UserGet])
def get_top_ranked(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
users = crud.get_users_by_rating(db, skip=skip, limit=limit)
return users
| 46.159763 | 122 | 0.735419 | from fastapi import Depends, HTTPException
from fastapi import status, APIRouter
from jose import JWTError, jwt
from sqlalchemy.orm import Session
from app.core.dependecies import get_db, SECRET_KEY, ALGORITHM, TokenPurpose, get_current_active_user, get_current_user, \
verify_password
from app.core.internal import schemas, crud
from app.game_engine.models import *
router = APIRouter(
prefix="/users",
tags=["users"],
responses={404: {"error": "Not found"}, 422: {"error": "Invalid input data"}},
)
@router.post("/verify/{token}")
def verify_user(token: str, db: Session = Depends(get_db)):
try:
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
email: str = payload.get("sub")
purpose = payload.get("purpose")
is_verifed = payload.get("hash")
if email is None or purpose != TokenPurpose.ACCOUNT_VERIFICATION:
raise HTTPException(status_code=400, detail="The verification link is invalid or has expired.")
token_data = schemas.VerificationTokenData(email=email, purpose=purpose, hash=is_verifed)
except JWTError:
raise HTTPException(status_code=400, detail="The verification link is invalid or has expired.")
user = crud.get_user_by_email(db, token_data.email)
if user is None:
raise HTTPException(status_code=400, detail="The verification link is invalid or has expired.")
if user.is_verified:
raise HTTPException(status_code=200, detail='Account already confirmed. Please login.')
else:
crud.verify_user(user=user, db=db)
return {"detail": "Account verified successfully"}
@router.post("/change_password")
def change_password(change_password_schema: schemas.EmergencyChangePasswordSchema, db: Session = Depends(get_db)):
try:
token = change_password_schema.token
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
username: str = payload.get("sub")
purpose = payload.get("purpose")
hash = payload.get("hash")
if username is None or purpose != TokenPurpose.CHANGE_PASSWORD:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Could not validate credentials")
token_data = schemas.TokenData(username=username, purpose=purpose, hash=hash)
except JWTError:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Could not validate credentials")
user = crud.get_user(db, token_data.username)
if user is None:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Could not validate credentials")
if user.hashed_password != hash:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Could not validate credentials")
return crud.change_password(user, change_password_schema.newPassword, db)
@router.post("", response_model=schemas.User)
def create_user(user: schemas.UserCreate, db: Session = Depends(get_db)):
db_user = crud.get_user_by_email(db, email=user.email)
if db_user:
raise HTTPException(status_code=400, detail="Email already registered")
db_user_1 = crud.get_user(db, username=user.username)
if db_user_1:
raise HTTPException(status_code=400, detail="This name is taken")
return crud.create_user(db=db, user=user)
@router.get("", response_model=List[schemas.UserGet])
def read_users(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
users = crud.get_users(db, skip=skip, limit=limit)
return users
@router.get("/{username}", response_model=schemas.UserGet)
def read_user(username: str, db: Session = Depends(get_db)):
db_user = crud.get_user(db, username=username)
if db_user is None:
raise HTTPException(status_code=404, detail="User not found")
return db_user
@router.get("/me/blocked", response_model=List[str])
async def get_users_blocked(current_user: schemas.User = Depends(get_current_active_user),
db: Session = Depends(get_db)):
return crud.get_blocked_users(user=current_user, db=db)
@router.post("/me/block", response_model=schemas.BlockedUsers)
async def block_user(usernameSchema: schemas.Username, current_user: schemas.User = Depends(get_current_active_user),
db: Session = Depends(get_db)):
username = usernameSchema.username
user_to_block = crud.get_user(username=username, db=db)
if not user_to_block:
raise HTTPException(status_code=404, detail="User not found")
blocked = crud.get_blocked_users(current_user, db)
if user_to_block.username == current_user.username:
raise HTTPException(status_code=403, detail="Cannot block yourself")
if username in blocked:
raise HTTPException(status_code=403, detail="User already blocked")
return crud.create_block_record(user=current_user, user_to_block=user_to_block, db=db)
@router.delete("/me/unblock")
async def unblock_user(usernameSchema: schemas.Username, current_user: schemas.User = Depends(get_current_active_user),
db: Session = Depends(get_db)):
username = usernameSchema.username
user_to_unblock = crud.get_user(username=username, db=db)
blocked = crud.get_blocked_users(user=current_user, db=db)
if not user_to_unblock:
raise HTTPException(status_code=404, detail="User not found")
if user_to_unblock.username not in blocked:
raise HTTPException(status_code=403, detail="User not blocked")
return crud.remove_block_record(user=current_user, blocked_user=user_to_unblock, db=db)
@router.get("/me/info", response_model=schemas.User)
async def read_users_me(current_user: schemas.User = Depends(get_current_active_user)):
return current_user
@router.post("/me/change_password")
def change_password(change_password_schema: schemas.ChangePasswordSchema,
current_user: schemas.User = Depends(get_current_user), db: Session = Depends(get_db)):
db_user = crud.get_user(db=db, username=current_user.username)
if not verify_password(change_password_schema.oldPassword, db_user.hashed_password):
raise HTTPException(status_code=401, detail="Invalid old password")
return crud.change_password(user=current_user, new_password=change_password_schema.newPassword, db=db)
@router.get("/{username}/history", response_model=List[schemas.GameHistoryEntry])
def get_users_game_history(username: str, db: Session = Depends(get_db)):
db_user = crud.get_user(db, username=username)
if db_user is None:
raise HTTPException(status_code=404, detail="User not found")
history = crud.get_last_20_matches(db=db, user=db_user)
return history
@router.get("/{username}/stats", response_model=schemas.Stats)
def get_stats(username: str, db: Session = Depends(get_db)):
db_user = crud.get_user(db, username=username)
if db_user is None:
raise HTTPException(status_code=404, detail="User not found")
return crud.get_stats(db=db, user=db_user)
@router.get("/me/settings", response_model=schemas.Settings)
def get_settings(current_user: schemas.User = Depends(get_current_active_user), db: Session = Depends(get_db)):
return crud.get_settings(db=db, user=current_user)
@router.patch("/me/settings", response_model=schemas.Settings)
def update_settings(settings: schemas.Settings, current_user: schemas.User = Depends(get_current_active_user),
db: Session = Depends(get_db)):
return crud.update_settings(settings=settings, db=db, user=current_user)
@router.get("/ranking/top", response_model=List[schemas.UserGet])
def get_top_ranked(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
users = crud.get_users_by_rating(db, skip=skip, limit=limit)
return users
| true | true |
f724f1291e5caf124dff577988cb066ae98c82f0 | 22,034 | py | Python | tests/gcp/hooks/test_bigtable.py | InigoSJ/airflow | 8b97a387dc30d8c88390d500ec99333798c20f1c | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2019-09-19T15:22:15.000Z | 2019-09-19T15:22:15.000Z | tests/gcp/hooks/test_bigtable.py | InigoSJ/airflow | 8b97a387dc30d8c88390d500ec99333798c20f1c | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2017-05-11T22:57:49.000Z | 2017-05-11T22:57:49.000Z | tests/gcp/hooks/test_bigtable.py | InigoSJ/airflow | 8b97a387dc30d8c88390d500ec99333798c20f1c | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2020-11-16T09:03:58.000Z | 2020-11-16T09:03:58.000Z | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import google
from google.cloud.bigtable import Client
from google.cloud.bigtable.instance import Instance
from tests.contrib.utils.base_gcp_mock import mock_base_gcp_hook_no_default_project_id, \
mock_base_gcp_hook_default_project_id, GCP_PROJECT_ID_HOOK_UNIT_TEST
from tests.compat import mock, PropertyMock
from airflow import AirflowException
from airflow.gcp.hooks.bigtable import BigtableHook
# Fixture identifiers shared by the Bigtable hook tests below.
CBT_INSTANCE = 'instance'
CBT_CLUSTER = 'cluster'
CBT_ZONE = 'zone'
CBT_TABLE = 'table'
class TestBigtableHookNoDefaultProjectId(unittest.TestCase):
    def setUp(self):
        # Build the hook with the GCP base-hook __init__ replaced so that no
        # default project id (and no real credentials) are configured.
        with mock.patch('airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.__init__',
                        new=mock_base_gcp_hook_no_default_project_id):
            self.bigtable_hook_no_default_project_id = BigtableHook(gcp_conn_id='test')
@mock.patch("airflow.gcp.hooks.bigtable.BigtableHook.client_info", new_callable=mock.PropertyMock)
@mock.patch("airflow.gcp.hooks.bigtable.BigtableHook._get_credentials")
@mock.patch("airflow.gcp.hooks.bigtable.Client")
def test_bigtable_client_creation(self, mock_client, mock_get_creds, mock_client_info):
result = self.bigtable_hook_no_default_project_id._get_client(GCP_PROJECT_ID_HOOK_UNIT_TEST)
mock_client.assert_called_once_with(
project=GCP_PROJECT_ID_HOOK_UNIT_TEST,
credentials=mock_get_creds.return_value,
client_info=mock_client_info.return_value,
admin=True
)
self.assertEqual(mock_client.return_value, result)
self.assertEqual(self.bigtable_hook_no_default_project_id._client, result)
    @mock.patch(
        'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=None
    )
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_get_instance_missing_project_id(self, get_client, mock_project_id):
        # With no default project id and none passed explicitly, get_instance
        # must raise before any Bigtable API access happens.
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        with self.assertRaises(AirflowException) as cm:
            self.bigtable_hook_no_default_project_id.get_instance(instance_id=CBT_INSTANCE)
        instance_exists_method.assert_not_called()
        instance_method.assert_not_called()
        err = cm.exception
        self.assertIn("The project id must be passed", str(err))
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_get_instance_overridden_project_id(self, get_client):
        # When the caller supplies project_id explicitly, get_instance should
        # use it for the client and return the (existing) instance.
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        res = self.bigtable_hook_no_default_project_id.get_instance(
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
            instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        get_client.assert_called_once_with(project_id='example-project')
        self.assertIsNotNone(res)
@mock.patch(
    'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
    new_callable=PropertyMock,
    return_value=None
)
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_delete_instance_missing_project_id(self, get_client, mock_project_id):
    """delete_instance without any project id must raise before touching the API."""
    instance = get_client.return_value.instance
    exists = instance.return_value.exists
    delete = instance.return_value.delete
    exists.return_value = True
    with self.assertRaises(AirflowException) as ctx:
        self.bigtable_hook_no_default_project_id.delete_instance(instance_id=CBT_INSTANCE)
    exists.assert_not_called()
    instance.assert_not_called()
    delete.assert_not_called()
    self.assertIn("The project id must be passed", str(ctx.exception))
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_delete_instance_overridden_project_id(self, get_client):
    """An explicitly passed project_id is used and the instance gets deleted."""
    instance = get_client.return_value.instance
    instance.return_value.exists.return_value = True
    delete = instance.return_value.delete
    res = self.bigtable_hook_no_default_project_id.delete_instance(
        project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST, instance_id=CBT_INSTANCE)
    get_client.assert_called_once_with(project_id='example-project')
    instance.assert_called_once_with('instance')
    instance.return_value.exists.assert_called_once_with()
    delete.assert_called_once_with()
    self.assertIsNone(res)
@mock.patch(
    'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
    new_callable=PropertyMock,
    return_value=None
)
@mock.patch('google.cloud.bigtable.instance.Instance.create')
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_create_instance_missing_project_id(self, get_client, instance_create, mock_project_id):
    """create_instance without any project id must raise and create nothing."""
    operation = mock.Mock()
    # Fix: configure the mock's result() return value. The previous spelling
    # 'operation.result_return_value = ...' only set an inert attribute on the
    # Mock and never affected operation.result().
    operation.result.return_value = Instance(instance_id=CBT_INSTANCE, client=get_client)
    instance_create.return_value = operation
    with self.assertRaises(AirflowException) as cm:
        self.bigtable_hook_no_default_project_id.create_instance(
            instance_id=CBT_INSTANCE,
            main_cluster_id=CBT_CLUSTER,
            main_cluster_zone=CBT_ZONE)
    get_client.assert_not_called()
    instance_create.assert_not_called()
    err = cm.exception
    self.assertIn("The project id must be passed", str(err))
@mock.patch('google.cloud.bigtable.instance.Instance.create')
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_create_instance_overridden_project_id(self, get_client, instance_create):
    """An explicitly passed project_id reaches the client factory on creation."""
    operation = mock.Mock()
    # Fix: configure the mock's result() return value. The previous spelling
    # 'operation.result_return_value = ...' only set an inert attribute on the
    # Mock and never affected operation.result().
    operation.result.return_value = Instance(instance_id=CBT_INSTANCE, client=get_client)
    instance_create.return_value = operation
    res = self.bigtable_hook_no_default_project_id.create_instance(
        project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
        instance_id=CBT_INSTANCE,
        main_cluster_id=CBT_CLUSTER,
        main_cluster_zone=CBT_ZONE)
    get_client.assert_called_once_with(project_id='example-project')
    instance_create.assert_called_once_with(clusters=mock.ANY)
    self.assertEqual(res.instance_id, 'instance')
@mock.patch(
    'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
    new_callable=PropertyMock,
    return_value=None
)
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_delete_table_missing_project_id(self, get_client, mock_project_id):
    """delete_table without any project id must raise before touching the API."""
    instance = get_client.return_value.instance
    exists = instance.return_value.exists
    table_delete = instance.return_value.table.return_value.delete
    exists.return_value = True
    with self.assertRaises(AirflowException) as ctx:
        self.bigtable_hook_no_default_project_id.delete_table(
            instance_id=CBT_INSTANCE, table_id=CBT_TABLE)
    get_client.assert_not_called()
    exists.assert_not_called()
    table_delete.assert_not_called()
    self.assertIn("The project id must be passed", str(ctx.exception))
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_delete_table_overridden_project_id(self, get_client):
    """An explicitly passed project_id is used and the table gets deleted."""
    instance = get_client.return_value.instance
    instance.return_value.exists.return_value = True
    table_delete = instance.return_value.table.return_value.delete
    self.bigtable_hook_no_default_project_id.delete_table(
        project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
        instance_id=CBT_INSTANCE,
        table_id=CBT_TABLE)
    get_client.assert_called_once_with(project_id='example-project')
    instance.return_value.exists.assert_called_once_with()
    table_delete.assert_called_once_with()
class TestBigtableHookDefaultProjectId(unittest.TestCase):
    """Tests for BigtableHook when the connection carries a default project id.

    Every GCP call is mocked out; the tests only verify which client methods
    the hook invokes and with which project id.
    """

    def setUp(self):
        """Build a hook whose base-hook __init__ injects the default project id."""
        with mock.patch('airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.__init__',
                        new=mock_base_gcp_hook_default_project_id):
            self.bigtable_hook_default_project_id = BigtableHook(gcp_conn_id='test')

    @mock.patch("airflow.gcp.hooks.bigtable.BigtableHook.client_info", new_callable=mock.PropertyMock)
    @mock.patch("airflow.gcp.hooks.bigtable.BigtableHook._get_credentials")
    @mock.patch("airflow.gcp.hooks.bigtable.Client")
    def test_bigtable_client_creation(self, mock_client, mock_get_creds, mock_client_info):
        """_get_client builds an admin Client and caches it on the hook."""
        result = self.bigtable_hook_default_project_id._get_client(GCP_PROJECT_ID_HOOK_UNIT_TEST)
        mock_client.assert_called_once_with(
            project=GCP_PROJECT_ID_HOOK_UNIT_TEST,
            credentials=mock_get_creds.return_value,
            client_info=mock_client_info.return_value,
            admin=True
        )
        self.assertEqual(mock_client.return_value, result)
        self.assertEqual(self.bigtable_hook_default_project_id._client, result)

    @mock.patch(
        'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
    )
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_get_instance(self, get_client, mock_project_id):
        """get_instance falls back to the connection's default project id."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        res = self.bigtable_hook_default_project_id.get_instance(
            instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        get_client.assert_called_once_with(project_id='example-project')
        self.assertIsNotNone(res)

    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_get_instance_overridden_project_id(self, get_client):
        """An explicit project_id overrides the connection default."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        res = self.bigtable_hook_default_project_id.get_instance(
            project_id='new-project',
            instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        get_client.assert_called_once_with(project_id='new-project')
        self.assertIsNotNone(res)

    @mock.patch(
        'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
    )
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_get_instance_no_instance(self, get_client, mock_project_id):
        """get_instance returns None when the instance does not exist."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = False
        res = self.bigtable_hook_default_project_id.get_instance(
            instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        get_client.assert_called_once_with(project_id='example-project')
        self.assertIsNone(res)

    @mock.patch(
        'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
    )
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_delete_instance(self, get_client, mock_project_id):
        """delete_instance uses the default project id and deletes the instance."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        delete_method = instance_method.return_value.delete
        res = self.bigtable_hook_default_project_id.delete_instance(
            instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        delete_method.assert_called_once_with()
        get_client.assert_called_once_with(project_id='example-project')
        self.assertIsNone(res)

    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_delete_instance_overridden_project_id(self, get_client):
        """An explicit project_id overrides the connection default on delete."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        delete_method = instance_method.return_value.delete
        res = self.bigtable_hook_default_project_id.delete_instance(
            project_id='new-project', instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        delete_method.assert_called_once_with()
        get_client.assert_called_once_with(project_id='new-project')
        self.assertIsNone(res)

    @mock.patch(
        'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
    )
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_delete_instance_no_instance(self, get_client, mock_project_id):
        """delete_instance must not call delete when the instance is absent."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = False
        delete_method = instance_method.return_value.delete
        self.bigtable_hook_default_project_id.delete_instance(
            instance_id=CBT_INSTANCE)
        instance_method.assert_called_once_with('instance')
        instance_exists_method.assert_called_once_with()
        delete_method.assert_not_called()
        get_client.assert_called_once_with(project_id='example-project')

    @mock.patch(
        'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
    )
    @mock.patch('google.cloud.bigtable.instance.Instance.create')
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_create_instance(self, get_client, instance_create, mock_project_id):
        """create_instance uses the default project id."""
        operation = mock.Mock()
        # Fix: configure the mock's result() return value. The previous spelling
        # 'operation.result_return_value = ...' only set an inert attribute on
        # the Mock and never affected operation.result().
        operation.result.return_value = Instance(instance_id=CBT_INSTANCE, client=get_client)
        instance_create.return_value = operation
        res = self.bigtable_hook_default_project_id.create_instance(
            instance_id=CBT_INSTANCE,
            main_cluster_id=CBT_CLUSTER,
            main_cluster_zone=CBT_ZONE)
        get_client.assert_called_once_with(project_id='example-project')
        instance_create.assert_called_once_with(clusters=mock.ANY)
        self.assertEqual(res.instance_id, 'instance')

    @mock.patch('google.cloud.bigtable.instance.Instance.create')
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_create_instance_overridden_project_id(self, get_client, instance_create):
        """An explicit project_id overrides the connection default on creation."""
        operation = mock.Mock()
        # Fix: configure the mock's result() return value (see test_create_instance).
        operation.result.return_value = Instance(instance_id=CBT_INSTANCE, client=get_client)
        instance_create.return_value = operation
        res = self.bigtable_hook_default_project_id.create_instance(
            project_id='new-project',
            instance_id=CBT_INSTANCE,
            main_cluster_id=CBT_CLUSTER,
            main_cluster_zone=CBT_ZONE)
        get_client.assert_called_once_with(project_id='new-project')
        instance_create.assert_called_once_with(clusters=mock.ANY)
        self.assertEqual(res.instance_id, 'instance')

    @mock.patch(
        'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
    )
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_delete_table(self, get_client, mock_project_id):
        """delete_table uses the default project id and deletes the table."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        table_delete_method = instance_method.return_value.table.return_value.delete
        instance_exists_method.return_value = True
        self.bigtable_hook_default_project_id.delete_table(
            instance_id=CBT_INSTANCE,
            table_id=CBT_TABLE)
        get_client.assert_called_once_with(project_id='example-project')
        instance_exists_method.assert_called_once_with()
        table_delete_method.assert_called_once_with()

    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_delete_table_overridden_project_id(self, get_client):
        """An explicit project_id overrides the connection default on delete_table."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        table_delete_method = instance_method.return_value.table.return_value.delete
        instance_exists_method.return_value = True
        self.bigtable_hook_default_project_id.delete_table(
            project_id='new-project',
            instance_id=CBT_INSTANCE,
            table_id=CBT_TABLE)
        get_client.assert_called_once_with(project_id='new-project')
        instance_exists_method.assert_called_once_with()
        table_delete_method.assert_called_once_with()

    @mock.patch('google.cloud.bigtable.table.Table.create')
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_create_table(self, get_client, create):
        """create_table works on a caller-supplied Instance without a new client."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        client = mock.Mock(Client)
        instance = google.cloud.bigtable.instance.Instance(
            instance_id=CBT_INSTANCE,
            client=client)
        self.bigtable_hook_default_project_id.create_table(
            instance=instance,
            table_id=CBT_TABLE)
        get_client.assert_not_called()
        create.assert_called_once_with([], {})

    @mock.patch('google.cloud.bigtable.cluster.Cluster.update')
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_update_cluster(self, get_client, update):
        """update_cluster works on a caller-supplied Instance without a new client."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        client = mock.Mock(Client)
        instance = google.cloud.bigtable.instance.Instance(
            instance_id=CBT_INSTANCE,
            client=client)
        self.bigtable_hook_default_project_id.update_cluster(
            instance=instance,
            cluster_id=CBT_CLUSTER,
            nodes=4)
        get_client.assert_not_called()
        update.assert_called_once_with()

    @mock.patch('google.cloud.bigtable.table.Table.list_column_families')
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_list_column_families(self, get_client, list_column_families):
        """get_column_families_for_table queries the table, not a new client."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        client = mock.Mock(Client)
        get_client.return_value = client
        instance = google.cloud.bigtable.instance.Instance(
            instance_id=CBT_INSTANCE,
            client=client)
        self.bigtable_hook_default_project_id.get_column_families_for_table(
            instance=instance, table_id=CBT_TABLE)
        get_client.assert_not_called()
        list_column_families.assert_called_once_with()

    @mock.patch('google.cloud.bigtable.table.Table.get_cluster_states')
    @mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
    def test_get_cluster_states(self, get_client, get_cluster_states):
        """get_cluster_states_for_table queries the table, not a new client."""
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        client = mock.Mock(Client)
        instance = google.cloud.bigtable.instance.Instance(
            instance_id=CBT_INSTANCE,
            client=client)
        self.bigtable_hook_default_project_id.get_cluster_states_for_table(
            instance=instance, table_id=CBT_TABLE)
        get_client.assert_not_called()
        get_cluster_states.assert_called_once_with()
| 49.626126 | 102 | 0.745983 |
import unittest
import google
from google.cloud.bigtable import Client
from google.cloud.bigtable.instance import Instance
from tests.contrib.utils.base_gcp_mock import mock_base_gcp_hook_no_default_project_id, \
mock_base_gcp_hook_default_project_id, GCP_PROJECT_ID_HOOK_UNIT_TEST
from tests.compat import mock, PropertyMock
from airflow import AirflowException
from airflow.gcp.hooks.bigtable import BigtableHook
CBT_INSTANCE = 'instance'
CBT_CLUSTER = 'cluster'
CBT_ZONE = 'zone'
CBT_TABLE = 'table'
class TestBigtableHookNoDefaultProjectId(unittest.TestCase):
def setUp(self):
with mock.patch('airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.__init__',
new=mock_base_gcp_hook_no_default_project_id):
self.bigtable_hook_no_default_project_id = BigtableHook(gcp_conn_id='test')
@mock.patch("airflow.gcp.hooks.bigtable.BigtableHook.client_info", new_callable=mock.PropertyMock)
@mock.patch("airflow.gcp.hooks.bigtable.BigtableHook._get_credentials")
@mock.patch("airflow.gcp.hooks.bigtable.Client")
def test_bigtable_client_creation(self, mock_client, mock_get_creds, mock_client_info):
result = self.bigtable_hook_no_default_project_id._get_client(GCP_PROJECT_ID_HOOK_UNIT_TEST)
mock_client.assert_called_once_with(
project=GCP_PROJECT_ID_HOOK_UNIT_TEST,
credentials=mock_get_creds.return_value,
client_info=mock_client_info.return_value,
admin=True
)
self.assertEqual(mock_client.return_value, result)
self.assertEqual(self.bigtable_hook_no_default_project_id._client, result)
@mock.patch(
'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_get_instance_missing_project_id(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
with self.assertRaises(AirflowException) as cm:
self.bigtable_hook_no_default_project_id.get_instance(instance_id=CBT_INSTANCE)
instance_exists_method.assert_not_called()
instance_method.assert_not_called()
err = cm.exception
self.assertIn("The project id must be passed", str(err))
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_get_instance_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
res = self.bigtable_hook_no_default_project_id.get_instance(
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
instance_id=CBT_INSTANCE)
instance_method.assert_called_once_with('instance')
instance_exists_method.assert_called_once_with()
get_client.assert_called_once_with(project_id='example-project')
self.assertIsNotNone(res)
@mock.patch(
'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_delete_instance_missing_project_id(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
delete_method = instance_method.return_value.delete
instance_exists_method.return_value = True
with self.assertRaises(AirflowException) as cm:
self.bigtable_hook_no_default_project_id.delete_instance(instance_id=CBT_INSTANCE)
instance_exists_method.assert_not_called()
instance_method.assert_not_called()
delete_method.assert_not_called()
err = cm.exception
self.assertIn("The project id must be passed", str(err))
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_delete_instance_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
delete_method = instance_method.return_value.delete
res = self.bigtable_hook_no_default_project_id.delete_instance(
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST, instance_id=CBT_INSTANCE)
instance_method.assert_called_once_with('instance')
instance_exists_method.assert_called_once_with()
delete_method.assert_called_once_with()
get_client.assert_called_once_with(project_id='example-project')
self.assertIsNone(res)
@mock.patch(
'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('google.cloud.bigtable.instance.Instance.create')
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_create_instance_missing_project_id(self, get_client, instance_create, mock_project_id):
operation = mock.Mock()
operation.result_return_value = Instance(instance_id=CBT_INSTANCE, client=get_client)
instance_create.return_value = operation
with self.assertRaises(AirflowException) as cm:
self.bigtable_hook_no_default_project_id.create_instance(
instance_id=CBT_INSTANCE,
main_cluster_id=CBT_CLUSTER,
main_cluster_zone=CBT_ZONE)
get_client.assert_not_called()
instance_create.assert_not_called()
err = cm.exception
self.assertIn("The project id must be passed", str(err))
@mock.patch('google.cloud.bigtable.instance.Instance.create')
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_create_instance_overridden_project_id(self, get_client, instance_create):
operation = mock.Mock()
operation.result_return_value = Instance(instance_id=CBT_INSTANCE, client=get_client)
instance_create.return_value = operation
res = self.bigtable_hook_no_default_project_id.create_instance(
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
instance_id=CBT_INSTANCE,
main_cluster_id=CBT_CLUSTER,
main_cluster_zone=CBT_ZONE)
get_client.assert_called_once_with(project_id='example-project')
instance_create.assert_called_once_with(clusters=mock.ANY)
self.assertEqual(res.instance_id, 'instance')
@mock.patch(
'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_delete_table_missing_project_id(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
table_delete_method = instance_method.return_value.table.return_value.delete
instance_exists_method.return_value = True
with self.assertRaises(AirflowException) as cm:
self.bigtable_hook_no_default_project_id.delete_table(
instance_id=CBT_INSTANCE,
table_id=CBT_TABLE)
get_client.assert_not_called()
instance_exists_method.assert_not_called()
table_delete_method.assert_not_called()
err = cm.exception
self.assertIn("The project id must be passed", str(err))
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_delete_table_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
table_delete_method = instance_method.return_value.table.return_value.delete
instance_exists_method.return_value = True
self.bigtable_hook_no_default_project_id.delete_table(
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
instance_id=CBT_INSTANCE,
table_id=CBT_TABLE)
get_client.assert_called_once_with(project_id='example-project')
instance_exists_method.assert_called_once_with()
table_delete_method.assert_called_once_with()
class TestBigtableHookDefaultProjectId(unittest.TestCase):
def setUp(self):
with mock.patch('airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.__init__',
new=mock_base_gcp_hook_default_project_id):
self.bigtable_hook_default_project_id = BigtableHook(gcp_conn_id='test')
@mock.patch("airflow.gcp.hooks.bigtable.BigtableHook.client_info", new_callable=mock.PropertyMock)
@mock.patch("airflow.gcp.hooks.bigtable.BigtableHook._get_credentials")
@mock.patch("airflow.gcp.hooks.bigtable.Client")
def test_bigtable_client_creation(self, mock_client, mock_get_creds, mock_client_info):
result = self.bigtable_hook_default_project_id._get_client(GCP_PROJECT_ID_HOOK_UNIT_TEST)
mock_client.assert_called_once_with(
project=GCP_PROJECT_ID_HOOK_UNIT_TEST,
credentials=mock_get_creds.return_value,
client_info=mock_client_info.return_value,
admin=True
)
self.assertEqual(mock_client.return_value, result)
self.assertEqual(self.bigtable_hook_default_project_id._client, result)
@mock.patch(
'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
)
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_get_instance(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
res = self.bigtable_hook_default_project_id.get_instance(
instance_id=CBT_INSTANCE)
instance_method.assert_called_once_with('instance')
instance_exists_method.assert_called_once_with()
get_client.assert_called_once_with(project_id='example-project')
self.assertIsNotNone(res)
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_get_instance_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
res = self.bigtable_hook_default_project_id.get_instance(
project_id='new-project',
instance_id=CBT_INSTANCE)
instance_method.assert_called_once_with('instance')
instance_exists_method.assert_called_once_with()
get_client.assert_called_once_with(project_id='new-project')
self.assertIsNotNone(res)
@mock.patch(
'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
)
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_get_instance_no_instance(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = False
res = self.bigtable_hook_default_project_id.get_instance(
instance_id=CBT_INSTANCE)
instance_method.assert_called_once_with('instance')
instance_exists_method.assert_called_once_with()
get_client.assert_called_once_with(project_id='example-project')
self.assertIsNone(res)
@mock.patch(
'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
)
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_delete_instance(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
delete_method = instance_method.return_value.delete
res = self.bigtable_hook_default_project_id.delete_instance(
instance_id=CBT_INSTANCE)
instance_method.assert_called_once_with('instance')
instance_exists_method.assert_called_once_with()
delete_method.assert_called_once_with()
get_client.assert_called_once_with(project_id='example-project')
self.assertIsNone(res)
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_delete_instance_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
delete_method = instance_method.return_value.delete
res = self.bigtable_hook_default_project_id.delete_instance(
project_id='new-project', instance_id=CBT_INSTANCE)
instance_method.assert_called_once_with('instance')
instance_exists_method.assert_called_once_with()
delete_method.assert_called_once_with()
get_client.assert_called_once_with(project_id='new-project')
self.assertIsNone(res)
@mock.patch(
'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
)
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_delete_instance_no_instance(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = False
delete_method = instance_method.return_value.delete
self.bigtable_hook_default_project_id.delete_instance(
instance_id=CBT_INSTANCE)
instance_method.assert_called_once_with('instance')
instance_exists_method.assert_called_once_with()
delete_method.assert_not_called()
get_client.assert_called_once_with(project_id='example-project')
@mock.patch(
'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
)
@mock.patch('google.cloud.bigtable.instance.Instance.create')
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_create_instance(self, get_client, instance_create, mock_project_id):
operation = mock.Mock()
operation.result_return_value = Instance(instance_id=CBT_INSTANCE, client=get_client)
instance_create.return_value = operation
res = self.bigtable_hook_default_project_id.create_instance(
instance_id=CBT_INSTANCE,
main_cluster_id=CBT_CLUSTER,
main_cluster_zone=CBT_ZONE)
get_client.assert_called_once_with(project_id='example-project')
instance_create.assert_called_once_with(clusters=mock.ANY)
self.assertEqual(res.instance_id, 'instance')
@mock.patch('google.cloud.bigtable.instance.Instance.create')
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_create_instance_overridden_project_id(self, get_client, instance_create):
operation = mock.Mock()
operation.result_return_value = Instance(instance_id=CBT_INSTANCE, client=get_client)
instance_create.return_value = operation
res = self.bigtable_hook_default_project_id.create_instance(
project_id='new-project',
instance_id=CBT_INSTANCE,
main_cluster_id=CBT_CLUSTER,
main_cluster_zone=CBT_ZONE)
get_client.assert_called_once_with(project_id='new-project')
instance_create.assert_called_once_with(clusters=mock.ANY)
self.assertEqual(res.instance_id, 'instance')
@mock.patch(
'airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
)
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_delete_table(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
table_delete_method = instance_method.return_value.table.return_value.delete
instance_exists_method.return_value = True
self.bigtable_hook_default_project_id.delete_table(
instance_id=CBT_INSTANCE,
table_id=CBT_TABLE)
get_client.assert_called_once_with(project_id='example-project')
instance_exists_method.assert_called_once_with()
table_delete_method.assert_called_once_with()
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_delete_table_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
table_delete_method = instance_method.return_value.table.return_value.delete
instance_exists_method.return_value = True
self.bigtable_hook_default_project_id.delete_table(
project_id='new-project',
instance_id=CBT_INSTANCE,
table_id=CBT_TABLE)
get_client.assert_called_once_with(project_id='new-project')
instance_exists_method.assert_called_once_with()
table_delete_method.assert_called_once_with()
@mock.patch('google.cloud.bigtable.table.Table.create')
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_create_table(self, get_client, create):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
client = mock.Mock(Client)
instance = google.cloud.bigtable.instance.Instance(
instance_id=CBT_INSTANCE,
client=client)
self.bigtable_hook_default_project_id.create_table(
instance=instance,
table_id=CBT_TABLE)
get_client.assert_not_called()
create.assert_called_once_with([], {})
@mock.patch('google.cloud.bigtable.cluster.Cluster.update')
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_update_cluster(self, get_client, update):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
client = mock.Mock(Client)
instance = google.cloud.bigtable.instance.Instance(
instance_id=CBT_INSTANCE,
client=client)
self.bigtable_hook_default_project_id.update_cluster(
instance=instance,
cluster_id=CBT_CLUSTER,
nodes=4)
get_client.assert_not_called()
update.assert_called_once_with()
@mock.patch('google.cloud.bigtable.table.Table.list_column_families')
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_list_column_families(self, get_client, list_column_families):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
client = mock.Mock(Client)
get_client.return_value = client
instance = google.cloud.bigtable.instance.Instance(
instance_id=CBT_INSTANCE,
client=client)
self.bigtable_hook_default_project_id.get_column_families_for_table(
instance=instance, table_id=CBT_TABLE)
get_client.assert_not_called()
list_column_families.assert_called_once_with()
@mock.patch('google.cloud.bigtable.table.Table.get_cluster_states')
@mock.patch('airflow.gcp.hooks.bigtable.BigtableHook._get_client')
def test_get_cluster_states(self, get_client, get_cluster_states):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
client = mock.Mock(Client)
instance = google.cloud.bigtable.instance.Instance(
instance_id=CBT_INSTANCE,
client=client)
self.bigtable_hook_default_project_id.get_cluster_states_for_table(
instance=instance, table_id=CBT_TABLE)
get_client.assert_not_called()
get_cluster_states.assert_called_once_with()
| true | true |
f724f160afc41ed74cc89d83afb1c22e3d02f806 | 3,920 | py | Python | example.py | byu-dml/d3m-profiler | 9a3bc45061267091b0109f2159648785e370a18b | [
"MIT"
] | null | null | null | example.py | byu-dml/d3m-profiler | 9a3bc45061267091b0109f2159648785e370a18b | [
"MIT"
] | 5 | 2020-04-22T19:15:06.000Z | 2021-03-25T15:28:30.000Z | example.py | byu-dml/d3m-profiler | 9a3bc45061267091b0109f2159648785e370a18b | [
"MIT"
] | null | null | null | import numpy as np
# Experiment script: embed D3M column metadata, optionally rebalance the
# class distribution with SMOTE, train a classifier per corpus, and append
# accuracy/F1 scores to data/results_2.csv.  NOTE(review): flat script —
# everything runs at import time.
import multiprocessing as mp
import pathlib as pl
import pandas as pd
import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC as SupportVectorClassifier
from sklearn.metrics import accuracy_score, f1_score
from sklearn.model_selection import train_test_split
# NOTE(review): duplicate import of SVC; several imports below (pathlib,
# run_models, score_results) appear unused in this script.
from sklearn.svm import SVC as SupportVectorClassifier
from d3m_profiler import rebalance, score_results
from d3m_profiler.evaluate_models import run_models, _save_results
from d3m_profiler.embed import embed
_NUM_THREADS = mp.cpu_count()
# One result row per (corpus, classifier, balanced-state) combination.
results = pd.DataFrame(columns=['data_collection', 'classifier', 'balanced', 'accuracy_score', 'f1_score_micro', 'f1_score_macro', 'f1_score_weighted'])
#closed_bal_file = 'data/closed_d3m_bal.csv'
#closed_unbal_file = 'data/closed_d3m_unbal.csv'
#open_bal_file = 'data/open_d3m_bal.csv'
#open_unbal_file = 'data/open_d3m_unbal.csv'
#files = [closed_unbal_file, closed_bal_file, open_unbal_file, open_bal_file]
type_column = 'colType'  # label column: semantic type of each dataset column
model_weights_path = 'torontobooks_unigrams.bin'  # pretrained embedding weights
open_d3m_file = 'data/open_d3m_data.csv'
closed_d3m_file = 'data/closed_d3m_data.csv'
files = [open_d3m_file]
#files = [open_d3m_file, closed_d3m_file]
#files = [closed_d3m_file, open_d3m_file]
for _file in files:
    # e.g. 'data/open_d3m_data.csv' -> 'open_d3m_data.csv'
    data_collection = _file.split('/')[1]
    print(data_collection)
    orig_df = pd.read_csv(_file)
    orig_df = orig_df.applymap(str)
    # First candidate: the raw (possibly imbalanced) corpus, embedded.
    dfs = [embed(orig_df, type_column, model_weights_path)]
    class_counts = orig_df[type_column].value_counts().values
    # "balanced" means every class has exactly the same count.
    balanced = len(set(class_counts)) == 1
    if (not balanced):
        print('rebalancing {} data collection'.format(data_collection))
        # NOTE(review): rebalance_SMOTE presumably returns an embedded frame
        # whose synthetic rows carry datasetName == 'SYNTHETIC' — confirm.
        rebal_df = rebalance.rebalance_SMOTE(orig_df, type_column, 'smote', model_weights_path)
        dfs.append(rebal_df)
    for df in dfs:
        class_counts = df[type_column].value_counts().values
        balanced = len(set(class_counts)) == 1
        print(balanced)
        xtrain, xtest, ytrain, ytest = None, None, None, None
        if (balanced):
            # Keep synthetic (SMOTE) rows out of the test split: split the
            # organic rows, then fold the synthetic ones into training only.
            X_syn = df[df['datasetName'].eq('SYNTHETIC')].drop(['datasetName', type_column], axis=1)
            y_syn = df[df['datasetName'].eq('SYNTHETIC')][type_column]
            X_organ = df[df['datasetName'] != 'SYNTHETIC'].drop(['datasetName', type_column], axis=1)
            y_organ = df[df['datasetName'] != 'SYNTHETIC'][type_column]
            xtrain, xtest, ytrain, ytest = train_test_split(X_organ, y_organ, test_size=0.33)
            xtrain = xtrain.append(X_syn)
            ytrain = ytrain.append(y_syn)
        else:
            X = df.drop(['datasetName', type_column], axis=1)
            y = df[type_column]
            dataset_names = df['datasetName']
            xtrain, xtest, ytrain, ytest = train_test_split(X, y, test_size=0.33)
        #for model_class in [SupportVectorClassifier, RandomForestClassifier]:
        for model_class in [RandomForestClassifier]:
            classifier = model_class.__name__
            print('evaluating model: {}'.format(classifier))
            model = model_class()
            print('fitting model...')
            model.fit(xtrain, ytrain)
            if (balanced):
                # Persist only the model trained on the balanced corpus.
                filename = 'RF_public_model.sav'
                pickle.dump(model, open(filename, 'wb'))
            yhat = model.predict(xtest)
            accuracy = accuracy_score(ytest, yhat)
            f1_micro = f1_score(ytest, yhat, average='micro')
            f1_macro = f1_score(ytest, yhat, average='macro')
            f1_weighted = f1_score(ytest, yhat, average='weighted')
            results = results.append({'data_collection': data_collection, 'classifier': classifier, 'balanced': balanced, 'accuracy_score': accuracy,
                                      'f1_score_micro': f1_micro, 'f1_score_macro': f1_macro, 'f1_score_weighted': f1_weighted}, ignore_index=True)
print(results)
results.to_csv('data/results_2.csv', index=False)
| 36.981132 | 152 | 0.685969 | import numpy as np
# Comment-stripped duplicate of the profiling experiment script: embed D3M
# column metadata, rebalance skewed classes with SMOTE, train a RandomForest
# per corpus, and write accuracy/F1 scores to data/results_2.csv.
# (Its leading 'import numpy as np' line is fused into the preceding data row.)
import multiprocessing as mp
import pathlib as pl
import pandas as pd
import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC as SupportVectorClassifier
from sklearn.metrics import accuracy_score, f1_score
from sklearn.model_selection import train_test_split
# NOTE(review): duplicate SVC import, kept byte-identical on purpose.
from sklearn.svm import SVC as SupportVectorClassifier
from d3m_profiler import rebalance, score_results
from d3m_profiler.evaluate_models import run_models, _save_results
from d3m_profiler.embed import embed
_NUM_THREADS = mp.cpu_count()
results = pd.DataFrame(columns=['data_collection', 'classifier', 'balanced', 'accuracy_score', 'f1_score_micro', 'f1_score_macro', 'f1_score_weighted'])
type_column = 'colType'
model_weights_path = 'torontobooks_unigrams.bin'
open_d3m_file = 'data/open_d3m_data.csv'
closed_d3m_file = 'data/closed_d3m_data.csv'
files = [open_d3m_file]
for _file in files:
    data_collection = _file.split('/')[1]
    print(data_collection)
    orig_df = pd.read_csv(_file)
    orig_df = orig_df.applymap(str)
    dfs = [embed(orig_df, type_column, model_weights_path)]
    class_counts = orig_df[type_column].value_counts().values
    # "balanced" means every class occurs exactly the same number of times.
    balanced = len(set(class_counts)) == 1
    if (not balanced):
        print('rebalancing {} data collection'.format(data_collection))
        rebal_df = rebalance.rebalance_SMOTE(orig_df, type_column, 'smote', model_weights_path)
        dfs.append(rebal_df)
    for df in dfs:
        class_counts = df[type_column].value_counts().values
        balanced = len(set(class_counts)) == 1
        print(balanced)
        xtrain, xtest, ytrain, ytest = None, None, None, None
        if (balanced):
            # Synthetic (SMOTE) rows go to training only; test on organic rows.
            X_syn = df[df['datasetName'].eq('SYNTHETIC')].drop(['datasetName', type_column], axis=1)
            y_syn = df[df['datasetName'].eq('SYNTHETIC')][type_column]
            X_organ = df[df['datasetName'] != 'SYNTHETIC'].drop(['datasetName', type_column], axis=1)
            y_organ = df[df['datasetName'] != 'SYNTHETIC'][type_column]
            xtrain, xtest, ytrain, ytest = train_test_split(X_organ, y_organ, test_size=0.33)
            xtrain = xtrain.append(X_syn)
            ytrain = ytrain.append(y_syn)
        else:
            X = df.drop(['datasetName', type_column], axis=1)
            y = df[type_column]
            dataset_names = df['datasetName']
            xtrain, xtest, ytrain, ytest = train_test_split(X, y, test_size=0.33)
        for model_class in [RandomForestClassifier]:
            classifier = model_class.__name__
            print('evaluating model: {}'.format(classifier))
            model = model_class()
            print('fitting model...')
            model.fit(xtrain, ytrain)
            if (balanced):
                filename = 'RF_public_model.sav'
                pickle.dump(model, open(filename, 'wb'))
            yhat = model.predict(xtest)
            accuracy = accuracy_score(ytest, yhat)
            f1_micro = f1_score(ytest, yhat, average='micro')
            f1_macro = f1_score(ytest, yhat, average='macro')
            f1_weighted = f1_score(ytest, yhat, average='weighted')
            results = results.append({'data_collection': data_collection, 'classifier': classifier, 'balanced': balanced, 'accuracy_score': accuracy,
                                      'f1_score_micro': f1_micro, 'f1_score_macro': f1_macro, 'f1_score_weighted': f1_weighted}, ignore_index=True)
print(results)
results.to_csv('data/results_2.csv', index=False)
| true | true |
f724f1b8cc56dd4a31f3d47d459ebef89ff7cdca | 21,725 | py | Python | mmdet/datasets/coco.py | YunongPan/swin_gui | 52adc917d3413781e76609d021c6a2579fdf44d1 | [
"Apache-2.0"
] | null | null | null | mmdet/datasets/coco.py | YunongPan/swin_gui | 52adc917d3413781e76609d021c6a2579fdf44d1 | [
"Apache-2.0"
] | null | null | null | mmdet/datasets/coco.py | YunongPan/swin_gui | 52adc917d3413781e76609d021c6a2579fdf44d1 | [
"Apache-2.0"
] | null | null | null | import itertools
import logging
import os.path as osp
import tempfile
from collections import OrderedDict
import mmcv
import numpy as np
import pycocotools
from mmcv.utils import print_log
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from terminaltables import AsciiTable
from mmdet.core import eval_recalls
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class CocoDataset(CustomDataset):
    """COCO-style detection dataset.

    This copy is configured for a single custom category instead of the
    80 standard COCO classes ("schwarze Schraube" is German for
    "black screw" — presumably a fine-tuning dataset).
    """

    # Single-class setup; must match the category names in the annotation file.
    CLASSES = ('schwarze_Schraube',)

    def load_annotations(self, ann_file):
        """Load annotation from COCO style annotation file.

        Args:
            ann_file (str): Path of annotation file.

        Returns:
            list[dict]: Annotation info from COCO api.
        """
        # The open-mmlab fork (mmpycocotools) is required; it reports a
        # version >= 12.0.2 while upstream pycocotools does not.
        if not getattr(pycocotools, '__version__', '0') >= '12.0.2':
            raise AssertionError(
                'Incompatible version of pycocotools is installed. '
                'Run pip uninstall pycocotools first. Then run pip '
                'install mmpycocotools to install open-mmlab forked '
                'pycocotools.')

        self.coco = COCO(ann_file)
        self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
        # Map COCO category id -> contiguous 0-based label index.
        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
        self.img_ids = self.coco.get_img_ids()
        data_infos = []
        total_ann_ids = []
        for i in self.img_ids:
            info = self.coco.load_imgs([i])[0]
            info['filename'] = info['file_name']
            data_infos.append(info)
            ann_ids = self.coco.get_ann_ids(img_ids=[i])
            total_ann_ids.extend(ann_ids)
        assert len(set(total_ann_ids)) == len(
            total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
        return data_infos

    def get_ann_info(self, idx):
        """Get COCO annotation by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """
        img_id = self.data_infos[idx]['id']
        ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
        ann_info = self.coco.load_anns(ann_ids)
        return self._parse_ann_info(self.data_infos[idx], ann_info)

    def get_cat_ids(self, idx):
        """Get COCO category ids by index.

        Args:
            idx (int): Index of data.

        Returns:
            list[int]: All categories in the image of specified index.
        """
        img_id = self.data_infos[idx]['id']
        ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
        ann_info = self.coco.load_anns(ann_ids)
        return [ann['category_id'] for ann in ann_info]

    def _filter_imgs(self, min_size=32):
        """Filter images too small or without ground truths."""
        valid_inds = []
        # obtain images that contain annotation
        ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
        # obtain images that contain annotations of the required categories
        ids_in_cat = set()
        for i, class_id in enumerate(self.cat_ids):
            ids_in_cat |= set(self.coco.cat_img_map[class_id])
        # merge the image id sets of the two conditions and use the merged set
        # to filter out images if self.filter_empty_gt=True
        ids_in_cat &= ids_with_ann

        valid_img_ids = []
        for i, img_info in enumerate(self.data_infos):
            img_id = self.img_ids[i]
            if self.filter_empty_gt and img_id not in ids_in_cat:
                continue
            if min(img_info['width'], img_info['height']) >= min_size:
                valid_inds.append(i)
                valid_img_ids.append(img_id)
        self.img_ids = valid_img_ids
        return valid_inds

    def _parse_ann_info(self, img_info, ann_info):
        """Parse bbox and mask annotation.

        Args:
            ann_info (list[dict]): Annotation info of an image.
            with_mask (bool): Whether to parse mask annotations.

        Returns:
            dict: A dict containing the following keys: bboxes, bboxes_ignore,\
                labels, masks, seg_map. "masks" are raw annotations and not \
                decoded into binary masks.
        """
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = []
        gt_masks_ann = []
        for i, ann in enumerate(ann_info):
            if ann.get('ignore', False):
                continue
            x1, y1, w, h = ann['bbox']
            # Drop boxes whose intersection with the image area is empty.
            inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
            inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
            if inter_w * inter_h == 0:
                continue
            if ann['area'] <= 0 or w < 1 or h < 1:
                continue
            if ann['category_id'] not in self.cat_ids:
                continue
            bbox = [x1, y1, x1 + w, y1 + h]
            # Crowd regions are kept separately and ignored during training.
            if ann.get('iscrowd', False):
                gt_bboxes_ignore.append(bbox)
            else:
                gt_bboxes.append(bbox)
                gt_labels.append(self.cat2label[ann['category_id']])
                gt_masks_ann.append(ann.get('segmentation', None))

        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
            gt_labels = np.array(gt_labels, dtype=np.int64)
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_labels = np.array([], dtype=np.int64)

        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)

        # NOTE(review): replaces every 'jpg' in the filename, not just the
        # extension — filenames containing 'jpg' elsewhere would be mangled.
        seg_map = img_info['filename'].replace('jpg', 'png')

        ann = dict(
            bboxes=gt_bboxes,
            labels=gt_labels,
            bboxes_ignore=gt_bboxes_ignore,
            masks=gt_masks_ann,
            seg_map=seg_map)

        return ann

    def xyxy2xywh(self, bbox):
        """Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
        evaluation.

        Args:
            bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
                ``xyxy`` order.

        Returns:
            list[float]: The converted bounding boxes, in ``xywh`` order.
        """
        _bbox = bbox.tolist()
        return [
            _bbox[0],
            _bbox[1],
            _bbox[2] - _bbox[0],
            _bbox[3] - _bbox[1],
        ]

    def _proposal2json(self, results):
        """Convert proposal results to COCO json style."""
        json_results = []
        for idx in range(len(self)):
            img_id = self.img_ids[idx]
            bboxes = results[idx]
            for i in range(bboxes.shape[0]):
                data = dict()
                data['image_id'] = img_id
                data['bbox'] = self.xyxy2xywh(bboxes[i])
                data['score'] = float(bboxes[i][4])
                # Proposals are class-agnostic; a fixed category id is used.
                data['category_id'] = 1
                json_results.append(data)
        return json_results

    def _det2json(self, results):
        """Convert detection results to COCO json style."""
        json_results = []
        for idx in range(len(self)):
            img_id = self.img_ids[idx]
            result = results[idx]
            for label in range(len(result)):
                bboxes = result[label]
                for i in range(bboxes.shape[0]):
                    data = dict()
                    data['image_id'] = img_id
                    data['bbox'] = self.xyxy2xywh(bboxes[i])
                    data['score'] = float(bboxes[i][4])
                    data['category_id'] = self.cat_ids[label]
                    json_results.append(data)
        return json_results

    def _segm2json(self, results):
        """Convert instance segmentation results to COCO json style."""
        bbox_json_results = []
        segm_json_results = []
        for idx in range(len(self)):
            img_id = self.img_ids[idx]
            det, seg = results[idx]
            for label in range(len(det)):
                # bbox results
                bboxes = det[label]
                for i in range(bboxes.shape[0]):
                    data = dict()
                    data['image_id'] = img_id
                    data['bbox'] = self.xyxy2xywh(bboxes[i])
                    data['score'] = float(bboxes[i][4])
                    data['category_id'] = self.cat_ids[label]
                    bbox_json_results.append(data)

                # segm results
                # some detectors use different scores for bbox and mask
                if isinstance(seg, tuple):
                    segms = seg[0][label]
                    mask_score = seg[1][label]
                else:
                    segms = seg[label]
                    mask_score = [bbox[4] for bbox in bboxes]
                for i in range(bboxes.shape[0]):
                    data = dict()
                    data['image_id'] = img_id
                    data['bbox'] = self.xyxy2xywh(bboxes[i])
                    data['score'] = float(mask_score[i])
                    data['category_id'] = self.cat_ids[label]
                    # RLE counts may be bytes; JSON needs str.
                    if isinstance(segms[i]['counts'], bytes):
                        segms[i]['counts'] = segms[i]['counts'].decode()
                    data['segmentation'] = segms[i]
                    segm_json_results.append(data)
        return bbox_json_results, segm_json_results

    def results2json(self, results, outfile_prefix):
        """Dump the detection results to a COCO style json file.

        There are 3 types of results: proposals, bbox predictions, mask
        predictions, and they have different data types. This method will
        automatically recognize the type, and dump them to json files.

        Args:
            results (list[list | tuple | ndarray]): Testing results of the
                dataset.
            outfile_prefix (str): The filename prefix of the json files. If the
                prefix is "somepath/xxx", the json files will be named
                "somepath/xxx.bbox.json", "somepath/xxx.segm.json",
                "somepath/xxx.proposal.json".

        Returns:
            dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
                values are corresponding filenames.
        """
        result_files = dict()
        if isinstance(results[0], list):
            json_results = self._det2json(results)
            result_files['bbox'] = f'{outfile_prefix}.bbox.json'
            result_files['proposal'] = f'{outfile_prefix}.bbox.json'
            mmcv.dump(json_results, result_files['bbox'])
        elif isinstance(results[0], tuple):
            json_results = self._segm2json(results)
            result_files['bbox'] = f'{outfile_prefix}.bbox.json'
            result_files['proposal'] = f'{outfile_prefix}.bbox.json'
            result_files['segm'] = f'{outfile_prefix}.segm.json'
            mmcv.dump(json_results[0], result_files['bbox'])
            mmcv.dump(json_results[1], result_files['segm'])
        elif isinstance(results[0], np.ndarray):
            json_results = self._proposal2json(results)
            result_files['proposal'] = f'{outfile_prefix}.proposal.json'
            mmcv.dump(json_results, result_files['proposal'])
        else:
            raise TypeError('invalid type of results')
        return result_files

    def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
        """Compute average recall of proposals (used by 'proposal_fast')."""
        gt_bboxes = []
        for i in range(len(self.img_ids)):
            ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
            ann_info = self.coco.load_anns(ann_ids)
            if len(ann_info) == 0:
                gt_bboxes.append(np.zeros((0, 4)))
                continue
            bboxes = []
            for ann in ann_info:
                if ann.get('ignore', False) or ann['iscrowd']:
                    continue
                x1, y1, w, h = ann['bbox']
                bboxes.append([x1, y1, x1 + w, y1 + h])
            bboxes = np.array(bboxes, dtype=np.float32)
            if bboxes.shape[0] == 0:
                bboxes = np.zeros((0, 4))
            gt_bboxes.append(bboxes)

        recalls = eval_recalls(
            gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
        ar = recalls.mean(axis=1)
        return ar

    def format_results(self, results, jsonfile_prefix=None, **kwargs):
        """Format the results to json (standard format for COCO evaluation).

        Args:
            results (list[tuple | numpy.ndarray]): Testing results of the
                dataset.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.

        Returns:
            tuple: (result_files, tmp_dir), result_files is a dict containing \
                the json filepaths, tmp_dir is the temporal directory created \
                for saving json files when jsonfile_prefix is not specified.
        """
        assert isinstance(results, list), 'results must be a list'
        assert len(results) == len(self), (
            'The length of results is not equal to the dataset len: {} != {}'.
            format(len(results), len(self)))

        if jsonfile_prefix is None:
            tmp_dir = tempfile.TemporaryDirectory()
            jsonfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None
        result_files = self.results2json(results, jsonfile_prefix)
        return result_files, tmp_dir

    def evaluate(self,
                 results,
                 metric='bbox',
                 logger=None,
                 jsonfile_prefix=None,
                 classwise=False,
                 proposal_nums=(100, 300, 1000),
                 iou_thrs=None,
                 metric_items=None):
        """Evaluation in COCO protocol.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'bbox', 'segm', 'proposal', 'proposal_fast'.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            classwise (bool): Whether to evaluating the AP for each class.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thrs (Sequence[float], optional): IoU threshold used for
                evaluating recalls/mAPs. If set to a list, the average of all
                IoUs will also be computed. If not specified, [0.50, 0.55,
                0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
                Default: None.
            metric_items (list[str] | str, optional): Metric items that will
                be returned. If not specified, ``['AR@100', 'AR@300',
                'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
                used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
                'mAP_s', 'mAP_m', 'mAP_l']`` will be used when
                ``metric=='bbox' or metric=='segm'``.

        Returns:
            dict[str, float]: COCO style evaluation metric.
        """

        metrics = metric if isinstance(metric, list) else [metric]
        allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError(f'metric {metric} is not supported')
        if iou_thrs is None:
            # Standard COCO IoU sweep: 0.50:0.05:0.95.
            iou_thrs = np.linspace(
                .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
        if metric_items is not None:
            if not isinstance(metric_items, list):
                metric_items = [metric_items]

        result_files, tmp_dir = self.format_results(results, jsonfile_prefix)

        eval_results = OrderedDict()
        cocoGt = self.coco
        for metric in metrics:
            msg = f'Evaluating {metric}...'
            if logger is None:
                msg = '\n' + msg
            print_log(msg, logger=logger)

            if metric == 'proposal_fast':
                ar = self.fast_eval_recall(
                    results, proposal_nums, iou_thrs, logger='silent')
                log_msg = []
                for i, num in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
                    log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
                log_msg = ''.join(log_msg)
                print_log(log_msg, logger=logger)
                continue

            if metric not in result_files:
                raise KeyError(f'{metric} is not in results')
            try:
                cocoDt = cocoGt.loadRes(result_files[metric])
            except IndexError:
                print_log(
                    'The testing results of the whole dataset is empty.',
                    logger=logger,
                    level=logging.ERROR)
                break

            iou_type = 'bbox' if metric == 'proposal' else metric
            cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
            cocoEval.params.catIds = self.cat_ids
            cocoEval.params.imgIds = self.img_ids
            cocoEval.params.maxDets = list(proposal_nums)
            cocoEval.params.iouThrs = iou_thrs
            # mapping of cocoEval.stats
            coco_metric_names = {
                'mAP': 0,
                'mAP_50': 1,
                'mAP_75': 2,
                'mAP_s': 3,
                'mAP_m': 4,
                'mAP_l': 5,
                'AR@100': 6,
                'AR@300': 7,
                'AR@1000': 8,
                'AR_s@1000': 9,
                'AR_m@1000': 10,
                'AR_l@1000': 11
            }
            if metric_items is not None:
                for metric_item in metric_items:
                    if metric_item not in coco_metric_names:
                        raise KeyError(
                            f'metric item {metric_item} is not supported')

            if metric == 'proposal':
                # Class-agnostic evaluation for proposals.
                cocoEval.params.useCats = 0
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                if metric_items is None:
                    metric_items = [
                        'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
                        'AR_m@1000', 'AR_l@1000'
                    ]

                for item in metric_items:
                    val = float(
                        f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
                    eval_results[item] = val
            else:
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                if classwise:  # Compute per-category AP
                    # Compute per-category AP
                    # from https://github.com/facebookresearch/detectron2/
                    precisions = cocoEval.eval['precision']
                    # precision: (iou, recall, cls, area range, max dets)
                    assert len(self.cat_ids) == precisions.shape[2]

                    results_per_category = []
                    for idx, catId in enumerate(self.cat_ids):
                        # area range index 0: all area ranges
                        # max dets index -1: typically 100 per image
                        nm = self.coco.loadCats(catId)[0]
                        precision = precisions[:, :, idx, 0, -1]
                        precision = precision[precision > -1]
                        if precision.size:
                            ap = np.mean(precision)
                        else:
                            ap = float('nan')
                        results_per_category.append(
                            (f'{nm["name"]}', f'{float(ap):0.3f}'))

                    num_columns = min(6, len(results_per_category) * 2)
                    results_flatten = list(
                        itertools.chain(*results_per_category))
                    headers = ['category', 'AP'] * (num_columns // 2)
                    results_2d = itertools.zip_longest(*[
                        results_flatten[i::num_columns]
                        for i in range(num_columns)
                    ])
                    table_data = [headers]
                    table_data += [result for result in results_2d]
                    table = AsciiTable(table_data)
                    print_log('\n' + table.table, logger=logger)

                if metric_items is None:
                    metric_items = [
                        'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
                    ]

                for metric_item in metric_items:
                    key = f'{metric}_{metric_item}'
                    val = float(
                        f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
                    )
                    eval_results[key] = val
                ap = cocoEval.stats[:6]
                eval_results[f'{metric}_mAP_copypaste'] = (
                    f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
                    f'{ap[4]:.3f} {ap[5]:.3f}')
        if tmp_dir is not None:
            tmp_dir.cleanup()
        return eval_results
| 40.683521 | 79 | 0.529758 | import itertools
import logging
import os.path as osp
import tempfile
from collections import OrderedDict
import mmcv
import numpy as np
import pycocotools
from mmcv.utils import print_log
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from terminaltables import AsciiTable
from mmdet.core import eval_recalls
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class CocoDataset(CustomDataset):
CLASSES = ('schwarze_Schraube',)otations(self, ann_file):
if not getattr(pycocotools, '__version__', '0') >= '12.0.2':
raise AssertionError(
'Incompatible version of pycocotools is installed. '
'Run pip uninstall pycocotools first. Then run pip '
'install mmpycocotools to install open-mmlab forked '
'pycocotools.')
self.coco = COCO(ann_file)
self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.img_ids = self.coco.get_img_ids()
data_infos = []
total_ann_ids = []
for i in self.img_ids:
info = self.coco.load_imgs([i])[0]
info['filename'] = info['file_name']
data_infos.append(info)
ann_ids = self.coco.get_ann_ids(img_ids=[i])
total_ann_ids.extend(ann_ids)
assert len(set(total_ann_ids)) == len(
total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
return data_infos
def get_ann_info(self, idx):
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return self._parse_ann_info(self.data_infos[idx], ann_info)
def get_cat_ids(self, idx):
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return [ann['category_id'] for ann in ann_info]
def _filter_imgs(self, min_size=32):
valid_inds = []
ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
ids_in_cat = set()
for i, class_id in enumerate(self.cat_ids):
ids_in_cat |= set(self.coco.cat_img_map[class_id])
ids_in_cat &= ids_with_ann
valid_img_ids = []
for i, img_info in enumerate(self.data_infos):
img_id = self.img_ids[i]
if self.filter_empty_gt and img_id not in ids_in_cat:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
valid_img_ids.append(img_id)
self.img_ids = valid_img_ids
return valid_inds
def _parse_ann_info(self, img_info, ann_info):
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
gt_masks_ann = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
if inter_w * inter_h == 0:
continue
if ann['area'] <= 0 or w < 1 or h < 1:
continue
if ann['category_id'] not in self.cat_ids:
continue
bbox = [x1, y1, x1 + w, y1 + h]
if ann.get('iscrowd', False):
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann['category_id']])
gt_masks_ann.append(ann.get('segmentation', None))
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
seg_map = img_info['filename'].replace('jpg', 'png')
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
bboxes_ignore=gt_bboxes_ignore,
masks=gt_masks_ann,
seg_map=seg_map)
return ann
def xyxy2xywh(self, bbox):
_bbox = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0],
_bbox[3] - _bbox[1],
]
def _proposal2json(self, results):
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
bboxes = results[idx]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = 1
json_results.append(data)
return json_results
def _det2json(self, results):
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
result = results[idx]
for label in range(len(result)):
bboxes = result[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
json_results.append(data)
return json_results
def _segm2json(self, results):
bbox_json_results = []
segm_json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
det, seg = results[idx]
for label in range(len(det)):
bboxes = det[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
bbox_json_results.append(data)
if isinstance(seg, tuple):
segms = seg[0][label]
mask_score = seg[1][label]
else:
segms = seg[label]
mask_score = [bbox[4] for bbox in bboxes]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(mask_score[i])
data['category_id'] = self.cat_ids[label]
if isinstance(segms[i]['counts'], bytes):
segms[i]['counts'] = segms[i]['counts'].decode()
data['segmentation'] = segms[i]
segm_json_results.append(data)
return bbox_json_results, segm_json_results
def results2json(self, results, outfile_prefix):
result_files = dict()
if isinstance(results[0], list):
json_results = self._det2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
mmcv.dump(json_results, result_files['bbox'])
elif isinstance(results[0], tuple):
json_results = self._segm2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
result_files['segm'] = f'{outfile_prefix}.segm.json'
mmcv.dump(json_results[0], result_files['bbox'])
mmcv.dump(json_results[1], result_files['segm'])
elif isinstance(results[0], np.ndarray):
json_results = self._proposal2json(results)
result_files['proposal'] = f'{outfile_prefix}.proposal.json'
mmcv.dump(json_results, result_files['proposal'])
else:
raise TypeError('invalid type of results')
return result_files
def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
gt_bboxes = []
for i in range(len(self.img_ids)):
ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
ann_info = self.coco.load_anns(ann_ids)
if len(ann_info) == 0:
gt_bboxes.append(np.zeros((0, 4)))
continue
bboxes = []
for ann in ann_info:
if ann.get('ignore', False) or ann['iscrowd']:
continue
x1, y1, w, h = ann['bbox']
bboxes.append([x1, y1, x1 + w, y1 + h])
bboxes = np.array(bboxes, dtype=np.float32)
if bboxes.shape[0] == 0:
bboxes = np.zeros((0, 4))
gt_bboxes.append(bboxes)
recalls = eval_recalls(
gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
ar = recalls.mean(axis=1)
return ar
def format_results(self, results, jsonfile_prefix=None, **kwargs):
assert isinstance(results, list), 'results must be a list'
assert len(results) == len(self), (
'The length of results is not equal to the dataset len: {} != {}'.
format(len(results), len(self)))
if jsonfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
jsonfile_prefix = osp.join(tmp_dir.name, 'results')
else:
tmp_dir = None
result_files = self.results2json(results, jsonfile_prefix)
return result_files, tmp_dir
def evaluate(self,
results,
metric='bbox',
logger=None,
jsonfile_prefix=None,
classwise=False,
proposal_nums=(100, 300, 1000),
iou_thrs=None,
metric_items=None):
metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
if iou_thrs is None:
iou_thrs = np.linspace(
.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
if metric_items is not None:
if not isinstance(metric_items, list):
metric_items = [metric_items]
result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
eval_results = OrderedDict()
cocoGt = self.coco
for metric in metrics:
msg = f'Evaluating {metric}...'
if logger is None:
msg = '\n' + msg
print_log(msg, logger=logger)
if metric == 'proposal_fast':
ar = self.fast_eval_recall(
results, proposal_nums, iou_thrs, logger='silent')
log_msg = []
for i, num in enumerate(proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
log_msg = ''.join(log_msg)
print_log(log_msg, logger=logger)
continue
if metric not in result_files:
raise KeyError(f'{metric} is not in results')
try:
cocoDt = cocoGt.loadRes(result_files[metric])
except IndexError:
print_log(
'The testing results of the whole dataset is empty.',
logger=logger,
level=logging.ERROR)
break
iou_type = 'bbox' if metric == 'proposal' else metric
cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
cocoEval.params.catIds = self.cat_ids
cocoEval.params.imgIds = self.img_ids
cocoEval.params.maxDets = list(proposal_nums)
cocoEval.params.iouThrs = iou_thrs
coco_metric_names = {
'mAP': 0,
'mAP_50': 1,
'mAP_75': 2,
'mAP_s': 3,
'mAP_m': 4,
'mAP_l': 5,
'AR@100': 6,
'AR@300': 7,
'AR@1000': 8,
'AR_s@1000': 9,
'AR_m@1000': 10,
'AR_l@1000': 11
}
if metric_items is not None:
for metric_item in metric_items:
if metric_item not in coco_metric_names:
raise KeyError(
f'metric item {metric_item} is not supported')
if metric == 'proposal':
cocoEval.params.useCats = 0
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if metric_items is None:
metric_items = [
'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
'AR_m@1000', 'AR_l@1000'
]
for item in metric_items:
val = float(
f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
eval_results[item] = val
else:
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if classwise:
precisions = cocoEval.eval['precision']
assert len(self.cat_ids) == precisions.shape[2]
results_per_category = []
for idx, catId in enumerate(self.cat_ids):
nm = self.coco.loadCats(catId)[0]
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
if precision.size:
ap = np.mean(precision)
else:
ap = float('nan')
results_per_category.append(
(f'{nm["name"]}', f'{float(ap):0.3f}'))
num_columns = min(6, len(results_per_category) * 2)
results_flatten = list(
itertools.chain(*results_per_category))
headers = ['category', 'AP'] * (num_columns // 2)
results_2d = itertools.zip_longest(*[
results_flatten[i::num_columns]
for i in range(num_columns)
])
table_data = [headers]
table_data += [result for result in results_2d]
table = AsciiTable(table_data)
print_log('\n' + table.table, logger=logger)
if metric_items is None:
metric_items = [
'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
]
for metric_item in metric_items:
key = f'{metric}_{metric_item}'
val = float(
f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
)
eval_results[key] = val
ap = cocoEval.stats[:6]
eval_results[f'{metric}_mAP_copypaste'] = (
f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
f'{ap[4]:.3f} {ap[5]:.3f}')
if tmp_dir is not None:
tmp_dir.cleanup()
return eval_results
| true | true |
f724f1c71834d7b7a9d035b610d98d0f0773158a | 1,933 | py | Python | website/migrations/0005_auto_20191213_1623.py | mwalsh161/iquise-website | ab674d7881e418fe02b533ae477982e328e8fec7 | [
"MIT"
] | 1 | 2021-12-19T01:05:26.000Z | 2021-12-19T01:05:26.000Z | website/migrations/0005_auto_20191213_1623.py | iQuISE/iquise-website | e6125fe938c549e020cd53a5aa718de101e972e9 | [
"MIT"
] | 16 | 2020-07-29T14:12:30.000Z | 2021-08-24T13:00:48.000Z | website/migrations/0005_auto_20191213_1623.py | mwalsh161/iquise-website | ab674d7881e418fe02b533ae477982e328e8fec7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-12-13 21:23
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add embedded-video support.

    Creates the ``EmbeddedVideo`` and ``EmbedEngine`` models, links videos
    to an engine and to presentations, and tightens the presenter thumbnail
    field. Auto-generated by Django; do not hand-edit applied migrations.
    """
    dependencies = [
        ('website', '0004_presenter_profile_image_thumb'),
    ]
    operations = [
        # A single hosted video, identified by the provider-specific id.
        migrations.CreateModel(
            name='EmbeddedVideo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('video_id', models.CharField(max_length=50)),
                ('public', models.BooleanField(default=False)),
            ],
        ),
        # An embedding provider (e.g. a video host) with an HTML template.
        migrations.CreateModel(
            name='EmbedEngine',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, unique=True)),
                ('html_template', models.TextField(help_text='Use {{ID}} which will get swapped in for the EmbeddedVideo.video_id.')),
                ('url_help', models.CharField(blank=True, help_text='Used to help the user figure out where the video_id is.', max_length=100)),
            ],
        ),
        # Make the auto-generated thumbnail read-only in admin forms.
        migrations.AlterField(
            model_name='presenter',
            name='profile_image_thumb',
            field=models.ImageField(blank=True, editable=False, upload_to='thumbs'),
        ),
        # Each video requires an engine; PROTECT blocks engine deletion
        # while videos still reference it.
        migrations.AddField(
            model_name='embeddedvideo',
            name='engine',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='website.EmbedEngine'),
        ),
        # Optional video on a presentation; deleting the video nulls the link.
        migrations.AddField(
            model_name='presentation',
            name='video',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='website.EmbeddedVideo'),
        ),
    ]
| 39.44898 | 144 | 0.608381 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('website', '0004_presenter_profile_image_thumb'),
]
operations = [
migrations.CreateModel(
name='EmbeddedVideo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('video_id', models.CharField(max_length=50)),
('public', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='EmbedEngine',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, unique=True)),
('html_template', models.TextField(help_text='Use {{ID}} which will get swapped in for the EmbeddedVideo.video_id.')),
('url_help', models.CharField(blank=True, help_text='Used to help the user figure out where the video_id is.', max_length=100)),
],
),
migrations.AlterField(
model_name='presenter',
name='profile_image_thumb',
field=models.ImageField(blank=True, editable=False, upload_to='thumbs'),
),
migrations.AddField(
model_name='embeddedvideo',
name='engine',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='website.EmbedEngine'),
),
migrations.AddField(
model_name='presentation',
name='video',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='website.EmbeddedVideo'),
),
]
| true | true |
f724f2e2d80fad9431aae8674677cf6022972166 | 7,735 | py | Python | telepresence/proxy/remote.py | Nesurion/telepresence | cfe60eb91b42345ff890b7726c6388e923bc441a | [
"Apache-2.0"
] | null | null | null | telepresence/proxy/remote.py | Nesurion/telepresence | cfe60eb91b42345ff890b7726c6388e923bc441a | [
"Apache-2.0"
] | null | null | null | telepresence/proxy/remote.py | Nesurion/telepresence | cfe60eb91b42345ff890b7726c6388e923bc441a | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from subprocess import STDOUT, CalledProcessError
from typing import Dict, Optional
from telepresence import image_version
from telepresence.runner import Runner
class RemoteInfo(object):
    """
    Information about the remote setup.

    :ivar namespace str: The Kubernetes namespace.
    :ivar context str: The Kubernetes context.
    :ivar deployment_name str: The name of the Deployment object.
    :ivar pod_name str: The name of the pod created by the Deployment.
    :ivar deployment_config dict: The decoded k8s object (i.e. JSON/YAML).
    :ivar container_config dict: The container within the Deployment JSON.
    :ivar container_name str: The name of the container.
    """

    def __init__(
        self,
        runner: Runner,
        deployment_name: str,
        pod_name: str,
        deployment_config: dict,
    ) -> None:
        self.deployment_name = deployment_name
        self.pod_name = pod_name
        self.deployment_config = deployment_config

        # Locate the Telepresence proxy container: prefer the -k8s image,
        # fall back to the -proxy image (the second comprehension only runs
        # when the first finds nothing).
        spec_containers = \
            deployment_config["spec"]["template"]["spec"]["containers"]
        matches = [
            c for c in spec_containers if "telepresence-k8s" in c["image"]
        ] or [
            c for c in spec_containers if "telepresence-proxy" in c["image"]
        ]
        if not matches:
            raise RuntimeError(
                "Could not find container with image "
                "'datawire/telepresence-k8s' in pod {}.".format(pod_name)
            )
        self.container_config = matches[0]  # type: Dict
        self.container_name = self.container_config["name"]  # type: str

    def remote_telepresence_version(self) -> str:
        """Return the version used by the remote Telepresence container."""
        image_name, tag = self.container_config["image"].split(":")
        if image_name.endswith("telepresence-proxy"):
            # The proxy image is assumed to match our own release.
            return image_version
        return tag
def get_deployment_json(
    runner: Runner,
    deployment_name: str,
    deployment_type: str,
    run_id: Optional[str] = None,
) -> Dict:
    """Get the decoded JSON for a deployment.

    If this is a Deployment we created, the run_id is also passed in - this is
    the session id we set for the telepresence label. Otherwise run_id is None
    and the Deployment name must be used to locate the Deployment.
    """
    span = runner.span()
    base_cmd = ["get", deployment_type, "-o", "json", "--export"]
    try:
        if run_id is None:
            # Direct lookup by name returns a single object.
            output = runner.get_output(
                runner.kubectl(base_cmd + [deployment_name]), stderr=STDOUT
            )
            return json.loads(output)
        # Label-selector queries return a list of matching objects.
        output = runner.get_output(
            runner.kubectl(
                base_cmd + ["--selector=telepresence=" + run_id]
            ),
            stderr=STDOUT
        )
        return json.loads(output)["items"][0]
    except CalledProcessError as e:
        raise runner.fail(
            "Failed to find deployment {}:\n{}".format(
                deployment_name, e.stdout
            )
        )
    finally:
        span.end()
def wait_for_pod(runner: Runner, remote_info: RemoteInfo) -> None:
    """Wait (up to two minutes) for the Telepresence pod to start running.

    Polls ``kubectl get pod`` until the pod reports phase ``Running`` and
    the Telepresence container is ready.

    :raises RuntimeError: if the pod never becomes ready or is never found.
    """
    span = runner.span()
    pod = None  # Last successfully fetched pod description, if any.
    for _ in runner.loop_until(120, 0.25):
        try:
            pod = json.loads(
                runner.get_output(
                    runner.kubectl(
                        "get", "pod", remote_info.pod_name, "-o", "json"
                    )
                )
            )
        except CalledProcessError:
            # The pod may not exist yet; keep polling.
            continue
        if pod["status"]["phase"] == "Running":
            for container in pod["status"]["containerStatuses"]:
                if container["name"] == remote_info.container_name and (
                    container["ready"]
                ):
                    span.end()
                    return
    span.end()
    # Bug fix: if every kubectl attempt failed, ``pod`` used to be unbound
    # here, so the raise below crashed with UnboundLocalError instead of
    # producing the intended diagnostic.
    status = pod["status"] if pod is not None else "pod not found"
    raise RuntimeError(
        "Pod isn't starting or can't be found: {}".format(status)
    )
def get_remote_info(
    runner: Runner,
    deployment_name: str,
    deployment_type: str,
    timeout: float,
    run_id: Optional[str] = None,
) -> RemoteInfo:
    """
    Given the deployment name, return a RemoteInfo object.

    Polls the cluster (once per second, up to ``timeout`` seconds) for a pod
    whose name and labels match the Deployment's pod template, verifies the
    remote Telepresence version matches this tool, and waits for the pod to
    become ready.

    If this is a Deployment we created, the run_id is also passed in - this is
    the session identifier we set for the telepresence label. Otherwise run_id
    is None and the Deployment name must be used to locate the Deployment.

    :raises RuntimeError: if no matching pod appears within ``timeout``.
    """
    span = runner.span()
    deployment = get_deployment_json(
        runner, deployment_name, deployment_type, run_id=run_id
    )
    # Pods must carry at least the labels declared in the pod template.
    dst_metadata = deployment["spec"]["template"]["metadata"]
    expected_labels = dst_metadata.get("labels", {})
    runner.write("Searching for Telepresence pod:")
    runner.write(" with name {}-*".format(deployment_name))
    runner.write(" with labels {}".format(expected_labels))
    cmd = "get pod -o json --export".split()
    if run_id:
        # Narrow the listing to pods from this Telepresence session.
        cmd.append("--selector=telepresence={}".format(run_id))
    for _ in runner.loop_until(timeout, 1):
        pods = json.loads(runner.get_output(runner.kubectl(cmd)))["items"]
        for pod in pods:
            name = pod["metadata"]["name"]
            phase = pod["status"]["phase"]
            labels = pod["metadata"].get("labels", {})
            runner.write("Checking {}".format(name))
            # Pod names created by a Deployment are "<deployment>-<suffix>".
            if not name.startswith(deployment_name + "-"):
                runner.write("--> Name does not match")
                continue
            if phase not in ("Pending", "Running"):
                runner.write("--> Wrong phase: {}".format(phase))
                continue
            # Every expected label must be present with the same value.
            if not set(expected_labels.items()).issubset(set(labels.items())):
                runner.write("--> Labels don't match: {}".format(labels))
                continue
            runner.write("Looks like we've found our pod!\n")
            remote_info = RemoteInfo(
                runner,
                deployment_name,
                name,
                deployment,
            )
            # Ensure remote container is running same version as we are:
            remote_version = remote_info.remote_telepresence_version()
            if remote_version != image_version:
                runner.write("Pod is running Tel {}".format(remote_version))
                raise runner.fail((
                    "The remote datawire/telepresence-k8s container is " +
                    "running version {}, but this tool is version {}. " +
                    "Please make sure both are running the same version."
                ).format(remote_version, image_version))
            # Wait for pod to be running:
            wait_for_pod(runner, remote_info)
            span.end()
            return remote_info
    # Didn't find pod...
    span.end()
    raise RuntimeError(
        "Telepresence pod not found for Deployment '{}'.".
        format(deployment_name)
    )
| 35.645161 | 78 | 0.587589 |
import json
from subprocess import STDOUT, CalledProcessError
from typing import Dict, Optional
from telepresence import image_version
from telepresence.runner import Runner
class RemoteInfo(object):
def __init__(
self,
runner: Runner,
deployment_name: str,
pod_name: str,
deployment_config: dict,
) -> None:
self.deployment_name = deployment_name
self.pod_name = pod_name
self.deployment_config = deployment_config
cs = deployment_config["spec"]["template"]["spec"]["containers"]
containers = [c for c in cs if "telepresence-k8s" in c["image"]]
if not containers:
containers = [c for c in cs if "telepresence-proxy" in c["image"]]
if not containers:
raise RuntimeError(
"Could not find container with image "
"'datawire/telepresence-k8s' in pod {}.".format(pod_name)
)
self.container_config = containers[0]
self.container_name = self.container_config["name"]
def remote_telepresence_version(self) -> str:
name, version = self.container_config["image"].split(":")
if name.endswith("telepresence-proxy"):
return image_version
return version
def get_deployment_json(
runner: Runner,
deployment_name: str,
deployment_type: str,
run_id: Optional[str] = None,
) -> Dict:
span = runner.span()
try:
get_deployment = [
"get",
deployment_type,
"-o",
"json",
"--export",
]
if run_id is None:
return json.loads(
runner.get_output(
runner.kubectl(get_deployment + [deployment_name]),
stderr=STDOUT
)
)
else:
return json.loads(
runner.get_output(
runner.kubectl(
get_deployment + ["--selector=telepresence=" + run_id]
),
stderr=STDOUT
)
)["items"][0]
except CalledProcessError as e:
raise runner.fail(
"Failed to find deployment {}:\n{}".format(
deployment_name, e.stdout
)
)
finally:
span.end()
def wait_for_pod(runner: Runner, remote_info: RemoteInfo) -> None:
span = runner.span()
for _ in runner.loop_until(120, 0.25):
try:
pod = json.loads(
runner.get_output(
runner.kubectl(
"get", "pod", remote_info.pod_name, "-o", "json"
)
)
)
except CalledProcessError:
continue
if pod["status"]["phase"] == "Running":
for container in pod["status"]["containerStatuses"]:
if container["name"] == remote_info.container_name and (
container["ready"]
):
span.end()
return
span.end()
raise RuntimeError(
"Pod isn't starting or can't be found: {}".format(pod["status"])
)
def get_remote_info(
runner: Runner,
deployment_name: str,
deployment_type: str,
timeout: float,
run_id: Optional[str] = None,
) -> RemoteInfo:
span = runner.span()
deployment = get_deployment_json(
runner, deployment_name, deployment_type, run_id=run_id
)
dst_metadata = deployment["spec"]["template"]["metadata"]
expected_labels = dst_metadata.get("labels", {})
runner.write("Searching for Telepresence pod:")
runner.write(" with name {}-*".format(deployment_name))
runner.write(" with labels {}".format(expected_labels))
cmd = "get pod -o json --export".split()
if run_id:
cmd.append("--selector=telepresence={}".format(run_id))
for _ in runner.loop_until(timeout, 1):
pods = json.loads(runner.get_output(runner.kubectl(cmd)))["items"]
for pod in pods:
name = pod["metadata"]["name"]
phase = pod["status"]["phase"]
labels = pod["metadata"].get("labels", {})
runner.write("Checking {}".format(name))
if not name.startswith(deployment_name + "-"):
runner.write("--> Name does not match")
continue
if phase not in ("Pending", "Running"):
runner.write("--> Wrong phase: {}".format(phase))
continue
if not set(expected_labels.items()).issubset(set(labels.items())):
runner.write("--> Labels don't match: {}".format(labels))
continue
runner.write("Looks like we've found our pod!\n")
remote_info = RemoteInfo(
runner,
deployment_name,
name,
deployment,
)
remote_version = remote_info.remote_telepresence_version()
if remote_version != image_version:
runner.write("Pod is running Tel {}".format(remote_version))
raise runner.fail((
"The remote datawire/telepresence-k8s container is " +
"running version {}, but this tool is version {}. " +
"Please make sure both are running the same version."
).format(remote_version, image_version))
wait_for_pod(runner, remote_info)
span.end()
return remote_info
span.end()
raise RuntimeError(
"Telepresence pod not found for Deployment '{}'.".
format(deployment_name)
)
| true | true |
f724f33ecccdbc81d47a05655141217459e84376 | 3,882 | py | Python | src/simulate.py | ElanVB/noisy_signal_prop | 3ad81e15f02a92b3a669c9b81c8b2f12f331a1b6 | [
"MIT"
] | 5 | 2018-10-31T08:55:37.000Z | 2020-01-14T08:18:22.000Z | src/simulate.py | ElanVB/noisy_signal_prop | 3ad81e15f02a92b3a669c9b81c8b2f12f331a1b6 | [
"MIT"
] | null | null | null | src/simulate.py | ElanVB/noisy_signal_prop | 3ad81e15f02a92b3a669c9b81c8b2f12f331a1b6 | [
"MIT"
] | null | null | null |
# imports
import numpy as np
import os, sys, pickle
file_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(file_dir)
# custom import
from theory import depth
from viz import get_colours
from numpy_simulation import *
from utils import load_experiment
from theory import critical_point
def perform_experiment(experiments):
    """Run one noisy-signal-propagation simulation per scenario dict.

    Each scenario supplies ``dist``, ``noise``, ``act`` and ``init``; its
    index in the list is used as the RNG seed, so ordering is significant.
    """
    for seed, scenario in enumerate(experiments):
        noisy_signal_prop_simulations(
            scenario['dist'],
            scenario['noise'],
            scenario['act'],
            scenario['init'],
            seed=seed,
        )
def variance():
    """Variance-propagation experiments: both noise types at each init."""
    noise_types = (
        ("bern", ("prob_1", 0.6)),
        ("mult gauss", ("std", 0.25)),
    )
    experiments = []
    for dist, noise in noise_types:
        for init in ("underflow", "overflow", "crit"):
            experiments.append(
                {"dist": dist, "noise": noise, "act": "relu", "init": init}
            )
    perform_experiment(experiments)
def correlation():
    """Correlation-propagation experiments at critical initialisation."""
    noise_settings = [
        ("none", (None, None)),
        ("bern", ("prob_1", 0.6)),
        ("bern", ("prob_1", 0.8)),
        ("mult gauss", ("std", 0.25)),
        ("mult gauss", ("std", 2)),
    ]
    perform_experiment([
        {"dist": dist, "noise": noise, "act": "relu", "init": "crit"}
        for dist, noise in noise_settings
    ])
def fixed_point():
    """Fixed-point experiments sweeping the noise level for both noise types.

    The noise levels are kept as explicit literals (not generated
    arithmetically) so the float values match the recorded runs exactly.
    """
    bern_levels = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    gauss_levels = [
        0.1, 0.25, 0.4, 0.55, 0.7, 0.85, 1.0,
        1.15, 1.3, 1.45, 1.6, 1.75, 1.9,
    ]
    experiments = [
        {"dist": "bern", "noise": ("prob_1", p), "act": "relu", "init": "crit"}
        for p in bern_levels
    ] + [
        {"dist": "mult gauss", "noise": ("std", s), "act": "relu",
         "init": "crit"}
        for s in gauss_levels
    ]
    perform_experiment(experiments)
if __name__ == "__main__":
    # results directory
    results_dir = os.path.join(file_dir, "../results")
    # NOTE(review): ``results_dir`` is computed but never used in this
    # script; presumably the simulation functions write there -- verify
    # and remove if truly dead.
    # Other experiment suites, kept for manual runs:
    # variance()
    # correlation()
    fixed_point()
| 44.62069 | 89 | 0.519578 |
import numpy as np
import os, sys, pickle
file_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(file_dir)
from theory import depth
from viz import get_colours
from numpy_simulation import *
from utils import load_experiment
from theory import critical_point
def perform_experiment(experiments):
for i, experiment in enumerate(experiments):
dist = experiment['dist']
noise = experiment['noise']
act = experiment['act']
init = experiment['init']
noisy_signal_prop_simulations(dist, noise, act, init, seed=i)
def variance():
experiments = [
{"dist": "bern", "noise": ('prob_1', 0.6), "act":"relu", "init":"underflow"},
{"dist": "bern", "noise": ('prob_1', 0.6), "act":"relu", "init":"overflow"},
{"dist": "bern", "noise": ('prob_1', 0.6), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 0.25), "act":"relu", "init":"underflow"},
{"dist": "mult gauss", "noise": ('std', 0.25), "act":"relu", "init":"overflow"},
{"dist": "mult gauss", "noise": ('std', 0.25), "act":"relu", "init":"crit"}
]
perform_experiment(experiments)
def correlation():
experiments = [
{"dist": "none", "noise": (None, None), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.6), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.8), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 0.25), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 2), "act":"relu", "init":"crit"}
]
perform_experiment(experiments)
def fixed_point():
experiments = [
{"dist": "bern", "noise": ('prob_1', 0.1), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.2), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.3), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.4), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.5), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.6), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.7), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.8), "act":"relu", "init":"crit"},
{"dist": "bern", "noise": ('prob_1', 0.9), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 0.1), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 0.25), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 0.4), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 0.55), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 0.7), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 0.85), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 1.0), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 1.15), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 1.3), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 1.45), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 1.6), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 1.75), "act":"relu", "init":"crit"},
{"dist": "mult gauss", "noise": ('std', 1.9), "act":"relu", "init":"crit"}
]
perform_experiment(experiments)
if __name__ == "__main__":
results_dir = os.path.join(file_dir, "../results")
fixed_point()
| true | true |
f724f3b40f32d0b3f43e1e0eb69678d13e641ccd | 872 | py | Python | python-threatexchange/threatexchange/extensions/text_tlsh/tests/test_tlsh_hash_and_match.py | dxdc/ThreatExchange | f9aff6dd0c90e6c47ffe4151bced4de1676d84f6 | [
"BSD-3-Clause"
] | null | null | null | python-threatexchange/threatexchange/extensions/text_tlsh/tests/test_tlsh_hash_and_match.py | dxdc/ThreatExchange | f9aff6dd0c90e6c47ffe4151bced4de1676d84f6 | [
"BSD-3-Clause"
] | null | null | null | python-threatexchange/threatexchange/extensions/text_tlsh/tests/test_tlsh_hash_and_match.py | dxdc/ThreatExchange | f9aff6dd0c90e6c47ffe4151bced4de1676d84f6 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
from threatexchange.extensions.text_tlsh.text_tlsh import TextTLSHSignal
# Feature-detect the optional ``tlsh`` C extension: when it is not
# installed, the tests below are skipped instead of erroring at import.
try:
    import tlsh
    _DISABLED = False
except ImportError:
    _DISABLED = True
@unittest.skipIf(_DISABLED, "tlsh not installed")
class TLSHHasherModuleUnitTest(unittest.TestCase):
    """Unit tests for the text TLSH hasher."""

    def test_tlsh_from_string(self):
        """Long input yields the known digest; short input yields ''."""
        long_text = (
            "A minimum string length must be 256 bytes! "
            "That's so much text this means it's not super "
            "useful for finding short text!"
        )
        cases = [
            (
                long_text,
                "T1DFB092A1724AC2C0D3CA48452291E"
                "A04A5B75EB903A6E7577A54118FFA8148E98F9426",
            ),
            ("too short", ""),
        ]
        for text, expected_hash in cases:
            hashed = TextTLSHSignal.hash_from_str(text)
            assert hashed == expected_hash, f"case: {text}"
| 31.142857 | 79 | 0.681193 |
import unittest
from threatexchange.extensions.text_tlsh.text_tlsh import TextTLSHSignal
try:
import tlsh
_DISABLED = False
except ImportError:
_DISABLED = True
@unittest.skipIf(_DISABLED, "tlsh not installed")
class TLSHHasherModuleUnitTest(unittest.TestCase):
def test_tlsh_from_string(self):
expected = {
"A minimum string length must be 256 bytes! "
"That's so much text this means it's not super "
"useful for finding short text!": "T1DFB092A1724AC2C0D3CA48452291E"
"A04A5B75EB903A6E7577A54118FFA8148E98F9426",
"too short": "",
}
for input, expected_hash in expected.items():
hashed = TextTLSHSignal.hash_from_str(input)
assert hashed == expected_hash, f"case: {input}"
| true | true |
f724f5a468382942a4bfe330e8981878747ab446 | 342 | py | Python | backend/rumergy_backend/rumergy/serializers/data_log_measures_serializer.py | Firefly-Tech/rumergy-webapp | 859054bd9ee710a11b393027bb9cb1bad55d0f00 | [
"MIT"
] | 1 | 2021-11-08T00:28:37.000Z | 2021-11-08T00:28:37.000Z | backend/rumergy_backend/rumergy/serializers/data_log_measures_serializer.py | Firefly-Tech/rumergy-webapp | 859054bd9ee710a11b393027bb9cb1bad55d0f00 | [
"MIT"
] | 1 | 2021-11-02T02:17:37.000Z | 2021-11-02T02:17:37.000Z | backend/rumergy_backend/rumergy/serializers/data_log_measures_serializer.py | Firefly-Tech/rumergy-webapp | 859054bd9ee710a11b393027bb9cb1bad55d0f00 | [
"MIT"
] | 1 | 2021-10-18T22:27:04.000Z | 2021-10-18T22:27:04.000Z | from rumergy_backend.rumergy.models import DataLogMeasures
from rest_framework import serializers
class DataLogMeasuresSerializer(serializers.ModelSerializer):
    """Serialize ``DataLogMeasures`` rows for the REST API.

    Exposes each measurement value together with its parent data log,
    data point, timestamp, and status.
    """
    class Meta:
        # Model backing this serializer.
        model = DataLogMeasures
        # Fields included in API representations.
        fields = ["id", "data_log", "data_point", "value", "timestamp", "status"]
| 31.090909 | 81 | 0.736842 | from rumergy_backend.rumergy.models import DataLogMeasures
from rest_framework import serializers
class DataLogMeasuresSerializer(serializers.ModelSerializer):
class Meta:
model = DataLogMeasures
fields = ["id", "data_log", "data_point", "value", "timestamp", "status"]
| true | true |
f724f7cfee52c3eaa2ba94c61fd8676dd873a730 | 56,226 | py | Python | python/paddle/nn/layer/conv.py | SmirnovKol/Paddle | a3730dc87bc61593514b830727e36e5d19e753cd | [
"Apache-2.0"
] | 11 | 2016-08-29T07:43:26.000Z | 2016-08-29T07:51:24.000Z | python/paddle/nn/layer/conv.py | SmirnovKol/Paddle | a3730dc87bc61593514b830727e36e5d19e753cd | [
"Apache-2.0"
] | null | null | null | python/paddle/nn/layer/conv.py | SmirnovKol/Paddle | a3730dc87bc61593514b830727e36e5d19e753cd | [
"Apache-2.0"
] | 1 | 2021-09-24T11:23:36.000Z | 2021-09-24T11:23:36.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define classes of convolutional neural network
import numpy as np
from paddle import get_flags
from ...device import get_cudnn_version
from .. import Layer
from ..initializer import Normal
from .. import functional as F
from ...fluid.layers import utils
from ..functional.conv import _update_padding_nd
from ...device import is_compiled_with_cuda
from ...device import is_compiled_with_rocm
__all__ = []
def _get_default_param_initializer(num_channels, filter_size):
    """He-style Normal initializer: std = sqrt(2 / fan_in) of the filter."""
    fan_in = num_channels * np.prod(filter_size)
    return Normal(0.0, (2.0 / fan_in)**0.5)
def _reverse_repeat_list(t, n):
"""Reverse the order of `t` and repeat each element for `n` times.
This can be used to translate padding arg used by Conv and Pooling modules
to the ones used by `F.pad`.
"""
return list(x for x in reversed(t) for _ in range(n))
class _ConvNd(Layer):
    """Shared base class for the N-dimensional convolution layers.

    Validates constructor arguments, normalizes ``stride`` / ``dilation`` /
    ``kernel_size`` to per-dimension lists, creates the ``weight`` and
    ``bias`` parameters, and selects the underlying op (``convNd`` vs
    ``depthwise_conv2d``) and whether cuDNN is used. Subclasses implement
    ``forward``.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 transposed,
                 dims,
                 stride=1,
                 padding=0,
                 padding_mode='zeros',
                 output_padding=0,
                 dilation=1,
                 groups=1,
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCHW"):
        super(_ConvNd, self).__init__()
        assert weight_attr is not False, "weight_attr should not be False in Conv."
        self._param_attr = weight_attr
        self._bias_attr = bias_attr
        self._groups = groups
        self._in_channels = in_channels
        self._out_channels = out_channels
        self._data_format = data_format

        valid_padding_modes = {'zeros', 'reflect', 'replicate', 'circular'}
        if padding_mode not in valid_padding_modes:
            raise ValueError(
                "padding_mode must be one of {}, but got padding_mode='{}'".
                format(valid_padding_modes, padding_mode))

        # Non-'zeros' padding is realised with an explicit F.pad in forward(),
        # which only supports a single symmetric int padding here.
        # NOTE: this used `np.int`, a deprecated alias of the builtin `int`
        # that was removed in NumPy 1.24 — use the builtin directly.
        if padding_mode in {'reflect', 'replicate', 'circular'
                            } and not isinstance(padding, int):
            raise TypeError(
                "when padding_mode in ['reflect', 'replicate', 'circular'], type of padding must be int"
            )

        valid_format = {'NHWC', 'NCHW', 'NDHWC', 'NCDHW', 'NLC', 'NCL'}
        if data_format not in valid_format:
            raise ValueError(
                "data_format must be one of {}, but got data_format='{}'".
                format(valid_format, data_format))

        channel_last = (data_format == "NHWC") or (data_format
                                                   == "NDHWC") or (data_format
                                                                   == "NLC")
        if channel_last:
            self._channel_dim = len(data_format) - 1
        else:
            self._channel_dim = 1

        self._stride = utils.convert_to_list(stride, dims, 'stride')
        self._dilation = utils.convert_to_list(dilation, dims, 'dilation')
        self._kernel_size = utils.convert_to_list(kernel_size, dims,
                                                  'kernel_size')
        self._padding = padding
        self._padding_mode = padding_mode
        self.output_padding = output_padding
        if dims != 1:
            self._updated_padding, self._padding_algorithm = _update_padding_nd(
                padding, channel_last, dims)

        if transposed:
            filter_shape = [self._in_channels, out_channels // groups
                            ] + self._kernel_size
        else:
            if in_channels % groups != 0:
                raise ValueError("in_channels must be divisible by groups.")

            if padding_mode in {'reflect', 'replicate', 'circular'}:
                _paired_padding = utils.convert_to_list(padding, dims,
                                                        'padding')
                # F.pad wants (last-dim-first) before/after pairs.
                self._reversed_padding_repeated_twice = _reverse_repeat_list(
                    _paired_padding, 2)
                # The conv op itself then runs unpadded.
                self._updated_padding, self._padding_algorithm = _update_padding_nd(
                    0, channel_last, dims)

            filter_shape = [out_channels, in_channels // groups
                            ] + self._kernel_size

        def _get_default_param_initializer():
            # Transposed convs default to the framework's (Xavier) init.
            if transposed:
                return None
            filter_elem_num = np.prod(self._kernel_size) * self._in_channels
            std = (2.0 / filter_elem_num)**0.5
            return Normal(0.0, std)

        self.weight = self.create_parameter(
            shape=filter_shape,
            attr=self._param_attr,
            default_initializer=_get_default_param_initializer())
        self.bias = self.create_parameter(attr=self._bias_attr,
                                          shape=[self._out_channels],
                                          is_bias=True)

        cudnn_version = get_cudnn_version()

        self._use_cudnn = True if (is_compiled_with_cuda()
                                   and cudnn_version is not None) else False

        self._op_type = "conv" + str(dims) + 'd'
        # Grouped conv where every input channel is its own group maps to the
        # faster depthwise kernel (cuDNN only on ROCm builds).
        if self._op_type == 'conv2d' and (in_channels == groups
                                          and in_channels != 1
                                          and out_channels % in_channels == 0):
            self._op_type = 'depthwise_conv2d'
            if is_compiled_with_rocm():
                self._use_cudnn = True
            else:
                self._use_cudnn = False

        # Global flag to force-disable cuDNN for conv2d.
        if (is_compiled_with_cuda() and get_flags("FLAGS_conv2d_disable_cudnn")
            ["FLAGS_conv2d_disable_cudnn"]):
            self._use_cudnn = False

    def extra_repr(self):
        # Only print arguments that differ from their defaults.
        main_str = '{_in_channels}, {_out_channels}, kernel_size={_kernel_size}'
        if self._stride != [1] * len(self._stride):
            main_str += ', stride={_stride}'
        if self._padding != 0:
            main_str += ', padding={_padding}'
        if self._padding_mode != 'zeros':
            main_str += ', padding_mode={_padding_mode}'
        if self.output_padding != 0:
            main_str += ', output_padding={output_padding}'
        if self._dilation != [1] * len(self._dilation):
            main_str += ', dilation={_dilation}'
        if self._groups != 1:
            main_str += ', groups={_groups}'
        main_str += ', data_format={_data_format}'
        return main_str.format(**self.__dict__)
class Conv1D(_ConvNd):
    r"""1-D convolution layer.

    Applies a 1-D convolution over a 3-D input in ``"NCL"`` (batch,
    channels, length) or ``"NLC"`` layout:

    .. math::

        Out = \sigma (W \ast X + b)

    where :math:`W` is the kernel tensor of shape ``[M, C, K]`` (``M`` output
    channels, ``C`` input channels per group, ``K`` kernel size), :math:`b`
    an optional per-output-channel bias and :math:`\ast` the convolution
    operator. The output length is

    .. math::

        L_{out} = \frac{L_{in} + 2 * padding - (dilation * (K - 1) + 1)}{stride} + 1

    Parameters:
        in_channels(int): Number of channels in the input feature map.
        out_channels(int): Number of output channels (filters).
        kernel_size(int|tuple|list): Size of the convolving kernel.
        stride(int|tuple|list, optional): Stride of the convolution. Default: 1.
        padding(int|str|tuple|list, optional): Implicit padding added on both
            sides; also accepts ``'valid'``/``'same'`` or a one-element
            tuple/list. Default: 0.
        dilation(int|tuple|list, optional): Spacing between kernel points.
            Default: 1.
        groups(int, optional): Number of blocked connections between input
            and output channels. Default: 1.
        padding_mode(str, optional): ``'zeros'``, ``'reflect'``,
            ``'replicate'`` or ``'circular'``. Default: ``'zeros'``.
        weight_attr(ParamAttr, optional): Attribute of the learnable weight;
            by default initialized with :math:`Normal(0, \sqrt{2/fan\_in})`.
            Default: None.
        bias_attr(ParamAttr|bool, optional): Attribute of the learnable bias;
            ``False`` disables the bias. Default: None.
        data_format(str, optional): ``"NCL"`` or ``"NLC"``. Default: ``"NCL"``.

    Attribute:
        **weight** (Parameter): the learnable convolution kernel.
        **bias** (Parameter or None): the learnable bias.

    Shape:
        - x: 3-D tensor, (batch, in_channels, length) for "NCL".
        - weight: (out_channels, in_channels // groups, kernel_size).
        - bias: (out_channels,).
        - output: 3-D tensor of the same layout as x.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.nn import Conv1D
            import numpy as np
            x = np.array([[[4, 8, 1, 9],
                           [7, 2, 0, 9],
                           [6, 9, 2, 6]]]).astype(np.float32)
            w = np.array([[[9, 3, 4], [0, 0, 7], [2, 5, 6]],
                          [[0, 3, 4], [2, 9, 7], [5, 6, 8]]]).astype(np.float32)
            conv = Conv1D(3, 2, 3)
            conv.weight.set_value(w)
            y = conv(paddle.to_tensor(x))
            print(y)
            # [[[133. 238.]
            #   [160. 211.]]]
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 padding_mode='zeros',
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCL"):
        super(Conv1D, self).__init__(in_channels,
                                     out_channels,
                                     kernel_size,
                                     transposed=False,
                                     dims=1,
                                     stride=stride,
                                     padding=padding,
                                     padding_mode=padding_mode,
                                     dilation=dilation,
                                     groups=groups,
                                     weight_attr=weight_attr,
                                     bias_attr=bias_attr,
                                     data_format=data_format)

    def forward(self, x):
        # Non-'zeros' padding modes are realised with an explicit F.pad,
        # after which the convolution itself runs unpadded.
        if self._padding_mode == "zeros":
            conv_padding = self._padding
        else:
            conv_padding = 0
            x = F.pad(x,
                      self._reversed_padding_repeated_twice,
                      mode=self._padding_mode,
                      data_format=self._data_format)
        return F.conv1d(x,
                        self.weight,
                        bias=self.bias,
                        padding=conv_padding,
                        stride=self._stride,
                        dilation=self._dilation,
                        groups=self._groups,
                        data_format=self._data_format)
class Conv1DTranspose(_ConvNd):
    r"""1-D transposed convolution layer.

    Computes the transpose of ``conv1d`` (sometimes called "deconvolution",
    see `here <https://arxiv.org/pdf/1603.07285.pdf>`_) on a 3-D input in
    ``"NCL"`` or ``"NLC"`` layout:

    .. math::

        Out = \sigma (W \ast X + b)

    Because ``conv1d`` with ``stride > 1`` maps several input lengths onto
    the same output length, the transposed output length is ambiguous:

    .. math::

        L^\prime_{out} &= (L_{in} - 1) * stride - pad\_top - pad\_bottom + dilation * (L_f - 1) + 1 \\
        L_{out} &\in [ L^\prime_{out}, L^\prime_{out} + stride ]

    Pass ``output_size`` to ``forward`` to pick a specific length in that
    range; otherwise :math:`L_{out} = L^\prime_{out}`.

    Args:
        in_channels(int): Number of channels in the input feature map.
        out_channels(int): Number of output channels (filters).
        kernel_size(int|tuple|list): Size of the convolving kernel.
        stride(int|tuple|list, optional): Stride of the transposed
            convolution. Default: 1.
        padding(int|list|str|tuple, optional): Effectively removes
            ``dilation * (kernel - 1) - padding`` zero-padding from both
            sides of the output; also accepts ``'VALID'``/``'SAME'`` or
            ``[pad]`` / ``[pad_left, pad_right]``. Default: 0.
        output_padding(int|list|tuple, optional): Extra size added to the
            tail of the output dimension. Default: 0.
        groups(int, optional): Number of blocked connections between input
            and output channels. Default: 1.
        dilation(int|tuple|list, optional): Spacing between kernel points.
            Default: 1.
        weight_attr (ParamAttr, optional): Attribute of the learnable weight;
            by default initialized with Xavier. Default: None.
        bias_attr (ParamAttr|bool, optional): Attribute of the learnable
            bias; ``False`` disables the bias. Default: None.
        data_format(str, optional): ``"NCL"`` or ``"NLC"``. Default: ``"NCL"``.

    Attribute:
        **weight** (Parameter): the learnable convolution kernel.
        **bias** (Parameter or None): the learnable bias.

    Shape:
        - x: 3-D tensor, (batch, in_channels, length) for "NCL".
        - weight: (in_channels, out_channels // groups, kernel_size).
        - bias: (out_channels,).
        - output: 3-D tensor of the same layout as x.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.nn import Conv1DTranspose
            import numpy as np
            # shape: (1, 2, 4)
            x = np.array([[[4, 0, 9, 7],
                           [8, 0, 9, 2]]]).astype(np.float32)
            # shape: (2, 1, 2)
            w = np.array([[[7, 0]],
                          [[4, 2]]]).astype(np.float32)
            conv = Conv1DTranspose(2, 1, 2)
            conv.weight.set_value(w)
            y = conv(paddle.to_tensor(x))
            print(y)
            # [[[60. 16. 99. 75.  4.]]]
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 output_padding=0,
                 groups=1,
                 dilation=1,
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCL"):
        super(Conv1DTranspose, self).__init__(in_channels,
                                              out_channels,
                                              kernel_size,
                                              transposed=True,
                                              dims=1,
                                              stride=stride,
                                              padding=padding,
                                              dilation=dilation,
                                              output_padding=output_padding,
                                              groups=groups,
                                              weight_attr=weight_attr,
                                              bias_attr=bias_attr,
                                              data_format=data_format)

    def forward(self, x, output_size=None):
        return F.conv1d_transpose(x,
                                  self.weight,
                                  bias=self.bias,
                                  output_size=output_size,
                                  output_padding=self.output_padding,
                                  padding=self._padding,
                                  stride=self._stride,
                                  dilation=self._dilation,
                                  groups=self._groups,
                                  data_format=self._data_format)
class Conv2D(_ConvNd):
    r"""2-D convolution layer.

    Applies a 2-D convolution over a 4-D input in ``"NCHW"`` or ``"NHWC"``
    layout (see UFLDL's `convolution
    <http://ufldl.stanford.edu/tutorial/supervised/FeatureExtractionUsingConvolution/>`_):

    .. math::

        Out = \sigma (W \ast X + b)

    where :math:`W` is the kernel tensor of shape ``[M, C, H, W]``,
    :math:`b` an optional per-output-channel bias and :math:`\ast` the
    convolution operator. The output spatial size is

    .. math::

        H_{out} &= \frac{H_{in} + 2 * paddings[0] - (dilations[0] * (kernel\_size[0] - 1) + 1)}{strides[0]} + 1 \\
        W_{out} &= \frac{W_{in} + 2 * paddings[1] - (dilations[1] * (kernel\_size[1] - 1) + 1)}{strides[1]} + 1

    Parameters:
        in_channels(int): Number of channels in the input image.
        out_channels(int): Number of output channels (filters).
        kernel_size(int|list|tuple, optional): Size of the convolving kernel.
        stride(int|list|tuple, optional): Stride; a single int or
            (stride_H, stride_W). Default: 1.
        padding(int|str|tuple|list, optional): Implicit zero padding. Accepts
            ``'valid'``/``'same'``, a single int, a per-dimension list, a
            before/after list, or explicit per-dimension pairs (batch and
            channel pairs must be ``[0, 0]``). Default: 0.
        dilation(int|list|tuple, optional): Spacing between kernel points.
            Default: 1.
        groups(int, optional): Number of blocked connections between input
            and output channels. Default: 1.
        padding_mode(str, optional): ``'zeros'``, ``'reflect'``,
            ``'replicate'`` or ``'circular'``. Default: ``'zeros'``.
        weight_attr(ParamAttr, optional): Attribute of the learnable weight;
            by default initialized with :math:`Normal(0, \sqrt{2/fan\_in})`.
            Default: None.
        bias_attr(ParamAttr|bool, optional): Attribute of the learnable bias;
            ``False`` disables the bias. Default: None.
        data_format(str, optional): ``"NCHW"`` or ``"NHWC"``. Default: ``"NCHW"``.

    Attribute:
        **weight** (Parameter): the learnable convolution kernel.
        **bias** (Parameter or None): the learnable bias.

    Shape:
        - x: :math:`(N, C_{in}, H_{in}, W_{in})`
        - weight: :math:`(C_{out}, C_{in}, K_{h}, K_{w})`
        - bias: :math:`(C_{out})`
        - output: :math:`(N, C_{out}, H_{out}, W_{out})`

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn
            paddle.disable_static()
            x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)
            conv = nn.Conv2D(4, 6, (3, 3))
            y_var = conv(x_var)
            print(y_var.numpy().shape)
            # (2, 6, 6, 6)
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 padding_mode='zeros',
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCHW"):
        super(Conv2D, self).__init__(in_channels,
                                     out_channels,
                                     kernel_size,
                                     transposed=False,
                                     dims=2,
                                     stride=stride,
                                     padding=padding,
                                     padding_mode=padding_mode,
                                     dilation=dilation,
                                     groups=groups,
                                     weight_attr=weight_attr,
                                     bias_attr=bias_attr,
                                     data_format=data_format)

    def forward(self, x):
        # Non-'zeros' padding modes are realised with an explicit F.pad;
        # in that case _updated_padding was set to 0 by the base class.
        if self._padding_mode != 'zeros':
            x = F.pad(x,
                      self._reversed_padding_repeated_twice,
                      mode=self._padding_mode,
                      data_format=self._data_format)
        return F.conv._conv_nd(x,
                               self.weight,
                               bias=self.bias,
                               stride=self._stride,
                               padding=self._updated_padding,
                               padding_algorithm=self._padding_algorithm,
                               dilation=self._dilation,
                               groups=self._groups,
                               data_format=self._data_format,
                               channel_dim=self._channel_dim,
                               op_type=self._op_type,
                               use_cudnn=self._use_cudnn)
class Conv2DTranspose(_ConvNd):
    r"""2-D transposed convolution layer.

    Computes the transpose of ``conv2d`` (see
    `conv2dtranspose <https://arxiv.org/pdf/1603.07285.pdf>`_) on a 4-D
    input in ``"NCHW"`` or ``"NHWC"`` layout:

    .. math::

        Out = \sigma (W \ast X + b)

    where :math:`W` is the kernel tensor of shape ``[C, M, H, W]``,
    :math:`b` an optional per-output-channel bias and :math:`\ast` the
    convolution operator. The output spatial size lies in

    .. math::

        H^\prime_{out} &= (H_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (kernel\_size[0] - 1) + 1 \\
        W^\prime_{out} &= (W_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (kernel\_size[1] - 1) + 1 \\
        H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[0] ) \\
        W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[1] )

    and can be pinned down with the ``output_size`` argument of ``forward``.

    Parameters:
        in_channels(int): Number of channels in the input image.
        out_channels(int): Number of output channels (filters).
        kernel_size(int|list|tuple): Size of the convolving kernel; a single
            int means a square kernel.
        stride(int|list|tuple, optional): Stride; a single int or
            (stride_H, stride_W). Default: 1.
        padding(int|str|tuple|list, optional): Padding; accepts
            ``'valid'``/``'same'``, a single int, a per-dimension list, a
            before/after list, or explicit per-dimension pairs (batch and
            channel pairs must be ``[0, 0]``). Default: 0.
        output_padding(int|list|tuple, optional): Extra size added to one
            side of each output dimension. Default: 0.
        dilation(int|list|tuple, optional): Spacing between kernel points.
            Default: 1.
        groups(int, optional): Number of blocked connections between input
            and output channels. Default: 1.
        weight_attr(ParamAttr, optional): Attribute of the learnable weight;
            by default initialized with Xavier. Default: None.
        bias_attr(ParamAttr|bool, optional): Attribute of the learnable bias;
            ``False`` disables the bias. Default: None.
        data_format(str, optional): ``"NCHW"`` or ``"NHWC"``. Default: ``"NCHW"``.

    Attribute:
        **weight** (Parameter): the learnable convolution kernel.
        **bias** (Parameter or None): the learnable bias.

    Shape:
        - x: :math:`(N, C_{in}, H_{in}, W_{in})`
        - weight: :math:`(C_{in}, C_{out}, K_{h}, K_{w})`
        - bias: :math:`(C_{out})`
        - output: :math:`(N, C_{out}, H_{out}, W_{out})`

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn
            paddle.disable_static()
            x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)
            conv = nn.Conv2DTranspose(4, 6, (3, 3))
            y_var = conv(x_var)
            print(y_var.numpy().shape)
            # (2, 6, 10, 10)
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 output_padding=0,
                 dilation=1,
                 groups=1,
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCHW"):
        super(Conv2DTranspose, self).__init__(in_channels,
                                              out_channels,
                                              kernel_size,
                                              transposed=True,
                                              dims=2,
                                              stride=stride,
                                              padding=padding,
                                              dilation=dilation,
                                              output_padding=output_padding,
                                              groups=groups,
                                              weight_attr=weight_attr,
                                              bias_attr=bias_attr,
                                              data_format=data_format)

    def forward(self, x, output_size=None):
        # An explicit output_size overrides the stored output_padding.
        output_padding = self.output_padding if output_size is None else 0
        return F.conv2d_transpose(x,
                                  self.weight,
                                  bias=self.bias,
                                  padding=self._padding,
                                  output_padding=output_padding,
                                  stride=self._stride,
                                  dilation=self._dilation,
                                  groups=self._groups,
                                  output_size=output_size,
                                  data_format=self._data_format)
class Conv3D(_ConvNd):
r"""
**Convlution3d Layer**
The convolution3d layer calculates the output based on the input, filter
and strides, paddings, dilations, groups parameters. Input(Input) and
Output(Output) are multidimensional tensors with a shape of
:math:`[N, C, D, H, W]` . Where N is batch size, C is the number of
channels, D is the depth of the feature, H is the height of the feature,
and W is the width of the feature. Convlution3D is similar with Convlution2D
but adds one dimension(depth). If bias attribution and activation type are
provided, bias is added to the output of the convolution, and the
corresponding activation function is applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \ast X + b)
In the above equation:
* :math:`X`: Input value, a tensor with NCDHW or NDHWC format.
* :math:`W`: Filter value, a tensor with MCDHW format.
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 1-D tensor with shape [M].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Parameters:
in_channels(int): The number of input channels in the input image.
out_channels(int): The number of output channels produced by the convolution.
kernel_size(int|list|tuple, optional): The size of the convolving kernel.
stride(int|list|tuple, optional): The stride size. If stride is a list/tuple, it must
contain three integers, (stride_D, stride_H, stride_W). Otherwise, the
stride_D = stride_H = stride_W = stride. The default value is 1.
padding(int|str|tuple|list, optional): The padding size. Padding coule be in one of the following forms.
1. a string in ['valid', 'same'].
2. an int, which means each spartial dimension(depth, height, width) is zero paded by size of `padding`
3. a list[int] or tuple[int] whose length is the number of spartial dimensions, which contains the amount of padding on each side for each spartial dimension. It has the form [pad_d1, pad_d2, ...].
4. a list[int] or tuple[int] whose length is 2 * number of spartial dimensions. It has the form [pad_before, pad_after, pad_before, pad_after, ...] for all spartial dimensions.
5. a list or tuple of pairs of ints. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension are also included. Each pair of integers correspond to the amount of padding for a dimension of the input. Padding in batch dimension and channel dimension should be [0, 0] or (0, 0).
The default value is 0.
dilation(int|list|tuple, optional): The dilation size. If dilation is a list/tuple, it must
contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the
dilation_D = dilation_H = dilation_W = dilation. The default value is 1.
groups(int, optional): The groups number of the Conv3D Layer. According to grouped
convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
connected to the second half of the input channels. The default value is 1.
padding_mode(str, optional): ``'zeros'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``'zeros'``.
weight_attr(ParamAttr, optional): The parameter attribute for learnable parameters/weights
of conv3d. If it is set to None or one attribute of ParamAttr, conv3d
will create ParamAttr as param_attr. If it is set to None, the parameter
is initialized with :math:`Normal(0.0, std)`, and the :math:`std` is
:math:`(\frac{2.0 }{filter\_elem\_num})^{0.5}`. The default value is None.
bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of conv3d.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv3d
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. The default value is None.
data_format(str, optional): Data format that specifies the layout of input.
It can be "NCDHW" or "NDHWC". Default: "NCDHW".
Attribute:
**weight** (Parameter): the learnable weights of filters of this layer.
**bias** (Parameter): the learnable bias of this layer.
Shape:
- x: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`
- weight: :math:`(C_{out}, C_{in}, K_{d}, K_{h}, K_{w})`
- bias: :math:`(C_{out})`
- output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`
Where
.. math::
D_{out}&= \frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (kernel\_size[0] - 1) + 1))}{strides[0]} + 1
H_{out}&= \frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (kernel\_size[1] - 1) + 1))}{strides[1]} + 1
W_{out}&= \frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (kernel\_size[2] - 1) + 1))}{strides[2]} + 1
Raises:
ValueError: If the shapes of input, filter_size, stride, padding and
groups mismatch.
Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
paddle.disable_static()
x_var = paddle.uniform((2, 4, 8, 8, 8), dtype='float32', min=-1., max=1.)
conv = nn.Conv3D(4, 6, (3, 3, 3))
y_var = conv(x_var)
y_np = y_var.numpy()
print(y_np.shape)
# (2, 6, 6, 6, 6)
"""
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 padding_mode='zeros',
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCDHW"):
        # All heavy lifting (arg normalization, weight/bias creation, cudnn
        # selection) is delegated to the shared _ConvNd base class.
        super(Conv3D, self).__init__(in_channels,
                                     out_channels,
                                     kernel_size,
                                     False,  # transposed=False: forward conv
                                     3,  # dims=3: three spatial dimensions
                                     stride=stride,
                                     padding=padding,
                                     padding_mode=padding_mode,
                                     dilation=dilation,
                                     groups=groups,
                                     weight_attr=weight_attr,
                                     bias_attr=bias_attr,
                                     data_format=data_format)
    def forward(self, x):
        """Apply the 3-D convolution to ``x`` and return the result.

        For non-'zeros' padding modes the input is explicitly padded first
        (with the padding amount doubled per spatial dim by _ConvNd), and the
        convolution itself then runs with zero padding.
        """
        if self._padding_mode != 'zeros':
            x = F.pad(x,
                      self._reversed_padding_repeated_twice,
                      mode=self._padding_mode,
                      data_format=self._data_format)

        # _conv_nd dispatches to the concrete op (self._op_type) computed by
        # _ConvNd.__init__; padding was already applied above when needed.
        out = F.conv._conv_nd(x,
                              self.weight,
                              bias=self.bias,
                              stride=self._stride,
                              padding=self._updated_padding,
                              padding_algorithm=self._padding_algorithm,
                              dilation=self._dilation,
                              groups=self._groups,
                              data_format=self._data_format,
                              channel_dim=self._channel_dim,
                              op_type=self._op_type,
                              use_cudnn=self._use_cudnn)
        return out
class Conv3DTranspose(_ConvNd):
    r"""
    **Convolution3D transpose layer**

    The convolution3D transpose layer calculates the output based on the input,
    filter, and dilations, strides, paddings. Input(Input) and output(Output)
    are in NCDHW format. Where N is batch size, C is the number of channels,
    D is the depth of the feature, H is the height of the feature, and W
    is the width of the feature. Parameters(dilations, strides, paddings) are
    three elements. These three elements represent depth, height and width,
    respectively.
    The details of convolution transpose layer, please refer to the following
    explanation and references `therein <https://arxiv.org/pdf/1603.07285.pdf>`_.
    If bias attribution and activation type are provided, bias is added to
    the output of the convolution, and the corresponding activation function
    is applied to the final result.

    For each input :math:`X`, the equation is:

    .. math::

        Out = \sigma (W \ast X + b)

    In the above equation:

    * :math:`X`: Input value, a tensor with NCDHW format.
    * :math:`W`: Filter value, a tensor with CMDHW format.
    * :math:`\\ast`: Convolution operation.
    * :math:`b`: Bias value, a 1-D tensor with shape [M].
    * :math:`\\sigma`: Activation function.
    * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.

    **Note**:

        The conv3d_transpose can be seen as the backward of the conv3d. For conv3d,
        when stride > 1, conv3d maps multiple input shape to the same output shape,
        so for conv3d_transpose, when stride > 1, input shape maps multiple output shape.
        If output_size is None, :math:`H_{out} = H^\prime_{out}, :math:`H_{out} = \
        H^\prime_{out}, W_{out} = W^\prime_{out}`; else, the :math:`D_{out}` of the output
        size must between :math:`D^\prime_{out}` and :math:`D^\prime_{out} + strides[0]`,
        the :math:`H_{out}` of the output size must between :math:`H^\prime_{out}`
        and :math:`H^\prime_{out} + strides[1]`, and the :math:`W_{out}` of the output size must
        between :math:`W^\prime_{out}` and :math:`W^\prime_{out} + strides[2]`,
        conv3d_transpose can compute the kernel size automatically.

    Parameters:
        in_channels(int): The number of channels in the input image.
        out_channels(int): The number of channels produced by the convolution.
        kernel_size(int|list|tuple): The kernel size. If kernel_size is a list/tuple,
            it must contain three integers, (kernel_size_D, kernel_size_H, kernel_size_W).
            Otherwise, the kernel will be a square.
        stride(int|list|tuple, optional): The stride size. It means the stride in transposed convolution.
            If stride is a list/tuple, it must contain three integers, (stride_depth, stride_height,
            stride_width). Otherwise, stride_depth = stride_height = stride_width = stride.
            The default value is 1.
        padding(int|str|tuple|list, optional): The padding size. Padding could be in one of the following forms.
            1. a string in ['valid', 'same'].
            2. an int, which means each spatial dimension(depth, height, width) is zero padded by size of `padding`
            3. a list[int] or tuple[int] whose length is the number of spatial dimensions, which contains the amount of padding on each side for each spatial dimension. It has the form [pad_d1, pad_d2, ...].
            4. a list[int] or tuple[int] whose length is 2 * number of spatial dimensions. It has the form [pad_before, pad_after, pad_before, pad_after, ...] for all spatial dimensions.
            5. a list or tuple of pairs of ints. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension are also included. Each pair of integers correspond to the amount of padding for a dimension of the input. Padding in batch dimension and channel dimension should be [0, 0] or (0, 0).
            The default value is 0.
        output_padding(int|list|tuple, optional): Additional size added to one side
            of each dimension in the output shape. Default: 0.
        dilation(int|list|tuple, optional): The dilation size. If dilation is a list/tuple, it must
            contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the
            dilation_D = dilation_H = dilation_W = dilation. The default value is 1.
        groups(int, optional): The groups number of the Conv3D transpose layer. Inspired by
            grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
            when group=2, the first half of the filters is only connected to the
            first half of the input channels, while the second half of the
            filters is only connected to the second half of the input channels.
            The default value is 1.
        weight_attr(ParamAttr, optional): The parameter attribute for learnable parameters/weights
            of conv3d_transpose. If it is set to None or one attribute of ParamAttr, conv3d_transpose
            will create ParamAttr as param_attr. If the Initializer of the param_attr
            is not set, the parameter is initialized with Xavier. The default value is None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of conv3d_transpose.
            If it is set to False, no bias will be added to the output units.
            If it is set to None or one attribute of ParamAttr, conv3d_transpose
            will create ParamAttr as bias_attr. If the Initializer of the bias_attr
            is not set, the bias is initialized zero. The default value is None.
        data_format(str, optional): Data format that specifies the layout of input.
            It can be "NCDHW" or "NDHWC". Default: "NCDHW".

    Attribute:
        **weight** (Parameter): the learnable weights of filters of this layer.
        **bias** (Parameter): the learnable bias of this layer.

    Shape:
        - x: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`
        - weight: :math:`(C_{in}, C_{out}, K_{d}, K_{h}, K_{w})`
        - bias: :math:`(C_{out})`
        - output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`

    Where

    .. math::

        D^\prime_{out} &= (D_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (kernel\_size[0] - 1) + 1
        H^\prime_{out} &= (H_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (kernel\_size[1] - 1) + 1
        W^\prime_{out} &= (W_{in} - 1) * strides[2] - 2 * paddings[2] + dilations[2] * (kernel\_size[2] - 1) + 1

    Raises:
        ValueError: If the shapes of input, filter_size, stride, padding and
                    groups mismatch.

    Examples:
       .. code-block:: python

          import paddle
          import paddle.nn as nn

          paddle.disable_static()

          x_var = paddle.uniform((2, 4, 8, 8, 8), dtype='float32', min=-1., max=1.)

          conv = nn.Conv3DTranspose(4, 6, (3, 3, 3))
          y_var = conv(x_var)
          y_np = y_var.numpy()
          print(y_np.shape)
          # (2, 6, 10, 10, 10)
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 output_padding=0,
                 dilation=1,
                 groups=1,
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCDHW"):
        # Delegate argument normalization and parameter creation to _ConvNd.
        super(Conv3DTranspose, self).__init__(in_channels,
                                              out_channels,
                                              kernel_size,
                                              True,  # transposed convolution
                                              3,  # three spatial dimensions
                                              stride=stride,
                                              padding=padding,
                                              dilation=dilation,
                                              output_padding=output_padding,
                                              groups=groups,
                                              weight_attr=weight_attr,
                                              bias_attr=bias_attr,
                                              data_format=data_format)

    def forward(self, x, output_size=None):
        """Apply the 3-D transposed convolution to ``x``.

        When ``output_size`` is given it fully determines the output shape,
        so the stored ``output_padding`` is ignored (set to 0) to avoid
        double-specifying the shape.
        """
        if output_size is None:
            output_padding = self.output_padding
        else:
            output_padding = 0
        out = F.conv3d_transpose(x,
                                 self.weight,
                                 bias=self.bias,
                                 padding=self._padding,
                                 output_padding=output_padding,
                                 stride=self._stride,
                                 dilation=self._dilation,
                                 groups=self._groups,
                                 output_size=output_size,
                                 data_format=self._data_format)
        return out
| 47.974403 | 417 | 0.571426 |
import numpy as np
from paddle import get_flags
from ...device import get_cudnn_version
from .. import Layer
from ..initializer import Normal
from .. import functional as F
from ...fluid.layers import utils
from ..functional.conv import _update_padding_nd
from ...device import is_compiled_with_cuda
from ...device import is_compiled_with_rocm
__all__ = []
def _get_default_param_initializer(num_channels, filter_size):
    """MSRA-style default weight initializer: Normal(0, sqrt(2 / fan_in)).

    fan_in is the number of input connections per output unit, i.e.
    num_channels * prod(filter_size).
    """
    fan_in = num_channels * np.prod(filter_size)
    return Normal(0.0, (2.0 / fan_in) ** 0.5)
def _reverse_repeat_list(t, n):
return list(x for x in reversed(t) for _ in range(n))
class _ConvNd(Layer):
    """Shared base for all Conv*/Conv*Transpose layers.

    Normalizes stride/dilation/kernel_size/padding arguments, creates the
    weight and bias parameters, and picks the concrete op type and cudnn
    usage. ``transposed`` selects forward vs. transposed convolution and
    ``dims`` is the number of spatial dimensions (1, 2 or 3).
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 transposed,
                 dims,
                 stride=1,
                 padding=0,
                 padding_mode='zeros',
                 output_padding=0,
                 dilation=1,
                 groups=1,
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCHW"):
        super(_ConvNd, self).__init__()
        assert weight_attr is not False, "weight_attr should not be False in Conv."
        self._param_attr = weight_attr
        self._bias_attr = bias_attr
        self._groups = groups
        self._in_channels = in_channels
        self._out_channels = out_channels
        self._data_format = data_format

        valid_padding_modes = {'zeros', 'reflect', 'replicate', 'circular'}
        if padding_mode not in valid_padding_modes:
            raise ValueError(
                "padding_mode must be one of {}, but got padding_mode='{}'".
                format(valid_padding_modes, padding_mode))

        # Fix: np.int was a deprecated alias of the builtin int and was
        # removed in NumPy 1.24; use the builtin directly (same semantics).
        if padding_mode in {'reflect', 'replicate', 'circular'
                            } and not isinstance(padding, int):
            raise TypeError(
                "when padding_mode in ['reflect', 'replicate', 'circular'], type of padding must be int"
            )

        valid_format = {'NHWC', 'NCHW', 'NDHWC', 'NCDHW', 'NLC', 'NCL'}
        if data_format not in valid_format:
            raise ValueError(
                "data_format must be one of {}, but got data_format='{}'".
                format(valid_format, data_format))

        channel_last = (data_format == "NHWC") or (data_format
                                                   == "NDHWC") or (data_format
                                                                   == "NLC")
        # Channel axis index: last for channel-last layouts, else axis 1.
        if channel_last:
            self._channel_dim = len(data_format) - 1
        else:
            self._channel_dim = 1

        self._stride = utils.convert_to_list(stride, dims, 'stride')
        self._dilation = utils.convert_to_list(dilation, dims, 'dilation')
        self._kernel_size = utils.convert_to_list(kernel_size, dims,
                                                  'kernel_size')
        self._padding = padding
        self._padding_mode = padding_mode
        self.output_padding = output_padding
        if dims != 1:
            self._updated_padding, self._padding_algorithm = _update_padding_nd(
                padding, channel_last, dims)

        if transposed:
            # Transposed conv weight layout: [C_in, C_out // groups, *kernel].
            filter_shape = [self._in_channels, out_channels // groups
                            ] + self._kernel_size
        else:
            if in_channels % groups != 0:
                raise ValueError("in_channels must be divisible by groups.")

            if padding_mode in {'reflect', 'replicate', 'circular'}:
                # Explicit padding modes pre-pad the input in forward(), so
                # the conv op itself runs with zero padding.
                _paired_padding = utils.convert_to_list(padding, dims,
                                                        'padding')
                self._reversed_padding_repeated_twice = _reverse_repeat_list(
                    _paired_padding, 2)

                self._updated_padding, self._padding_algorithm = _update_padding_nd(
                    0, channel_last, dims)

            # Forward conv weight layout: [C_out, C_in // groups, *kernel].
            filter_shape = [out_channels, in_channels // groups
                            ] + self._kernel_size

        def _get_default_param_initializer():
            # Transposed convs fall back to the framework default (Xavier).
            if transposed:
                return None
            filter_elem_num = np.prod(self._kernel_size) * self._in_channels
            std = (2.0 / filter_elem_num)**0.5
            return Normal(0.0, std)

        self.weight = self.create_parameter(
            shape=filter_shape,
            attr=self._param_attr,
            default_initializer=_get_default_param_initializer())
        self.bias = self.create_parameter(attr=self._bias_attr,
                                          shape=[self._out_channels],
                                          is_bias=True)

        cudnn_version = get_cudnn_version()

        self._use_cudnn = True if (is_compiled_with_cuda()
                                   and cudnn_version is not None) else False

        self._op_type = "conv" + str(dims) + 'd'
        # Depthwise 2-D convs get a dedicated op; cudnn is only kept on ROCm.
        if self._op_type == 'conv2d' and (in_channels == groups
                                          and in_channels != 1
                                          and out_channels % in_channels == 0):
            self._op_type = 'depthwise_conv2d'
            if is_compiled_with_rocm():
                self._use_cudnn = True
            else:
                self._use_cudnn = False

        if (is_compiled_with_cuda() and get_flags("FLAGS_conv2d_disable_cudnn")
            ["FLAGS_conv2d_disable_cudnn"]):
            self._use_cudnn = False

    def extra_repr(self):
        """Build the repr suffix, mentioning only non-default settings."""
        main_str = '{_in_channels}, {_out_channels}, kernel_size={_kernel_size}'
        if self._stride != [1] * len(self._stride):
            main_str += ', stride={_stride}'
        if self._padding != 0:
            main_str += ', padding={_padding}'
        if self._padding_mode != 'zeros':
            main_str += ', padding_mode={_padding_mode}'
        if self.output_padding != 0:
            main_str += ', output_padding={output_padding}'
        if self._dilation != [1] * len(self._dilation):
            main_str += ', dilation={_dilation}'
        if self._groups != 1:
            main_str += ', groups={_groups}'
        main_str += ', data_format={_data_format}'
        return main_str.format(**self.__dict__)
class Conv1D(_ConvNd):
    """1-D convolution layer; argument handling is shared via ``_ConvNd``."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 padding_mode='zeros',
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCL"):
        super(Conv1D, self).__init__(in_channels,
                                     out_channels,
                                     kernel_size,
                                     False,  # transposed=False
                                     1,  # one spatial dimension
                                     stride=stride,
                                     padding=padding,
                                     padding_mode=padding_mode,
                                     dilation=dilation,
                                     groups=groups,
                                     weight_attr=weight_attr,
                                     bias_attr=bias_attr,
                                     data_format=data_format)

    def forward(self, x):
        """Apply the 1-D convolution to ``x``.

        Non-'zeros' padding modes are realized by explicitly padding the
        input first and running the conv with zero padding.
        """
        padding = 0
        if self._padding_mode != "zeros":
            x = F.pad(x,
                      self._reversed_padding_repeated_twice,
                      mode=self._padding_mode,
                      data_format=self._data_format)
        else:
            padding = self._padding
        out = F.conv1d(x,
                       self.weight,
                       bias=self.bias,
                       padding=padding,
                       stride=self._stride,
                       dilation=self._dilation,
                       groups=self._groups,
                       data_format=self._data_format)
        return out
class Conv1DTranspose(_ConvNd):
    """1-D transposed convolution layer."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 output_padding=0,
                 groups=1,
                 dilation=1,
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCL"):
        super(Conv1DTranspose, self).__init__(in_channels,
                                              out_channels,
                                              kernel_size,
                                              True,  # transposed convolution
                                              1,  # one spatial dimension
                                              stride=stride,
                                              padding=padding,
                                              dilation=dilation,
                                              output_padding=output_padding,
                                              groups=groups,
                                              weight_attr=weight_attr,
                                              bias_attr=bias_attr,
                                              data_format=data_format)

    def forward(self, x, output_size=None):
        """Apply the 1-D transposed convolution; ``output_size`` optionally
        pins the output length."""
        out = F.conv1d_transpose(x,
                                 self.weight,
                                 bias=self.bias,
                                 output_size=output_size,
                                 output_padding=self.output_padding,
                                 padding=self._padding,
                                 stride=self._stride,
                                 dilation=self._dilation,
                                 groups=self._groups,
                                 data_format=self._data_format)
        return out
class Conv2D(_ConvNd):
    """2-D convolution layer; argument handling is shared via ``_ConvNd``."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 padding_mode='zeros',
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCHW"):
        super(Conv2D, self).__init__(in_channels,
                                     out_channels,
                                     kernel_size,
                                     False,  # transposed=False
                                     2,  # two spatial dimensions
                                     stride=stride,
                                     padding=padding,
                                     padding_mode=padding_mode,
                                     dilation=dilation,
                                     groups=groups,
                                     weight_attr=weight_attr,
                                     bias_attr=bias_attr,
                                     data_format=data_format)

    def forward(self, x):
        """Apply the 2-D convolution (depthwise variant when _ConvNd selected
        the 'depthwise_conv2d' op type)."""
        if self._padding_mode != 'zeros':
            # Explicit padding modes pre-pad here; conv then uses zero padding.
            x = F.pad(x,
                      self._reversed_padding_repeated_twice,
                      mode=self._padding_mode,
                      data_format=self._data_format)

        out = F.conv._conv_nd(x,
                              self.weight,
                              bias=self.bias,
                              stride=self._stride,
                              padding=self._updated_padding,
                              padding_algorithm=self._padding_algorithm,
                              dilation=self._dilation,
                              groups=self._groups,
                              data_format=self._data_format,
                              channel_dim=self._channel_dim,
                              op_type=self._op_type,
                              use_cudnn=self._use_cudnn)
        return out
class Conv2DTranspose(_ConvNd):
    """2-D transposed convolution layer."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 output_padding=0,
                 dilation=1,
                 groups=1,
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCHW"):
        super(Conv2DTranspose, self).__init__(in_channels,
                                              out_channels,
                                              kernel_size,
                                              True,  # transposed convolution
                                              2,  # two spatial dimensions
                                              stride=stride,
                                              padding=padding,
                                              dilation=dilation,
                                              output_padding=output_padding,
                                              groups=groups,
                                              weight_attr=weight_attr,
                                              bias_attr=bias_attr,
                                              data_format=data_format)

    def forward(self, x, output_size=None):
        """Apply the 2-D transposed convolution.

        When ``output_size`` is given it determines the output shape, so the
        stored ``output_padding`` is ignored (set to 0).
        """
        if output_size is None:
            output_padding = self.output_padding
        else:
            output_padding = 0

        out = F.conv2d_transpose(x,
                                 self.weight,
                                 bias=self.bias,
                                 padding=self._padding,
                                 output_padding=output_padding,
                                 stride=self._stride,
                                 dilation=self._dilation,
                                 groups=self._groups,
                                 output_size=output_size,
                                 data_format=self._data_format)
        return out
class Conv3D(_ConvNd):
    """3-D convolution layer; argument handling is shared via ``_ConvNd``."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 padding_mode='zeros',
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCDHW"):
        super(Conv3D, self).__init__(in_channels,
                                     out_channels,
                                     kernel_size,
                                     False,  # transposed=False
                                     3,  # three spatial dimensions
                                     stride=stride,
                                     padding=padding,
                                     padding_mode=padding_mode,
                                     dilation=dilation,
                                     groups=groups,
                                     weight_attr=weight_attr,
                                     bias_attr=bias_attr,
                                     data_format=data_format)

    def forward(self, x):
        """Apply the 3-D convolution to ``x``."""
        if self._padding_mode != 'zeros':
            # Explicit padding modes pre-pad here; conv then uses zero padding.
            x = F.pad(x,
                      self._reversed_padding_repeated_twice,
                      mode=self._padding_mode,
                      data_format=self._data_format)

        out = F.conv._conv_nd(x,
                              self.weight,
                              bias=self.bias,
                              stride=self._stride,
                              padding=self._updated_padding,
                              padding_algorithm=self._padding_algorithm,
                              dilation=self._dilation,
                              groups=self._groups,
                              data_format=self._data_format,
                              channel_dim=self._channel_dim,
                              op_type=self._op_type,
                              use_cudnn=self._use_cudnn)
        return out
class Conv3DTranspose(_ConvNd):
    """3-D transposed convolution layer."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 output_padding=0,
                 dilation=1,
                 groups=1,
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCDHW"):
        super(Conv3DTranspose, self).__init__(in_channels,
                                              out_channels,
                                              kernel_size,
                                              True,  # transposed convolution
                                              3,  # three spatial dimensions
                                              stride=stride,
                                              padding=padding,
                                              dilation=dilation,
                                              output_padding=output_padding,
                                              groups=groups,
                                              weight_attr=weight_attr,
                                              bias_attr=bias_attr,
                                              data_format=data_format)

    def forward(self, x, output_size=None):
        """Apply the 3-D transposed convolution.

        When ``output_size`` is given it determines the output shape, so the
        stored ``output_padding`` is ignored (set to 0).
        """
        if output_size is None:
            output_padding = self.output_padding
        else:
            output_padding = 0

        out = F.conv3d_transpose(x,
                                 self.weight,
                                 bias=self.bias,
                                 padding=self._padding,
                                 output_padding=output_padding,
                                 stride=self._stride,
                                 dilation=self._dilation,
                                 groups=self._groups,
                                 output_size=output_size,
                                 data_format=self._data_format)
        return out
| true | true |
f724fa482a2e003d04725bb8120a96a0f5ea185d | 232 | py | Python | torpedo/dialects/torpedo-vertica/tests/connection_config.py | darKoram/torpedo | fbda29225044e946465bae3782a3071d0d1a2fe3 | [
"MIT"
] | 1 | 2015-02-28T14:42:57.000Z | 2015-02-28T14:42:57.000Z | torpedo/dialects/torpedo-vertica/tests/connection_config.py | darKoram/torpedo | fbda29225044e946465bae3782a3071d0d1a2fe3 | [
"MIT"
] | null | null | null | torpedo/dialects/torpedo-vertica/tests/connection_config.py | darKoram/torpedo | fbda29225044e946465bae3782a3071d0d1a2fe3 | [
"MIT"
] | null | null | null | drivername = 'vertica+pyodbc'
# Placeholder connection settings for the Vertica ODBC test suite;
# replace each value with your environment's details before running.
username = 'your_name'
host = 'your_host_ip_or_hostname'
database = 'your_db_name'
# odbcinst.ini entry [vertica_deploy_test_db]
odbcpath = '/path/to/your/odbc.ini'
datasource = 'your_odbc.ini_section'
| 29 | 45 | 0.780172 | drivername = 'vertica+pyodbc'
# Placeholder connection settings; replace before running the tests.
username = 'your_name'
host = 'your_host_ip_or_hostname'
database = 'your_db_name'
odbcpath = '/path/to/your/odbc.ini'
datasource = 'your_odbc.ini_section'
| true | true |
f724fa517398283eeaa453c0a6afffa1631cdf46 | 3,846 | py | Python | TA-linode/bin/ta_linode/aob_py3/splunktalib/common/log.py | jriddle-linode/splunk-addon-linode | 5954acd12ef88ab991365ef51072db68aed46aa1 | [
"Apache-2.0"
] | 11 | 2020-01-23T11:32:26.000Z | 2021-09-23T09:24:02.000Z | TA-linode/bin/ta_linode/aob_py3/splunktalib/common/log.py | jriddle-linode/splunk-addon-linode | 5954acd12ef88ab991365ef51072db68aed46aa1 | [
"Apache-2.0"
] | 26 | 2019-07-15T02:38:22.000Z | 2021-12-01T04:14:17.000Z | TA-linode/bin/ta_linode/aob_py3/splunktalib/common/log.py | jriddle-linode/splunk-addon-linode | 5954acd12ef88ab991365ef51072db68aed46aa1 | [
"Apache-2.0"
] | 6 | 2019-07-14T17:44:06.000Z | 2020-11-17T17:33:23.000Z | # SPDX-FileCopyrightText: 2020 Splunk Inc.
#
# SPDX-License-Identifier: Apache-2.0
"""
Copyright (C) 2005-2019 Splunk Inc. All Rights Reserved.
log utility for TA
"""
from builtins import object
import logging
import logging.handlers as handlers
import os.path as op
from splunktalib.splunk_platform import make_splunkhome_path
import splunktalib.common.util as cutil
from splunktalib.common.pattern import singleton
import time
logging.Formatter.converter = time.gmtime
def log_enter_exit(logger):
    """
    Log decorator to log function enter and exit
    """
    import functools

    def log_decorator(func):

        # functools.wraps preserves the wrapped function's __name__/__doc__,
        # so decorated functions stay introspectable and log the right name
        # when decorators are stacked.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            logger.debug("{} entered.".format(func.__name__))
            result = func(*args, **kwargs)
            logger.debug("{} exited.".format(func.__name__))
            return result

        return wrapper

    return log_decorator
@singleton
class Logs(object):
    """Singleton registry of rotating-file loggers written under
    $SPLUNK_HOME/var/log/splunk.

    Loggers are cached by resolved file name, so repeated get_logger()
    calls return the same configured logger object.
    """

    def __init__(self, namespace=None, default_level=logging.INFO):
        self._loggers = {}
        self._default_level = default_level
        if namespace is None:
            # Derive the namespace (add-on name) from this file's install path.
            namespace = cutil.get_appname_from_path(op.abspath(__file__))

        if namespace:
            namespace = namespace.lower()
        self._namespace = namespace

    def get_logger(self, name, level=None, maxBytes=25000000, backupCount=5):
        """
        Set up a default logger.

        :param name: The log file name.
        :param level: The logging level.
        :param maxBytes: The maximum log file size before rollover.
        :param backupCount: The number of log files to retain.
        """

        # Strip ".py" from the log file name if auto-generated by a script.
        if level is None:
            level = self._default_level

        name = self._get_log_name(name)
        if name in self._loggers:
            return self._loggers[name]

        logfile = make_splunkhome_path(["var", "log", "splunk", name])
        logger = logging.getLogger(name)

        # Fix: non-file handlers (e.g. StreamHandler) have no `baseFilename`
        # attribute, so guard with getattr instead of dereferencing it
        # unconditionally; also avoid materializing an intermediate list.
        handler_exists = any(
            getattr(h, "baseFilename", None) == logfile
            for h in logger.handlers)

        if not handler_exists:
            file_handler = handlers.RotatingFileHandler(
                logfile, mode="a", maxBytes=maxBytes, backupCount=backupCount
            )
            formatter = logging.Formatter(
                "%(asctime)s +0000 log_level=%(levelname)s, pid=%(process)d, tid=%(threadName)s, "
                "file=%(filename)s, func_name=%(funcName)s, code_line_no=%(lineno)d | %(message)s"
            )
            file_handler.setFormatter(formatter)
            logger.addHandler(file_handler)

            logger.setLevel(level)
            logger.propagate = False

        self._loggers[name] = logger
        return logger

    def set_level(self, level, name=None):
        """
        Change the log level of the logging

        :param level: the level of the logging to be setLevel
        :param name: the name of the logging to set, in case it is not set,
                     all the loggers will be affected
        """

        if name is not None:
            name = self._get_log_name(name)
            logger = self._loggers.get(name)
            if logger is not None:
                logger.setLevel(level)
        else:
            self._default_level = level
            for logger in self._loggers.values():
                logger.setLevel(level)

    def _get_log_name(self, name):
        # Map a bare name or script name to "<namespace>_<name>.log".
        if name.endswith(".py"):
            name = name.replace(".py", "")
        if self._namespace:
            name = "{}_{}.log".format(self._namespace, name)
        else:
            name = "{}.log".format(name)
        return name
# Global logger
# Module-wide default logger; rebind with reset_logger().
logger = Logs().get_logger("util")


def reset_logger(name):
    """
    Reset global logger.
    """

    # Rebinds the module-level `logger` to a logger writing to `name`'s file.
    global logger
    logger = Logs().get_logger(name)
| 28.488889 | 98 | 0.608684 |
from builtins import object
import logging
import logging.handlers as handlers
import os.path as op
from splunktalib.splunk_platform import make_splunkhome_path
import splunktalib.common.util as cutil
from splunktalib.common.pattern import singleton
import time
logging.Formatter.converter = time.gmtime
def log_enter_exit(logger):
    """Decorator factory: log entry to and exit from the wrapped callable
    at DEBUG level on ``logger``."""

    def log_decorator(func):

        def wrapper(*args, **kwargs):
            logger.debug("%s entered." % func.__name__)
            outcome = func(*args, **kwargs)
            logger.debug("%s exited." % func.__name__)
            return outcome

        return wrapper

    return log_decorator
@singleton
class Logs(object):
    """Singleton registry of rotating-file loggers, cached by log file name."""

    def __init__(self, namespace=None, default_level=logging.INFO):
        self._loggers = {}
        self._default_level = default_level
        if namespace is None:
            # Derive the namespace (add-on name) from this file's install path.
            namespace = cutil.get_appname_from_path(op.abspath(__file__))
        if namespace:
            namespace = namespace.lower()
        self._namespace = namespace

    def get_logger(self, name, level=None, maxBytes=25000000, backupCount=5):
        """Return a cached or newly configured rotating-file logger.

        :param name: log file name (".py" suffix is stripped).
        :param level: logging level; defaults to the registry default.
        :param maxBytes: max log file size before rollover.
        :param backupCount: number of rolled-over files to retain.
        """
        if level is None:
            level = self._default_level
        name = self._get_log_name(name)
        if name in self._loggers:
            return self._loggers[name]
        logfile = make_splunkhome_path(["var", "log", "splunk", name])
        logger = logging.getLogger(name)
        # NOTE(review): h.baseFilename raises AttributeError for non-file
        # handlers (e.g. StreamHandler) attached to this logger -- confirm
        # only file handlers are ever attached, or guard with getattr.
        handler_exists = any(
            [True for h in logger.handlers if h.baseFilename == logfile]
        )
        if not handler_exists:
            file_handler = handlers.RotatingFileHandler(
                logfile, mode="a", maxBytes=maxBytes, backupCount=backupCount
            )
            formatter = logging.Formatter(
                "%(asctime)s +0000 log_level=%(levelname)s, pid=%(process)d, tid=%(threadName)s, "
                "file=%(filename)s, func_name=%(funcName)s, code_line_no=%(lineno)d | %(message)s"
            )
            file_handler.setFormatter(formatter)
            logger.addHandler(file_handler)
            logger.setLevel(level)
            logger.propagate = False
        self._loggers[name] = logger
        return logger

    def set_level(self, level, name=None):
        """Change the level of one named logger, or of all loggers (and the
        default) when ``name`` is None."""
        if name is not None:
            name = self._get_log_name(name)
            logger = self._loggers.get(name)
            if logger is not None:
                logger.setLevel(level)
        else:
            self._default_level = level
            for logger in self._loggers.values():
                logger.setLevel(level)

    def _get_log_name(self, name):
        # Map a bare name or script name to "<namespace>_<name>.log".
        if name.endswith(".py"):
            name = name.replace(".py", "")
        if self._namespace:
            name = "{}_{}.log".format(self._namespace, name)
        else:
            name = "{}.log".format(name)
        return name
# Module-wide default logger; rebind with reset_logger().
logger = Logs().get_logger("util")
def reset_logger(name):
    """Rebind the module-level ``logger`` to one writing to ``name``'s file."""
    global logger
    logger = Logs().get_logger(name)
| true | true |
f724fb20745e78bebd0c20e4126667f97df1a297 | 1,882 | py | Python | toontown/chat/ToonChatGarbler.py | philicheese2003/ToontownProjectAltisServer | cfa225d1bdddacdbd29b621382347fce17e1dc66 | [
"Apache-2.0"
] | 3 | 2020-01-02T08:43:36.000Z | 2020-07-05T08:59:02.000Z | toontown/chat/ToonChatGarbler.py | kool601/Project-Altis-Educational-Source | 0a74999fb52d4e690a41b984703119f63c372d20 | [
"Apache-2.0"
] | null | null | null | toontown/chat/ToonChatGarbler.py | kool601/Project-Altis-Educational-Source | 0a74999fb52d4e690a41b984703119f63c372d20 | [
"Apache-2.0"
] | 2 | 2017-12-20T17:46:56.000Z | 2021-06-25T02:56:36.000Z | import string
import random
from toontown.toonbase import TTLocalizer
from otp.otpbase import OTPLocalizer
from otp.chat import ChatGarbler
class ToonChatGarbler(ChatGarbler.ChatGarbler):
    """Replaces chat text with species-appropriate animal noises."""

    # Species name -> localized list of garble words; 'default' covers
    # any species without its own entry.
    animalSounds = {'dog': TTLocalizer.ChatGarblerDog,
     'cat': TTLocalizer.ChatGarblerCat,
     'mouse': TTLocalizer.ChatGarblerMouse,
     'horse': TTLocalizer.ChatGarblerHorse,
     'rabbit': TTLocalizer.ChatGarblerRabbit,
     'duck': TTLocalizer.ChatGarblerDuck,
     'monkey': TTLocalizer.ChatGarblerMonkey,
     'bear': TTLocalizer.ChatGarblerBear,
     'pig': TTLocalizer.ChatGarblerPig,
     'deer': TTLocalizer.ChatGarblerDeer,
     'default': OTPLocalizer.ChatGarblerDefault}

    def garble(self, toon, message):
        """Return a random-length (1-7 word) garbled string for `toon`.

        `message` is accepted for interface compatibility; its content does
        not influence the garbled output.
        """
        return self._garbleWords(toon, random.randint(1, 7))

    def garbleSingle(self, toon, message):
        """Return a single garbled word for `toon` (same contract as garble)."""
        return self._garbleWords(toon, 1)

    def _garbleWords(self, toon, numWords):
        # Shared implementation for garble/garbleSingle: pick `numWords`
        # uniformly random words from the species word list, joined by
        # single spaces (random.choice == randint(0, len-1) indexing).
        animalType = toon.getStyle().getType()
        wordlist = ToonChatGarbler.animalSounds.get(
            animalType, ToonChatGarbler.animalSounds['default'])
        return ' '.join(random.choice(wordlist) for _ in range(numWords))
| 36.901961 | 63 | 0.652497 | import string
import random
from toontown.toonbase import TTLocalizer
from otp.otpbase import OTPLocalizer
from otp.chat import ChatGarbler
class ToonChatGarbler(ChatGarbler.ChatGarbler):
    """Replaces chat text with species-appropriate animal noises."""

    # Species name -> localized list of garble words; 'default' covers
    # any species without its own entry.
    animalSounds = {'dog': TTLocalizer.ChatGarblerDog,
     'cat': TTLocalizer.ChatGarblerCat,
     'mouse': TTLocalizer.ChatGarblerMouse,
     'horse': TTLocalizer.ChatGarblerHorse,
     'rabbit': TTLocalizer.ChatGarblerRabbit,
     'duck': TTLocalizer.ChatGarblerDuck,
     'monkey': TTLocalizer.ChatGarblerMonkey,
     'bear': TTLocalizer.ChatGarblerBear,
     'pig': TTLocalizer.ChatGarblerPig,
     'deer': TTLocalizer.ChatGarblerDeer,
     'default': OTPLocalizer.ChatGarblerDefault}

    def garble(self, toon, message):
        """Return 1-7 random species words joined by spaces; `message` content
        is unused."""
        newMessage = ''
        animalType = toon.getStyle().getType()
        if animalType in ToonChatGarbler.animalSounds:
            wordlist = ToonChatGarbler.animalSounds[animalType]
        else:
            wordlist = ToonChatGarbler.animalSounds['default']
        numWords = random.randint(1, 7)
        for i in xrange(1, numWords + 1):
            wordIndex = random.randint(0, len(wordlist) - 1)
            newMessage = newMessage + wordlist[wordIndex]
            if i < numWords:
                # Separate words with single spaces (no trailing space).
                newMessage = newMessage + ' '

        return newMessage

    def garbleSingle(self, toon, message):
        """Return exactly one random species word; `message` content is unused."""
        newMessage = ''
        animalType = toon.getStyle().getType()
        if animalType in ToonChatGarbler.animalSounds:
            wordlist = ToonChatGarbler.animalSounds[animalType]
        else:
            wordlist = ToonChatGarbler.animalSounds['default']
        numWords = 1
        for i in xrange(1, numWords + 1):
            wordIndex = random.randint(0, len(wordlist) - 1)
            newMessage = newMessage + wordlist[wordIndex]
            if i < numWords:
                newMessage = newMessage + ' '

        return newMessage
| true | true |
f724fb8f9f9d4d1e3c0793409f6c05445f76ed63 | 5,652 | py | Python | xscale/signal/tests/test_fitting.py | serazing/xscale | a804866aa6f6a5a0f293a7f6765ea17403159134 | [
"Apache-2.0"
] | 24 | 2017-02-28T15:01:29.000Z | 2022-02-22T08:26:23.000Z | xscale/signal/tests/test_fitting.py | serazing/xscale | a804866aa6f6a5a0f293a7f6765ea17403159134 | [
"Apache-2.0"
] | 19 | 2017-02-24T12:30:26.000Z | 2022-02-25T04:57:32.000Z | xscale/signal/tests/test_fitting.py | serazing/xscale | a804866aa6f6a5a0f293a7f6765ea17403159134 | [
"Apache-2.0"
] | 10 | 2017-03-04T02:59:42.000Z | 2021-11-14T12:40:54.000Z | # Python 2/3 compatibility
from __future__ import absolute_import, division, print_function
import xarray as xr
import numpy as np
import pandas as pd
import xscale.signal.fitting as xfit
def test_polyfit():
    """polyfit should recover a known linear trend's slope (degree-1 coeff)."""
    Nt, Nx, Ny = 100, 128, 128
    rand = xr.DataArray(np.random.rand(Nt, Nx, Ny), dims=['time', 'x', 'y'])
    slopes = 0.02 * xr.DataArray(np.cos(2 * np.pi * rand.x / Nx), dims=['x'])
    truth = rand + slopes * rand.time
    truth = truth.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
    linfit = xfit.polyfit(truth, dim='time').load()
    # Smoke test: the Dataset code path should also run without error.
    xfit.polyfit(truth.to_dataset(name='truth'), dim='time').load()
    # Averaging over y suppresses the random-noise part of the fit.
    assert np.allclose(linfit.sel(degree=1).mean(dim='y').data, slopes.data,
                       rtol=5e-2, atol=1e-3)
def test_linreg():
    """linreg should recover exact slope and offset of a noiseless line."""
    nt, nx, ny = 100, 128, 128
    offset = 0.7 * xr.DataArray(np.ones((nt, nx, ny)), dims=['time', 'x', 'y'])
    slopes = 0.02 * xr.DataArray(np.cos(2 * np.pi * offset.x / nx), dims=['x'])
    truth = offset + slopes * offset.time
    truth = truth.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
    # Smoke test: the Dataset code path should also run without error.
    xfit.polyfit(truth.to_dataset(name='truth'), dim='time').load()
    slopes_fitted, offsets_fitted = xfit.linreg(truth, dim='time')
    assert np.allclose(slopes, slopes_fitted.mean(dim='y').load())
    assert np.allclose(offset, offsets_fitted.mean(dim='y').load())
def test_trend():
    """trend should reproduce a constant field ('constant') and a noiseless
    line ('linear') exactly."""
    nt, nx, ny = 100, 128, 128
    offset = 0.7 * xr.DataArray(np.ones((nt, nx, ny)), dims=['time', 'x', 'y'])
    slopes = 0.02 * xr.DataArray(np.cos(2 * np.pi * offset.x / nx), dims=['x'])
    truth = offset + slopes * offset.time
    truth = truth.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
    trend_mean = xfit.trend(offset, dim='time', type='constant')
    trend_linear = xfit.trend(truth, dim='time', type='linear')
    assert np.allclose(offset, trend_mean.load())
    assert np.allclose(truth, trend_linear.load())
def test_detrend():
    """detrend of a pure trend should leave zeros (to numerical precision)."""
    nt, nx, ny = 100, 128, 128
    offset = 0.7 * xr.DataArray(np.ones((nt, nx, ny)), dims=['time', 'x', 'y'])
    slopes = 0.02 * xr.DataArray(np.cos(2 * np.pi * offset.x / nx), dims=['x'])
    truth = offset + slopes * offset.time
    truth = truth.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
    assert np.allclose(0 * offset, xfit.detrend(offset, dim='time',
                                                type='constant').load())
    assert np.allclose(0 * offset, xfit.detrend(truth, dim='time',
                                                type='linear').load())
def test_sinfit():
    """Harmonic fitting should recover amplitudes, phases and the offset."""
    nt, nx, ny = 100, 128, 128
    base = xr.DataArray(np.zeros((nt, nx, ny)), dims=['time', 'x', 'y'])
    base = base.assign_coords(time=pd.date_range(start='2011-01-01',
                                                 periods=100, freq='H'))
    mean_level = 0.4
    amp_diurnal, phase_diurnal = 1.2, 0.
    amp_semi, phase_semi = 1.9, 60.
    to_rad = np.pi / 180.
    diurnal = amp_diurnal * np.sin(2 * np.pi * base['time.hour'] / 24. +
                                   phase_diurnal * to_rad)
    semidiurnal = amp_semi * np.sin(2 * np.pi * base['time.hour'] / 12. +
                                    phase_semi * to_rad)
    signal = mean_level + base + diurnal + semidiurnal
    signal = signal.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
    # Fit the two harmonics simultaneously.
    both = xfit.sinfit(signal, dim='time', periods=[24, 12], unit='h').load()
    assert np.isclose(both['amplitude'].sel(periods=24).isel(x=10, y=10),
                      amp_diurnal)
    assert np.isclose(both['phase'].sel(periods=24).isel(x=10, y=10),
                      phase_diurnal, atol=1e-4)
    assert np.isclose(both['amplitude'].sel(periods=12).isel(x=10, y=10),
                      amp_semi)
    assert np.isclose(both['phase'].sel(periods=12).isel(x=10, y=10),
                      phase_semi)
    assert np.isclose(both['offset'].isel(x=10, y=10), mean_level)
    # Fit only the semidiurnal harmonic; the unfitted diurnal wave induces
    # an error, hence the 5% relative tolerance.
    single = xfit.sinfit(signal, dim='time', periods=12, unit='h').load()
    assert np.isclose(single['amplitude'].sel(periods=12).isel(x=10, y=10),
                      amp_semi, rtol=5e-2)
    assert np.isclose(single['phase'].sel(periods=12).isel(x=10, y=10),
                      phase_semi, rtol=5e-2)
    # One-dimensional input must also be accepted.
    xfit.sinfit(signal.isel(x=0, y=0), dim='time', periods=[24, 12],
                unit='h').load()
def test_sinval():
    """Reconstruction from harmonic parameters should run on a dataset."""
    nt, nx, ny = 100, 128, 128
    mean_level = 0.4
    periods = [24., 12.]
    time = xr.DataArray(pd.date_range(start='2011-01-01',
                                      periods=nt,
                                      freq='H'),
                        dims='time')
    amp = xr.DataArray([1.2, 1.9], dims='periods')
    phi = xr.DataArray([0., 60.], dims='periods')
    field = xr.DataArray(np.ones((nx, ny)), dims=['x', 'y'])
    ds = xr.Dataset({'amplitude': amp * field,
                     'phase': phi * field,
                     'offset': mean_level * field})
    ds = ds.chunk(chunks={'x': 50, 'y': 50})
    ds = ds.assign_coords(periods=periods)
    ds['periods'].attrs['units'] = 'h'
    xfit.sinval(ds, time)
    # Reconstruction restricted to a single harmonic.
    xfit.sinval(ds.sel(periods=[24, ]), time)
def test_order_and_stack():
    """_order_and_stack should move the given dim first and stack the rest.

    Fix: the original asserted ``dims[0] is 'y'`` — identity comparison of
    strings relies on CPython interning and raises a SyntaxWarning on
    Python >= 3.8.  Equality (``==``) is the correct check.
    """
    rand = xr.DataArray(np.random.rand(100, 128, 128), dims=['time', 'x', 'y'])
    rand = rand.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
    rand_stacked = xfit._order_and_stack(rand, 'y')
    assert rand_stacked.dims[0] == 'y'
    assert rand_stacked.dims[-1] == 'temp_dim'
    assert rand_stacked.shape[-1] == 128 * 100
    # Test the exception for 1d array
    rand1d = rand.isel(time=0, x=0)
    rand1d_stacked = xfit._order_and_stack(rand1d, 'y')
    assert np.array_equal(rand1d_stacked, rand1d)
def test_unstack():
rand = xr.DataArray(np.random.rand(100, 128, 128), dims=['time', 'x', 'y'])
rand = rand.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
rand_stacked = xfit._order_and_stack(rand, 'y')
rand_unstacked = xfit._unstack(rand_stacked.mean(dim='y'))
assert rand_unstacked.dims == ('time', 'x')
assert rand_unstacked.shape == (100, 128) | 43.476923 | 77 | 0.607396 |
from __future__ import absolute_import, division, print_function
import xarray as xr
import numpy as np
import pandas as pd
import xscale.signal.fitting as xfit
def test_polyfit():
Nt, Nx, Ny = 100, 128, 128
rand = xr.DataArray(np.random.rand(Nt, Nx, Ny), dims=['time', 'x', 'y'])
slopes = 0.02 * xr.DataArray(np.cos(2 * np.pi * rand.x / Nx), dims=['x'])
truth = rand + slopes * rand.time
truth = truth.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
linfit = xfit.polyfit(truth, dim='time').load()
xfit.polyfit(truth.to_dataset(name='truth'), dim='time').load()
assert np.allclose(linfit.sel(degree=1).mean(dim='y').data, slopes.data,
rtol=5e-2, atol=1e-3)
def test_linreg():
nt, nx, ny = 100, 128, 128
offset = 0.7 * xr.DataArray(np.ones((nt, nx, ny)), dims=['time', 'x', 'y'])
slopes = 0.02 * xr.DataArray(np.cos(2 * np.pi * offset.x / nx), dims=['x'])
truth = offset + slopes * offset.time
truth = truth.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
xfit.polyfit(truth.to_dataset(name='truth'), dim='time').load()
slopes_fitted, offsets_fitted = xfit.linreg(truth, dim='time')
assert np.allclose(slopes, slopes_fitted.mean(dim='y').load())
assert np.allclose(offset, offsets_fitted.mean(dim='y').load())
def test_trend():
nt, nx, ny = 100, 128, 128
offset = 0.7 * xr.DataArray(np.ones((nt, nx, ny)), dims=['time', 'x', 'y'])
slopes = 0.02 * xr.DataArray(np.cos(2 * np.pi * offset.x / nx), dims=['x'])
truth = offset + slopes * offset.time
truth = truth.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
trend_mean = xfit.trend(offset, dim='time', type='constant')
trend_linear = xfit.trend(truth, dim='time', type='linear')
assert np.allclose(offset, trend_mean.load())
assert np.allclose(truth, trend_linear.load())
def test_detrend():
nt, nx, ny = 100, 128, 128
offset = 0.7 * xr.DataArray(np.ones((nt, nx, ny)), dims=['time', 'x', 'y'])
slopes = 0.02 * xr.DataArray(np.cos(2 * np.pi * offset.x / nx), dims=['x'])
truth = offset + slopes * offset.time
truth = truth.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
assert np.allclose(0 * offset, xfit.detrend(offset, dim='time',
type='constant').load())
assert np.allclose(0 * offset, xfit.detrend(truth, dim='time',
type='linear').load())
def test_sinfit():
Nt, Nx, Ny = 100, 128, 128
zeros = xr.DataArray(np.zeros((Nt, Nx, Ny)), dims=['time', 'x', 'y'])
zeros = zeros.assign_coords(time=pd.date_range(start='2011-01-01',
periods=100, freq='H'))
offset = 0.4
amp1, phi1 = 1.2, 0.
wave1 = amp1 * np.sin(2 * np.pi * zeros['time.hour'] / 24. +
phi1 * np.pi / 180.)
amp2, phi2 = 1.9, 60.
wave2 = amp2 * np.sin(2 * np.pi * zeros['time.hour'] / 12. +
phi2 * np.pi / 180.)
truth = offset + zeros + wave1 + wave2
truth = truth.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
fit2w = xfit.sinfit(truth, dim='time', periods=[24, 12], unit='h').load()
assert np.isclose(fit2w['amplitude'].sel(periods=24).isel(x=10, y=10), amp1)
assert np.isclose(fit2w['phase'].sel(periods=24).isel(x=10, y=10), phi1,
atol=1e-4)
assert np.isclose(fit2w['amplitude'].sel(periods=12).isel(x=10, y=10), amp2)
assert np.isclose(fit2w['phase'].sel(periods=12).isel(x=10, y=10), phi2)
assert np.isclose(fit2w['offset'].isel(x=10, y=10), offset)
fit1w = xfit.sinfit(truth, dim='time', periods=12, unit='h').load()
assert np.isclose(fit1w['amplitude'].sel(periods=12).isel(x=10, y=10),
amp2, rtol=5e-2)
assert np.isclose(fit1w['phase'].sel(periods=12).isel(x=10, y=10),
phi2, rtol=5e-2)
xfit.sinfit(truth.isel(x=0, y=0), dim='time',
periods=[24, 12],
unit='h').load()
def test_sinval():
Nt, Nx, Ny = 100, 128, 128
offset = 0.4
periods = [24., 12.]
amp1, phi1 = 1.2, 0.
amp2, phi2 = 1.9, 60.
time = xr.DataArray(pd.date_range(start='2011-01-01',
periods=Nt,
freq='H'),
dims='time')
amp = xr.DataArray([amp1, amp2], dims='periods')
phi = xr.DataArray([phi1, phi2], dims='periods')
ones = xr.DataArray(np.ones((Nx, Ny)), dims=['x', 'y'])
var_dict = {'amplitude': amp * ones,
'phase': phi * ones,
'offset': offset * ones}
ds = xr.Dataset(var_dict).chunk(chunks={'x': 50, 'y': 50})
ds = ds.assign_coords(periods=periods)
ds['periods'].attrs['units'] = 'h'
xfit.sinval(ds, time)
xfit.sinval(ds.sel(periods=[24,]), time)
def test_order_and_stack():
rand = xr.DataArray(np.random.rand(100, 128, 128), dims=['time', 'x', 'y'])
rand = rand.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
rand_stacked = xfit._order_and_stack(rand, 'y')
assert rand_stacked.dims[0] is 'y'
assert rand_stacked.dims[-1] is 'temp_dim'
assert rand_stacked.shape[-1] == 128 * 100
rand1d = rand.isel(time=0, x=0)
rand1d_stacked = xfit._order_and_stack(rand1d, 'y')
assert np.array_equal(rand1d_stacked, rand1d)
def test_unstack():
rand = xr.DataArray(np.random.rand(100, 128, 128), dims=['time', 'x', 'y'])
rand = rand.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
rand_stacked = xfit._order_and_stack(rand, 'y')
rand_unstacked = xfit._unstack(rand_stacked.mean(dim='y'))
assert rand_unstacked.dims == ('time', 'x')
assert rand_unstacked.shape == (100, 128) | true | true |
f724fdc85b1773dff69d97659ac96bcf9ba268b2 | 937 | py | Python | sql/hive/src/test/resources/data/scripts/dumpdata_script.py | OlegPt/spark | c79fd911ca85f883c493c5e888f7690868d7b5ea | [
"Apache-2.0"
] | 35,083 | 2015-01-01T03:05:13.000Z | 2022-03-31T21:57:40.000Z | sql/hive/src/test/resources/data/scripts/dumpdata_script.py | OlegPt/spark | c79fd911ca85f883c493c5e888f7690868d7b5ea | [
"Apache-2.0"
] | 32,117 | 2015-01-01T00:00:24.000Z | 2022-03-31T23:54:58.000Z | sql/hive/src/test/resources/data/scripts/dumpdata_script.py | OlegPt/spark | c79fd911ca85f883c493c5e888f7690868d7b5ea | [
"Apache-2.0"
] | 29,687 | 2015-01-01T02:40:43.000Z | 2022-03-31T16:49:33.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
# Test-data generator: prints 50 * 5 * 20022 integers to stdout.  Consecutive
# i-blocks overlap by 22 values because the stride (20000) is smaller than the
# inner range (20022).
for i in range(50):
    # j only repeats the identical block five times; its value is unused.
    for j in range(5):
        for k in range(20022):
            print(20000 * i + k)
# Read and discard all of stdin -- presumably so the upstream writer can
# finish without a pipe error (NOTE(review): confirm against the caller).
for line in sys.stdin:
    pass
| 33.464286 | 61 | 0.736393 |
import sys
for i in range(50):
for j in range(5):
for k in range(20022):
print(20000 * i + k)
for line in sys.stdin:
pass
| true | true |
f724feb3c1b587e44e19364e647668548b195782 | 860 | py | Python | grnglow/glow/views/home.py | xiaokai111/green-glow | 19e399c32ee4d3cfc026684b06f49df9b7dd10d5 | [
"MIT"
] | 18 | 2016-03-19T10:57:43.000Z | 2021-10-10T07:52:51.000Z | grnglow/glow/views/home.py | xiaokai111/green-glow | 19e399c32ee4d3cfc026684b06f49df9b7dd10d5 | [
"MIT"
] | 3 | 2019-06-13T03:15:11.000Z | 2020-06-05T18:16:52.000Z | grnglow/glow/views/home.py | xiaokai111/green-glow | 19e399c32ee4d3cfc026684b06f49df9b7dd10d5 | [
"MIT"
] | 11 | 2017-05-15T14:24:17.000Z | 2021-10-10T07:52:56.000Z | # -*- encoding: utf-8 -*-
'''
Created on 2012-3-23
@author: Neil
'''
from django.shortcuts import render_to_response
from grnglow.glow.views import people
from grnglow.glow.models.photo import Photo
def base(request):
    """Render the bare base template with no extra context."""
    return render_to_response('base.html')
def index(request):
    """Front page: send signed-in users to their profile page; otherwise
    show a grid of the twelve highest-scoring photos, six per row."""
    if request.user.is_authenticated():
        # By default, people.home(request, user_id) expects user_id to be a string.
        return people.home(request, str(request.user.id))  # logged in: jump to my personal page
    # return render_to_response('index.html', {'request':request})
    else:
        photos = Photo.objects.all().order_by('-score')[0:12]  # descending by score, highest first
        p_len = len(photos)
        p_items = []
        # Chunk the photos into rows of six for the template grid.
        for i in range(0, p_len, 6):
            p_items.extend([photos[i:i + 6]])  # append each six-photo row at the end
        return render_to_response('index.html', {'request': request, 'p_items': p_items})
| 28.666667 | 89 | 0.660465 |
from django.shortcuts import render_to_response
from grnglow.glow.views import people
from grnglow.glow.models.photo import Photo
def base(request):
return render_to_response('base.html')
def index(request):
if request.user.is_authenticated():
return people.home(request, str(request.user.id))
else:
photos = Photo.objects.all().order_by('-score')[0:12]
p_len = len(photos)
p_items = []
for i in range(0, p_len, 6):
p_items.extend([photos[i:i + 6]])
return render_to_response('index.html', {'request': request, 'p_items': p_items})
| true | true |
f72500addb9c5aa51a6fb2310b80123201744064 | 5,721 | py | Python | parsers/file_name_validators.py | ddexnet/dsrf | 1dc231ef911e9ee4fbf2fae77ceaef08755f3f7e | [
"Apache-2.0"
] | 34 | 2016-04-28T13:35:50.000Z | 2022-02-21T08:25:21.000Z | parsers/file_name_validators.py | ddexnet/dsrf | 1dc231ef911e9ee4fbf2fae77ceaef08755f3f7e | [
"Apache-2.0"
] | 2 | 2020-02-07T16:37:19.000Z | 2021-01-13T16:57:40.000Z | parsers/file_name_validators.py | ddexnet/dsrf | 1dc231ef911e9ee4fbf2fae77ceaef08755f3f7e | [
"Apache-2.0"
] | 16 | 2016-05-20T12:30:20.000Z | 2022-03-24T13:44:16.000Z | # Lint as: python2, python3
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Objects to validate a single file name in a dsrf report."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import zip
from dsrf import constants
from dsrf import error
class FileNameValidator(object):
  """Validates a single DSRF report file name against expected components."""
  def __init__(self, expected_components):
    # Ordered component names (eg. ['DSR', ..., 'x', 'y', ..., 'ext']) that
    # split_file_name() zips against the parts of the actual file name.
    self.expected_components = expected_components
  def validate_value(self, file_name):
    """Validates that a filename consists of the expected components.
    Args:
      file_name: File name to validate.
    Returns:
      A tuple (file_name_dict, warnings): a dictionary of
      {component_name: component_value}
      (eg. {'ServiceDescription': 'AdSupport'}) and the set of
      FileNameValidationWarning instances collected while validating.
    """
    warnings = set()
    file_name_dict = self.split_file_name(file_name, self.expected_components)
    try:
      self.validate_xofy(file_name_dict['x'], file_name_dict['y'], file_name)
      self.validate_prefix(file_name_dict['DSR'], file_name,)
      self.validate_suffix(file_name_dict['ext'], file_name)
      self.validate_message_notification_period(
          file_name_dict['MessageNotificationPeriod'], file_name)
      self.validate_territory_of_use_or_sale(
          file_name_dict['TerritoryOfUseOrSale'], file_name)
      self.validate_message_created_datetime(
          file_name_dict['MessageCreatedDateTime'], file_name)
    except KeyError:
      # A required component key is missing: the name structure is wrong.
      raise error.FileNameValidationFailure(
          file_name, 'bad name structure, expected format: %s.' %
          constants.FILE_NAME_FORMAT)
    except error.FileNameValidationWarning as e:
      # Warnings are collected, not raised.  NOTE(review): the try block
      # aborts on the first warning, so at most one can be collected here.
      warnings.add(e)
    return file_name_dict, warnings
  @classmethod
  def validate_xofy(cls, x, y, file_name):
    """Checks that x and y parse as integers with x <= y ("file x of y")."""
    try:
      if int(x) <= int(y):
        return x, y
    except ValueError:
      pass
    # Reached on non-integer input or when x > y.
    raise error.FileNameValidationFailure(
        file_name, 'File number is not an integer or does not exist.')
  @classmethod
  def validate_prefix(cls, prefix, file_name):
    """Checks that the file name starts with the expected DSR prefix."""
    if prefix != constants.FILE_NAME_PREFIX:
      raise error.FileNameValidationFailure(
          file_name, 'File name should start with %s.' %
          constants.FILE_NAME_PREFIX)
    return prefix
  @classmethod
  def validate_suffix(cls, suffix, file_name):
    """Checks that the file extension is one of the supported ones."""
    if suffix not in constants.SUPPORTED_FILE_EXTENSIONS:
      raise error.FileNameValidationFailure(
          file_name, 'Suffix "%s" is not valid, supported suffixes: %s.' % (
              suffix, constants.SUPPORTED_FILE_EXTENSIONS))
    return suffix
  @classmethod
  def validate_message_notification_period(cls, mnp, file_name):
    """Checks the MessageNotificationPeriod (ISO 8601:2004 period format)."""
    if not constants.MESSAGE_NOTIFICATION_PERIOD_PATTERN.match(mnp):
      raise error.FileNameValidationFailure(
          file_name, 'Message Notification Period "%s" is invalid, should be '
          'ISO 8601:2004 period format.' % mnp)
    return mnp
  @classmethod
  def validate_territory_of_use_or_sale(cls, touos, file_name):
    """TerritoryOfUseOrSale may also be freeform, so this is just a warning."""
    if not constants.TERRITORY_OF_USE_OR_SALE_PATTERN.match(touos):
      raise error.FileNameValidationWarning(
          file_name,
          'It is recommended that the TerritoryOfUseOrSale be set to a '
          'CISAC TIS code or a two-letter ISO code (use "multi" or "worldwide" '
          'for multiple territories). Provided value: "%s"' % touos)
    return touos
  @classmethod
  def validate_message_created_datetime(cls, mcdt, file_name):
    """Checks the MessageCreated-DateTime component (yyyyymmddThhmmss)."""
    if not constants.MESSAGE_CREATED_DATETIME_PATTERN.match(mcdt):
      raise error.FileNameValidationFailure(
          file_name, 'MessageCreated-DateTime "%s" is invalid, should be '
          'yyyyymmddThhmmss.' % mcdt)
    return mcdt
  @classmethod
  def split_file_name(cls, file_name, expected_components):
    """Splits the file name to a dictionary keyed by components names.
    Args:
      file_name: File name to split.
      expected_components: A list of the expected file name parts.
    Returns:
      A dictionary of the file name components names (keys) and the given file
      name parts (values).
    """
    basic_split = file_name.split(constants.FILE_NAME_DELIMITER)
    # The 'XofY' and 'DateTime.ext' parts each expand into two components
    # below, hence the "- 2" when comparing against the full component list.
    if len(basic_split) != len(constants.FILE_NAME_COMPONENTS) - 2:
      raise error.FileNameValidationFailure(
          file_name, 'bad name structure, expected format: %s.' %
          constants.FILE_NAME_FORMAT)
    xofy = basic_split[-2]
    message_created_time_ext = basic_split[-1]
    file_name_parts = basic_split[:-2]
    # Expand 'XofY' -> ['X', 'Y'] and 'datetime.ext' -> ['datetime', 'ext'].
    xofy = xofy.split('of')
    message_created_time_ext = message_created_time_ext.split('.', 1)
    file_name_parts.extend(xofy)
    file_name_parts.extend(message_created_time_ext)
    if len(file_name_parts) != len(constants.FILE_NAME_COMPONENTS):
      raise error.FileNameValidationFailure(
          file_name, 'bad name structure, expected format: %s.' %
          constants.FILE_NAME_FORMAT)
    file_name_dict = {component_name: value for component_name, value in
                      zip(expected_components, file_name_parts)}
    return file_name_dict
| 38.918367 | 80 | 0.707394 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import zip
from dsrf import constants
from dsrf import error
class FileNameValidator(object):
def __init__(self, expected_components):
self.expected_components = expected_components
def validate_value(self, file_name):
warnings = set()
file_name_dict = self.split_file_name(file_name, self.expected_components)
try:
self.validate_xofy(file_name_dict['x'], file_name_dict['y'], file_name)
self.validate_prefix(file_name_dict['DSR'], file_name,)
self.validate_suffix(file_name_dict['ext'], file_name)
self.validate_message_notification_period(
file_name_dict['MessageNotificationPeriod'], file_name)
self.validate_territory_of_use_or_sale(
file_name_dict['TerritoryOfUseOrSale'], file_name)
self.validate_message_created_datetime(
file_name_dict['MessageCreatedDateTime'], file_name)
except KeyError:
raise error.FileNameValidationFailure(
file_name, 'bad name structure, expected format: %s.' %
constants.FILE_NAME_FORMAT)
except error.FileNameValidationWarning as e:
warnings.add(e)
return file_name_dict, warnings
@classmethod
def validate_xofy(cls, x, y, file_name):
try:
if int(x) <= int(y):
return x, y
except ValueError:
pass
raise error.FileNameValidationFailure(
file_name, 'File number is not an integer or does not exist.')
@classmethod
def validate_prefix(cls, prefix, file_name):
if prefix != constants.FILE_NAME_PREFIX:
raise error.FileNameValidationFailure(
file_name, 'File name should start with %s.' %
constants.FILE_NAME_PREFIX)
return prefix
@classmethod
def validate_suffix(cls, suffix, file_name):
if suffix not in constants.SUPPORTED_FILE_EXTENSIONS:
raise error.FileNameValidationFailure(
file_name, 'Suffix "%s" is not valid, supported suffixes: %s.' % (
suffix, constants.SUPPORTED_FILE_EXTENSIONS))
return suffix
@classmethod
def validate_message_notification_period(cls, mnp, file_name):
if not constants.MESSAGE_NOTIFICATION_PERIOD_PATTERN.match(mnp):
raise error.FileNameValidationFailure(
file_name, 'Message Notification Period "%s" is invalid, should be '
'ISO 8601:2004 period format.' % mnp)
return mnp
@classmethod
def validate_territory_of_use_or_sale(cls, touos, file_name):
if not constants.TERRITORY_OF_USE_OR_SALE_PATTERN.match(touos):
raise error.FileNameValidationWarning(
file_name,
'It is recommended that the TerritoryOfUseOrSale be set to a '
'CISAC TIS code or a two-letter ISO code (use "multi" or "worldwide" '
'for multiple territories). Provided value: "%s"' % touos)
return touos
@classmethod
def validate_message_created_datetime(cls, mcdt, file_name):
if not constants.MESSAGE_CREATED_DATETIME_PATTERN.match(mcdt):
raise error.FileNameValidationFailure(
file_name, 'MessageCreated-DateTime "%s" is invalid, should be '
'yyyyymmddThhmmss.' % mcdt)
return mcdt
@classmethod
def split_file_name(cls, file_name, expected_components):
basic_split = file_name.split(constants.FILE_NAME_DELIMITER)
if len(basic_split) != len(constants.FILE_NAME_COMPONENTS) - 2:
raise error.FileNameValidationFailure(
file_name, 'bad name structure, expected format: %s.' %
constants.FILE_NAME_FORMAT)
xofy = basic_split[-2]
message_created_time_ext = basic_split[-1]
file_name_parts = basic_split[:-2]
xofy = xofy.split('of')
message_created_time_ext = message_created_time_ext.split('.', 1)
file_name_parts.extend(xofy)
file_name_parts.extend(message_created_time_ext)
if len(file_name_parts) != len(constants.FILE_NAME_COMPONENTS):
raise error.FileNameValidationFailure(
file_name, 'bad name structure, expected format: %s.' %
constants.FILE_NAME_FORMAT)
file_name_dict = {component_name: value for component_name, value in
zip(expected_components, file_name_parts)}
return file_name_dict
| true | true |
f725013d099dbb0b8a35ade5b1cc606b7b8eb889 | 3,373 | py | Python | webcams/eye_status.py | OlegBezverhii/python-notebooks | 5d4b501173a2f3519bff9a085c3d2190ce6cf808 | [
"MIT"
] | null | null | null | webcams/eye_status.py | OlegBezverhii/python-notebooks | 5d4b501173a2f3519bff9a085c3d2190ce6cf808 | [
"MIT"
] | null | null | null | webcams/eye_status.py | OlegBezverhii/python-notebooks | 5d4b501173a2f3519bff9a085c3d2190ce6cf808 | [
"MIT"
] | null | null | null | import os
from PIL import Image
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import model_from_json
from keras.preprocessing.image import ImageDataGenerator
from imageio import imread, imwrite
from skimage.transform import resize
IMG_SIZE = 24
def collect():
    """Build grayscale train/validation generators from ``dataset/``.

    Returns:
        A (train_generator, val_generator) pair of keras directory iterators
        yielding 24x24 grayscale batches with binary labels.
    """
    # Both splits use the same augmentation and the same flow options.
    augmentation = dict(rescale=1. / 255, shear_range=0.2,
                        horizontal_flip=True)
    flow_options = dict(target_size=(IMG_SIZE, IMG_SIZE),
                        color_mode="grayscale",
                        batch_size=32,
                        class_mode="binary",
                        shuffle=True,
                        seed=42)
    train_generator = ImageDataGenerator(**augmentation).flow_from_directory(
        directory="dataset/train", **flow_options)
    val_generator = ImageDataGenerator(**augmentation).flow_from_directory(
        directory="dataset/val", **flow_options)
    return train_generator, val_generator
def save_model(model):
    """Persist ``model``: architecture to model.json, weights to model.h5."""
    with open("model.json", "w") as json_file:
        json_file.write(model.to_json())
    # serialize weights to HDF5
    model.save_weights("model.h5")
def load_model():
    """Load and compile the persisted model (model.json + model.h5).

    Improvement: the JSON file is read inside a ``with`` block so the handle
    is closed even if reading raises, instead of the manual open/close pair.

    Returns:
        The compiled keras model.
    """
    with open('model.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights("model.h5")
    loaded_model.compile(loss='binary_crossentropy', optimizer='adam',
                         metrics=['accuracy'])
    return loaded_model
def train(train_generator, val_generator):
    """Train a small LeNet-style CNN on the generators and persist it.

    Args:
        train_generator: keras directory iterator with the training batches.
        val_generator: keras directory iterator with the validation batches.
    """
    # One pass over each split per epoch.
    STEP_SIZE_TRAIN=train_generator.n//train_generator.batch_size
    STEP_SIZE_VALID=val_generator.n//val_generator.batch_size
    print('[LOG] Intialize Neural Network')
    # Conv(6) -> pool -> Conv(16) -> pool -> Dense(120) -> Dense(84) ->
    # sigmoid output: the classic LeNet-5 layout on 24x24 grayscale input.
    model = Sequential()
    model.add(Conv2D(filters=6, kernel_size=(3, 3), activation='relu', input_shape=(IMG_SIZE,IMG_SIZE,1)))
    model.add(AveragePooling2D())
    model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
    model.add(AveragePooling2D())
    model.add(Flatten())
    model.add(Dense(units=120, activation='relu'))
    model.add(Dense(units=84, activation='relu'))
    model.add(Dense(units=1, activation = 'sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    # NOTE(review): fit_generator is deprecated in TF2 in favour of fit().
    model.fit_generator(generator=train_generator,
                        steps_per_epoch=STEP_SIZE_TRAIN,
                        validation_data=val_generator,
                        validation_steps=STEP_SIZE_VALID,
                        epochs=20
    )
    save_model(model)
def predict(img, model):
    """Classify an eye image as 'closed', 'open' or 'idk' (inconclusive).

    Args:
        img: RGB image data accepted by PIL.Image.fromarray -- presumably a
            uint8 HxWx3 array; confirm at the call site.
        model: compiled keras model as returned by load_model().

    Returns:
        One of the strings 'closed', 'open' or 'idk'.
    """
    # Convert to grayscale, resize to the training resolution and scale to
    # [0, 1] before feeding a batch of one to the network.
    img = Image.fromarray(img, 'RGB').convert('L')
    print(img)  # NOTE(review): debug leftover -- consider removing
    img = resize(img, (IMG_SIZE,IMG_SIZE)).astype('float32')/255
    print(img)  # NOTE(review): debug leftover -- consider removing
    img = img.reshape(1,IMG_SIZE,IMG_SIZE,1)
    prediction = model.predict(img)
    # Map the sigmoid score to a label; mid-range scores are inconclusive.
    if prediction < 0.1:
        prediction = 'closed'
    elif prediction > 0.9:
        prediction = 'open'
    else:
        prediction = 'idk'
    return prediction
def evaluate(X_test, y_test):
    """Print the persisted model's accuracy (percent) on a test set."""
    trained = load_model()
    print('Evaluate model')
    metrics = trained.evaluate(X_test, y_test, verbose=0)
    print(metrics[1] * 100)  # metrics are [loss, accuracy]
if __name__ == '__main__':
    # Script entry point: build the data generators and train the network.
    train_generator , val_generator = collect()
    train(train_generator,val_generator)
| 26.769841 | 103 | 0.731693 | import os
from PIL import Image
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import model_from_json
from keras.preprocessing.image import ImageDataGenerator
from imageio import imread, imwrite
from skimage.transform import resize
IMG_SIZE = 24
def collect():
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
horizontal_flip=True,
)
val_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
horizontal_flip=True, )
train_generator = train_datagen.flow_from_directory(
directory="dataset/train",
target_size=(IMG_SIZE, IMG_SIZE),
color_mode="grayscale",
batch_size=32,
class_mode="binary",
shuffle=True,
seed=42
)
val_generator = val_datagen.flow_from_directory(
directory="dataset/val",
target_size=(IMG_SIZE, IMG_SIZE),
color_mode="grayscale",
batch_size=32,
class_mode="binary",
shuffle=True,
seed=42
)
return train_generator, val_generator
def save_model(model):
model_json = model.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
model.save_weights("model.h5")
def load_model():
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights("model.h5")
loaded_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
return loaded_model
def train(train_generator, val_generator):
STEP_SIZE_TRAIN=train_generator.n//train_generator.batch_size
STEP_SIZE_VALID=val_generator.n//val_generator.batch_size
print('[LOG] Intialize Neural Network')
model = Sequential()
model.add(Conv2D(filters=6, kernel_size=(3, 3), activation='relu', input_shape=(IMG_SIZE,IMG_SIZE,1)))
model.add(AveragePooling2D())
model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
model.add(AveragePooling2D())
model.add(Flatten())
model.add(Dense(units=120, activation='relu'))
model.add(Dense(units=84, activation='relu'))
model.add(Dense(units=1, activation = 'sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=val_generator,
validation_steps=STEP_SIZE_VALID,
epochs=20
)
save_model(model)
def predict(img, model):
img = Image.fromarray(img, 'RGB').convert('L')
print(img)
img = resize(img, (IMG_SIZE,IMG_SIZE)).astype('float32')/255
print(img)
img = img.reshape(1,IMG_SIZE,IMG_SIZE,1)
prediction = model.predict(img)
if prediction < 0.1:
prediction = 'closed'
elif prediction > 0.9:
prediction = 'open'
else:
prediction = 'idk'
return prediction
def evaluate(X_test, y_test):
model = load_model()
print('Evaluate model')
loss, acc = model.evaluate(X_test, y_test, verbose = 0)
print(acc * 100)
if __name__ == '__main__':
train_generator , val_generator = collect()
train(train_generator,val_generator)
| true | true |
f7250248eaa636892462bb0e99e0d5df70467f27 | 22,991 | py | Python | flaski/apps/main/ihistogram.py | mpg-age-bioinformatics/flaski | f56e00dd80d8706ecb8593ba6585a97eed881896 | [
"MIT"
] | 9 | 2020-08-03T01:22:59.000Z | 2022-03-03T02:02:04.000Z | flaski/apps/main/ihistogram.py | mpg-age-bioinformatics/flaski | f56e00dd80d8706ecb8593ba6585a97eed881896 | [
"MIT"
] | 79 | 2020-06-03T06:34:46.000Z | 2021-09-22T13:31:43.000Z | flaski/apps/main/ihistogram.py | mpg-age-bioinformatics/flaski | f56e00dd80d8706ecb8593ba6585a97eed881896 | [
"MIT"
] | 5 | 2020-10-05T10:20:23.000Z | 2022-03-01T14:23:12.000Z | #from matplotlib.figure import Figure
import plotly.express as px
import plotly.graph_objects as go
import plotly.figure_factory as ff
from collections import OrderedDict
import numpy as np
import sys
def GET_COLOR(x):
    """Normalise a user-supplied colour specification.

    Args:
        x: a colour name (eg. "blue") or an "rgb(r, g, b)" /
            "rgba(r, g, b, a)" channel string.

    Returns:
        A list of floats (the channel values) for rgb/rgba strings,
        otherwise the input coerced to ``str``.

    Fix: the original split on the literal "rgb(", so "rgba(...)" and
    uppercase "RGB(...)" matched the prefix test but left the head in
    place and crashed in float() with a ValueError.  Splitting on the
    first "(" handles all three spellings.
    """
    s = str(x)
    if s[:3].lower() == "rgb":
        # Take everything between the first "(" and the closing ")".
        vals = s.split("(", 1)[-1].split(")")[0].split(",")
        return [float(v.strip(" ")) for v in vals]
    return s
def make_figure(df,pa):
"""Generates figure.
Args:
df (pandas.core.frame.DataFrame): Pandas DataFrame containing the input data.
pa (dict): A dictionary of the style { "argument":"value"} as outputted by `figure_defaults`.
Returns:
A Plotly figure
"""
tmp=df.copy()
tmp=tmp[pa["vals"]]
fig = go.Figure( )
# MAIN FIGURE
#Load checkboxes
pab={}
# print("Main", pa["kde"])
for arg in ["show_legend","upper_axis","lower_axis","left_axis","right_axis","errorbar",\
"errorbar_symmetric","tick_left_axis","tick_lower_axis","tick_upper_axis","tick_right_axis",\
"kde","show_hist","show_curve","show_rug"]:
if pa[arg] in ["off",".off"]:
pab[arg]=False
else:
pab[arg]=True
# if arg in ["upper_axis","lower_axis","left_axis","right_axis"]:
# print(arg, pa[arg], pab[arg])
#Load floats
floats=["bin_size","errorbar_value","errorbar_thickness","errorbar_width","x","y","axis_line_width","ticks_line_width",\
"ticks_length","x_lower_limit","x_upper_limit","y_lower_limit","y_upper_limit","spikes_thickness","xticks_rotation",\
"yticks_rotation","xticks_fontsize","yticks_fontsize","grid_width","legend_borderwidth","legend_tracegroupgap","legend_x",\
"legend_y","fig_width","fig_height"]
for a in floats:
if pa[a] == "" or pa[a]=="None" or pa[a]==None:
pab[a]=None
else:
pab[a]=float(pa[a])
#Load integers
integers=["label_fontsize","legend_fontsize","legend_title_fontsize","title_fontsize","maxxticks","maxyticks"]
for a in integers:
if pa[a] == "" or pa[a]=="None" or pa[a]==None:
pab[a]=None
else:
pab[a]=int(pa[a])
#Load Nones
possible_nones=["errorbar_color","title_fontcolor","axis_line_color","ticks_color","spikes_color","label_fontcolor",\
"paper_bgcolor","plot_bgcolor","grid_color","legend_bgcolor","legend_bordercolor","legend_fontcolor","legend_title_fontcolor",\
"title_fontfamily","label_fontfamily","legend_fontfamily","legend_title_fontfamily"]
for p in possible_nones:
if pa[p] == "None" or pa[p]=="Default" :
pab[p]=None
else:
pab[p]=pa[p]
#KDE (KERNEL DENSITY ESTIMATION) plot
if pab["kde"]==True:
colors=list()
if pa["rug_text"]!="":
rug_text=pa["rug_text"].split(",")
else:
rug_text=[]
for h in pa["groups_settings"].values():
if h["color_rgb"] == "":
if h["color_value"]=="None":
colors.append(None)
else:
colors.append(h["color_value"])
else:
colors.append(GET_COLOR(h["color_rgb"]))
hist_data=[]
for col in tmp.columns:
hist_data.append(tmp[col].dropna())
if (not pab["show_hist"]) & (not pab["show_curve"]):
pa["show_curve"]="on"
pab["show_curve"]=True
fig=ff.create_distplot(hist_data=hist_data, group_labels=pa["vals"],curve_type=pa["curve_type"],show_hist=pab["show_hist"],\
show_curve=pab["show_curve"],show_rug=pab["show_rug"],bin_size=pab["bin_size"],rug_text=rug_text,colors=colors, histnorm=pa["kde_histnorm"])
else:
for h in pa["groups_settings"].values():
#Initialize dummie dict
h_=dict()
#Load integers
integers=["hover_fontsize","bins_number"]
for a in integers:
if h[a] == "" or h[a]=="None" or h[a] == None:
h_[a]=None
else:
h_[a]=int(h[a])
#Load Nones
possible_nones=["hover_bgcolor","hover_bordercolor","hover_fontfamily","hover_fontcolor"]
for p in possible_nones:
if h[p] == "None" or h[p]=="Default" :
h_[p]=None
else:
h_[p]=h[p]
#Load floats
floats=["opacity","linewidth"]
for a in floats:
if h[a] == "":
h_[a]=None
else:
h_[a]=float(h[a])
if h["label"]!="":
name=h["label"]
else:
name=""
if h["text"]!="":
text=h["text"]
else:
text=""
if h["color_rgb"] == "":
if h["color_value"]=="None":
marker_color=None
else:
marker_color = h["color_value"]
else:
marker_color = GET_COLOR( h["color_rgb"] )
if h["line_rgb"] == "":
if h["line_color"]=="None":
line_color=None
else:
line_color = h["line_color"]
else:
line_color = GET_COLOR( h["line_rgb"] )
if h["histnorm"] == "None":
histnorm = ""
else:
histnorm = h["histnorm"]
if h["cumulative"]=="on":
cumulative_enabled=True
else:
cumulative_enabled=False
marker=dict(color=marker_color,line=dict(width=h_["linewidth"],color=line_color))
cumulative=dict(enabled=cumulative_enabled,direction=h["cumulative_direction"])
hoverlabel=dict(bgcolor=h_["hover_bgcolor"],bordercolor=h_["hover_bordercolor"],align=h["hover_align"],\
font=dict(family=h_["hover_fontfamily"],size=h_["hover_fontsize"],color=h_["hover_fontcolor"]))
if pab["errorbar"]==True:
errorbar=True
if h["orientation_value"]=="vertical":
if pab["errorbar"]==True:
error_y=dict(visible=errorbar,value=pab["errorbar_value"],type=pa["errorbar_type"],symmetric=pab["errorbar_symmetric"],color=pab["errorbar_color"],\
thickness=pab["errorbar_thickness"],width=pab["errorbar_width"])
else:
error_y=dict(visible=False)
fig.add_trace(go.Histogram(x=tmp[h["name"]].dropna(),text=text,hoverinfo=h["hoverinfo"],histfunc=h["histfunc"],cumulative=cumulative,\
opacity=h_["opacity"],nbinsx=h_["bins_number"],name=name,marker=marker,error_y=error_y,hoverlabel=hoverlabel,histnorm=histnorm))
elif h["orientation_value"]=="horizontal":
if pab["errorbar"]==True:
error_x=dict(visible=errorbar,value=pab["errorbar_value"],type=pa["errorbar_type"],symmetric=pab["errorbar_symmetric"],color=pab["errorbar_color"],\
thickness=pab["errorbar_thickness"],width=pab["errorbar_width"])
else:
error_x=dict(visible=False)
fig.add_trace(go.Histogram(y=tmp[h["name"]].dropna(),text=text,hoverinfo=h["hoverinfo"],histfunc=h["histfunc"],cumulative=cumulative,\
opacity=h_["opacity"],nbinsy=h_["bins_number"],name=name,marker=marker,error_x=error_x,hoverlabel=hoverlabel,histnorm=histnorm))
#UPDATE LAYOUT OF HISTOGRAMS
#Figure size
fig.update_layout( width=pab["fig_width"], height=pab["fig_height"] ) # autosize=False,
#Update title
title=dict(text=pa["title"],font=dict(family=pab["title_fontfamily"],size=pab["title_fontsize"],color=pab["title_fontcolor"]),\
xref=pa["xref"],yref=pa["yref"],x=pab["x"],y=pab["y"],xanchor=pa["title_xanchor"],yanchor=pa["title_yanchor"])
fig.update_layout(title=title,barmode=pa["barmode"])
#Update axes
if pa["log_scale"]==True and pa["orientation"]=="vertical":
fig.update_yaxes(type="log")
elif pa["log_scale"]==True and pa["orientation"]=="horizontal":
fig.update_xaxes(type="log")
# print(pab["lower_axis"],pab["axis_line_width"],pab["axis_line_color"],pab["upper_axis"])
fig.update_xaxes(zeroline=False, showline=pab["lower_axis"], linewidth=pab["axis_line_width"], linecolor=pab["axis_line_color"], mirror=pab["upper_axis"])
fig.update_yaxes(zeroline=False, showline=pab["left_axis"], linewidth=pab["axis_line_width"], linecolor=pab["axis_line_color"],mirror=pab["right_axis"])
#Update ticks
if pab["tick_lower_axis"]==False and pab["tick_right_axis"]==False and pab["tick_left_axis"]==False and pab["tick_upper_axis"]==False:
pa["ticks_direction_value"]=""
ticks=""
else:
ticks=pa["ticks_direction_value"]
fig.update_xaxes(ticks=ticks, tickwidth=pab["ticks_line_width"], tickcolor=pab["ticks_color"], ticklen=pab["ticks_length"])
fig.update_yaxes(ticks=ticks, tickwidth=pab["ticks_line_width"], tickcolor=pab["ticks_color"], ticklen=pab["ticks_length"])
#Update mirror property of axis based on ticks and axis selected by user
#Determines if the axis lines or/and ticks are mirrored to the opposite side of the plotting area.
# If "True", the axis lines are mirrored. If "ticks", the axis lines and ticks are mirrored. If "False", mirroring is disable.
# If "all", axis lines are mirrored on all shared-axes subplots. If "allticks", axis lines and ticks are mirrored on all shared-axes subplots.
if pab["tick_upper_axis"] :
fig.update_xaxes(mirror="ticks")
# elif pab["upper_axis"] :
# fig.update_xaxes(mirror=True)
# else:
# fig.update_xaxes(mirror=False)
if pab["tick_right_axis"]:
fig.update_yaxes(mirror="ticks")
# elif pab["right_axis"]:
# fig.update_yaxes(mirror=True)
# else:
# fig.update_yaxes(mirror=False)
# fig.update_yaxes(mirror=True)
if (pa["x_lower_limit"]!="") and (pa["x_upper_limit"]!="") :
xmin=pab["x_lower_limit"]
xmax=pab["x_upper_limit"]
fig.update_xaxes(range=[xmin, xmax])
if (pa["y_lower_limit"]!="") and (pa["y_upper_limit"]!="") :
ymin=pab["y_lower_limit"]
ymax=pab["y_upper_limit"]
fig.update_yaxes(range=[ymin, ymax])
if pa["maxxticks"]!="":
fig.update_xaxes(nticks=pab["maxxticks"])
if pa["maxyticks"]!="":
fig.update_yaxes(nticks=pab["maxyticks"])
#Update spikes
if pa["spikes_value"]=="both":
fig.update_xaxes(showspikes=True,spikecolor=pab["spikes_color"],spikethickness=pab["spikes_thickness"],spikedash=pa["spikes_dash"],spikemode=pa["spikes_mode"])
fig.update_yaxes(showspikes=True,spikecolor=pab["spikes_color"],spikethickness=pab["spikes_thickness"],spikedash=pa["spikes_dash"],spikemode=pa["spikes_mode"])
elif pa["spikes_value"]=="x":
fig.update_xaxes(showspikes=True,spikecolor=pab["spikes_color"],spikethickness=pab["spikes_thickness"],spikedash=pa["spikes_dash"],spikemode=pa["spikes_mode"])
elif pa["spikes_value"]=="y":
fig.update_yaxes(showspikes=True,spikecolor=pab["spikes_color"],spikethickness=pab["spikes_thickness"],spikedash=pa["spikes_dash"],spikemode=pa["spikes_mode"])
elif pa["spikes_value"]=="None":
fig.update_xaxes(showspikes=None)
fig.update_yaxes(showspikes=None)
#UPDATE X AXIS AND Y AXIS LAYOUT
xaxis=dict(visible=True, title=dict(text=pa["xlabel"],font=dict(family=pab["label_fontfamily"],size=pab["label_fontsize"],color=pab["label_fontcolor"])))
yaxis=dict(visible=True, title=dict(text=pa["ylabel"],font=dict(family=pab["label_fontfamily"],size=pab["label_fontsize"],color=pab["label_fontcolor"])))
fig.update_layout(paper_bgcolor=pab["paper_bgcolor"],plot_bgcolor=pab["plot_bgcolor"],xaxis = xaxis,yaxis = yaxis)
fig.update_xaxes(tickangle=pab["xticks_rotation"], tickfont=dict(size=pab["xticks_fontsize"]))
fig.update_yaxes(tickangle=pab["yticks_rotation"], tickfont=dict(size=pab["yticks_fontsize"]))
#UPDATE GRID PROPERTIES
if pa["grid_value"] == "None":
fig.update_xaxes(showgrid=False)
fig.update_yaxes(showgrid=False)
elif pa["grid_value"]=="x":
fig.update_yaxes(showgrid=True,gridcolor=pab["grid_color"],gridwidth=pab["grid_width"])
elif pa["grid_value"]=="y":
fig.update_xaxes(showgrid=True,gridcolor=pab["grid_color"],gridwidth=pab["grid_width"])
elif pa["grid_value"]=="both":
fig.update_xaxes(showgrid=True,gridcolor=pab["grid_color"],gridwidth=pab["grid_width"])
fig.update_yaxes(showgrid=True,gridcolor=pab["grid_color"],gridwidth=pab["grid_width"])
fig.update_layout(template='plotly_white')
#UPDATE LEGEND PROPERTIES
if pab["show_legend"]==True:
if pa["legend_orientation"]=="vertical":
legend_orientation="v"
elif pa["legend_orientation"]=="horizontal":
legend_orientation="h"
fig.update_layout(showlegend=True,legend=dict(x=pab["legend_x"],y=pab["legend_y"],bgcolor=pab["legend_bgcolor"],bordercolor=pab["legend_bordercolor"],\
borderwidth=pab["legend_borderwidth"],valign=pa["legend_valign"],\
font=dict(family=pab["legend_fontfamily"],size=pab["legend_fontsize"],color=pab["legend_fontcolor"]),orientation=legend_orientation,\
traceorder=pa["legend_traceorder"],tracegroupgap=pab["legend_tracegroupgap"],\
title=dict(text=pa["legend_title"],side=pa["legend_side"],font=dict(family=pab["legend_title_fontfamily"],size=pab["legend_title_fontsize"],\
color=pab["legend_title_fontcolor"]))))
else:
fig.update_layout(showlegend=False)
return fig
STANDARD_SIZES=[str(i) for i in list(range(1,101))]
STANDARD_COLORS=["None","aliceblue","antiquewhite","aqua","aquamarine","azure","beige",\
"bisque","black","blanchedalmond","blue","blueviolet","brown","burlywood",\
"cadetblue","chartreuse","chocolate","coral","cornflowerblue","cornsilk",\
"crimson","cyan","darkblue","darkcyan","darkgoldenrod","darkgray","darkgrey",\
"darkgreen","darkkhaki","darkmagenta","darkolivegreen","darkorange","darkorchid",\
"darkred","darksalmon","darkseagreen","darkslateblue","darkslategray","darkslategrey",\
"darkturquoise","darkviolet","deeppink","deepskyblue","dimgray","dimgrey","dodgerblue",\
"firebrick","floralwhite","forestgreen","fuchsia","gainsboro","ghostwhite","gold",\
"goldenrod","gray","grey","green","greenyellow","honeydew","hotpink","indianred","indigo",\
"ivory","khaki","lavender","lavenderblush","lawngreen","lemonchiffon","lightblue","lightcoral",\
"lightcyan","lightgoldenrodyellow","lightgray","lightgrey","lightgreen","lightpink","lightsalmon",\
"lightseagreen","lightskyblue","lightslategray","lightslategrey","lightsteelblue","lightyellow",\
"lime","limegreen","linen","magenta","maroon","mediumaquamarine","mediumblue","mediumorchid",\
"mediumpurple","mediumseagreen","mediumslateblue","mediumspringgreen","mediumturquoise",\
"mediumvioletred","midnightblue","mintcream","mistyrose","moccasin","navajowhite","navy",\
"oldlace","olive","olivedrab","orange","orangered","orchid","palegoldenrod","palegreen",\
"paleturquoise","palevioletred","papayawhip","peachpuff","peru","pink","plum","powderblue",\
"purple","red","rosybrown","royalblue","rebeccapurple","saddlebrown","salmon","sandybrown",\
"seagreen","seashell","sienna","silver","skyblue","slateblue","slategray","slategrey","snow",\
"springgreen","steelblue","tan","teal","thistle","tomato","turquoise","violet","wheat","white",\
"whitesmoke","yellow","yellowgreen"]
STANDARD_HISTNORMS=['None', 'percent', 'probability', 'density', 'probability density']
LINE_STYLES=["solid", "dot", "dash", "longdash", "dashdot","longdashdot"]
STANDARD_BARMODES=["stack", "group","overlay","relative"]
STANDARD_ORIENTATIONS=['vertical','horizontal']
STANDARD_ALIGNMENTS=["left","right","auto"]
STANDARD_VERTICAL_ALIGNMENTS=["top", "middle","bottom"]
STANDARD_FONTS=["Arial", "Balto", "Courier New", "Default", "Droid Sans", "Droid Serif", "Droid Sans Mono",\
"Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman"]
TICKS_DIRECTIONS=["inside","outside",'']
LEGEND_LOCATIONS=['best','upper right','upper left','lower left','lower right','right','center left','center right','lower center','upper center','center']
MODES=["expand",None]
STANDARD_HOVERINFO=["x", "y", "z", "text", "name","all","none","skip","x+y","x+text","x+name",\
"y+text","y+name","text+name","x+y+name","x+y+text","x+text+name","y+text+name"]
STANDARD_HISTFUNC=["count","sum","avg","min","max"]
STANDARD_CUMULATIVE_DIRECTIONS=["increasing","decreasing"]
STANDARD_ERRORBAR_TYPES=["percent","constant","sqrt"]
STANDARD_REFERENCES=["container","paper"]
STANDARD_TITLE_XANCHORS=["auto","left","center","right"]
STANDARD_TITLE_YANCHORS=["top","middle","bottom"]
STANDARD_LEGEND_XANCHORS=["auto","left","center","right"]
STANDARD_LEGEND_YANCHORS=["auto","top","middle","bottom"]
STANDARD_TRACEORDERS=["reversed", "grouped", "reversed+grouped", "normal"]
STANDARD_SIDES=["top","left","top left"]
STANDARD_SPIKEMODES=["toaxis", "across", "marker","toaxis+across","toaxis+marker","across+marker","toaxis+across+marker"]
STANDARD_CURVETYPES=["kde","normal"]
def figure_defaults():
""" Generates default figure arguments.
Returns:
dict: A dictionary of the style { "argument":"value"}
"""
# https://matplotlib.org/3.1.1/api/markers_api.html
# https://matplotlib.org/2.0.2/api/colors_api.html
# lists allways need to have thee default value after the list
# eg.:
# "title_size":standard_sizes,\
# "titles":"20"
# "fig_size_x"="6"
# "fig_size_y"="6"
plot_arguments={"fig_width":"600",\
"fig_height":"600",\
"title":'iHistogram',\
"title_fontsize":"20",\
"title_fontfamily":"Default",\
"title_fontcolor":"None",\
"titles":"20",\
"kde":".off",\
"curve_type":"kde",\
"curve_types":STANDARD_CURVETYPES,\
"kde_histnorm":"probability density",\
"kde_histnorms":["probability density","probability"],\
"show_hist":".off",\
"show_curve":".on",\
"show_rug":".off",\
"rug_text":"",\
"bin_size":"1",\
"opacity":0.8,\
"paper_bgcolor":"white",\
"plot_bgcolor":"white",\
"hoverinfos":STANDARD_HOVERINFO,\
"hover_alignments":STANDARD_ALIGNMENTS,\
"histfuncs":STANDARD_HISTFUNC,\
"references":STANDARD_REFERENCES,\
"xref":"container",\
"yref":"container",\
"x":"0.5",\
"y":"0.9",\
"title_xanchors":STANDARD_TITLE_XANCHORS,\
"title_yanchors":STANDARD_TITLE_YANCHORS,\
"title_xanchor":"auto",\
"title_yanchor":"auto",\
"show_legend":"on",\
"errorbar":".off",\
"errorbar_value":"10",\
"errorbar_type":"percent",\
"errorbar_types":STANDARD_ERRORBAR_TYPES,\
"errorbar_symmetric":".off",\
"errorbar_color":"darkgrey",\
"errorbar_width":"2",\
"errorbar_thickness":"2",\
"axis_line_width":1.0,\
"axis_line_color":"lightgrey",\
"ticks_line_width":1.0,\
"ticks_color":"lightgrey",\
"cols":[],\
"groups":[],\
"vals":[],\
"groups_settings":dict(),\
"log_scale":".off",\
"fonts":STANDARD_FONTS,\
"cumulative_directions":STANDARD_CUMULATIVE_DIRECTIONS,\
"colors":STANDARD_COLORS,\
"histnorms":STANDARD_HISTNORMS,\
"barmode":"overlay",\
"barmodes":STANDARD_BARMODES,\
"histtype_value":"bar",\
"linestyles":LINE_STYLES,\
"linestyle_value":"",\
"orientations":STANDARD_ORIENTATIONS, \
"fontsizes":STANDARD_SIZES,\
"xlabel_size":STANDARD_SIZES,\
"ylabel_size":STANDARD_SIZES,\
"xlabel":"",\
"ylabel":"",\
"label_fontfamily":"Default",\
"label_fontsize":"15",\
"label_fontcolor":"None",\
"xlabels":"14",\
"ylabels":"14",\
"left_axis":".on" ,\
"right_axis":".on",\
"upper_axis":".on",\
"lower_axis":".on",\
"tick_left_axis":".on" ,\
"tick_right_axis":".off",\
"tick_upper_axis":".off",\
"tick_lower_axis":".on",\
"ticks_direction":TICKS_DIRECTIONS,\
"ticks_direction_value":TICKS_DIRECTIONS[1],\
"ticks_length":"6.0",\
"xticks_fontsize":"14",\
"yticks_fontsize":"14",\
"xticks_rotation":"0",\
"yticks_rotation":"0",\
"x_lower_limit":"",\
"y_lower_limit":"",\
"x_upper_limit":"",\
"y_upper_limit":"",\
"maxxticks":"",\
"maxyticks":"",\
"spikes":["None","both","x","y"],\
"spikes_value":"None",\
"spikes_color":"None",\
"spikes_thickness":"3.0",\
"dashes":LINE_STYLES,\
"spikes_dash":"dash",\
"spikes_mode":"toaxis",\
"spikes_modes":STANDARD_SPIKEMODES,\
"grid":["None","both","x","y"],\
"grid_value":"None",\
"grid_width":"1",\
"grid_color":"lightgrey",\
"legend_title":"",\
"legend_bgcolor":"None",\
"legend_borderwidth":"0",\
"legend_bordercolor":"None",\
"legend_fontfamily":"Default",\
"legend_fontsize":"12",\
"legend_fontcolor":"None",\
"legend_title_fontfamily":"Default",\
"legend_title_fontsize":"12",\
"legend_title_fontcolor":"None",\
"legend_orientation":"vertical",\
"traceorders":STANDARD_TRACEORDERS,\
"legend_traceorder":"normal",\
"legend_tracegroupgap":"10",\
"legend_y":"1",\
"legend_x":"1.02",\
"legend_xanchor":"left",\
"legend_yanchor":"auto",\
"legend_xanchors":STANDARD_LEGEND_XANCHORS,\
"legend_yanchors":STANDARD_LEGEND_YANCHORS,\
"legend_valign":"middle",\
"valignments":STANDARD_VERTICAL_ALIGNMENTS,\
"sides":STANDARD_SIDES,\
"legend_side":"left",\
"download_format":["png","pdf","svg"],\
"downloadf":"pdf",\
"downloadn":"ihistogram",\
"session_downloadn":"MySession.ihistogram.plot",\
"inputsessionfile":"Select file..",\
"session_argumentsn":"MyArguments.ihistogram.plot",\
"inputargumentsfile":"Select file.."}
return plot_arguments | 42.108059 | 170 | 0.610587 |
import plotly.express as px
import plotly.graph_objects as go
import plotly.figure_factory as ff
from collections import OrderedDict
import numpy as np
import sys
def GET_COLOR(x):
    """Translate a UI colour spec into a plotly-friendly value.

    An ``rgb(r, g, b)`` string is parsed into a list of floats; any other
    value (e.g. a named CSS colour, or None) is returned as its string form.
    """
    text = str(x)
    if text[:3].lower() != "rgb":
        return text
    # Strip the "rgb(" prefix and trailing ")" and parse the channel values.
    inner = x.split("rgb(")[-1].split(")")[0]
    return [float(channel.strip(" ")) for channel in inner.split(",")]
def make_figure(df,pa):
    """Build a plotly histogram (or KDE distplot) figure from UI arguments.

    Args:
        df: pandas DataFrame with the data; only the columns listed in
            pa["vals"] are plotted.
        pa: dict of plot arguments as produced by figure_defaults(); values
            arrive as raw UI strings (checkbox flags like ".off"/"on",
            numbers as strings, "None"/"Default" placeholders).

    Returns:
        plotly.graph_objects.Figure
    """
    tmp=df.copy()
    tmp=tmp[pa["vals"]]
    fig = go.Figure( )
    # pab collects pa values converted from raw UI strings to Python types.
    pab={}
    # Checkbox-style flags: "off"/".off" -> False, anything else -> True.
    for arg in ["show_legend","upper_axis","lower_axis","left_axis","right_axis","errorbar",\
        "errorbar_symmetric","tick_left_axis","tick_lower_axis","tick_upper_axis","tick_right_axis",\
        "kde","show_hist","show_curve","show_rug"]:
        if pa[arg] in ["off",".off"]:
            pab[arg]=False
        else:
            pab[arg]=True
    # Float-valued settings; empty string / "None" / None become None.
    floats=["bin_size","errorbar_value","errorbar_thickness","errorbar_width","x","y","axis_line_width","ticks_line_width",\
        "ticks_length","x_lower_limit","x_upper_limit","y_lower_limit","y_upper_limit","spikes_thickness","xticks_rotation",\
        "yticks_rotation","xticks_fontsize","yticks_fontsize","grid_width","legend_borderwidth","legend_tracegroupgap","legend_x",\
        "legend_y","fig_width","fig_height"]
    for a in floats:
        if pa[a] == "" or pa[a]=="None" or pa[a]==None:
            pab[a]=None
        else:
            pab[a]=float(pa[a])
    # Integer-valued settings; same None handling as floats.
    integers=["label_fontsize","legend_fontsize","legend_title_fontsize","title_fontsize","maxxticks","maxyticks"]
    for a in integers:
        if pa[a] == "" or pa[a]=="None" or pa[a]==None:
            pab[a]=None
        else:
            pab[a]=int(pa[a])
    # String settings where "None"/"Default" mean "let plotly decide".
    possible_nones=["errorbar_color","title_fontcolor","axis_line_color","ticks_color","spikes_color","label_fontcolor",\
        "paper_bgcolor","plot_bgcolor","grid_color","legend_bgcolor","legend_bordercolor","legend_fontcolor","legend_title_fontcolor",\
        "title_fontfamily","label_fontfamily","legend_fontfamily","legend_title_fontfamily"]
    for p in possible_nones:
        if pa[p] == "None" or pa[p]=="Default" :
            pab[p]=None
        else:
            pab[p]=pa[p]
    # --- KDE (kernel density estimation) mode: a figure_factory distplot ---
    if pab["kde"]==True:
        colors=list()
        if pa["rug_text"]!="":
            rug_text=pa["rug_text"].split(",")
        else:
            rug_text=[]
        # One colour per plotted group: explicit RGB beats the named colour.
        for h in pa["groups_settings"].values():
            if h["color_rgb"] == "":
                if h["color_value"]=="None":
                    colors.append(None)
                else:
                    colors.append(h["color_value"])
            else:
                colors.append(GET_COLOR(h["color_rgb"]))
        hist_data=[]
        for col in tmp.columns:
            hist_data.append(tmp[col].dropna())
        # create_distplot needs at least one of hist/curve; force the curve on.
        if (not pab["show_hist"]) & (not pab["show_curve"]):
            pa["show_curve"]="on"
            pab["show_curve"]=True
        fig=ff.create_distplot(hist_data=hist_data, group_labels=pa["vals"],curve_type=pa["curve_type"],show_hist=pab["show_hist"],\
            show_curve=pab["show_curve"],show_rug=pab["show_rug"],bin_size=pab["bin_size"],rug_text=rug_text,colors=colors, histnorm=pa["kde_histnorm"])
    # --- Plain histogram mode: one go.Histogram trace per group ---
    else:
        for h in pa["groups_settings"].values():
            # h_ holds this group's settings converted from UI strings.
            h_=dict()
            integers=["hover_fontsize","bins_number"]
            for a in integers:
                if h[a] == "" or h[a]=="None" or h[a] == None:
                    h_[a]=None
                else:
                    h_[a]=int(h[a])
            possible_nones=["hover_bgcolor","hover_bordercolor","hover_fontfamily","hover_fontcolor"]
            for p in possible_nones:
                if h[p] == "None" or h[p]=="Default" :
                    h_[p]=None
                else:
                    h_[p]=h[p]
            floats=["opacity","linewidth"]
            for a in floats:
                if h[a] == "":
                    h_[a]=None
                else:
                    h_[a]=float(h[a])
            if h["label"]!="":
                name=h["label"]
            else:
                name=""
            if h["text"]!="":
                text=h["text"]
            else:
                text=""
            # Explicit RGB overrides the named fill colour.
            if h["color_rgb"] == "":
                if h["color_value"]=="None":
                    marker_color=None
                else:
                    marker_color = h["color_value"]
            else:
                marker_color = GET_COLOR( h["color_rgb"] )
            # Same precedence for the bar outline colour.
            if h["line_rgb"] == "":
                if h["line_color"]=="None":
                    line_color=None
                else:
                    line_color = h["line_color"]
            else:
                line_color = GET_COLOR( h["line_rgb"] )
            if h["histnorm"] == "None":
                histnorm = ""
            else:
                histnorm = h["histnorm"]
            if h["cumulative"]=="on":
                cumulative_enabled=True
            else:
                cumulative_enabled=False
            marker=dict(color=marker_color,line=dict(width=h_["linewidth"],color=line_color))
            cumulative=dict(enabled=cumulative_enabled,direction=h["cumulative_direction"])
            hoverlabel=dict(bgcolor=h_["hover_bgcolor"],bordercolor=h_["hover_bordercolor"],align=h["hover_align"],\
                font=dict(family=h_["hover_fontfamily"],size=h_["hover_fontsize"],color=h_["hover_fontcolor"]))
            # errorbar is only bound when error bars are enabled; it is only
            # read inside the matching pab["errorbar"]==True branches below.
            if pab["errorbar"]==True:
                errorbar=True
            if h["orientation_value"]=="vertical":
                if pab["errorbar"]==True:
                    error_y=dict(visible=errorbar,value=pab["errorbar_value"],type=pa["errorbar_type"],symmetric=pab["errorbar_symmetric"],color=pab["errorbar_color"],\
                        thickness=pab["errorbar_thickness"],width=pab["errorbar_width"])
                else:
                    error_y=dict(visible=False)
                fig.add_trace(go.Histogram(x=tmp[h["name"]].dropna(),text=text,hoverinfo=h["hoverinfo"],histfunc=h["histfunc"],cumulative=cumulative,\
                    opacity=h_["opacity"],nbinsx=h_["bins_number"],name=name,marker=marker,error_y=error_y,hoverlabel=hoverlabel,histnorm=histnorm))
            elif h["orientation_value"]=="horizontal":
                if pab["errorbar"]==True:
                    error_x=dict(visible=errorbar,value=pab["errorbar_value"],type=pa["errorbar_type"],symmetric=pab["errorbar_symmetric"],color=pab["errorbar_color"],\
                        thickness=pab["errorbar_thickness"],width=pab["errorbar_width"])
                else:
                    error_x=dict(visible=False)
                fig.add_trace(go.Histogram(y=tmp[h["name"]].dropna(),text=text,hoverinfo=h["hoverinfo"],histfunc=h["histfunc"],cumulative=cumulative,\
                    opacity=h_["opacity"],nbinsy=h_["bins_number"],name=name,marker=marker,error_x=error_x,hoverlabel=hoverlabel,histnorm=histnorm))
    # --- Layout: figure size and title ---
    fig.update_layout( width=pab["fig_width"], height=pab["fig_height"] )
    title=dict(text=pa["title"],font=dict(family=pab["title_fontfamily"],size=pab["title_fontsize"],color=pab["title_fontcolor"]),\
        xref=pa["xref"],yref=pa["yref"],x=pab["x"],y=pab["y"],xanchor=pa["title_xanchor"],yanchor=pa["title_yanchor"])
    fig.update_layout(title=title,barmode=pa["barmode"])
    # NOTE(review): pa["log_scale"] still holds the raw UI string (".off"/"on")
    # — it is not in the boolean-conversion list above — so pa["log_scale"]==True
    # is always False and these branches look unreachable; "orientation" is also
    # not a key produced by figure_defaults(). Confirm intended behaviour.
    if pa["log_scale"]==True and pa["orientation"]=="vertical":
        fig.update_yaxes(type="log")
    elif pa["log_scale"]==True and pa["orientation"]=="horizontal":
        fig.update_xaxes(type="log")
    # Axis lines: mirror= draws the opposite (upper/right) side.
    fig.update_xaxes(zeroline=False, showline=pab["lower_axis"], linewidth=pab["axis_line_width"], linecolor=pab["axis_line_color"], mirror=pab["upper_axis"])
    fig.update_yaxes(zeroline=False, showline=pab["left_axis"], linewidth=pab["axis_line_width"], linecolor=pab["axis_line_color"],mirror=pab["right_axis"])
    # Hide ticks entirely when no side has ticks enabled.
    if pab["tick_lower_axis"]==False and pab["tick_right_axis"]==False and pab["tick_left_axis"]==False and pab["tick_upper_axis"]==False:
        pa["ticks_direction_value"]=""
        ticks=""
    else:
        ticks=pa["ticks_direction_value"]
    fig.update_xaxes(ticks=ticks, tickwidth=pab["ticks_line_width"], tickcolor=pab["ticks_color"], ticklen=pab["ticks_length"])
    fig.update_yaxes(ticks=ticks, tickwidth=pab["ticks_line_width"], tickcolor=pab["ticks_color"], ticklen=pab["ticks_length"])
    # mirror="ticks" also mirrors tick marks onto the opposite side.
    if pab["tick_upper_axis"] :
        fig.update_xaxes(mirror="ticks")
    if pab["tick_right_axis"]:
        fig.update_yaxes(mirror="ticks")
    # Manual axis ranges apply only when both limits were supplied.
    if (pa["x_lower_limit"]!="") and (pa["x_upper_limit"]!="") :
        xmin=pab["x_lower_limit"]
        xmax=pab["x_upper_limit"]
        fig.update_xaxes(range=[xmin, xmax])
    if (pa["y_lower_limit"]!="") and (pa["y_upper_limit"]!="") :
        ymin=pab["y_lower_limit"]
        ymax=pab["y_upper_limit"]
        fig.update_yaxes(range=[ymin, ymax])
    if pa["maxxticks"]!="":
        fig.update_xaxes(nticks=pab["maxxticks"])
    if pa["maxyticks"]!="":
        fig.update_yaxes(nticks=pab["maxyticks"])
    # Hover spike lines on the selected axes.
    if pa["spikes_value"]=="both":
        fig.update_xaxes(showspikes=True,spikecolor=pab["spikes_color"],spikethickness=pab["spikes_thickness"],spikedash=pa["spikes_dash"],spikemode=pa["spikes_mode"])
        fig.update_yaxes(showspikes=True,spikecolor=pab["spikes_color"],spikethickness=pab["spikes_thickness"],spikedash=pa["spikes_dash"],spikemode=pa["spikes_mode"])
    elif pa["spikes_value"]=="x":
        fig.update_xaxes(showspikes=True,spikecolor=pab["spikes_color"],spikethickness=pab["spikes_thickness"],spikedash=pa["spikes_dash"],spikemode=pa["spikes_mode"])
    elif pa["spikes_value"]=="y":
        fig.update_yaxes(showspikes=True,spikecolor=pab["spikes_color"],spikethickness=pab["spikes_thickness"],spikedash=pa["spikes_dash"],spikemode=pa["spikes_mode"])
    elif pa["spikes_value"]=="None":
        fig.update_xaxes(showspikes=None)
        fig.update_yaxes(showspikes=None)
    # Axis titles, background colours, tick label angle/size.
    xaxis=dict(visible=True, title=dict(text=pa["xlabel"],font=dict(family=pab["label_fontfamily"],size=pab["label_fontsize"],color=pab["label_fontcolor"])))
    yaxis=dict(visible=True, title=dict(text=pa["ylabel"],font=dict(family=pab["label_fontfamily"],size=pab["label_fontsize"],color=pab["label_fontcolor"])))
    fig.update_layout(paper_bgcolor=pab["paper_bgcolor"],plot_bgcolor=pab["plot_bgcolor"],xaxis = xaxis,yaxis = yaxis)
    fig.update_xaxes(tickangle=pab["xticks_rotation"], tickfont=dict(size=pab["xticks_fontsize"]))
    fig.update_yaxes(tickangle=pab["yticks_rotation"], tickfont=dict(size=pab["yticks_fontsize"]))
    # Grid lines on the selected axes.
    if pa["grid_value"] == "None":
        fig.update_xaxes(showgrid=False)
        fig.update_yaxes(showgrid=False)
    elif pa["grid_value"]=="x":
        fig.update_yaxes(showgrid=True,gridcolor=pab["grid_color"],gridwidth=pab["grid_width"])
    elif pa["grid_value"]=="y":
        fig.update_xaxes(showgrid=True,gridcolor=pab["grid_color"],gridwidth=pab["grid_width"])
    elif pa["grid_value"]=="both":
        fig.update_xaxes(showgrid=True,gridcolor=pab["grid_color"],gridwidth=pab["grid_width"])
        fig.update_yaxes(showgrid=True,gridcolor=pab["grid_color"],gridwidth=pab["grid_width"])
    fig.update_layout(template='plotly_white')
    # Legend placement, fonts and title (or hide it entirely).
    if pab["show_legend"]==True:
        if pa["legend_orientation"]=="vertical":
            legend_orientation="v"
        elif pa["legend_orientation"]=="horizontal":
            legend_orientation="h"
        fig.update_layout(showlegend=True,legend=dict(x=pab["legend_x"],y=pab["legend_y"],bgcolor=pab["legend_bgcolor"],bordercolor=pab["legend_bordercolor"],\
            borderwidth=pab["legend_borderwidth"],valign=pa["legend_valign"],\
            font=dict(family=pab["legend_fontfamily"],size=pab["legend_fontsize"],color=pab["legend_fontcolor"]),orientation=legend_orientation,\
            traceorder=pa["legend_traceorder"],tracegroupgap=pab["legend_tracegroupgap"],\
            title=dict(text=pa["legend_title"],side=pa["legend_side"],font=dict(family=pab["legend_title_fontfamily"],size=pab["legend_title_fontsize"],\
                color=pab["legend_title_fontcolor"]))))
    else:
        fig.update_layout(showlegend=False)
    return fig
# --- Option tables consumed by make_figure() and figure_defaults() ----------
# Font/label sizes offered in the UI dropdowns ("1".."100").
STANDARD_SIZES=[str(i) for i in list(range(1,101))]
# CSS colour names accepted by plotly, plus "None" for "no colour".
STANDARD_COLORS=["None","aliceblue","antiquewhite","aqua","aquamarine","azure","beige",\
    "bisque","black","blanchedalmond","blue","blueviolet","brown","burlywood",\
    "cadetblue","chartreuse","chocolate","coral","cornflowerblue","cornsilk",\
    "crimson","cyan","darkblue","darkcyan","darkgoldenrod","darkgray","darkgrey",\
    "darkgreen","darkkhaki","darkmagenta","darkolivegreen","darkorange","darkorchid",\
    "darkred","darksalmon","darkseagreen","darkslateblue","darkslategray","darkslategrey",\
    "darkturquoise","darkviolet","deeppink","deepskyblue","dimgray","dimgrey","dodgerblue",\
    "firebrick","floralwhite","forestgreen","fuchsia","gainsboro","ghostwhite","gold",\
    "goldenrod","gray","grey","green","greenyellow","honeydew","hotpink","indianred","indigo",\
    "ivory","khaki","lavender","lavenderblush","lawngreen","lemonchiffon","lightblue","lightcoral",\
    "lightcyan","lightgoldenrodyellow","lightgray","lightgrey","lightgreen","lightpink","lightsalmon",\
    "lightseagreen","lightskyblue","lightslategray","lightslategrey","lightsteelblue","lightyellow",\
    "lime","limegreen","linen","magenta","maroon","mediumaquamarine","mediumblue","mediumorchid",\
    "mediumpurple","mediumseagreen","mediumslateblue","mediumspringgreen","mediumturquoise",\
    "mediumvioletred","midnightblue","mintcream","mistyrose","moccasin","navajowhite","navy",\
    "oldlace","olive","olivedrab","orange","orangered","orchid","palegoldenrod","palegreen",\
    "paleturquoise","palevioletred","papayawhip","peachpuff","peru","pink","plum","powderblue",\
    "purple","red","rosybrown","royalblue","rebeccapurple","saddlebrown","salmon","sandybrown",\
    "seagreen","seashell","sienna","silver","skyblue","slateblue","slategray","slategrey","snow",\
    "springgreen","steelblue","tan","teal","thistle","tomato","turquoise","violet","wheat","white",\
    "whitesmoke","yellow","yellowgreen"]
# Histogram normalisation modes ("None" -> plotly default, raw counts).
STANDARD_HISTNORMS=['None', 'percent', 'probability', 'density', 'probability density']
LINE_STYLES=["solid", "dot", "dash", "longdash", "dashdot","longdashdot"]
STANDARD_BARMODES=["stack", "group","overlay","relative"]
STANDARD_ORIENTATIONS=['vertical','horizontal']
STANDARD_ALIGNMENTS=["left","right","auto"]
STANDARD_VERTICAL_ALIGNMENTS=["top", "middle","bottom"]
STANDARD_FONTS=["Arial", "Balto", "Courier New", "Default", "Droid Sans", "Droid Serif", "Droid Sans Mono",\
    "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman"]
# '' (empty) means "no tick marks".
TICKS_DIRECTIONS=["inside","outside",'']
LEGEND_LOCATIONS=['best','upper right','upper left','lower left','lower right','right','center left','center right','lower center','upper center','center']
MODES=["expand",None]
# Valid values for plotly's trace hoverinfo flag (incl. '+'-joined combos).
STANDARD_HOVERINFO=["x", "y", "z", "text", "name","all","none","skip","x+y","x+text","x+name",\
    "y+text","y+name","text+name","x+y+name","x+y+text","x+text+name","y+text+name"]
STANDARD_HISTFUNC=["count","sum","avg","min","max"]
STANDARD_CUMULATIVE_DIRECTIONS=["increasing","decreasing"]
STANDARD_ERRORBAR_TYPES=["percent","constant","sqrt"]
STANDARD_REFERENCES=["container","paper"]
STANDARD_TITLE_XANCHORS=["auto","left","center","right"]
STANDARD_TITLE_YANCHORS=["top","middle","bottom"]
STANDARD_LEGEND_XANCHORS=["auto","left","center","right"]
STANDARD_LEGEND_YANCHORS=["auto","top","middle","bottom"]
STANDARD_TRACEORDERS=["reversed", "grouped", "reversed+grouped", "normal"]
STANDARD_SIDES=["top","left","top left"]
STANDARD_SPIKEMODES=["toaxis", "across", "marker","toaxis+across","toaxis+marker","across+marker","toaxis+across+marker"]
STANDARD_CURVETYPES=["kde","normal"]
def figure_defaults():
    """Generate the default figure arguments for an iHistogram plot.

    Returns:
        dict: { "argument": "value" } mapping consumed by make_figure().
        Scalar values are kept as strings (UI form values); keys ending in a
        plural (e.g. "barmodes", "fonts") hold the list of allowed choices,
        with the selected default in the matching singular key. Checkbox-style
        flags use ".on"/".off". make_figure() converts these to Python types.
    """
    # NOTE: comments cannot be placed inside this literal because every entry
    # line ends with an explicit backslash continuation.
    plot_arguments={"fig_width":"600",\
        "fig_height":"600",\
        "title":'iHistogram',\
        "title_fontsize":"20",\
        "title_fontfamily":"Default",\
        "title_fontcolor":"None",\
        "titles":"20",\
        "kde":".off",\
        "curve_type":"kde",\
        "curve_types":STANDARD_CURVETYPES,\
        "kde_histnorm":"probability density",\
        "kde_histnorms":["probability density","probability"],\
        "show_hist":".off",\
        "show_curve":".on",\
        "show_rug":".off",\
        "rug_text":"",\
        "bin_size":"1",\
        "opacity":0.8,\
        "paper_bgcolor":"white",\
        "plot_bgcolor":"white",\
        "hoverinfos":STANDARD_HOVERINFO,\
        "hover_alignments":STANDARD_ALIGNMENTS,\
        "histfuncs":STANDARD_HISTFUNC,\
        "references":STANDARD_REFERENCES,\
        "xref":"container",\
        "yref":"container",\
        "x":"0.5",\
        "y":"0.9",\
        "title_xanchors":STANDARD_TITLE_XANCHORS,\
        "title_yanchors":STANDARD_TITLE_YANCHORS,\
        "title_xanchor":"auto",\
        "title_yanchor":"auto",\
        "show_legend":"on",\
        "errorbar":".off",\
        "errorbar_value":"10",\
        "errorbar_type":"percent",\
        "errorbar_types":STANDARD_ERRORBAR_TYPES,\
        "errorbar_symmetric":".off",\
        "errorbar_color":"darkgrey",\
        "errorbar_width":"2",\
        "errorbar_thickness":"2",\
        "axis_line_width":1.0,\
        "axis_line_color":"lightgrey",\
        "ticks_line_width":1.0,\
        "ticks_color":"lightgrey",\
        "cols":[],\
        "groups":[],\
        "vals":[],\
        "groups_settings":dict(),\
        "log_scale":".off",\
        "fonts":STANDARD_FONTS,\
        "cumulative_directions":STANDARD_CUMULATIVE_DIRECTIONS,\
        "colors":STANDARD_COLORS,\
        "histnorms":STANDARD_HISTNORMS,\
        "barmode":"overlay",\
        "barmodes":STANDARD_BARMODES,\
        "histtype_value":"bar",\
        "linestyles":LINE_STYLES,\
        "linestyle_value":"",\
        "orientations":STANDARD_ORIENTATIONS, \
        "fontsizes":STANDARD_SIZES,\
        "xlabel_size":STANDARD_SIZES,\
        "ylabel_size":STANDARD_SIZES,\
        "xlabel":"",\
        "ylabel":"",\
        "label_fontfamily":"Default",\
        "label_fontsize":"15",\
        "label_fontcolor":"None",\
        "xlabels":"14",\
        "ylabels":"14",\
        "left_axis":".on" ,\
        "right_axis":".on",\
        "upper_axis":".on",\
        "lower_axis":".on",\
        "tick_left_axis":".on" ,\
        "tick_right_axis":".off",\
        "tick_upper_axis":".off",\
        "tick_lower_axis":".on",\
        "ticks_direction":TICKS_DIRECTIONS,\
        "ticks_direction_value":TICKS_DIRECTIONS[1],\
        "ticks_length":"6.0",\
        "xticks_fontsize":"14",\
        "yticks_fontsize":"14",\
        "xticks_rotation":"0",\
        "yticks_rotation":"0",\
        "x_lower_limit":"",\
        "y_lower_limit":"",\
        "x_upper_limit":"",\
        "y_upper_limit":"",\
        "maxxticks":"",\
        "maxyticks":"",\
        "spikes":["None","both","x","y"],\
        "spikes_value":"None",\
        "spikes_color":"None",\
        "spikes_thickness":"3.0",\
        "dashes":LINE_STYLES,\
        "spikes_dash":"dash",\
        "spikes_mode":"toaxis",\
        "spikes_modes":STANDARD_SPIKEMODES,\
        "grid":["None","both","x","y"],\
        "grid_value":"None",\
        "grid_width":"1",\
        "grid_color":"lightgrey",\
        "legend_title":"",\
        "legend_bgcolor":"None",\
        "legend_borderwidth":"0",\
        "legend_bordercolor":"None",\
        "legend_fontfamily":"Default",\
        "legend_fontsize":"12",\
        "legend_fontcolor":"None",\
        "legend_title_fontfamily":"Default",\
        "legend_title_fontsize":"12",\
        "legend_title_fontcolor":"None",\
        "legend_orientation":"vertical",\
        "traceorders":STANDARD_TRACEORDERS,\
        "legend_traceorder":"normal",\
        "legend_tracegroupgap":"10",\
        "legend_y":"1",\
        "legend_x":"1.02",\
        "legend_xanchor":"left",\
        "legend_yanchor":"auto",\
        "legend_xanchors":STANDARD_LEGEND_XANCHORS,\
        "legend_yanchors":STANDARD_LEGEND_YANCHORS,\
        "legend_valign":"middle",\
        "valignments":STANDARD_VERTICAL_ALIGNMENTS,\
        "sides":STANDARD_SIDES,\
        "legend_side":"left",\
        "download_format":["png","pdf","svg"],\
        "downloadf":"pdf",\
        "downloadn":"ihistogram",\
        "session_downloadn":"MySession.ihistogram.plot",\
        "inputsessionfile":"Select file..",\
        "session_argumentsn":"MyArguments.ihistogram.plot",\
        "inputargumentsfile":"Select file.."}
    return plot_arguments
f72502fa59d5dbbaf1359af738eaf27afc125199 | 2,374 | py | Python | .history/spider/pokemon_spider_20201213130808.py | KustomApe/ksie | d6f97d0298d04d06788563546c66ff50c6bb2d31 | [
"MIT"
] | 1 | 2021-12-11T04:50:25.000Z | 2021-12-11T04:50:25.000Z | .history/spider/pokemon_spider_20201213130808.py | KustomApe/ksie | d6f97d0298d04d06788563546c66ff50c6bb2d31 | [
"MIT"
] | null | null | null | .history/spider/pokemon_spider_20201213130808.py | KustomApe/ksie | d6f97d0298d04d06788563546c66ff50c6bb2d31 | [
"MIT"
] | null | null | null | from selenium import webdriver
import pandas as pd
import time

"""[Caution]
Always read robots.txt first and make sure your crawling follows the
site's rules.  Use this script at your own risk.
"""

# --- Initial setup -----------------------------------------------------------
options = webdriver.ChromeOptions()
# BUG FIX: the flag was misspelled '--headeless', so Chrome silently ignored it
# and never actually ran headless.  The correct flag is '--headless'.
options.add_argument('--headless')
options.add_argument('--disable-gpu')
options.add_argument('--lang-ja')
browser = webdriver.Chrome(chrome_options=options, executable_path='./chromedriver')
# NOTE(review): these columns do not match the keys appended below
# ('name', 'image', 'price', 'category', 'car') — pandas will add the extra
# columns on append; confirm the intended schema.
df = pd.DataFrame(columns=['ranking', 'name', 'image'])
url = 'https://swsh.pokedb.tokyo/pokemon/list/'

# --- CSS selector settings ---------------------------------------------------
PAGER_NEXT = "li.select-page.arrow a[rel='next']"
POSTS = ".product-item-list__item"
RANKING = ".pokemon-ranking-rank"
NAME = ".product-item-list__item-name"
IMAGE = ".product-item-list__item-image img"
PRICE = ".product-item-list__item-price"
CATEGORY = ".product-item-list__item-category"
CAR = ".product-item-list__item-car-name"

# --- Crawl loop --------------------------------------------------------------
browser.get(url)
while True:  # Continue until there is no "next" pager link.
    if len(browser.find_elements_by_css_selector(PAGER_NEXT)) > 0:
        print('Starting to get posts...')
        posts = browser.find_elements_by_css_selector(POSTS)
        print(len(posts))
        for post in posts:
            try:
                # BUG FIX: this referenced the undefined name PRODUCT_NAME,
                # which raised NameError for every post; the selector defined
                # above is NAME.
                name = post.find_element_by_css_selector(NAME).text
                print(name)
                thumbnailURL = post.find_element_by_css_selector(IMAGE).get_attribute('src')
                print(thumbnailURL)
                price = post.find_element_by_css_selector(PRICE).text
                print(price)
                category = post.find_element_by_css_selector(CATEGORY).text
                print(category)
                car = post.find_element_by_css_selector(CAR).text
                print(car)
                se = pd.Series([name, thumbnailURL, price, category, car],
                               ['name', 'image', 'price', 'category', 'car'])
                df = df.append(se, ignore_index=True)
            except Exception as e:
                # Best-effort scraping: log the failing post and continue.
                print(e)
        btn = browser.find_element_by_css_selector(PAGER_NEXT).get_attribute('href')
        print('next url:{}'.format(btn))
        time.sleep(3)  # Be polite: pause between page loads.
        browser.get(btn)
        print('Moving to next page.')
    else:
        print('No pager exist anymore...')
        break
print('Finished Crawling. Writing out to CSV file...')
df.to_csv('car_parts.csv')
print('Done')
| 33.43662 | 121 | 0.655013 | from selenium import webdriver
import pandas as pd
import time
options = webdriver.ChromeOptions()
options.add_argument('--headeless')
options.add_argument('--disable-gpu')
options.add_argument('--lang-ja')
browser = webdriver.Chrome(chrome_options=options, executable_path='./chromedriver')
df = pd.DataFrame(columns=['ranking', 'name', 'image'])
url = 'https://swsh.pokedb.tokyo/pokemon/list/'
PAGER_NEXT = "li.select-page.arrow a[rel='next']"
POSTS = ".product-item-list__item"
RANKING = ".pokemon-ranking-rank"
NAME = ".product-item-list__item-name"
IMAGE = ".product-item-list__item-image img"
PRICE = ".product-item-list__item-price"
CATEGORY = ".product-item-list__item-category"
CAR = ".product-item-list__item-car-name"
browser.get(url)
while True:
if len(browser.find_elements_by_css_selector(PAGER_NEXT)) > 0:
print('Starting to get posts...')
posts = browser.find_elements_by_css_selector(POSTS)
print(len(posts))
for post in posts:
try:
name = post.find_element_by_css_selector(PRODUCT_NAME).text
print(name)
thumbnailURL = post.find_element_by_css_selector(IMAGE).get_attribute('src')
print(thumbnailURL)
price = post.find_element_by_css_selector(PRICE).text
print(price)
category = post.find_element_by_css_selector(CATEGORY).text
print(category)
car = post.find_element_by_css_selector(CAR).text
print(car)
se = pd.Series([name, thumbnailURL, price, category, car], ['name', 'image', 'price', 'category', 'car'])
df = df.append(se, ignore_index=True)
except Exception as e:
print(e)
btn = browser.find_element_by_css_selector(PAGER_NEXT).get_attribute('href')
print('next url:{}'.format(btn))
time.sleep(3)
browser.get(btn)
print('Moving to next page.')
else:
print('No pager exist anymore...')
break
print('Finished Crawling. Writing out to CSV file...')
df.to_csv('car_parts.csv')
print('Done')
| true | true |
f72503d39b41bc560c31dfc0d1965fa96e277d2c | 2,467 | py | Python | data/image_folder.py | OnizukaLab/pytorch-CycleGAN-and-pix2pix | 95b8dfe8bba43ae5ec9d7b299107fc155e7939c0 | [
"BSD-3-Clause"
] | null | null | null | data/image_folder.py | OnizukaLab/pytorch-CycleGAN-and-pix2pix | 95b8dfe8bba43ae5ec9d7b299107fc155e7939c0 | [
"BSD-3-Clause"
] | 2 | 2019-07-30T09:02:40.000Z | 2019-08-01T11:36:44.000Z | data/image_folder.py | OnizukaLab/pytorch-CycleGAN-and-pix2pix | 95b8dfe8bba43ae5ec9d7b299107fc155e7939c0 | [
"BSD-3-Clause"
] | null | null | null | """A modified image folder class
We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py)
so that this class can load images from both current directory and its subdirectories.
"""
import torch.utils.data as data
from PIL import Image
import os
import os.path
import re
# File extensions accepted as images (lower- and upper-case variants).
IMG_EXTENSIONS = [
    '.jpg', '.JPG', '.jpeg', '.JPEG',
    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
# Matches file names ending in a number followed by an image extension,
# capturing the number as group 1, e.g. "frame_0123.png" -> "0123".
PATTERN = re.compile(r".*?([0-9]+)\.(jpg|JPG|jpeg|JPEG|png|PNG|ppm|PPM|bmp|BMP)$")
def is_image_file(filename):
    """Return ``True`` if ``filename`` ends with a recognized image extension."""
    return filename.endswith(tuple(IMG_EXTENSIONS))
def make_dataset(dir, max_dataset_size=float("inf")):
    """Recursively collect the paths of all image files under ``dir``.

    At most ``max_dataset_size`` paths are returned, in sorted-walk order.
    """
    assert os.path.isdir(dir), '%s is not a valid directory' % dir
    images = [
        os.path.join(root, fname)
        for root, _, fnames in sorted(os.walk(dir))
        for fname in fnames
        if is_image_file(fname)
    ]
    return images[:min(max_dataset_size, len(images))]
def make_numbering_dataset(dir, max_dataset_size=float("inf")):
    """Recursively collect ``(number, path)`` pairs for numbered image files.

    Only files matching ``PATTERN`` (a trailing integer before the image
    extension) are included; the captured integer accompanies each path.
    At most ``max_dataset_size`` pairs are returned.
    """
    assert os.path.isdir(dir), '%s is not a valid directory' % dir
    images = []
    for root, _, fnames in sorted(os.walk(dir)):
        for fname in fnames:
            match = PATTERN.match(fname)
            if match is None:
                continue  # not a numbered image file
            images.append((int(match.group(1)), os.path.join(root, fname)))
    return images[:min(max_dataset_size, len(images))]
def default_loader(path):
    """Open the image at ``path`` with PIL and force a 3-channel RGB image."""
    return Image.open(path).convert('RGB')
class ImageFolder(data.Dataset):
    """A torch ``Dataset`` over every image found under a root directory.

    Unlike torchvision's ``ImageFolder``, images are collected from the root
    directory itself as well as all of its subdirectories (via
    :func:`make_dataset`), with no class-label structure.
    """

    def __init__(self, root, transform=None, return_paths=False,
                 loader=default_loader):
        # Eagerly index all image paths; fail fast if none are found.
        imgs = make_dataset(root)
        if len(imgs) == 0:
            raise(RuntimeError("Found 0 images in: " + root + "\n"
                               "Supported image extensions are: " +
                               ",".join(IMG_EXTENSIONS)))
        self.root = root                  # dataset root directory
        self.imgs = imgs                  # list of image file paths
        self.transform = transform        # optional callable applied to each image
        self.return_paths = return_paths  # also return the file path per item
        self.loader = loader              # callable: path -> image object

    def __getitem__(self, index):
        """Load (and optionally transform) the image at ``index``.

        Returns ``(img, path)`` when ``return_paths`` is set, else ``img``.
        """
        path = self.imgs[index]
        img = self.loader(path)
        if self.transform is not None:
            img = self.transform(img)
        if self.return_paths:
            return img, path
        else:
            return img

    def __len__(self):
        """Number of indexed images."""
        return len(self.imgs)
| 29.369048 | 122 | 0.602756 |
import torch.utils.data as data
from PIL import Image
import os
import os.path
import re
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
PATTERN = re.compile(r".*?([0-9]+)\.(jpg|JPG|jpeg|JPEG|png|PNG|ppm|PPM|bmp|BMP)$")
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def make_dataset(dir, max_dataset_size=float("inf")):
images = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if is_image_file(fname):
path = os.path.join(root, fname)
images.append(path)
return images[:min(max_dataset_size, len(images))]
def make_numbering_dataset(dir, max_dataset_size=float("inf")):
images = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
m = PATTERN.match(fname)
if m is not None:
idx = int(m.group(1))
path = os.path.join(root, fname)
images.append((idx, path))
return images[:min(max_dataset_size, len(images))]
def default_loader(path):
return Image.open(path).convert('RGB')
class ImageFolder(data.Dataset):
def __init__(self, root, transform=None, return_paths=False,
loader=default_loader):
imgs = make_dataset(root)
if len(imgs) == 0:
raise(RuntimeError("Found 0 images in: " + root + "\n"
"Supported image extensions are: " +
",".join(IMG_EXTENSIONS)))
self.root = root
self.imgs = imgs
self.transform = transform
self.return_paths = return_paths
self.loader = loader
def __getitem__(self, index):
path = self.imgs[index]
img = self.loader(path)
if self.transform is not None:
img = self.transform(img)
if self.return_paths:
return img, path
else:
return img
def __len__(self):
return len(self.imgs)
| true | true |
f725054c84988206eb2120605f89dfc44d68a15d | 8,431 | py | Python | recipes/libxslt/all/conanfile.py | aapng/conan-center-index | d68a8fbb938402a5a53fa6b0214c49ccf878f8a9 | [
"MIT"
] | null | null | null | recipes/libxslt/all/conanfile.py | aapng/conan-center-index | d68a8fbb938402a5a53fa6b0214c49ccf878f8a9 | [
"MIT"
] | 1 | 2021-05-12T10:46:25.000Z | 2021-05-13T06:12:41.000Z | recipes/libxslt/all/conanfile.py | aapng/conan-center-index | d68a8fbb938402a5a53fa6b0214c49ccf878f8a9 | [
"MIT"
] | 2 | 2020-10-24T00:42:55.000Z | 2021-01-26T09:01:14.000Z | import glob
import os
from conans import ConanFile, tools, AutoToolsBuildEnvironment, VisualStudioBuildEnvironment
class LibxsltConan(ConanFile):
    """Conan recipe for libxslt (XSLT processor built on libxml2).

    Builds with the bundled ``win32/configure.js`` + nmake on MSVC and with
    the autotools ``configure`` script everywhere else.
    """
    name = "libxslt"
    url = "https://github.com/conan-io/conan-center-index"
    description = "libxslt is a software library implementing XSLT processor, based on libxml2"
    topics = ("XSLT", "processor")
    homepage = "https://xmlsoft.org"
    license = "MIT"
    settings = "os", "arch", "compiler", "build_type"
    default_options = {'shared': False,
                       'fPIC': True,
                       "debugger": False,
                       "crypto": False,
                       "profiler": False,
                       "plugins": False}
    # Derive the option set from the defaults above; the feature toggles
    # (everything but shared/fPIC) are mapped to configure flags later.
    options = {name: [True, False] for name in default_options.keys()}
    _option_names = [name for name in default_options.keys() if name not in ["shared", "fPIC"]]
    _source_subfolder = "source_subfolder"
    exports_sources = "patches/**"

    def requirements(self):
        """Declare the libxml2 dependency."""
        self.requires("libxml2/2.9.10")

    @property
    def _is_msvc(self):
        # True when compiling with Visual Studio (nmake build path).
        return self.settings.compiler == 'Visual Studio'

    @property
    def _full_source_subfolder(self):
        # Absolute path to the unpacked sources.
        return os.path.join(self.source_folder, self._source_subfolder)

    def source(self):
        """Download and rename the upstream tarball into the source subfolder."""
        tools.get(**self.conan_data["sources"][self.version])
        os.rename("libxslt-{0}".format(self.version), self._source_subfolder)

    def config_options(self):
        # fPIC has no meaning on Windows.
        if self.settings.os == "Windows":
            del self.options.fPIC

    def configure(self):
        # Pure C library: the C++ standard-library settings are irrelevant.
        del self.settings.compiler.libcxx
        del self.settings.compiler.cppstd

    def _patch_sources(self):
        """Apply all patches registered in conandata.yml for this version."""
        for patch in self.conan_data["patches"][self.version]:
            tools.patch(**patch)

    def build(self):
        """Patch, then dispatch to the MSVC or autotools build."""
        self._patch_sources()
        if self._is_msvc:
            self._build_windows()
        else:
            self._build_with_configure()

    def _build_windows(self):
        """Configure and build with cscript configure.js + nmake (MSVC)."""
        with tools.chdir(os.path.join(self._full_source_subfolder, 'win32')):
            debug = "yes" if self.settings.build_type == "Debug" else "no"
            static = "no" if self.options.shared else "yes"
            with tools.vcvars(self.settings):
                args = ["cscript",
                        "configure.js",
                        "compiler=msvc",
                        "prefix=%s" % self.package_folder,
                        "cruntime=/%s" % self.settings.compiler.runtime,
                        "debug=%s" % debug,
                        "static=%s" % static,
                        'include="%s"' % ";".join(self.deps_cpp_info.include_paths),
                        'lib="%s"' % ";".join(self.deps_cpp_info.lib_paths),
                        'iconv=no',
                        'xslt_debug=no']
                # Translate the feature toggles into configure.js options
                # ("plugins" is called "modules" by configure.js).
                for name in self._option_names:
                    cname = {"plugins": "modules"}.get(name, name)
                    value = getattr(self.options, name)
                    value = "yes" if value else "no"
                    args.append("%s=%s" % (cname, value))
                configure_command = ' '.join(args)
                self.output.info(configure_command)
                self.run(configure_command)

                # Fix library names because they can be not just zlib.lib
                def format_libs(package):
                    # Collect the dependency's libs (and system libs) with a
                    # guaranteed ".lib" suffix, space-separated for nmake.
                    libs = []
                    for lib in self.deps_cpp_info[package].libs:
                        libname = lib
                        if not libname.endswith('.lib'):
                            libname += '.lib'
                        libs.append(libname)
                    for lib in self.deps_cpp_info[package].system_libs:
                        libname = lib
                        if not libname.endswith('.lib'):
                            libname += '.lib'
                        libs.append(libname)
                    return ' '.join(libs)

                def fix_library(option, package, old_libname):
                    # Replace a hard-coded LIBS entry in the generated makefile
                    # with the real library names of ``package``.
                    if option:
                        tools.replace_in_file("Makefile.msvc",
                                              "LIBS = %s" % old_libname,
                                              "LIBS = %s" % format_libs(package))

                if "icu" in self.deps_cpp_info.deps:
                    fix_library(True, 'icu', 'wsock32.lib')
                tools.replace_in_file("Makefile.msvc", "libxml2.lib", format_libs("libxml2"))
                tools.replace_in_file("Makefile.msvc", "libxml2_a.lib", format_libs("libxml2"))
                with tools.environment_append(VisualStudioBuildEnvironment(self).vars):
                    self.run("nmake /f Makefile.msvc install")

    def _build_with_configure(self):
        """Configure and build with autotools (non-MSVC platforms)."""
        env_build = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
        full_install_subfolder = tools.unix_path(self.package_folder)
        # fix rpath
        if self.settings.os == "Macos":
            tools.replace_in_file(os.path.join(self._full_source_subfolder, "configure"), r"-install_name \$rpath/", "-install_name ")
        configure_args = ['--with-python=no', '--prefix=%s' % full_install_subfolder]
        if self.options.shared:
            configure_args.extend(['--enable-shared', '--disable-static'])
        else:
            configure_args.extend(['--enable-static', '--disable-shared'])
        # Point configure at the xml2-config of our libxml2 dependency.
        xml_config = tools.unix_path(self.deps_cpp_info["libxml2"].rootpath) + "/bin/xml2-config"
        configure_args.append('XML_CONFIG=%s' % xml_config)
        # Feature toggles map directly to --with-X / --without-X.
        for name in self._option_names:
            value = getattr(self.options, name)
            value = ("--with-%s" % name) if value else ("--without-%s" % name)
            configure_args.append(value)
        # Disable --build when building for iPhoneSimulator. The configure script halts on
        # not knowing if it should cross-compile.
        build = None
        if self.settings.os == "iOS" and self.settings.arch == "x86_64":
            build = False
        env_build.configure(args=configure_args, build=build, configure_dir=self._full_source_subfolder)
        env_build.make(args=["install", "V=1"])

    def package(self):
        """Copy the license and prune unwanted files from the install tree."""
        self.copy("COPYING", src=self._full_source_subfolder, dst="licenses", ignore_case=True, keep_path=False)
        tools.rmdir(os.path.join(self.package_folder, "share"))
        tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
        if self.settings.os == "Windows":
            # There is no way to avoid building the tests, but at least we don't want them in the package
            for prefix in ["run", "test"]:
                for test in glob.glob("%s/bin/%s*" % (self.package_folder, prefix)):
                    os.remove(test)
        if self.settings.compiler == "Visual Studio":
            # Keep only the artifacts matching the shared/static choice.
            if self.settings.build_type == "Debug":
                os.unlink(os.path.join(self.package_folder, "bin", "libexslt.pdb"))
                os.unlink(os.path.join(self.package_folder, "bin", "libxslt.pdb"))
                os.unlink(os.path.join(self.package_folder, "bin", "xsltproc.pdb"))
            if self.options.shared:
                os.unlink(os.path.join(self.package_folder, "lib", "libxslt_a.lib"))
                os.unlink(os.path.join(self.package_folder, "lib", "libexslt_a.lib"))
            else:
                os.unlink(os.path.join(self.package_folder, "lib", "libxslt.lib"))
                os.unlink(os.path.join(self.package_folder, "lib", "libexslt.lib"))
                os.unlink(os.path.join(self.package_folder, "bin", "libxslt.dll"))
                os.unlink(os.path.join(self.package_folder, "bin", "libexslt.dll"))
        # Drop libtool archives regardless of platform.
        for f in "libxslt.la", "libexslt.la":
            la = os.path.join(self.package_folder, 'lib', f)
            if os.path.isfile(la):
                os.unlink(la)

    def package_info(self):
        """Expose link libraries, include dirs and system libs to consumers."""
        self.cpp_info.libs = ['exslt', 'xslt']
        if self._is_msvc:
            # MSVC artifacts carry a "lib" prefix, and "_a" when static.
            if self.options.shared:
                self.cpp_info.libs = ['lib%s' % l for l in self.cpp_info.libs]
            else:
                self.cpp_info.libs = ['lib%s_a' % l for l in self.cpp_info.libs]
        self.cpp_info.includedirs.append(os.path.join("include", "libxslt"))
        if self.settings.os == "Linux" or self.settings.os == "Macos":
            self.cpp_info.system_libs.append('m')
        if self.settings.os == "Windows":
            self.cpp_info.system_libs.append('ws2_32')
| 45.572973 | 134 | 0.562448 | import glob
import os
from conans import ConanFile, tools, AutoToolsBuildEnvironment, VisualStudioBuildEnvironment
class LibxsltConan(ConanFile):
name = "libxslt"
url = "https://github.com/conan-io/conan-center-index"
description = "libxslt is a software library implementing XSLT processor, based on libxml2"
topics = ("XSLT", "processor")
homepage = "https://xmlsoft.org"
license = "MIT"
settings = "os", "arch", "compiler", "build_type"
default_options = {'shared': False,
'fPIC': True,
"debugger": False,
"crypto": False,
"profiler": False,
"plugins": False}
options = {name: [True, False] for name in default_options.keys()}
_option_names = [name for name in default_options.keys() if name not in ["shared", "fPIC"]]
_source_subfolder = "source_subfolder"
exports_sources = "patches/**"
def requirements(self):
self.requires("libxml2/2.9.10")
@property
def _is_msvc(self):
return self.settings.compiler == 'Visual Studio'
@property
def _full_source_subfolder(self):
return os.path.join(self.source_folder, self._source_subfolder)
def source(self):
tools.get(**self.conan_data["sources"][self.version])
os.rename("libxslt-{0}".format(self.version), self._source_subfolder)
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
def _patch_sources(self):
for patch in self.conan_data["patches"][self.version]:
tools.patch(**patch)
def build(self):
self._patch_sources()
if self._is_msvc:
self._build_windows()
else:
self._build_with_configure()
def _build_windows(self):
with tools.chdir(os.path.join(self._full_source_subfolder, 'win32')):
debug = "yes" if self.settings.build_type == "Debug" else "no"
static = "no" if self.options.shared else "yes"
with tools.vcvars(self.settings):
args = ["cscript",
"configure.js",
"compiler=msvc",
"prefix=%s" % self.package_folder,
"cruntime=/%s" % self.settings.compiler.runtime,
"debug=%s" % debug,
"static=%s" % static,
'include="%s"' % ";".join(self.deps_cpp_info.include_paths),
'lib="%s"' % ";".join(self.deps_cpp_info.lib_paths),
'iconv=no',
'xslt_debug=no']
for name in self._option_names:
cname = {"plugins": "modules"}.get(name, name)
value = getattr(self.options, name)
value = "yes" if value else "no"
args.append("%s=%s" % (cname, value))
configure_command = ' '.join(args)
self.output.info(configure_command)
self.run(configure_command)
def format_libs(package):
libs = []
for lib in self.deps_cpp_info[package].libs:
libname = lib
if not libname.endswith('.lib'):
libname += '.lib'
libs.append(libname)
for lib in self.deps_cpp_info[package].system_libs:
libname = lib
if not libname.endswith('.lib'):
libname += '.lib'
libs.append(libname)
return ' '.join(libs)
def fix_library(option, package, old_libname):
if option:
tools.replace_in_file("Makefile.msvc",
"LIBS = %s" % old_libname,
"LIBS = %s" % format_libs(package))
if "icu" in self.deps_cpp_info.deps:
fix_library(True, 'icu', 'wsock32.lib')
tools.replace_in_file("Makefile.msvc", "libxml2.lib", format_libs("libxml2"))
tools.replace_in_file("Makefile.msvc", "libxml2_a.lib", format_libs("libxml2"))
with tools.environment_append(VisualStudioBuildEnvironment(self).vars):
self.run("nmake /f Makefile.msvc install")
def _build_with_configure(self):
env_build = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
full_install_subfolder = tools.unix_path(self.package_folder)
if self.settings.os == "Macos":
tools.replace_in_file(os.path.join(self._full_source_subfolder, "configure"), r"-install_name \$rpath/", "-install_name ")
configure_args = ['--with-python=no', '--prefix=%s' % full_install_subfolder]
if self.options.shared:
configure_args.extend(['--enable-shared', '--disable-static'])
else:
configure_args.extend(['--enable-static', '--disable-shared'])
xml_config = tools.unix_path(self.deps_cpp_info["libxml2"].rootpath) + "/bin/xml2-config"
configure_args.append('XML_CONFIG=%s' % xml_config)
for name in self._option_names:
value = getattr(self.options, name)
value = ("--with-%s" % name) if value else ("--without-%s" % name)
configure_args.append(value)
build = None
if self.settings.os == "iOS" and self.settings.arch == "x86_64":
build = False
env_build.configure(args=configure_args, build=build, configure_dir=self._full_source_subfolder)
env_build.make(args=["install", "V=1"])
def package(self):
self.copy("COPYING", src=self._full_source_subfolder, dst="licenses", ignore_case=True, keep_path=False)
tools.rmdir(os.path.join(self.package_folder, "share"))
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
if self.settings.os == "Windows":
for prefix in ["run", "test"]:
for test in glob.glob("%s/bin/%s*" % (self.package_folder, prefix)):
os.remove(test)
if self.settings.compiler == "Visual Studio":
if self.settings.build_type == "Debug":
os.unlink(os.path.join(self.package_folder, "bin", "libexslt.pdb"))
os.unlink(os.path.join(self.package_folder, "bin", "libxslt.pdb"))
os.unlink(os.path.join(self.package_folder, "bin", "xsltproc.pdb"))
if self.options.shared:
os.unlink(os.path.join(self.package_folder, "lib", "libxslt_a.lib"))
os.unlink(os.path.join(self.package_folder, "lib", "libexslt_a.lib"))
else:
os.unlink(os.path.join(self.package_folder, "lib", "libxslt.lib"))
os.unlink(os.path.join(self.package_folder, "lib", "libexslt.lib"))
os.unlink(os.path.join(self.package_folder, "bin", "libxslt.dll"))
os.unlink(os.path.join(self.package_folder, "bin", "libexslt.dll"))
for f in "libxslt.la", "libexslt.la":
la = os.path.join(self.package_folder, 'lib', f)
if os.path.isfile(la):
os.unlink(la)
def package_info(self):
self.cpp_info.libs = ['exslt', 'xslt']
if self._is_msvc:
if self.options.shared:
self.cpp_info.libs = ['lib%s' % l for l in self.cpp_info.libs]
else:
self.cpp_info.libs = ['lib%s_a' % l for l in self.cpp_info.libs]
self.cpp_info.includedirs.append(os.path.join("include", "libxslt"))
if self.settings.os == "Linux" or self.settings.os == "Macos":
self.cpp_info.system_libs.append('m')
if self.settings.os == "Windows":
self.cpp_info.system_libs.append('ws2_32')
| true | true |
f72505f9706d238ac6c8305129e9adec3227f5ac | 2,455 | py | Python | Software for Proof of Work/SourceCode.py | fkerem/Cryptocurrency-Blockchain | 965268a09a6f8b3e700e8bbc741e49a4d54805c6 | [
"MIT"
] | null | null | null | Software for Proof of Work/SourceCode.py | fkerem/Cryptocurrency-Blockchain | 965268a09a6f8b3e700e8bbc741e49a4d54805c6 | [
"MIT"
] | null | null | null | Software for Proof of Work/SourceCode.py | fkerem/Cryptocurrency-Blockchain | 965268a09a6f8b3e700e8bbc741e49a4d54805c6 | [
"MIT"
] | null | null | null | """
Readme.txt:
Required modules: random, sys, hashlib, sha3
Please read the comments below for further explanation.
"""
from random import *
import sys
import hashlib
if sys.version_info < (3, 6):
import sha3
def serialnoncegenerator():
    """Return a uniformly random 128-bit integer, encoded as a decimal string."""
    return str(randint(0, (1 << 128) - 1))
def payee():
    """Generate an arbitrary 10-character payee name.

    Characters are drawn uniformly from the digits '0'-'9' and the uppercase
    letters 'A'-'Z' (ASCII codes 48-57 and 65-90).
    """
    chars = []
    while len(chars) < 10:
        code = randint(48, 90)
        if 57 < code < 65:
            # Reject the punctuation run ':'..'@' between digits and letters.
            continue
        chars.append(chr(code))
    return "".join(chars)
def satoshi():
    """Return an arbitrary satoshi amount between 1 and 999, as a string."""
    amount = randint(1, 999)
    return str(amount)
def PoWGenerator(transaction, difficulty=6):
    """Find a nonce that gives ``transaction`` a valid Proof of Work.

    Repeatedly appends a fresh random "Nonce: ..." line to ``transaction``
    and hashes the result with SHA3-256 until the hex digest starts with
    ``difficulty`` zero digits.  The winning digest is then recorded on a
    "Proof of Work: ..." line.

    Args:
        transaction: Transaction text; expected to end with a newline.
        difficulty: Number of leading zero hex digits required.  Defaults to
            6 (the original hard-coded value); smaller values make testing
            fast.

    Returns:
        A ``(PoW, new_tr)`` tuple: the winning hex digest and the transaction
        text extended with the Nonce and Proof of Work lines.
    """
    target = "0" * difficulty
    while True:
        nonce = serialnoncegenerator()
        noncestr = "Nonce: " + nonce + "\n"
        new_tr = transaction + noncestr  # Transaction updated with a Nonce line.
        # BUG FIX: hashlib.sha3_256 requires a bytes-like object; hashing the
        # str directly raises TypeError on Python 3, so encode as UTF-8 first.
        PoW = hashlib.sha3_256(new_tr.encode("utf-8")).hexdigest()
        if PoW[:difficulty] == target:
            break  # Found a digest with the required leading zeros.
    new_tr = new_tr + "Proof of Work: " + PoW + "\n"
    return (PoW, new_tr)
# Build the transaction text without the final Nonce and Proof of Work lines.
def trWoutLastTwoLines(prevHash):
    """Assemble a fresh transaction body, ending just before the nonce/PoW lines."""
    lines = [
        "*** Bitcoin transaction ***",
        "Serial number: " + serialnoncegenerator(),
        "Payer: User Name",
        "Payee: " + payee(),
        "Amount: " + satoshi() + " Satoshi",
        "Previous hash in the chain: " + prevHash,
    ]
    return "\n".join(lines) + "\n"
result = []
prevHash = "" # The hash of the previous transaction.
for i in range(10): # Generate a chain of 10 transactions.
    if i == 0:
        prevHash = "First transaction" # Genesis entry has no predecessor hash.
    transaction = trWoutLastTwoLines(prevHash) # Transaction body without Nonce/PoW lines.
    # Mine the PoW; the returned digest becomes the "previous hash" that links
    # the next transaction to this one.
    prevHash, transaction = PoWGenerator(transaction)
    result.append(transaction)
# Write the completed chain to the output file.
# NOTE(review): consider `with open(...)` so the file is closed on error.
myFile = open("LongestChain.txt", "w")
for tra in result:
    myFile.write(tra)
myFile.close()
| 33.175676 | 129 | 0.61833 |
from random import *
import sys
import hashlib
if sys.version_info < (3, 6):
import sha3
def serialnoncegenerator():
serial = str(randint(0, 2**128 - 1))
return serial
def payee():
payee = ""
for i in range(10):
num = randint(48, 90)
while(num > 57 and num < 65):
num = randint(48, 90)
payee += chr(num)
return payee
def satoshi():
return str(randint(1, 999))
def PoWGenerator(transaction):
new_tr = ""
PoW = ""
while True:
nonce = serialnoncegenerator()
noncestr = "Nonce: " + nonce + "\n"
new_tr = transaction + noncestr
PoW = hashlib.sha3_256(new_tr).hexdigest()
if PoW[:6] == "000000":
break
trPoW = "Proof of Work: " + PoW + "\n"
new_tr = new_tr + trPoW
return (PoW,new_tr)
def trWoutLastTwoLines(prevHash):
transaction = \
"*** Bitcoin transaction ***" + "\n" + \
"Serial number: " + serialnoncegenerator() + "\n" + \
"Payer: User Name" + "\n" + \
"Payee: " + payee() + "\n" + \
"Amount: " + satoshi() + " Satoshi" + "\n" + \
"Previous hash in the chain: " + prevHash + "\n"
return transaction
result = []
prevHash = ""
for i in range(10):
if i == 0:
prevHash = "First transaction"
transaction = trWoutLastTwoLines(prevHash)
prevHash, transaction = PoWGenerator(transaction)
result.append(transaction)
myFile = open("LongestChain.txt", "w")
for tra in result:
myFile.write(tra)
myFile.close()
| true | true |
f725069e16f136f40e31fccafac67c140404d6b4 | 59,971 | py | Python | flash/core/data/data_module.py | sumanmichael/lightning-flash | 4c69c1bf49fa74d0f2fdb9c4dbdcdfd5942352db | [
"Apache-2.0"
] | null | null | null | flash/core/data/data_module.py | sumanmichael/lightning-flash | 4c69c1bf49fa74d0f2fdb9c4dbdcdfd5942352db | [
"Apache-2.0"
] | null | null | null | flash/core/data/data_module.py | sumanmichael/lightning-flash | 4c69c1bf49fa74d0f2fdb9c4dbdcdfd5942352db | [
"Apache-2.0"
] | 1 | 2021-07-14T09:17:46.000Z | 2021-07-14T09:17:46.000Z | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import platform
from typing import Any, Callable, Collection, Dict, Iterable, List, Optional, Sequence, Tuple, TYPE_CHECKING, Union
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.trainer.states import RunningStage
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from torch.utils.data import DataLoader, Dataset
from torch.utils.data.dataset import IterableDataset, Subset
from torch.utils.data.sampler import Sampler
import flash
from flash.core.data.auto_dataset import BaseAutoDataset, IterableAutoDataset
from flash.core.data.base_viz import BaseVisualization
from flash.core.data.callback import BaseDataFetcher
from flash.core.data.data_pipeline import DataPipeline, DefaultPreprocess, Postprocess, Preprocess
from flash.core.data.data_source import DataSource, DefaultDataSources
from flash.core.data.splits import SplitDataset
from flash.core.data.utils import _STAGES_PREFIX
from flash.core.utilities.imports import _FIFTYONE_AVAILABLE, requires
if _FIFTYONE_AVAILABLE and TYPE_CHECKING:
from fiftyone.core.collections import SampleCollection
else:
SampleCollection = None
class DataModule(pl.LightningDataModule):
"""A basic DataModule class for all Flash tasks. This class includes references to a
:class:`~flash.core.data.data_source.DataSource`, :class:`~flash.core.data.process.Preprocess`,
:class:`~flash.core.data.process.Postprocess`, and a :class:`~flash.core.data.callback.BaseDataFetcher`.
Args:
train_dataset: Dataset for training. Defaults to None.
val_dataset: Dataset for validating model performance during training. Defaults to None.
test_dataset: Dataset to test model performance. Defaults to None.
predict_dataset: Dataset for predicting. Defaults to None.
data_source: The :class:`~flash.core.data.data_source.DataSource` that was used to create the datasets.
preprocess: The :class:`~flash.core.data.process.Preprocess` to use when constructing the
:class:`~flash.core.data.data_pipeline.DataPipeline`. If ``None``, a
:class:`~flash.core.data.process.DefaultPreprocess` will be used.
postprocess: The :class:`~flash.core.data.process.Postprocess` to use when constructing the
:class:`~flash.core.data.data_pipeline.DataPipeline`. If ``None``, a plain
:class:`~flash.core.data.process.Postprocess` will be used.
data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to attach to the
:class:`~flash.core.data.process.Preprocess`. If ``None``, the output from
:meth:`~flash.core.data.data_module.DataModule.configure_data_fetcher` will be used.
val_split: An optional float which gives the relative amount of the training dataset to use for the validation
dataset.
batch_size: The batch size to be used by the DataLoader. Defaults to 1.
num_workers: The number of workers to use for parallelized loading.
Defaults to None which equals the number of available CPU threads,
or 0 for Windows or Darwin platform.
sampler: A sampler following the :class:`~torch.utils.data.sampler.Sampler` type.
Will be passed to the DataLoader for the training dataset. Defaults to None.
"""
preprocess_cls = DefaultPreprocess
postprocess_cls = Postprocess
    def __init__(
        self,
        train_dataset: Optional[Dataset] = None,
        val_dataset: Optional[Dataset] = None,
        test_dataset: Optional[Dataset] = None,
        predict_dataset: Optional[Dataset] = None,
        data_source: Optional[DataSource] = None,
        preprocess: Optional[Preprocess] = None,
        postprocess: Optional[Postprocess] = None,
        data_fetcher: Optional[BaseDataFetcher] = None,
        val_split: Optional[float] = None,
        batch_size: int = 4,
        num_workers: Optional[int] = None,
        sampler: Optional[Sampler] = None,
    ) -> None:
        super().__init__()
        # Use a fixed, larger batch size when running Flash's own test-suite
        # on a GPU machine.
        if flash._IS_TESTING and torch.cuda.is_available():
            batch_size = 16
        self._data_source: DataSource = data_source
        self._preprocess: Optional[Preprocess] = preprocess
        self._postprocess: Optional[Postprocess] = postprocess
        self._viz: Optional[BaseVisualization] = None
        # Fall back to the class-level default fetcher when none is supplied.
        self._data_fetcher: Optional[BaseDataFetcher] = data_fetcher or self.configure_data_fetcher()
        # TODO: Preprocess can change
        self.data_fetcher.attach_to_preprocess(self.preprocess)
        self._train_ds = train_dataset
        self._val_ds = val_dataset
        self._test_ds = test_dataset
        self._predict_ds = predict_dataset
        # Carve a validation split out of the training set when ``val_split``
        # is given and no explicit validation dataset was supplied.
        if self._train_ds is not None and (val_split is not None and self._val_ds is None):
            self._train_ds, self._val_ds = self._split_train_val(self._train_ds, val_split)
        # Only expose the dataloader hooks for the splits that actually
        # exist, so Lightning skips the missing ones.
        if self._train_ds:
            self.train_dataloader = self._train_dataloader
        if self._val_ds:
            self.val_dataloader = self._val_dataloader
        if self._test_ds:
            self.test_dataloader = self._test_dataloader
        if self._predict_ds:
            self.predict_dataloader = self._predict_dataloader
        self.batch_size = batch_size
        # TODO: figure out best solution for setting num_workers
        # Default: 0 workers on macOS/Windows, otherwise one per CPU.
        # NOTE(review): presumably avoids spawn-based worker issues on those
        # platforms — confirm.
        if num_workers is None:
            if platform.system() in ("Darwin", "Windows"):
                num_workers = 0
            else:
                num_workers = os.cpu_count()
        self.num_workers = num_workers
        self.sampler = sampler
        self.set_running_stages()
    @property
    def train_dataset(self) -> Optional[Dataset]:
        """The training dataset, or ``None`` if one was not provided."""
        return self._train_ds

    @property
    def val_dataset(self) -> Optional[Dataset]:
        """The validation dataset, or ``None`` if one was not provided."""
        return self._val_ds

    @property
    def test_dataset(self) -> Optional[Dataset]:
        """The test dataset, or ``None`` if one was not provided."""
        return self._test_ds

    @property
    def predict_dataset(self) -> Optional[Dataset]:
        """The predict dataset, or ``None`` if one was not provided."""
        return self._predict_ds
    @property
    def viz(self) -> BaseVisualization:
        """The attached visualization callback.

        NOTE(review): when no visualization is attached this falls back to
        ``configure_data_fetcher()``, which returns a plain
        ``BaseDataFetcher`` rather than a ``BaseVisualization`` — confirm
        this is intended.
        """
        return self._viz or DataModule.configure_data_fetcher()

    @viz.setter
    def viz(self, viz: BaseVisualization) -> None:
        self._viz = viz
@staticmethod
def configure_data_fetcher(*args, **kwargs) -> BaseDataFetcher:
"""This function is used to configure a :class:`~flash.core.data.callback.BaseDataFetcher`.
Override with your custom one.
"""
return BaseDataFetcher()
@property
def data_fetcher(self) -> BaseDataFetcher:
return self._data_fetcher or DataModule.configure_data_fetcher()
@data_fetcher.setter
def data_fetcher(self, data_fetcher: BaseDataFetcher) -> None:
self._data_fetcher = data_fetcher
def _reset_iterator(self, stage: str) -> Iterable[Any]:
iter_name = f"_{stage}_iter"
# num_workers has to be set to 0 to work properly
num_workers = self.num_workers
self.num_workers = 0
dataloader_fn = getattr(self, f"{stage}_dataloader")
iterator = iter(dataloader_fn())
self.num_workers = num_workers
setattr(self, iter_name, iterator)
return iterator
    def _show_batch(self, stage: str, func_names: Union[str, List[str]], reset: bool = True) -> None:
        """Fetch one batch for ``stage`` and render it through the data fetcher.

        Args:
            stage: Stage prefix (e.g. ``"train"``) identifying the cached
                iterator and dataloader to draw from.
            func_names: One hook name or a list of hook names whose captured
                batches should be visualized.
            reset: Whether to clear the fetcher's captured batches for this
                stage before and after showing.
        """
        # don't show in CI
        if os.getenv("FLASH_TESTING", "0") == "1":
            return None
        iter_name = f"_{stage}_iter"
        # Lazily create the per-stage iterator on first use.
        if not hasattr(self, iter_name):
            self._reset_iterator(stage)
        # list of functions to visualise
        if isinstance(func_names, str):
            func_names = [func_names]
        iter_dataloader = getattr(self, iter_name)
        # Batches are only captured while the fetcher is enabled.
        with self.data_fetcher.enable():
            if reset:
                self.data_fetcher.batches[stage] = {}
            try:
                _ = next(iter_dataloader)
            except StopIteration:
                # Exhausted iterator: rebuild it and draw from the start.
                iter_dataloader = self._reset_iterator(stage)
                _ = next(iter_dataloader)
            # NOTE(review): assumes the attached fetcher is a
            # BaseVisualization (has ``_show``) — confirm for custom fetchers.
            data_fetcher: BaseVisualization = self.data_fetcher
            data_fetcher._show(stage, func_names)
            if reset:
                self.data_fetcher.batches[stage] = {}
def show_train_batch(self, hooks_names: Union[str, List[str]] = "load_sample", reset: bool = True) -> None:
"""This function is used to visualize a batch from the train dataloader."""
stage_name: str = _STAGES_PREFIX[RunningStage.TRAINING]
self._show_batch(stage_name, hooks_names, reset=reset)
def show_val_batch(self, hooks_names: Union[str, List[str]] = "load_sample", reset: bool = True) -> None:
"""This function is used to visualize a batch from the validation dataloader."""
stage_name: str = _STAGES_PREFIX[RunningStage.VALIDATING]
self._show_batch(stage_name, hooks_names, reset=reset)
def show_test_batch(self, hooks_names: Union[str, List[str]] = "load_sample", reset: bool = True) -> None:
"""This function is used to visualize a batch from the test dataloader."""
stage_name: str = _STAGES_PREFIX[RunningStage.TESTING]
self._show_batch(stage_name, hooks_names, reset=reset)
def show_predict_batch(self, hooks_names: Union[str, List[str]] = "load_sample", reset: bool = True) -> None:
"""This function is used to visualize a batch from the predict dataloader."""
stage_name: str = _STAGES_PREFIX[RunningStage.PREDICTING]
self._show_batch(stage_name, hooks_names, reset=reset)
@staticmethod
def get_dataset_attribute(dataset: torch.utils.data.Dataset, attr_name: str, default: Optional[Any] = None) -> Any:
if isinstance(dataset, Subset):
return getattr(dataset.dataset, attr_name, default)
return getattr(dataset, attr_name, default)
@staticmethod
def set_dataset_attribute(dataset: torch.utils.data.Dataset, attr_name: str, value: Any) -> None:
if isinstance(dataset, Subset):
dataset = dataset.dataset
if isinstance(dataset, (Dataset, IterableDataset)):
setattr(dataset, attr_name, value)
def set_running_stages(self):
if self._train_ds:
self.set_dataset_attribute(self._train_ds, "running_stage", RunningStage.TRAINING)
if self._val_ds:
self.set_dataset_attribute(self._val_ds, "running_stage", RunningStage.VALIDATING)
if self._test_ds:
self.set_dataset_attribute(self._test_ds, "running_stage", RunningStage.TESTING)
if self._predict_ds:
self.set_dataset_attribute(self._predict_ds, "running_stage", RunningStage.PREDICTING)
    def _resolve_collate_fn(self, dataset: Dataset, running_stage: RunningStage) -> Optional[Callable]:
        """Return the stage-specific worker preprocessor to use as ``collate_fn``.

        Only datasets produced by the flash data pipeline get a custom collate
        function; for any other dataset this implicitly returns ``None``, which
        lets ``DataLoader`` fall back to default collation.
        """
        if isinstance(dataset, (BaseAutoDataset, SplitDataset)):
            return self.data_pipeline.worker_preprocessor(running_stage)
    def _train_dataloader(self) -> DataLoader:
        """Create the dataloader for the train dataset.

        If a ``pl.Trainer`` is attached, construction is delegated to the
        ``LightningModule`` so tasks can customize it; otherwise a plain
        ``DataLoader`` is built.
        """
        # The dataset may be passed as a factory callable; resolve it first.
        train_ds: Dataset = self._train_ds() if isinstance(self._train_ds, Callable) else self._train_ds
        shuffle: bool = False
        collate_fn = self._resolve_collate_fn(train_ds, RunningStage.TRAINING)
        if isinstance(train_ds, IterableAutoDataset):
            # Iterable datasets have no length, so a partial last batch cannot
            # be detected/dropped.
            drop_last = False
        else:
            # Only drop the last batch when more than one full batch exists.
            drop_last = len(train_ds) > self.batch_size
        pin_memory = True
        if self.sampler is None:
            # Shuffling is incompatible with both a custom sampler and
            # iterable-style datasets.
            shuffle = not isinstance(train_ds, (IterableDataset, IterableAutoDataset))
        if isinstance(getattr(self, "trainer", None), pl.Trainer):
            return self.trainer.lightning_module.process_train_dataset(
                train_ds,
                batch_size=self.batch_size,
                num_workers=self.num_workers,
                pin_memory=pin_memory,
                shuffle=shuffle,
                drop_last=drop_last,
                collate_fn=collate_fn,
                sampler=self.sampler,
            )
        return DataLoader(
            train_ds,
            batch_size=self.batch_size,
            shuffle=shuffle,
            sampler=self.sampler,
            num_workers=self.num_workers,
            pin_memory=pin_memory,
            drop_last=drop_last,
            collate_fn=collate_fn,
        )
    def _val_dataloader(self) -> DataLoader:
        """Create the dataloader for the validation dataset.

        If a ``pl.Trainer`` is attached, construction is delegated to the
        ``LightningModule``; otherwise a plain ``DataLoader`` is built.
        """
        # The dataset may be passed as a factory callable; resolve it first.
        val_ds: Dataset = self._val_ds() if isinstance(self._val_ds, Callable) else self._val_ds
        collate_fn = self._resolve_collate_fn(val_ds, RunningStage.VALIDATING)
        pin_memory = True
        if isinstance(getattr(self, "trainer", None), pl.Trainer):
            return self.trainer.lightning_module.process_val_dataset(
                val_ds,
                batch_size=self.batch_size,
                num_workers=self.num_workers,
                pin_memory=pin_memory,
                collate_fn=collate_fn,
            )
        return DataLoader(
            val_ds,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            pin_memory=pin_memory,
            collate_fn=collate_fn,
        )
    def _test_dataloader(self) -> DataLoader:
        """Create the dataloader for the test dataset.

        If a ``pl.Trainer`` is attached, construction is delegated to the
        ``LightningModule``; otherwise a plain ``DataLoader`` is built.
        """
        # The dataset may be passed as a factory callable; resolve it first.
        test_ds: Dataset = self._test_ds() if isinstance(self._test_ds, Callable) else self._test_ds
        collate_fn = self._resolve_collate_fn(test_ds, RunningStage.TESTING)
        pin_memory = True
        if isinstance(getattr(self, "trainer", None), pl.Trainer):
            return self.trainer.lightning_module.process_test_dataset(
                test_ds,
                batch_size=self.batch_size,
                num_workers=self.num_workers,
                pin_memory=pin_memory,
                collate_fn=collate_fn,
            )
        return DataLoader(
            test_ds,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            pin_memory=pin_memory,
            collate_fn=collate_fn,
        )
def _predict_dataloader(self) -> DataLoader:
predict_ds: Dataset = self._predict_ds() if isinstance(self._predict_ds, Callable) else self._predict_ds
if isinstance(predict_ds, IterableAutoDataset):
batch_size = self.batch_size
else:
batch_size = min(self.batch_size, len(predict_ds) if len(predict_ds) > 0 else 1)
collate_fn = self._resolve_collate_fn(predict_ds, RunningStage.PREDICTING)
pin_memory = True
if isinstance(getattr(self, "trainer", None), pl.Trainer):
return self.trainer.lightning_module.process_test_dataset(
predict_ds,
batch_size=batch_size,
num_workers=self.num_workers,
pin_memory=pin_memory,
collate_fn=collate_fn,
)
return DataLoader(
predict_ds, batch_size=batch_size, num_workers=self.num_workers, pin_memory=True, collate_fn=collate_fn
)
@property
def num_classes(self) -> Optional[int]:
n_cls_train = getattr(self.train_dataset, "num_classes", None)
n_cls_val = getattr(self.val_dataset, "num_classes", None)
n_cls_test = getattr(self.test_dataset, "num_classes", None)
return n_cls_train or n_cls_val or n_cls_test
@property
def multi_label(self) -> Optional[bool]:
multi_label_train = getattr(self.train_dataset, "multi_label", None)
multi_label_val = getattr(self.val_dataset, "multi_label", None)
multi_label_test = getattr(self.test_dataset, "multi_label", None)
return multi_label_train or multi_label_val or multi_label_test
@property
def data_source(self) -> Optional[DataSource]:
return self._data_source
@property
def preprocess(self) -> Preprocess:
return self._preprocess or self.preprocess_cls()
@property
def postprocess(self) -> Postprocess:
return self._postprocess or self.postprocess_cls()
@property
def data_pipeline(self) -> DataPipeline:
return DataPipeline(self.data_source, self.preprocess, self.postprocess)
def available_data_sources(self) -> Sequence[str]:
"""Get the list of available data source names for use with this
:class:`~flash.core.data.data_module.DataModule`.
Returns:
The list of data source names.
"""
return self.preprocess.available_data_sources()
@staticmethod
def _split_train_val(
train_dataset: Dataset,
val_split: float,
) -> Tuple[Any, Any]:
if not isinstance(val_split, float) or (isinstance(val_split, float) and val_split > 1 or val_split < 0):
raise MisconfigurationException(f"`val_split` should be a float between 0 and 1. Found {val_split}.")
if isinstance(train_dataset, IterableAutoDataset):
raise MisconfigurationException(
"`val_split` should be `None` when the dataset is built with an IterableDataset."
)
val_num_samples = int(len(train_dataset) * val_split)
indices = list(range(len(train_dataset)))
np.random.shuffle(indices)
val_indices = indices[:val_num_samples]
train_indices = indices[val_num_samples:]
return (
SplitDataset(train_dataset, train_indices, use_duplicated_indices=True),
SplitDataset(train_dataset, val_indices, use_duplicated_indices=True),
)
@classmethod
def from_data_source(
cls,
data_source: str,
train_data: Any = None,
val_data: Any = None,
test_data: Any = None,
predict_data: Any = None,
train_transform: Optional[Dict[str, Callable]] = None,
val_transform: Optional[Dict[str, Callable]] = None,
test_transform: Optional[Dict[str, Callable]] = None,
predict_transform: Optional[Dict[str, Callable]] = None,
data_fetcher: Optional[BaseDataFetcher] = None,
preprocess: Optional[Preprocess] = None,
val_split: Optional[float] = None,
batch_size: int = 4,
num_workers: Optional[int] = None,
sampler: Optional[Sampler] = None,
**preprocess_kwargs: Any,
) -> "DataModule":
"""Creates a :class:`~flash.core.data.data_module.DataModule` object from the given inputs to
:meth:`~flash.core.data.data_source.DataSource.load_data` (``train_data``, ``val_data``, ``test_data``,
``predict_data``). The data source will be resolved from the instantiated
:class:`~flash.core.data.process.Preprocess`
using :meth:`~flash.core.data.process.Preprocess.data_source_of_name`.
Args:
data_source: The name of the data source to use for the
:meth:`~flash.core.data.data_source.DataSource.load_data`.
train_data: The input to :meth:`~flash.core.data.data_source.DataSource.load_data` to use when creating
the train dataset.
val_data: The input to :meth:`~flash.core.data.data_source.DataSource.load_data` to use when creating
the validation dataset.
test_data: The input to :meth:`~flash.core.data.data_source.DataSource.load_data` to use when creating
the test dataset.
predict_data: The input to :meth:`~flash.core.data.data_source.DataSource.load_data` to use when creating
the predict dataset.
train_transform: The dictionary of transforms to use during training which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
val_transform: The dictionary of transforms to use during validation which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
test_transform: The dictionary of transforms to use during testing which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
predict_transform: The dictionary of transforms to use during predicting which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to pass to the
:class:`~flash.core.data.data_module.DataModule`.
preprocess: The :class:`~flash.core.data.data.Preprocess` to pass to the
:class:`~flash.core.data.data_module.DataModule`. If ``None``, ``cls.preprocess_cls`` will be
constructed and used.
val_split: The ``val_split`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
batch_size: The ``batch_size`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
num_workers: The ``num_workers`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
sampler: The ``sampler`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
preprocess_kwargs: Additional keyword arguments to use when constructing the preprocess. Will only be used
if ``preprocess = None``.
Returns:
The constructed data module.
Examples::
data_module = DataModule.from_data_source(
DefaultDataSources.FOLDERS,
train_data="train_folder",
train_transform={
"to_tensor_transform": torch.as_tensor,
},
)
"""
preprocess = preprocess or cls.preprocess_cls(
train_transform,
val_transform,
test_transform,
predict_transform,
**preprocess_kwargs,
)
data_source = preprocess.data_source_of_name(data_source)
train_dataset, val_dataset, test_dataset, predict_dataset = data_source.to_datasets(
train_data,
val_data,
test_data,
predict_data,
)
return cls(
train_dataset,
val_dataset,
test_dataset,
predict_dataset,
data_source=data_source,
preprocess=preprocess,
data_fetcher=data_fetcher,
val_split=val_split,
batch_size=batch_size,
num_workers=num_workers,
sampler=sampler,
)
@classmethod
def from_folders(
cls,
train_folder: Optional[str] = None,
val_folder: Optional[str] = None,
test_folder: Optional[str] = None,
predict_folder: Optional[str] = None,
train_transform: Optional[Dict[str, Callable]] = None,
val_transform: Optional[Dict[str, Callable]] = None,
test_transform: Optional[Dict[str, Callable]] = None,
predict_transform: Optional[Dict[str, Callable]] = None,
data_fetcher: Optional[BaseDataFetcher] = None,
preprocess: Optional[Preprocess] = None,
val_split: Optional[float] = None,
batch_size: int = 4,
num_workers: Optional[int] = None,
sampler: Optional[Sampler] = None,
**preprocess_kwargs: Any,
) -> "DataModule":
"""Creates a :class:`~flash.core.data.data_module.DataModule` object from the given folders using the
:class:`~flash.core.data.data_source.DataSource` of name
:attr:`~flash.core.data.data_source.DefaultDataSources.FOLDERS`
from the passed or constructed :class:`~flash.core.data.process.Preprocess`.
Args:
train_folder: The folder containing the train data.
val_folder: The folder containing the validation data.
test_folder: The folder containing the test data.
predict_folder: The folder containing the predict data.
train_transform: The dictionary of transforms to use during training which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
val_transform: The dictionary of transforms to use during validation which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
test_transform: The dictionary of transforms to use during testing which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
predict_transform: The dictionary of transforms to use during predicting which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to pass to the
:class:`~flash.core.data.data_module.DataModule`.
preprocess: The :class:`~flash.core.data.data.Preprocess` to pass to the
:class:`~flash.core.data.data_module.DataModule`. If ``None``, ``cls.preprocess_cls``
will be constructed and used.
val_split: The ``val_split`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
batch_size: The ``batch_size`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
num_workers: The ``num_workers`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
sampler: The ``sampler`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
preprocess_kwargs: Additional keyword arguments to use when constructing the preprocess. Will only be used
if ``preprocess = None``.
Returns:
The constructed data module.
Examples::
data_module = DataModule.from_folders(
train_folder="train_folder",
train_transform={
"to_tensor_transform": torch.as_tensor,
},
)
"""
return cls.from_data_source(
DefaultDataSources.FOLDERS,
train_folder,
val_folder,
test_folder,
predict_folder,
train_transform=train_transform,
val_transform=val_transform,
test_transform=test_transform,
predict_transform=predict_transform,
data_fetcher=data_fetcher,
preprocess=preprocess,
val_split=val_split,
batch_size=batch_size,
num_workers=num_workers,
sampler=sampler,
**preprocess_kwargs,
)
@classmethod
def from_files(
cls,
train_files: Optional[Sequence[str]] = None,
train_targets: Optional[Sequence[Any]] = None,
val_files: Optional[Sequence[str]] = None,
val_targets: Optional[Sequence[Any]] = None,
test_files: Optional[Sequence[str]] = None,
test_targets: Optional[Sequence[Any]] = None,
predict_files: Optional[Sequence[str]] = None,
train_transform: Optional[Dict[str, Callable]] = None,
val_transform: Optional[Dict[str, Callable]] = None,
test_transform: Optional[Dict[str, Callable]] = None,
predict_transform: Optional[Dict[str, Callable]] = None,
data_fetcher: Optional[BaseDataFetcher] = None,
preprocess: Optional[Preprocess] = None,
val_split: Optional[float] = None,
batch_size: int = 4,
num_workers: Optional[int] = None,
sampler: Optional[Sampler] = None,
**preprocess_kwargs: Any,
) -> "DataModule":
"""Creates a :class:`~flash.core.data.data_module.DataModule` object from the given sequences of files
using the :class:`~flash.core.data.data_source.DataSource` of name
:attr:`~flash.core.data.data_source.DefaultDataSources.FILES` from the passed or constructed
:class:`~flash.core.data.process.Preprocess`.
Args:
train_files: A sequence of files to use as the train inputs.
train_targets: A sequence of targets (one per train file) to use as the train targets.
val_files: A sequence of files to use as the validation inputs.
val_targets: A sequence of targets (one per validation file) to use as the validation targets.
test_files: A sequence of files to use as the test inputs.
test_targets: A sequence of targets (one per test file) to use as the test targets.
predict_files: A sequence of files to use when predicting.
train_transform: The dictionary of transforms to use during training which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
val_transform: The dictionary of transforms to use during validation which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
test_transform: The dictionary of transforms to use during testing which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
predict_transform: The dictionary of transforms to use during predicting which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to pass to the
:class:`~flash.core.data.data_module.DataModule`.
preprocess: The :class:`~flash.core.data.data.Preprocess` to pass to the
:class:`~flash.core.data.data_module.DataModule`. If ``None``, ``cls.preprocess_cls``
will be constructed and used.
val_split: The ``val_split`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
batch_size: The ``batch_size`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
num_workers: The ``num_workers`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
sampler: The ``sampler`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
preprocess_kwargs: Additional keyword arguments to use when constructing the preprocess. Will only be used
if ``preprocess = None``.
Returns:
The constructed data module.
Examples::
data_module = DataModule.from_files(
train_files=["image_1.png", "image_2.png", "image_3.png"],
train_targets=[1, 0, 1],
train_transform={
"to_tensor_transform": torch.as_tensor,
},
)
"""
return cls.from_data_source(
DefaultDataSources.FILES,
(train_files, train_targets),
(val_files, val_targets),
(test_files, test_targets),
predict_files,
train_transform=train_transform,
val_transform=val_transform,
test_transform=test_transform,
predict_transform=predict_transform,
data_fetcher=data_fetcher,
preprocess=preprocess,
val_split=val_split,
batch_size=batch_size,
num_workers=num_workers,
sampler=sampler,
**preprocess_kwargs,
)
@classmethod
def from_tensors(
cls,
train_data: Optional[Collection[torch.Tensor]] = None,
train_targets: Optional[Collection[Any]] = None,
val_data: Optional[Collection[torch.Tensor]] = None,
val_targets: Optional[Sequence[Any]] = None,
test_data: Optional[Collection[torch.Tensor]] = None,
test_targets: Optional[Sequence[Any]] = None,
predict_data: Optional[Collection[torch.Tensor]] = None,
train_transform: Optional[Dict[str, Callable]] = None,
val_transform: Optional[Dict[str, Callable]] = None,
test_transform: Optional[Dict[str, Callable]] = None,
predict_transform: Optional[Dict[str, Callable]] = None,
data_fetcher: Optional[BaseDataFetcher] = None,
preprocess: Optional[Preprocess] = None,
val_split: Optional[float] = None,
batch_size: int = 4,
num_workers: Optional[int] = None,
sampler: Optional[Sampler] = None,
**preprocess_kwargs: Any,
) -> "DataModule":
"""Creates a :class:`~flash.core.data.data_module.DataModule` object from the given tensors using the
:class:`~flash.core.data.data_source.DataSource`
of name :attr:`~flash.core.data.data_source.DefaultDataSources.TENSOR`
from the passed or constructed :class:`~flash.core.data.process.Preprocess`.
Args:
train_data: A tensor or collection of tensors to use as the train inputs.
train_targets: A sequence of targets (one per train input) to use as the train targets.
val_data: A tensor or collection of tensors to use as the validation inputs.
val_targets: A sequence of targets (one per validation input) to use as the validation targets.
test_data: A tensor or collection of tensors to use as the test inputs.
test_targets: A sequence of targets (one per test input) to use as the test targets.
predict_data: A tensor or collection of tensors to use when predicting.
train_transform: The dictionary of transforms to use during training which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
val_transform: The dictionary of transforms to use during validation which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
test_transform: The dictionary of transforms to use during testing which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
predict_transform: The dictionary of transforms to use during predicting which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to pass to the
:class:`~flash.core.data.data_module.DataModule`.
preprocess: The :class:`~flash.core.data.data.Preprocess` to pass to the
:class:`~flash.core.data.data_module.DataModule`. If ``None``, ``cls.preprocess_cls``
will be constructed and used.
val_split: The ``val_split`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
batch_size: The ``batch_size`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
num_workers: The ``num_workers`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
sampler: The ``sampler`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
preprocess_kwargs: Additional keyword arguments to use when constructing the preprocess. Will only be used
if ``preprocess = None``.
Returns:
The constructed data module.
Examples::
data_module = DataModule.from_tensors(
train_files=torch.rand(3, 128),
train_targets=[1, 0, 1],
train_transform={
"to_tensor_transform": torch.as_tensor,
},
)
"""
return cls.from_data_source(
DefaultDataSources.TENSORS,
(train_data, train_targets),
(val_data, val_targets),
(test_data, test_targets),
predict_data,
train_transform=train_transform,
val_transform=val_transform,
test_transform=test_transform,
predict_transform=predict_transform,
data_fetcher=data_fetcher,
preprocess=preprocess,
val_split=val_split,
batch_size=batch_size,
num_workers=num_workers,
sampler=sampler,
**preprocess_kwargs,
)
@classmethod
def from_numpy(
cls,
train_data: Optional[Collection[np.ndarray]] = None,
train_targets: Optional[Collection[Any]] = None,
val_data: Optional[Collection[np.ndarray]] = None,
val_targets: Optional[Sequence[Any]] = None,
test_data: Optional[Collection[np.ndarray]] = None,
test_targets: Optional[Sequence[Any]] = None,
predict_data: Optional[Collection[np.ndarray]] = None,
train_transform: Optional[Dict[str, Callable]] = None,
val_transform: Optional[Dict[str, Callable]] = None,
test_transform: Optional[Dict[str, Callable]] = None,
predict_transform: Optional[Dict[str, Callable]] = None,
data_fetcher: Optional[BaseDataFetcher] = None,
preprocess: Optional[Preprocess] = None,
val_split: Optional[float] = None,
batch_size: int = 4,
num_workers: Optional[int] = None,
sampler: Optional[Sampler] = None,
**preprocess_kwargs: Any,
) -> "DataModule":
"""Creates a :class:`~flash.core.data.data_module.DataModule` object from the given numpy array using the
:class:`~flash.core.data.data_source.DataSource`
of name :attr:`~flash.core.data.data_source.DefaultDataSources.NUMPY`
from the passed or constructed :class:`~flash.core.data.process.Preprocess`.
Args:
train_data: A numpy array to use as the train inputs.
train_targets: A sequence of targets (one per train input) to use as the train targets.
val_data: A numpy array to use as the validation inputs.
val_targets: A sequence of targets (one per validation input) to use as the validation targets.
test_data: A numpy array to use as the test inputs.
test_targets: A sequence of targets (one per test input) to use as the test targets.
predict_data: A numpy array to use when predicting.
train_transform: The dictionary of transforms to use during training which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
val_transform: The dictionary of transforms to use during validation which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
test_transform: The dictionary of transforms to use during testing which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
predict_transform: The dictionary of transforms to use during predicting which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to pass to the
:class:`~flash.core.data.data_module.DataModule`.
preprocess: The :class:`~flash.core.data.data.Preprocess` to pass to the
:class:`~flash.core.data.data_module.DataModule`. If ``None``, ``cls.preprocess_cls``
will be constructed and used.
val_split: The ``val_split`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
batch_size: The ``batch_size`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
num_workers: The ``num_workers`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
sampler: The ``sampler`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
preprocess_kwargs: Additional keyword arguments to use when constructing the preprocess. Will only be used
if ``preprocess = None``.
Returns:
The constructed data module.
Examples::
data_module = DataModule.from_numpy(
train_files=np.random.rand(3, 128),
train_targets=[1, 0, 1],
train_transform={
"to_tensor_transform": torch.as_tensor,
},
)
"""
return cls.from_data_source(
DefaultDataSources.NUMPY,
(train_data, train_targets),
(val_data, val_targets),
(test_data, test_targets),
predict_data,
train_transform=train_transform,
val_transform=val_transform,
test_transform=test_transform,
predict_transform=predict_transform,
data_fetcher=data_fetcher,
preprocess=preprocess,
val_split=val_split,
batch_size=batch_size,
num_workers=num_workers,
sampler=sampler,
**preprocess_kwargs,
)
@classmethod
def from_json(
cls,
input_fields: Union[str, Sequence[str]],
target_fields: Optional[Union[str, Sequence[str]]] = None,
train_file: Optional[str] = None,
val_file: Optional[str] = None,
test_file: Optional[str] = None,
predict_file: Optional[str] = None,
train_transform: Optional[Dict[str, Callable]] = None,
val_transform: Optional[Dict[str, Callable]] = None,
test_transform: Optional[Dict[str, Callable]] = None,
predict_transform: Optional[Dict[str, Callable]] = None,
data_fetcher: Optional[BaseDataFetcher] = None,
preprocess: Optional[Preprocess] = None,
val_split: Optional[float] = None,
batch_size: int = 4,
num_workers: Optional[int] = None,
sampler: Optional[Sampler] = None,
field: Optional[str] = None,
**preprocess_kwargs: Any,
) -> "DataModule":
"""Creates a :class:`~flash.core.data.data_module.DataModule` object from the given JSON files using the
:class:`~flash.core.data.data_source.DataSource`
of name :attr:`~flash.core.data.data_source.DefaultDataSources.JSON`
from the passed or constructed :class:`~flash.core.data.process.Preprocess`.
Args:
input_fields: The field or fields in the JSON objects to use for the input.
target_fields: The field or fields in the JSON objects to use for the target.
train_file: The JSON file containing the training data.
val_file: The JSON file containing the validation data.
test_file: The JSON file containing the testing data.
predict_file: The JSON file containing the data to use when predicting.
train_transform: The dictionary of transforms to use during training which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
val_transform: The dictionary of transforms to use during validation which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
test_transform: The dictionary of transforms to use during testing which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
predict_transform: The dictionary of transforms to use during predicting which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to pass to the
:class:`~flash.core.data.data_module.DataModule`.
preprocess: The :class:`~flash.core.data.data.Preprocess` to pass to the
:class:`~flash.core.data.data_module.DataModule`. If ``None``, ``cls.preprocess_cls``
will be constructed and used.
val_split: The ``val_split`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
batch_size: The ``batch_size`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
num_workers: The ``num_workers`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
sampler: The ``sampler`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
field: To specify the field that holds the data in the JSON file.
preprocess_kwargs: Additional keyword arguments to use when constructing the preprocess. Will only be used
if ``preprocess = None``.
Returns:
The constructed data module.
Examples::
data_module = DataModule.from_json(
"input",
"target",
train_file="train_data.json",
train_transform={
"to_tensor_transform": torch.as_tensor,
},
)
# In the case where the data is of the form:
# {
# "version": 0.0.x,
# "data": [
# {
# "input_field" : "input_data",
# "target_field" : "target_output"
# },
# ...
# ]
# }
data_module = DataModule.from_json(
"input",
"target",
train_file="train_data.json",
train_transform={
"to_tensor_transform": torch.as_tensor,
},
feild="data"
)
"""
return cls.from_data_source(
DefaultDataSources.JSON,
(train_file, input_fields, target_fields, field),
(val_file, input_fields, target_fields, field),
(test_file, input_fields, target_fields, field),
(predict_file, input_fields, target_fields, field),
train_transform=train_transform,
val_transform=val_transform,
test_transform=test_transform,
predict_transform=predict_transform,
data_fetcher=data_fetcher,
preprocess=preprocess,
val_split=val_split,
batch_size=batch_size,
num_workers=num_workers,
sampler=sampler,
**preprocess_kwargs,
)
@classmethod
def from_csv(
    cls,
    input_fields: Union[str, Sequence[str]],
    target_fields: Optional[Union[str, Sequence[str]]] = None,
    train_file: Optional[str] = None,
    val_file: Optional[str] = None,
    test_file: Optional[str] = None,
    predict_file: Optional[str] = None,
    train_transform: Optional[Dict[str, Callable]] = None,
    val_transform: Optional[Dict[str, Callable]] = None,
    test_transform: Optional[Dict[str, Callable]] = None,
    predict_transform: Optional[Dict[str, Callable]] = None,
    data_fetcher: Optional[BaseDataFetcher] = None,
    preprocess: Optional[Preprocess] = None,
    val_split: Optional[float] = None,
    batch_size: int = 4,
    num_workers: Optional[int] = None,
    sampler: Optional[Sampler] = None,
    **preprocess_kwargs: Any,
) -> "DataModule":
    """Create a :class:`~flash.core.data.data_module.DataModule` from CSV files.

    Delegates to :meth:`from_data_source` with the data source of name
    :attr:`~flash.core.data.data_source.DefaultDataSources.CSV` from the passed or
    constructed :class:`~flash.core.data.process.Preprocess`.

    Args:
        input_fields: The field or fields (columns) in the CSV file to use for the input.
        target_fields: The field or fields (columns) in the CSV file to use for the target.
        train_file: The CSV file containing the training data.
        val_file: The CSV file containing the validation data.
        test_file: The CSV file containing the testing data.
        predict_file: The CSV file containing the data to use when predicting.
        train_transform: The dictionary of transforms to use during training which maps
            :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
        val_transform: The dictionary of transforms to use during validation which maps
            :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
        test_transform: The dictionary of transforms to use during testing which maps
            :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
        predict_transform: The dictionary of transforms to use during predicting which maps
            :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
        data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to pass to the
            :class:`~flash.core.data.data_module.DataModule`.
        preprocess: The :class:`~flash.core.data.data.Preprocess` to pass to the
            :class:`~flash.core.data.data_module.DataModule`. If ``None``, ``cls.preprocess_cls``
            will be constructed and used.
        val_split: The ``val_split`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
        batch_size: The ``batch_size`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
        num_workers: The ``num_workers`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
        sampler: The ``sampler`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
        preprocess_kwargs: Additional keyword arguments to use when constructing the preprocess. Will only be used
            if ``preprocess = None``.

    Returns:
        The constructed data module.

    Examples::

        data_module = DataModule.from_csv(
            "input",
            "target",
            train_file="train_data.csv",
            train_transform={
                "to_tensor_transform": torch.as_tensor,
            },
        )
    """
    # The CSV data source expects each split as a (file, inputs, targets) triple;
    # the order below matches the (train, val, test, predict) positional slots.
    split_args = [
        (split_file, input_fields, target_fields)
        for split_file in (train_file, val_file, test_file, predict_file)
    ]
    return cls.from_data_source(
        DefaultDataSources.CSV,
        *split_args,
        train_transform=train_transform,
        val_transform=val_transform,
        test_transform=test_transform,
        predict_transform=predict_transform,
        data_fetcher=data_fetcher,
        preprocess=preprocess,
        val_split=val_split,
        batch_size=batch_size,
        num_workers=num_workers,
        sampler=sampler,
        **preprocess_kwargs,
    )
@classmethod
def from_datasets(
    cls,
    train_dataset: Optional[Dataset] = None,
    val_dataset: Optional[Dataset] = None,
    test_dataset: Optional[Dataset] = None,
    predict_dataset: Optional[Dataset] = None,
    train_transform: Optional[Dict[str, Callable]] = None,
    val_transform: Optional[Dict[str, Callable]] = None,
    test_transform: Optional[Dict[str, Callable]] = None,
    predict_transform: Optional[Dict[str, Callable]] = None,
    data_fetcher: Optional[BaseDataFetcher] = None,
    preprocess: Optional[Preprocess] = None,
    val_split: Optional[float] = None,
    batch_size: int = 4,
    num_workers: Optional[int] = None,
    sampler: Optional[Sampler] = None,
    **preprocess_kwargs: Any,
) -> "DataModule":
    """Create a :class:`~flash.core.data.data_module.DataModule` from existing datasets.

    Delegates to :meth:`from_data_source` with the data source of name
    :attr:`~flash.core.data.data_source.DefaultDataSources.DATASETS` from the passed or
    constructed :class:`~flash.core.data.process.Preprocess`.

    Args:
        train_dataset: Dataset used during training.
        val_dataset: Dataset used during validating.
        test_dataset: Dataset used during testing.
        predict_dataset: Dataset used during predicting.
        train_transform: The dictionary of transforms to use during training which maps
            :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
        val_transform: The dictionary of transforms to use during validation which maps
            :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
        test_transform: The dictionary of transforms to use during testing which maps
            :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
        predict_transform: The dictionary of transforms to use during predicting which maps
            :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
        data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to pass to the
            :class:`~flash.core.data.data_module.DataModule`.
        preprocess: The :class:`~flash.core.data.data.Preprocess` to pass to the
            :class:`~flash.core.data.data_module.DataModule`. If ``None``, ``cls.preprocess_cls``
            will be constructed and used.
        val_split: The ``val_split`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
        batch_size: The ``batch_size`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
        num_workers: The ``num_workers`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
        sampler: The ``sampler`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
        preprocess_kwargs: Additional keyword arguments to use when constructing the preprocess. Will only be used
            if ``preprocess = None``.

    Returns:
        The constructed data module.

    Examples::

        data_module = DataModule.from_datasets(
            train_dataset=train_dataset,
            train_transform={
                "to_tensor_transform": torch.as_tensor,
            },
        )
    """
    # Keyword arguments shared by every from_* constructor, forwarded verbatim.
    loader_options = dict(
        train_transform=train_transform,
        val_transform=val_transform,
        test_transform=test_transform,
        predict_transform=predict_transform,
        data_fetcher=data_fetcher,
        preprocess=preprocess,
        val_split=val_split,
        batch_size=batch_size,
        num_workers=num_workers,
        sampler=sampler,
    )
    return cls.from_data_source(
        DefaultDataSources.DATASETS,
        train_dataset,
        val_dataset,
        test_dataset,
        predict_dataset,
        **loader_options,
        **preprocess_kwargs,
    )
@classmethod
@requires("fiftyone")
def from_fiftyone(
    cls,
    train_dataset: Optional[SampleCollection] = None,
    val_dataset: Optional[SampleCollection] = None,
    test_dataset: Optional[SampleCollection] = None,
    predict_dataset: Optional[SampleCollection] = None,
    train_transform: Optional[Dict[str, Callable]] = None,
    val_transform: Optional[Dict[str, Callable]] = None,
    test_transform: Optional[Dict[str, Callable]] = None,
    predict_transform: Optional[Dict[str, Callable]] = None,
    data_fetcher: Optional[BaseDataFetcher] = None,
    preprocess: Optional[Preprocess] = None,
    val_split: Optional[float] = None,
    batch_size: int = 4,
    num_workers: Optional[int] = None,
    # New keyword added at the end of the parameter list for consistency with the
    # other from_* constructors; backward compatible (defaults to None).
    sampler: Optional[Sampler] = None,
    **preprocess_kwargs: Any,
) -> "DataModule":
    """Creates a :class:`~flash.core.data.data_module.DataModule` object
    from the given FiftyOne Datasets using the
    :class:`~flash.core.data.data_source.DataSource` of name
    :attr:`~flash.core.data.data_source.DefaultDataSources.FIFTYONE`
    from the passed or constructed :class:`~flash.core.data.process.Preprocess`.

    Args:
        train_dataset: The ``fiftyone.core.collections.SampleCollection`` containing the train data.
        val_dataset: The ``fiftyone.core.collections.SampleCollection`` containing the validation data.
        test_dataset: The ``fiftyone.core.collections.SampleCollection`` containing the test data.
        predict_dataset: The ``fiftyone.core.collections.SampleCollection`` containing the predict data.
        train_transform: The dictionary of transforms to use during training which maps
            :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
        val_transform: The dictionary of transforms to use during validation which maps
            :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
        test_transform: The dictionary of transforms to use during testing which maps
            :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
        predict_transform: The dictionary of transforms to use during predicting which maps
            :class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
        data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to pass to the
            :class:`~flash.core.data.data_module.DataModule`.
        preprocess: The :class:`~flash.core.data.data.Preprocess` to pass to the
            :class:`~flash.core.data.data_module.DataModule`. If ``None``, ``cls.preprocess_cls``
            will be constructed and used.
        val_split: The ``val_split`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
        batch_size: The ``batch_size`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
        num_workers: The ``num_workers`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
        sampler: The ``sampler`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
        preprocess_kwargs: Additional keyword arguments to use when constructing the preprocess. Will only be used
            if ``preprocess = None``.

    Returns:
        The constructed data module.

    Examples::

        train_dataset = fo.Dataset.from_dir(
            "/path/to/dataset",
            dataset_type=fo.types.ImageClassificationDirectoryTree,
        )
        data_module = DataModule.from_fiftyone(
            train_dataset=train_dataset,
            train_transform={
                "to_tensor_transform": torch.as_tensor,
            },
        )
    """
    return cls.from_data_source(
        DefaultDataSources.FIFTYONE,
        train_dataset,
        val_dataset,
        test_dataset,
        predict_dataset,
        train_transform=train_transform,
        val_transform=val_transform,
        test_transform=test_transform,
        predict_transform=predict_transform,
        data_fetcher=data_fetcher,
        preprocess=preprocess,
        val_split=val_split,
        batch_size=batch_size,
        num_workers=num_workers,
        sampler=sampler,
        **preprocess_kwargs,
    )
| 48.402744 | 119 | 0.644712 |
import os
import platform
from typing import Any, Callable, Collection, Dict, Iterable, List, Optional, Sequence, Tuple, TYPE_CHECKING, Union
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.trainer.states import RunningStage
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from torch.utils.data import DataLoader, Dataset
from torch.utils.data.dataset import IterableDataset, Subset
from torch.utils.data.sampler import Sampler
import flash
from flash.core.data.auto_dataset import BaseAutoDataset, IterableAutoDataset
from flash.core.data.base_viz import BaseVisualization
from flash.core.data.callback import BaseDataFetcher
from flash.core.data.data_pipeline import DataPipeline, DefaultPreprocess, Postprocess, Preprocess
from flash.core.data.data_source import DataSource, DefaultDataSources
from flash.core.data.splits import SplitDataset
from flash.core.data.utils import _STAGES_PREFIX
from flash.core.utilities.imports import _FIFTYONE_AVAILABLE, requires
if _FIFTYONE_AVAILABLE and TYPE_CHECKING:
from fiftyone.core.collections import SampleCollection
else:
SampleCollection = None
class DataModule(pl.LightningDataModule):
preprocess_cls = DefaultPreprocess
postprocess_cls = Postprocess
def __init__(
self,
train_dataset: Optional[Dataset] = None,
val_dataset: Optional[Dataset] = None,
test_dataset: Optional[Dataset] = None,
predict_dataset: Optional[Dataset] = None,
data_source: Optional[DataSource] = None,
preprocess: Optional[Preprocess] = None,
postprocess: Optional[Postprocess] = None,
data_fetcher: Optional[BaseDataFetcher] = None,
val_split: Optional[float] = None,
batch_size: int = 4,
num_workers: Optional[int] = None,
sampler: Optional[Sampler] = None,
) -> None:
super().__init__()
if flash._IS_TESTING and torch.cuda.is_available():
batch_size = 16
self._data_source: DataSource = data_source
self._preprocess: Optional[Preprocess] = preprocess
self._postprocess: Optional[Postprocess] = postprocess
self._viz: Optional[BaseVisualization] = None
self._data_fetcher: Optional[BaseDataFetcher] = data_fetcher or self.configure_data_fetcher()
self.data_fetcher.attach_to_preprocess(self.preprocess)
self._train_ds = train_dataset
self._val_ds = val_dataset
self._test_ds = test_dataset
self._predict_ds = predict_dataset
if self._train_ds is not None and (val_split is not None and self._val_ds is None):
self._train_ds, self._val_ds = self._split_train_val(self._train_ds, val_split)
if self._train_ds:
self.train_dataloader = self._train_dataloader
if self._val_ds:
self.val_dataloader = self._val_dataloader
if self._test_ds:
self.test_dataloader = self._test_dataloader
if self._predict_ds:
self.predict_dataloader = self._predict_dataloader
self.batch_size = batch_size
if num_workers is None:
if platform.system() in ("Darwin", "Windows"):
num_workers = 0
else:
num_workers = os.cpu_count()
self.num_workers = num_workers
self.sampler = sampler
self.set_running_stages()
@property
def train_dataset(self) -> Optional[Dataset]:
return self._train_ds
@property
def val_dataset(self) -> Optional[Dataset]:
return self._val_ds
@property
def test_dataset(self) -> Optional[Dataset]:
return self._test_ds
@property
def predict_dataset(self) -> Optional[Dataset]:
return self._predict_ds
@property
def viz(self) -> BaseVisualization:
return self._viz or DataModule.configure_data_fetcher()
@viz.setter
def viz(self, viz: BaseVisualization) -> None:
self._viz = viz
@staticmethod
def configure_data_fetcher(*args, **kwargs) -> BaseDataFetcher:
return BaseDataFetcher()
@property
def data_fetcher(self) -> BaseDataFetcher:
return self._data_fetcher or DataModule.configure_data_fetcher()
@data_fetcher.setter
def data_fetcher(self, data_fetcher: BaseDataFetcher) -> None:
self._data_fetcher = data_fetcher
def _reset_iterator(self, stage: str) -> Iterable[Any]:
iter_name = f"_{stage}_iter"
num_workers = self.num_workers
self.num_workers = 0
dataloader_fn = getattr(self, f"{stage}_dataloader")
iterator = iter(dataloader_fn())
self.num_workers = num_workers
setattr(self, iter_name, iterator)
return iterator
def _show_batch(self, stage: str, func_names: Union[str, List[str]], reset: bool = True) -> None:
if os.getenv("FLASH_TESTING", "0") == "1":
return None
iter_name = f"_{stage}_iter"
if not hasattr(self, iter_name):
self._reset_iterator(stage)
# list of functions to visualise
if isinstance(func_names, str):
func_names = [func_names]
iter_dataloader = getattr(self, iter_name)
with self.data_fetcher.enable():
if reset:
self.data_fetcher.batches[stage] = {}
try:
_ = next(iter_dataloader)
except StopIteration:
iter_dataloader = self._reset_iterator(stage)
_ = next(iter_dataloader)
data_fetcher: BaseVisualization = self.data_fetcher
data_fetcher._show(stage, func_names)
if reset:
self.data_fetcher.batches[stage] = {}
def show_train_batch(self, hooks_names: Union[str, List[str]] = "load_sample", reset: bool = True) -> None:
stage_name: str = _STAGES_PREFIX[RunningStage.TRAINING]
self._show_batch(stage_name, hooks_names, reset=reset)
def show_val_batch(self, hooks_names: Union[str, List[str]] = "load_sample", reset: bool = True) -> None:
stage_name: str = _STAGES_PREFIX[RunningStage.VALIDATING]
self._show_batch(stage_name, hooks_names, reset=reset)
def show_test_batch(self, hooks_names: Union[str, List[str]] = "load_sample", reset: bool = True) -> None:
stage_name: str = _STAGES_PREFIX[RunningStage.TESTING]
self._show_batch(stage_name, hooks_names, reset=reset)
def show_predict_batch(self, hooks_names: Union[str, List[str]] = "load_sample", reset: bool = True) -> None:
stage_name: str = _STAGES_PREFIX[RunningStage.PREDICTING]
self._show_batch(stage_name, hooks_names, reset=reset)
@staticmethod
def get_dataset_attribute(dataset: torch.utils.data.Dataset, attr_name: str, default: Optional[Any] = None) -> Any:
if isinstance(dataset, Subset):
return getattr(dataset.dataset, attr_name, default)
return getattr(dataset, attr_name, default)
@staticmethod
def set_dataset_attribute(dataset: torch.utils.data.Dataset, attr_name: str, value: Any) -> None:
if isinstance(dataset, Subset):
dataset = dataset.dataset
if isinstance(dataset, (Dataset, IterableDataset)):
setattr(dataset, attr_name, value)
def set_running_stages(self):
if self._train_ds:
self.set_dataset_attribute(self._train_ds, "running_stage", RunningStage.TRAINING)
if self._val_ds:
self.set_dataset_attribute(self._val_ds, "running_stage", RunningStage.VALIDATING)
if self._test_ds:
self.set_dataset_attribute(self._test_ds, "running_stage", RunningStage.TESTING)
if self._predict_ds:
self.set_dataset_attribute(self._predict_ds, "running_stage", RunningStage.PREDICTING)
def _resolve_collate_fn(self, dataset: Dataset, running_stage: RunningStage) -> Optional[Callable]:
if isinstance(dataset, (BaseAutoDataset, SplitDataset)):
return self.data_pipeline.worker_preprocessor(running_stage)
def _train_dataloader(self) -> DataLoader:
train_ds: Dataset = self._train_ds() if isinstance(self._train_ds, Callable) else self._train_ds
shuffle: bool = False
collate_fn = self._resolve_collate_fn(train_ds, RunningStage.TRAINING)
if isinstance(train_ds, IterableAutoDataset):
drop_last = False
else:
drop_last = len(train_ds) > self.batch_size
pin_memory = True
if self.sampler is None:
shuffle = not isinstance(train_ds, (IterableDataset, IterableAutoDataset))
if isinstance(getattr(self, "trainer", None), pl.Trainer):
return self.trainer.lightning_module.process_train_dataset(
train_ds,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=pin_memory,
shuffle=shuffle,
drop_last=drop_last,
collate_fn=collate_fn,
sampler=self.sampler,
)
return DataLoader(
train_ds,
batch_size=self.batch_size,
shuffle=shuffle,
sampler=self.sampler,
num_workers=self.num_workers,
pin_memory=pin_memory,
drop_last=drop_last,
collate_fn=collate_fn,
)
def _val_dataloader(self) -> DataLoader:
val_ds: Dataset = self._val_ds() if isinstance(self._val_ds, Callable) else self._val_ds
collate_fn = self._resolve_collate_fn(val_ds, RunningStage.VALIDATING)
pin_memory = True
if isinstance(getattr(self, "trainer", None), pl.Trainer):
return self.trainer.lightning_module.process_val_dataset(
val_ds,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=pin_memory,
collate_fn=collate_fn,
)
return DataLoader(
val_ds,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=pin_memory,
collate_fn=collate_fn,
)
def _test_dataloader(self) -> DataLoader:
test_ds: Dataset = self._test_ds() if isinstance(self._test_ds, Callable) else self._test_ds
collate_fn = self._resolve_collate_fn(test_ds, RunningStage.TESTING)
pin_memory = True
if isinstance(getattr(self, "trainer", None), pl.Trainer):
return self.trainer.lightning_module.process_test_dataset(
test_ds,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=pin_memory,
collate_fn=collate_fn,
)
return DataLoader(
test_ds,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=pin_memory,
collate_fn=collate_fn,
)
def _predict_dataloader(self) -> DataLoader:
predict_ds: Dataset = self._predict_ds() if isinstance(self._predict_ds, Callable) else self._predict_ds
if isinstance(predict_ds, IterableAutoDataset):
batch_size = self.batch_size
else:
batch_size = min(self.batch_size, len(predict_ds) if len(predict_ds) > 0 else 1)
collate_fn = self._resolve_collate_fn(predict_ds, RunningStage.PREDICTING)
pin_memory = True
if isinstance(getattr(self, "trainer", None), pl.Trainer):
return self.trainer.lightning_module.process_test_dataset(
predict_ds,
batch_size=batch_size,
num_workers=self.num_workers,
pin_memory=pin_memory,
collate_fn=collate_fn,
)
return DataLoader(
predict_ds, batch_size=batch_size, num_workers=self.num_workers, pin_memory=True, collate_fn=collate_fn
)
@property
def num_classes(self) -> Optional[int]:
n_cls_train = getattr(self.train_dataset, "num_classes", None)
n_cls_val = getattr(self.val_dataset, "num_classes", None)
n_cls_test = getattr(self.test_dataset, "num_classes", None)
return n_cls_train or n_cls_val or n_cls_test
@property
def multi_label(self) -> Optional[bool]:
multi_label_train = getattr(self.train_dataset, "multi_label", None)
multi_label_val = getattr(self.val_dataset, "multi_label", None)
multi_label_test = getattr(self.test_dataset, "multi_label", None)
return multi_label_train or multi_label_val or multi_label_test
@property
def data_source(self) -> Optional[DataSource]:
return self._data_source
@property
def preprocess(self) -> Preprocess:
return self._preprocess or self.preprocess_cls()
@property
def postprocess(self) -> Postprocess:
return self._postprocess or self.postprocess_cls()
@property
def data_pipeline(self) -> DataPipeline:
return DataPipeline(self.data_source, self.preprocess, self.postprocess)
def available_data_sources(self) -> Sequence[str]:
return self.preprocess.available_data_sources()
@staticmethod
def _split_train_val(
train_dataset: Dataset,
val_split: float,
) -> Tuple[Any, Any]:
if not isinstance(val_split, float) or (isinstance(val_split, float) and val_split > 1 or val_split < 0):
raise MisconfigurationException(f"`val_split` should be a float between 0 and 1. Found {val_split}.")
if isinstance(train_dataset, IterableAutoDataset):
raise MisconfigurationException(
"`val_split` should be `None` when the dataset is built with an IterableDataset."
)
val_num_samples = int(len(train_dataset) * val_split)
indices = list(range(len(train_dataset)))
np.random.shuffle(indices)
val_indices = indices[:val_num_samples]
train_indices = indices[val_num_samples:]
return (
SplitDataset(train_dataset, train_indices, use_duplicated_indices=True),
SplitDataset(train_dataset, val_indices, use_duplicated_indices=True),
)
@classmethod
def from_data_source(
cls,
data_source: str,
train_data: Any = None,
val_data: Any = None,
test_data: Any = None,
predict_data: Any = None,
train_transform: Optional[Dict[str, Callable]] = None,
val_transform: Optional[Dict[str, Callable]] = None,
test_transform: Optional[Dict[str, Callable]] = None,
predict_transform: Optional[Dict[str, Callable]] = None,
data_fetcher: Optional[BaseDataFetcher] = None,
preprocess: Optional[Preprocess] = None,
val_split: Optional[float] = None,
batch_size: int = 4,
num_workers: Optional[int] = None,
sampler: Optional[Sampler] = None,
**preprocess_kwargs: Any,
) -> "DataModule":
preprocess = preprocess or cls.preprocess_cls(
train_transform,
val_transform,
test_transform,
predict_transform,
**preprocess_kwargs,
)
data_source = preprocess.data_source_of_name(data_source)
train_dataset, val_dataset, test_dataset, predict_dataset = data_source.to_datasets(
train_data,
val_data,
test_data,
predict_data,
)
return cls(
train_dataset,
val_dataset,
test_dataset,
predict_dataset,
data_source=data_source,
preprocess=preprocess,
data_fetcher=data_fetcher,
val_split=val_split,
batch_size=batch_size,
num_workers=num_workers,
sampler=sampler,
)
@classmethod
def from_folders(
cls,
train_folder: Optional[str] = None,
val_folder: Optional[str] = None,
test_folder: Optional[str] = None,
predict_folder: Optional[str] = None,
train_transform: Optional[Dict[str, Callable]] = None,
val_transform: Optional[Dict[str, Callable]] = None,
test_transform: Optional[Dict[str, Callable]] = None,
predict_transform: Optional[Dict[str, Callable]] = None,
data_fetcher: Optional[BaseDataFetcher] = None,
preprocess: Optional[Preprocess] = None,
val_split: Optional[float] = None,
batch_size: int = 4,
num_workers: Optional[int] = None,
sampler: Optional[Sampler] = None,
**preprocess_kwargs: Any,
) -> "DataModule":
return cls.from_data_source(
DefaultDataSources.FOLDERS,
train_folder,
val_folder,
test_folder,
predict_folder,
train_transform=train_transform,
val_transform=val_transform,
test_transform=test_transform,
predict_transform=predict_transform,
data_fetcher=data_fetcher,
preprocess=preprocess,
val_split=val_split,
batch_size=batch_size,
num_workers=num_workers,
sampler=sampler,
**preprocess_kwargs,
)
@classmethod
def from_files(
cls,
train_files: Optional[Sequence[str]] = None,
train_targets: Optional[Sequence[Any]] = None,
val_files: Optional[Sequence[str]] = None,
val_targets: Optional[Sequence[Any]] = None,
test_files: Optional[Sequence[str]] = None,
test_targets: Optional[Sequence[Any]] = None,
predict_files: Optional[Sequence[str]] = None,
train_transform: Optional[Dict[str, Callable]] = None,
val_transform: Optional[Dict[str, Callable]] = None,
test_transform: Optional[Dict[str, Callable]] = None,
predict_transform: Optional[Dict[str, Callable]] = None,
data_fetcher: Optional[BaseDataFetcher] = None,
preprocess: Optional[Preprocess] = None,
val_split: Optional[float] = None,
batch_size: int = 4,
num_workers: Optional[int] = None,
sampler: Optional[Sampler] = None,
**preprocess_kwargs: Any,
) -> "DataModule":
return cls.from_data_source(
DefaultDataSources.FILES,
(train_files, train_targets),
(val_files, val_targets),
(test_files, test_targets),
predict_files,
train_transform=train_transform,
val_transform=val_transform,
test_transform=test_transform,
predict_transform=predict_transform,
data_fetcher=data_fetcher,
preprocess=preprocess,
val_split=val_split,
batch_size=batch_size,
num_workers=num_workers,
sampler=sampler,
**preprocess_kwargs,
)
@classmethod
def from_tensors(
cls,
train_data: Optional[Collection[torch.Tensor]] = None,
train_targets: Optional[Collection[Any]] = None,
val_data: Optional[Collection[torch.Tensor]] = None,
val_targets: Optional[Sequence[Any]] = None,
test_data: Optional[Collection[torch.Tensor]] = None,
test_targets: Optional[Sequence[Any]] = None,
predict_data: Optional[Collection[torch.Tensor]] = None,
train_transform: Optional[Dict[str, Callable]] = None,
val_transform: Optional[Dict[str, Callable]] = None,
test_transform: Optional[Dict[str, Callable]] = None,
predict_transform: Optional[Dict[str, Callable]] = None,
data_fetcher: Optional[BaseDataFetcher] = None,
preprocess: Optional[Preprocess] = None,
val_split: Optional[float] = None,
batch_size: int = 4,
num_workers: Optional[int] = None,
sampler: Optional[Sampler] = None,
**preprocess_kwargs: Any,
) -> "DataModule":
return cls.from_data_source(
DefaultDataSources.TENSORS,
(train_data, train_targets),
(val_data, val_targets),
(test_data, test_targets),
predict_data,
train_transform=train_transform,
val_transform=val_transform,
test_transform=test_transform,
predict_transform=predict_transform,
data_fetcher=data_fetcher,
preprocess=preprocess,
val_split=val_split,
batch_size=batch_size,
num_workers=num_workers,
sampler=sampler,
**preprocess_kwargs,
)
@classmethod
def from_numpy(
cls,
train_data: Optional[Collection[np.ndarray]] = None,
train_targets: Optional[Collection[Any]] = None,
val_data: Optional[Collection[np.ndarray]] = None,
val_targets: Optional[Sequence[Any]] = None,
test_data: Optional[Collection[np.ndarray]] = None,
test_targets: Optional[Sequence[Any]] = None,
predict_data: Optional[Collection[np.ndarray]] = None,
train_transform: Optional[Dict[str, Callable]] = None,
val_transform: Optional[Dict[str, Callable]] = None,
test_transform: Optional[Dict[str, Callable]] = None,
predict_transform: Optional[Dict[str, Callable]] = None,
data_fetcher: Optional[BaseDataFetcher] = None,
preprocess: Optional[Preprocess] = None,
val_split: Optional[float] = None,
batch_size: int = 4,
num_workers: Optional[int] = None,
sampler: Optional[Sampler] = None,
**preprocess_kwargs: Any,
) -> "DataModule":
return cls.from_data_source(
DefaultDataSources.NUMPY,
(train_data, train_targets),
(val_data, val_targets),
(test_data, test_targets),
predict_data,
train_transform=train_transform,
val_transform=val_transform,
test_transform=test_transform,
predict_transform=predict_transform,
data_fetcher=data_fetcher,
preprocess=preprocess,
val_split=val_split,
batch_size=batch_size,
num_workers=num_workers,
sampler=sampler,
**preprocess_kwargs,
)
@classmethod
def from_json(
cls,
input_fields: Union[str, Sequence[str]],
target_fields: Optional[Union[str, Sequence[str]]] = None,
train_file: Optional[str] = None,
val_file: Optional[str] = None,
test_file: Optional[str] = None,
predict_file: Optional[str] = None,
train_transform: Optional[Dict[str, Callable]] = None,
val_transform: Optional[Dict[str, Callable]] = None,
test_transform: Optional[Dict[str, Callable]] = None,
predict_transform: Optional[Dict[str, Callable]] = None,
data_fetcher: Optional[BaseDataFetcher] = None,
preprocess: Optional[Preprocess] = None,
val_split: Optional[float] = None,
batch_size: int = 4,
num_workers: Optional[int] = None,
sampler: Optional[Sampler] = None,
field: Optional[str] = None,
**preprocess_kwargs: Any,
) -> "DataModule":
return cls.from_data_source(
DefaultDataSources.JSON,
(train_file, input_fields, target_fields, field),
(val_file, input_fields, target_fields, field),
(test_file, input_fields, target_fields, field),
(predict_file, input_fields, target_fields, field),
train_transform=train_transform,
val_transform=val_transform,
test_transform=test_transform,
predict_transform=predict_transform,
data_fetcher=data_fetcher,
preprocess=preprocess,
val_split=val_split,
batch_size=batch_size,
num_workers=num_workers,
sampler=sampler,
**preprocess_kwargs,
)
@classmethod
def from_csv(
cls,
input_fields: Union[str, Sequence[str]],
target_fields: Optional[Union[str, Sequence[str]]] = None,
train_file: Optional[str] = None,
val_file: Optional[str] = None,
test_file: Optional[str] = None,
predict_file: Optional[str] = None,
train_transform: Optional[Dict[str, Callable]] = None,
val_transform: Optional[Dict[str, Callable]] = None,
test_transform: Optional[Dict[str, Callable]] = None,
predict_transform: Optional[Dict[str, Callable]] = None,
data_fetcher: Optional[BaseDataFetcher] = None,
preprocess: Optional[Preprocess] = None,
val_split: Optional[float] = None,
batch_size: int = 4,
num_workers: Optional[int] = None,
sampler: Optional[Sampler] = None,
**preprocess_kwargs: Any,
) -> "DataModule":
return cls.from_data_source(
DefaultDataSources.CSV,
(train_file, input_fields, target_fields),
(val_file, input_fields, target_fields),
(test_file, input_fields, target_fields),
(predict_file, input_fields, target_fields),
train_transform=train_transform,
val_transform=val_transform,
test_transform=test_transform,
predict_transform=predict_transform,
data_fetcher=data_fetcher,
preprocess=preprocess,
val_split=val_split,
batch_size=batch_size,
num_workers=num_workers,
sampler=sampler,
**preprocess_kwargs,
)
@classmethod
def from_datasets(
cls,
train_dataset: Optional[Dataset] = None,
val_dataset: Optional[Dataset] = None,
test_dataset: Optional[Dataset] = None,
predict_dataset: Optional[Dataset] = None,
train_transform: Optional[Dict[str, Callable]] = None,
val_transform: Optional[Dict[str, Callable]] = None,
test_transform: Optional[Dict[str, Callable]] = None,
predict_transform: Optional[Dict[str, Callable]] = None,
data_fetcher: Optional[BaseDataFetcher] = None,
preprocess: Optional[Preprocess] = None,
val_split: Optional[float] = None,
batch_size: int = 4,
num_workers: Optional[int] = None,
sampler: Optional[Sampler] = None,
**preprocess_kwargs: Any,
) -> "DataModule":
return cls.from_data_source(
DefaultDataSources.DATASETS,
train_dataset,
val_dataset,
test_dataset,
predict_dataset,
train_transform=train_transform,
val_transform=val_transform,
test_transform=test_transform,
predict_transform=predict_transform,
data_fetcher=data_fetcher,
preprocess=preprocess,
val_split=val_split,
batch_size=batch_size,
num_workers=num_workers,
sampler=sampler,
**preprocess_kwargs,
)
@classmethod
@requires("fiftyone")
def from_fiftyone(
cls,
train_dataset: Optional[SampleCollection] = None,
val_dataset: Optional[SampleCollection] = None,
test_dataset: Optional[SampleCollection] = None,
predict_dataset: Optional[SampleCollection] = None,
train_transform: Optional[Dict[str, Callable]] = None,
val_transform: Optional[Dict[str, Callable]] = None,
test_transform: Optional[Dict[str, Callable]] = None,
predict_transform: Optional[Dict[str, Callable]] = None,
data_fetcher: Optional[BaseDataFetcher] = None,
preprocess: Optional[Preprocess] = None,
val_split: Optional[float] = None,
batch_size: int = 4,
num_workers: Optional[int] = None,
**preprocess_kwargs: Any,
) -> "DataModule":
return cls.from_data_source(
DefaultDataSources.FIFTYONE,
train_dataset,
val_dataset,
test_dataset,
predict_dataset,
train_transform=train_transform,
val_transform=val_transform,
test_transform=test_transform,
predict_transform=predict_transform,
data_fetcher=data_fetcher,
preprocess=preprocess,
val_split=val_split,
batch_size=batch_size,
num_workers=num_workers,
**preprocess_kwargs,
)
| true | true |
f72506abcd96241b0e568bab11db58147f3f22c6 | 13,446 | py | Python | source/rttov_test/profile-datasets-py/div83/028.py | bucricket/projectMAScorrection | 89489026c8e247ec7c364e537798e766331fe569 | [
"BSD-3-Clause"
] | null | null | null | source/rttov_test/profile-datasets-py/div83/028.py | bucricket/projectMAScorrection | 89489026c8e247ec7c364e537798e766331fe569 | [
"BSD-3-Clause"
] | 1 | 2022-03-12T12:19:59.000Z | 2022-03-12T12:19:59.000Z | source/rttov_test/profile-datasets-py/div83/028.py | bucricket/projectMAScorrection | 89489026c8e247ec7c364e537798e766331fe569 | [
"BSD-3-Clause"
] | null | null | null | """
Profile ../profile-datasets-py/div83/028.py
file automaticaly created by prof_gen.py script
"""
self["ID"] = "../profile-datasets-py/div83/028.py"
self["Q"] = numpy.array([ 2.70658300e+00, 2.88421200e+00, 3.36234900e+00,
4.31645100e+00, 5.09368400e+00, 5.28904200e+00,
5.19020300e+00, 5.37709100e+00, 5.81179600e+00,
6.08195300e+00, 6.10215300e+00, 6.10604300e+00,
6.12691200e+00, 6.14242200e+00, 6.13258200e+00,
6.07811300e+00, 5.93228500e+00, 5.70609700e+00,
5.40576100e+00, 5.05456400e+00, 4.69607800e+00,
4.41534100e+00, 4.18436200e+00, 3.99542400e+00,
3.83612500e+00, 3.68572600e+00, 3.53743700e+00,
3.42014800e+00, 3.34060900e+00, 3.29236900e+00,
3.26049900e+00, 3.23329000e+00, 3.19587000e+00,
3.14459000e+00, 3.07860100e+00, 3.00642100e+00,
2.93912100e+00, 2.88521200e+00, 2.84905200e+00,
2.83165200e+00, 2.82883200e+00, 2.82954200e+00,
2.82819200e+00, 2.82242200e+00, 2.80869200e+00,
2.78689200e+00, 2.75919200e+00, 2.73845300e+00,
2.73261300e+00, 2.73094300e+00, 2.76521200e+00,
2.88293200e+00, 3.08358000e+00, 3.25216900e+00,
3.36816900e+00, 3.57363700e+00, 4.08970300e+00,
4.79533700e+00, 5.36314100e+00, 6.07875300e+00,
6.96754100e+00, 7.93924700e+00, 8.66240500e+00,
9.61853700e+00, 1.07741800e+01, 1.21489500e+01,
1.39513100e+01, 1.62331400e+01, 1.91987300e+01,
2.30749700e+01, 3.25815400e+01, 4.45335200e+01,
5.84331900e+01, 6.90079400e+01, 9.48516000e+01,
1.35035800e+02, 2.00376800e+02, 2.45029900e+02,
2.73666100e+02, 2.87530300e+02, 3.16561800e+02,
3.58260600e+02, 4.11909300e+02, 4.63045500e+02,
5.01176700e+02, 5.27209900e+02, 5.36886600e+02,
8.34994200e+02, 1.80191700e+03, 2.49548700e+03,
2.75726600e+03, 2.84195000e+03, 3.28452600e+03,
3.45919200e+03, 3.54301200e+03, 3.61181700e+03,
3.70948800e+03, 4.03132300e+03, 3.92145200e+03,
3.81598200e+03, 3.71468000e+03])
self["P"] = numpy.array([ 5.00000000e-03, 1.61000000e-02, 3.84000000e-02,
7.69000000e-02, 1.37000000e-01, 2.24400000e-01,
3.45400000e-01, 5.06400000e-01, 7.14000000e-01,
9.75300000e-01, 1.29720000e+00, 1.68720000e+00,
2.15260000e+00, 2.70090000e+00, 3.33980000e+00,
4.07700000e+00, 4.92040000e+00, 5.87760000e+00,
6.95670000e+00, 8.16550000e+00, 9.51190000e+00,
1.10038000e+01, 1.26492000e+01, 1.44559000e+01,
1.64318000e+01, 1.85847000e+01, 2.09224000e+01,
2.34526000e+01, 2.61829000e+01, 2.91210000e+01,
3.22744000e+01, 3.56505000e+01, 3.92566000e+01,
4.31001000e+01, 4.71882000e+01, 5.15278000e+01,
5.61260000e+01, 6.09895000e+01, 6.61253000e+01,
7.15398000e+01, 7.72396000e+01, 8.32310000e+01,
8.95204000e+01, 9.61138000e+01, 1.03017000e+02,
1.10237000e+02, 1.17778000e+02, 1.25646000e+02,
1.33846000e+02, 1.42385000e+02, 1.51266000e+02,
1.60496000e+02, 1.70078000e+02, 1.80018000e+02,
1.90320000e+02, 2.00989000e+02, 2.12028000e+02,
2.23442000e+02, 2.35234000e+02, 2.47408000e+02,
2.59969000e+02, 2.72919000e+02, 2.86262000e+02,
3.00000000e+02, 3.14137000e+02, 3.28675000e+02,
3.43618000e+02, 3.58966000e+02, 3.74724000e+02,
3.90893000e+02, 4.07474000e+02, 4.24470000e+02,
4.41882000e+02, 4.59712000e+02, 4.77961000e+02,
4.96630000e+02, 5.15720000e+02, 5.35232000e+02,
5.55167000e+02, 5.75525000e+02, 5.96306000e+02,
6.17511000e+02, 6.39140000e+02, 6.61192000e+02,
6.83667000e+02, 7.06565000e+02, 7.29886000e+02,
7.53628000e+02, 7.77790000e+02, 8.02371000e+02,
8.27371000e+02, 8.52788000e+02, 8.78620000e+02,
9.04866000e+02, 9.31524000e+02, 9.58591000e+02,
9.86067000e+02, 1.01395000e+03, 1.04223000e+03,
1.07092000e+03, 1.10000000e+03])
self["CO2"] = numpy.array([ 375.157 , 375.1549, 375.1517, 375.1454, 375.1361, 375.123 ,
375.1071, 375.087 , 375.0668, 375.0397, 375.0107, 374.9887,
374.9787, 374.9797, 374.9977, 375.0777, 375.2648, 375.5219,
375.781 , 376.0111, 376.2142, 376.3473, 376.4444, 376.4995,
376.5366, 376.5616, 376.5587, 376.5497, 376.5067, 376.4628,
376.4328, 376.4008, 376.4098, 376.4218, 376.4898, 376.5879,
376.7209, 376.9269, 377.1439, 377.4289, 377.7349, 378.0429,
378.3499, 378.6699, 378.8849, 379.1099, 379.426 , 379.818 ,
380.22 , 380.6 , 380.9949, 381.1929, 381.3058, 381.3868,
381.3797, 381.3736, 381.4184, 381.4712, 381.603 , 381.8037,
381.9853, 382.093 , 382.2037, 382.2303, 382.2499, 382.2444,
382.2217, 382.1838, 382.1207, 382.0572, 381.9996, 381.94 ,
381.9237, 381.9096, 381.9038, 381.8944, 381.8765, 381.8654,
381.8535, 381.8452, 381.8181, 381.7832, 381.7287, 381.6712,
381.6146, 381.5577, 381.5041, 381.3403, 380.9254, 380.6148,
380.4651, 380.347 , 380.1074, 379.98 , 379.9132, 379.88 ,
379.8368, 379.7091, 379.748 , 379.7862, 379.8238])
self["CO"] = numpy.array([ 0.08586157, 0.08822425, 0.09316929, 0.1023676 , 0.1185754 ,
0.1468322 , 0.1650141 , 0.1423442 , 0.1988138 , 0.2464785 ,
0.2425355 , 0.1728059 , 0.09032735, 0.05148888, 0.04070355,
0.02625254, 0.01856279, 0.01646571, 0.01638821, 0.01663942,
0.01699772, 0.01728332, 0.01752593, 0.01768283, 0.01776983,
0.01781003, 0.01765084, 0.01745274, 0.01717194, 0.01689034,
0.01677895, 0.01666045, 0.01666065, 0.01666605, 0.01682295,
0.01706175, 0.01745855, 0.01820485, 0.01902765, 0.02024864,
0.02165474, 0.02315113, 0.02472473, 0.02648693, 0.02833782,
0.03041192, 0.03305521, 0.0363218 , 0.03988319, 0.04259638,
0.04561457, 0.04762276, 0.04920275, 0.05076773, 0.05211422,
0.05354631, 0.05613547, 0.05906392, 0.06294046, 0.06783429,
0.07255159, 0.0749807 , 0.07756863, 0.07782795, 0.07786446,
0.07780645, 0.07769262, 0.07765014, 0.07772161, 0.0777983 ,
0.07789106, 0.07798593, 0.07807734, 0.07817121, 0.07830787,
0.0784506 , 0.07860355, 0.0787634 , 0.07888221, 0.07899858,
0.07900668, 0.07900059, 0.07892907, 0.07883838, 0.07871643,
0.07857295, 0.07842407, 0.07827489, 0.07809013, 0.07792565,
0.07775791, 0.07732203, 0.07679743, 0.07609317, 0.07584283,
0.07573138, 0.07569875, 0.07569213, 0.07580885, 0.07592695,
0.07604646])
self["T"] = numpy.array([ 192.286, 199.539, 213.251, 231.442, 250.157, 264.95 ,
273.398, 275.988, 274.097, 268.487, 258.113, 251.109,
244.191, 236.22 , 228.14 , 222.084, 217.46 , 212.602,
207.757, 203.601, 201.12 , 200.706, 201.105, 201.977,
203.045, 204.056, 204.756, 205.56 , 206.453, 207.334,
208.062, 208.554, 208.87 , 209.253, 209.669, 210.106,
210.665, 211.542, 212.867, 214.547, 216.311, 217.838,
218.912, 219.576, 219.8 , 219.676, 219.51 , 219.534,
219.844, 220.107, 220.317, 220.448, 220.385, 220.111,
219.629, 218.951, 218.129, 217.334, 216.702, 216.3 ,
216.18 , 216.383, 217.044, 217.933, 219.029, 220.335,
221.923, 223.603, 225.325, 227.06 , 228.825, 230.687,
232.647, 234.715, 236.751, 238.774, 240.843, 242.923,
244.981, 246.857, 248.589, 250.26 , 251.903, 253.563,
255.223, 256.949, 258.721, 260.01 , 260.408, 261.77 ,
263.794, 265.708, 267.388, 269.546, 271.8 , 274.023,
276.301, 277.785, 277.785, 277.785, 277.785])
self["N2O"] = numpy.array([ 0.00843998, 0.00675998, 0.00550998, 0.00451998, 0.00367998,
0.00292998, 0.00182999, 0.00093999, 0.00086 , 0.00346998,
0.00574997, 0.00809995, 0.01049994, 0.01381992, 0.0167699 ,
0.01908988, 0.02167987, 0.02497986, 0.02886984, 0.03770981,
0.04611978, 0.06027973, 0.07687968, 0.09277963, 0.1074996 ,
0.1215696 , 0.1351095 , 0.1461595 , 0.1561495 , 0.1657995 ,
0.1742494 , 0.1789794 , 0.1835694 , 0.1880094 , 0.1941394 ,
0.2008494 , 0.2071794 , 0.2146194 , 0.2221894 , 0.2293794 ,
0.2371893 , 0.2450493 , 0.2529193 , 0.2607493 , 0.2684792 ,
0.2760492 , 0.2834092 , 0.2904892 , 0.2972092 , 0.3034892 ,
0.3092591 , 0.3144191 , 0.318879 , 0.320709 , 0.3223789 ,
0.3238688 , 0.3251487 , 0.3261884 , 0.3269782 , 0.327468 ,
0.3276377 , 0.3276374 , 0.3276372 , 0.3276368 , 0.3276365 ,
0.327636 , 0.3276354 , 0.3276347 , 0.3276337 , 0.3276324 ,
0.3276293 , 0.3276254 , 0.3276209 , 0.3276174 , 0.3276089 ,
0.3275958 , 0.3275743 , 0.3275597 , 0.3275503 , 0.3275458 ,
0.3275363 , 0.3275226 , 0.327505 , 0.3274883 , 0.3274758 ,
0.3274673 , 0.3274641 , 0.3273664 , 0.3270496 , 0.3268224 ,
0.3267366 , 0.3267089 , 0.3265639 , 0.3265066 , 0.3264792 ,
0.3264566 , 0.3264246 , 0.3263192 , 0.3263552 , 0.3263897 ,
0.3264229 ])
self["O3"] = numpy.array([ 0.1874915 , 0.2149024 , 0.285496 , 0.452577 , 0.6652036 ,
0.8636454 , 1.069974 , 1.339963 , 1.74506 , 2.367676 ,
3.20938 , 3.929546 , 4.632512 , 5.261088 , 5.711085 ,
5.883594 , 6.014724 , 6.133965 , 6.165117 , 6.02297 ,
5.613614 , 4.935388 , 4.273622 , 3.776325 , 3.563446 ,
3.711146 , 3.983026 , 3.953546 , 3.702878 , 3.374489 ,
3.12198 , 2.98387 , 2.886441 , 2.747311 , 2.547492 ,
2.304513 , 2.054054 , 1.818675 , 1.589585 , 1.352436 ,
1.123747 , 0.9392643 , 0.8266587 , 0.7792758 , 0.7783948 ,
0.8330277 , 0.9674253 , 0.9618644 , 0.8516127 , 0.7847689 ,
0.7466939 , 0.7239019 , 0.7068658 , 0.6795178 , 0.6289309 ,
0.552276 , 0.4571801 , 0.3607593 , 0.2771505 , 0.2096017 ,
0.1594669 , 0.125989 , 0.1039241 , 0.08256201, 0.06434251,
0.05068798, 0.04490537, 0.04165022, 0.03932574, 0.03722404,
0.03566284, 0.03436047, 0.03329735, 0.03248326, 0.03198487,
0.03175341, 0.03176373, 0.03195377, 0.03239443, 0.03349827,
0.03453746, 0.03505364, 0.03472099, 0.03362072, 0.03259216,
0.03219242, 0.03263077, 0.03360821, 0.03466293, 0.0363357 ,
0.03850264, 0.03980027, 0.03990021, 0.03985157, 0.03947275,
0.03814204, 0.03129189, 0.02650153, 0.02650445, 0.02650726,
0.02650996])
self["CH4"] = numpy.array([ 0.08335807, 0.1208587 , 0.1487335 , 0.1710033 , 0.204093 ,
0.2627246 , 0.2753886 , 0.2884114 , 0.3086782 , 0.3450409 ,
0.3803777 , 0.4445503 , 0.5298488 , 0.6725639 , 0.8022351 ,
0.9129405 , 1.009404 , 1.083974 , 1.152114 , 1.199524 ,
1.244704 , 1.297344 , 1.352464 , 1.405274 , 1.474424 ,
1.543284 , 1.609544 , 1.646784 , 1.673834 , 1.669345 ,
1.664525 , 1.659355 , 1.653835 , 1.638835 , 1.624395 ,
1.610745 , 1.598205 , 1.587065 , 1.588215 , 1.589415 ,
1.590696 , 1.592025 , 1.593435 , 1.623955 , 1.645655 ,
1.668345 , 1.686135 , 1.700245 , 1.713615 , 1.718985 ,
1.724565 , 1.730115 , 1.735775 , 1.740904 , 1.744604 ,
1.748444 , 1.750363 , 1.752232 , 1.753511 , 1.754389 ,
1.755058 , 1.755206 , 1.755345 , 1.755023 , 1.754691 ,
1.753979 , 1.753136 , 1.752172 , 1.751106 , 1.75008 ,
1.749123 , 1.748182 , 1.747308 , 1.746459 , 1.745774 ,
1.745114 , 1.74469 , 1.744322 , 1.744063 , 1.743828 ,
1.743578 , 1.743295 , 1.742992 , 1.742723 , 1.742476 ,
1.742291 , 1.742144 , 1.741505 , 1.73969 , 1.738371 ,
1.737825 , 1.737618 , 1.736797 , 1.736442 , 1.736296 ,
1.736177 , 1.736016 , 1.735456 , 1.735647 , 1.735831 ,
1.736007 ])
self["CTP"] = 500.0
self["CFRACTION"] = 0.0
self["IDG"] = 0
self["ISH"] = 0
self["ELEVATION"] = 0.0
self["S2M"]["T"] = 277.785
self["S2M"]["Q"] = 3714.67970528
self["S2M"]["O"] = 0.0265099568307
self["S2M"]["P"] = 1003.55103
self["S2M"]["U"] = 0.0
self["S2M"]["V"] = 0.0
self["S2M"]["WFETC"] = 100000.0
self["SKIN"]["SURFTYPE"] = 1
self["SKIN"]["WATERTYPE"] = 1
self["SKIN"]["T"] = 277.785
self["SKIN"]["SALINITY"] = 35.0
self["SKIN"]["FOAM_FRACTION"] = 0.0
self["SKIN"]["FASTEM"] = numpy.array([ 3. , 5. , 15. , 0.1, 0.3])
self["ZENANGLE"] = 0.0
self["AZANGLE"] = 0.0
self["SUNZENANGLE"] = 0.0
self["SUNAZANGLE"] = 0.0
self["LATITUDE"] = -47.333
self["GAS_UNITS"] = 2
self["BE"] = 0.0
self["COSBK"] = 0.0
self["DATE"] = numpy.array([2006, 8, 1])
self["TIME"] = numpy.array([0, 0, 0])
| 57.956897 | 92 | 0.566711 |
self["ID"] = "../profile-datasets-py/div83/028.py"
self["Q"] = numpy.array([ 2.70658300e+00, 2.88421200e+00, 3.36234900e+00,
4.31645100e+00, 5.09368400e+00, 5.28904200e+00,
5.19020300e+00, 5.37709100e+00, 5.81179600e+00,
6.08195300e+00, 6.10215300e+00, 6.10604300e+00,
6.12691200e+00, 6.14242200e+00, 6.13258200e+00,
6.07811300e+00, 5.93228500e+00, 5.70609700e+00,
5.40576100e+00, 5.05456400e+00, 4.69607800e+00,
4.41534100e+00, 4.18436200e+00, 3.99542400e+00,
3.83612500e+00, 3.68572600e+00, 3.53743700e+00,
3.42014800e+00, 3.34060900e+00, 3.29236900e+00,
3.26049900e+00, 3.23329000e+00, 3.19587000e+00,
3.14459000e+00, 3.07860100e+00, 3.00642100e+00,
2.93912100e+00, 2.88521200e+00, 2.84905200e+00,
2.83165200e+00, 2.82883200e+00, 2.82954200e+00,
2.82819200e+00, 2.82242200e+00, 2.80869200e+00,
2.78689200e+00, 2.75919200e+00, 2.73845300e+00,
2.73261300e+00, 2.73094300e+00, 2.76521200e+00,
2.88293200e+00, 3.08358000e+00, 3.25216900e+00,
3.36816900e+00, 3.57363700e+00, 4.08970300e+00,
4.79533700e+00, 5.36314100e+00, 6.07875300e+00,
6.96754100e+00, 7.93924700e+00, 8.66240500e+00,
9.61853700e+00, 1.07741800e+01, 1.21489500e+01,
1.39513100e+01, 1.62331400e+01, 1.91987300e+01,
2.30749700e+01, 3.25815400e+01, 4.45335200e+01,
5.84331900e+01, 6.90079400e+01, 9.48516000e+01,
1.35035800e+02, 2.00376800e+02, 2.45029900e+02,
2.73666100e+02, 2.87530300e+02, 3.16561800e+02,
3.58260600e+02, 4.11909300e+02, 4.63045500e+02,
5.01176700e+02, 5.27209900e+02, 5.36886600e+02,
8.34994200e+02, 1.80191700e+03, 2.49548700e+03,
2.75726600e+03, 2.84195000e+03, 3.28452600e+03,
3.45919200e+03, 3.54301200e+03, 3.61181700e+03,
3.70948800e+03, 4.03132300e+03, 3.92145200e+03,
3.81598200e+03, 3.71468000e+03])
self["P"] = numpy.array([ 5.00000000e-03, 1.61000000e-02, 3.84000000e-02,
7.69000000e-02, 1.37000000e-01, 2.24400000e-01,
3.45400000e-01, 5.06400000e-01, 7.14000000e-01,
9.75300000e-01, 1.29720000e+00, 1.68720000e+00,
2.15260000e+00, 2.70090000e+00, 3.33980000e+00,
4.07700000e+00, 4.92040000e+00, 5.87760000e+00,
6.95670000e+00, 8.16550000e+00, 9.51190000e+00,
1.10038000e+01, 1.26492000e+01, 1.44559000e+01,
1.64318000e+01, 1.85847000e+01, 2.09224000e+01,
2.34526000e+01, 2.61829000e+01, 2.91210000e+01,
3.22744000e+01, 3.56505000e+01, 3.92566000e+01,
4.31001000e+01, 4.71882000e+01, 5.15278000e+01,
5.61260000e+01, 6.09895000e+01, 6.61253000e+01,
7.15398000e+01, 7.72396000e+01, 8.32310000e+01,
8.95204000e+01, 9.61138000e+01, 1.03017000e+02,
1.10237000e+02, 1.17778000e+02, 1.25646000e+02,
1.33846000e+02, 1.42385000e+02, 1.51266000e+02,
1.60496000e+02, 1.70078000e+02, 1.80018000e+02,
1.90320000e+02, 2.00989000e+02, 2.12028000e+02,
2.23442000e+02, 2.35234000e+02, 2.47408000e+02,
2.59969000e+02, 2.72919000e+02, 2.86262000e+02,
3.00000000e+02, 3.14137000e+02, 3.28675000e+02,
3.43618000e+02, 3.58966000e+02, 3.74724000e+02,
3.90893000e+02, 4.07474000e+02, 4.24470000e+02,
4.41882000e+02, 4.59712000e+02, 4.77961000e+02,
4.96630000e+02, 5.15720000e+02, 5.35232000e+02,
5.55167000e+02, 5.75525000e+02, 5.96306000e+02,
6.17511000e+02, 6.39140000e+02, 6.61192000e+02,
6.83667000e+02, 7.06565000e+02, 7.29886000e+02,
7.53628000e+02, 7.77790000e+02, 8.02371000e+02,
8.27371000e+02, 8.52788000e+02, 8.78620000e+02,
9.04866000e+02, 9.31524000e+02, 9.58591000e+02,
9.86067000e+02, 1.01395000e+03, 1.04223000e+03,
1.07092000e+03, 1.10000000e+03])
self["CO2"] = numpy.array([ 375.157 , 375.1549, 375.1517, 375.1454, 375.1361, 375.123 ,
375.1071, 375.087 , 375.0668, 375.0397, 375.0107, 374.9887,
374.9787, 374.9797, 374.9977, 375.0777, 375.2648, 375.5219,
375.781 , 376.0111, 376.2142, 376.3473, 376.4444, 376.4995,
376.5366, 376.5616, 376.5587, 376.5497, 376.5067, 376.4628,
376.4328, 376.4008, 376.4098, 376.4218, 376.4898, 376.5879,
376.7209, 376.9269, 377.1439, 377.4289, 377.7349, 378.0429,
378.3499, 378.6699, 378.8849, 379.1099, 379.426 , 379.818 ,
380.22 , 380.6 , 380.9949, 381.1929, 381.3058, 381.3868,
381.3797, 381.3736, 381.4184, 381.4712, 381.603 , 381.8037,
381.9853, 382.093 , 382.2037, 382.2303, 382.2499, 382.2444,
382.2217, 382.1838, 382.1207, 382.0572, 381.9996, 381.94 ,
381.9237, 381.9096, 381.9038, 381.8944, 381.8765, 381.8654,
381.8535, 381.8452, 381.8181, 381.7832, 381.7287, 381.6712,
381.6146, 381.5577, 381.5041, 381.3403, 380.9254, 380.6148,
380.4651, 380.347 , 380.1074, 379.98 , 379.9132, 379.88 ,
379.8368, 379.7091, 379.748 , 379.7862, 379.8238])
self["CO"] = numpy.array([ 0.08586157, 0.08822425, 0.09316929, 0.1023676 , 0.1185754 ,
0.1468322 , 0.1650141 , 0.1423442 , 0.1988138 , 0.2464785 ,
0.2425355 , 0.1728059 , 0.09032735, 0.05148888, 0.04070355,
0.02625254, 0.01856279, 0.01646571, 0.01638821, 0.01663942,
0.01699772, 0.01728332, 0.01752593, 0.01768283, 0.01776983,
0.01781003, 0.01765084, 0.01745274, 0.01717194, 0.01689034,
0.01677895, 0.01666045, 0.01666065, 0.01666605, 0.01682295,
0.01706175, 0.01745855, 0.01820485, 0.01902765, 0.02024864,
0.02165474, 0.02315113, 0.02472473, 0.02648693, 0.02833782,
0.03041192, 0.03305521, 0.0363218 , 0.03988319, 0.04259638,
0.04561457, 0.04762276, 0.04920275, 0.05076773, 0.05211422,
0.05354631, 0.05613547, 0.05906392, 0.06294046, 0.06783429,
0.07255159, 0.0749807 , 0.07756863, 0.07782795, 0.07786446,
0.07780645, 0.07769262, 0.07765014, 0.07772161, 0.0777983 ,
0.07789106, 0.07798593, 0.07807734, 0.07817121, 0.07830787,
0.0784506 , 0.07860355, 0.0787634 , 0.07888221, 0.07899858,
0.07900668, 0.07900059, 0.07892907, 0.07883838, 0.07871643,
0.07857295, 0.07842407, 0.07827489, 0.07809013, 0.07792565,
0.07775791, 0.07732203, 0.07679743, 0.07609317, 0.07584283,
0.07573138, 0.07569875, 0.07569213, 0.07580885, 0.07592695,
0.07604646])
self["T"] = numpy.array([ 192.286, 199.539, 213.251, 231.442, 250.157, 264.95 ,
273.398, 275.988, 274.097, 268.487, 258.113, 251.109,
244.191, 236.22 , 228.14 , 222.084, 217.46 , 212.602,
207.757, 203.601, 201.12 , 200.706, 201.105, 201.977,
203.045, 204.056, 204.756, 205.56 , 206.453, 207.334,
208.062, 208.554, 208.87 , 209.253, 209.669, 210.106,
210.665, 211.542, 212.867, 214.547, 216.311, 217.838,
218.912, 219.576, 219.8 , 219.676, 219.51 , 219.534,
219.844, 220.107, 220.317, 220.448, 220.385, 220.111,
219.629, 218.951, 218.129, 217.334, 216.702, 216.3 ,
216.18 , 216.383, 217.044, 217.933, 219.029, 220.335,
221.923, 223.603, 225.325, 227.06 , 228.825, 230.687,
232.647, 234.715, 236.751, 238.774, 240.843, 242.923,
244.981, 246.857, 248.589, 250.26 , 251.903, 253.563,
255.223, 256.949, 258.721, 260.01 , 260.408, 261.77 ,
263.794, 265.708, 267.388, 269.546, 271.8 , 274.023,
276.301, 277.785, 277.785, 277.785, 277.785])
self["N2O"] = numpy.array([ 0.00843998, 0.00675998, 0.00550998, 0.00451998, 0.00367998,
0.00292998, 0.00182999, 0.00093999, 0.00086 , 0.00346998,
0.00574997, 0.00809995, 0.01049994, 0.01381992, 0.0167699 ,
0.01908988, 0.02167987, 0.02497986, 0.02886984, 0.03770981,
0.04611978, 0.06027973, 0.07687968, 0.09277963, 0.1074996 ,
0.1215696 , 0.1351095 , 0.1461595 , 0.1561495 , 0.1657995 ,
0.1742494 , 0.1789794 , 0.1835694 , 0.1880094 , 0.1941394 ,
0.2008494 , 0.2071794 , 0.2146194 , 0.2221894 , 0.2293794 ,
0.2371893 , 0.2450493 , 0.2529193 , 0.2607493 , 0.2684792 ,
0.2760492 , 0.2834092 , 0.2904892 , 0.2972092 , 0.3034892 ,
0.3092591 , 0.3144191 , 0.318879 , 0.320709 , 0.3223789 ,
0.3238688 , 0.3251487 , 0.3261884 , 0.3269782 , 0.327468 ,
0.3276377 , 0.3276374 , 0.3276372 , 0.3276368 , 0.3276365 ,
0.327636 , 0.3276354 , 0.3276347 , 0.3276337 , 0.3276324 ,
0.3276293 , 0.3276254 , 0.3276209 , 0.3276174 , 0.3276089 ,
0.3275958 , 0.3275743 , 0.3275597 , 0.3275503 , 0.3275458 ,
0.3275363 , 0.3275226 , 0.327505 , 0.3274883 , 0.3274758 ,
0.3274673 , 0.3274641 , 0.3273664 , 0.3270496 , 0.3268224 ,
0.3267366 , 0.3267089 , 0.3265639 , 0.3265066 , 0.3264792 ,
0.3264566 , 0.3264246 , 0.3263192 , 0.3263552 , 0.3263897 ,
0.3264229 ])
self["O3"] = numpy.array([ 0.1874915 , 0.2149024 , 0.285496 , 0.452577 , 0.6652036 ,
0.8636454 , 1.069974 , 1.339963 , 1.74506 , 2.367676 ,
3.20938 , 3.929546 , 4.632512 , 5.261088 , 5.711085 ,
5.883594 , 6.014724 , 6.133965 , 6.165117 , 6.02297 ,
5.613614 , 4.935388 , 4.273622 , 3.776325 , 3.563446 ,
3.711146 , 3.983026 , 3.953546 , 3.702878 , 3.374489 ,
3.12198 , 2.98387 , 2.886441 , 2.747311 , 2.547492 ,
2.304513 , 2.054054 , 1.818675 , 1.589585 , 1.352436 ,
1.123747 , 0.9392643 , 0.8266587 , 0.7792758 , 0.7783948 ,
0.8330277 , 0.9674253 , 0.9618644 , 0.8516127 , 0.7847689 ,
0.7466939 , 0.7239019 , 0.7068658 , 0.6795178 , 0.6289309 ,
0.552276 , 0.4571801 , 0.3607593 , 0.2771505 , 0.2096017 ,
0.1594669 , 0.125989 , 0.1039241 , 0.08256201, 0.06434251,
0.05068798, 0.04490537, 0.04165022, 0.03932574, 0.03722404,
0.03566284, 0.03436047, 0.03329735, 0.03248326, 0.03198487,
0.03175341, 0.03176373, 0.03195377, 0.03239443, 0.03349827,
0.03453746, 0.03505364, 0.03472099, 0.03362072, 0.03259216,
0.03219242, 0.03263077, 0.03360821, 0.03466293, 0.0363357 ,
0.03850264, 0.03980027, 0.03990021, 0.03985157, 0.03947275,
0.03814204, 0.03129189, 0.02650153, 0.02650445, 0.02650726,
0.02650996])
self["CH4"] = numpy.array([ 0.08335807, 0.1208587 , 0.1487335 , 0.1710033 , 0.204093 ,
0.2627246 , 0.2753886 , 0.2884114 , 0.3086782 , 0.3450409 ,
0.3803777 , 0.4445503 , 0.5298488 , 0.6725639 , 0.8022351 ,
0.9129405 , 1.009404 , 1.083974 , 1.152114 , 1.199524 ,
1.244704 , 1.297344 , 1.352464 , 1.405274 , 1.474424 ,
1.543284 , 1.609544 , 1.646784 , 1.673834 , 1.669345 ,
1.664525 , 1.659355 , 1.653835 , 1.638835 , 1.624395 ,
1.610745 , 1.598205 , 1.587065 , 1.588215 , 1.589415 ,
1.590696 , 1.592025 , 1.593435 , 1.623955 , 1.645655 ,
1.668345 , 1.686135 , 1.700245 , 1.713615 , 1.718985 ,
1.724565 , 1.730115 , 1.735775 , 1.740904 , 1.744604 ,
1.748444 , 1.750363 , 1.752232 , 1.753511 , 1.754389 ,
1.755058 , 1.755206 , 1.755345 , 1.755023 , 1.754691 ,
1.753979 , 1.753136 , 1.752172 , 1.751106 , 1.75008 ,
1.749123 , 1.748182 , 1.747308 , 1.746459 , 1.745774 ,
1.745114 , 1.74469 , 1.744322 , 1.744063 , 1.743828 ,
1.743578 , 1.743295 , 1.742992 , 1.742723 , 1.742476 ,
1.742291 , 1.742144 , 1.741505 , 1.73969 , 1.738371 ,
1.737825 , 1.737618 , 1.736797 , 1.736442 , 1.736296 ,
1.736177 , 1.736016 , 1.735456 , 1.735647 , 1.735831 ,
1.736007 ])
self["CTP"] = 500.0
self["CFRACTION"] = 0.0
self["IDG"] = 0
self["ISH"] = 0
self["ELEVATION"] = 0.0
self["S2M"]["T"] = 277.785
self["S2M"]["Q"] = 3714.67970528
self["S2M"]["O"] = 0.0265099568307
self["S2M"]["P"] = 1003.55103
self["S2M"]["U"] = 0.0
self["S2M"]["V"] = 0.0
self["S2M"]["WFETC"] = 100000.0
self["SKIN"]["SURFTYPE"] = 1
self["SKIN"]["WATERTYPE"] = 1
self["SKIN"]["T"] = 277.785
self["SKIN"]["SALINITY"] = 35.0
self["SKIN"]["FOAM_FRACTION"] = 0.0
self["SKIN"]["FASTEM"] = numpy.array([ 3. , 5. , 15. , 0.1, 0.3])
self["ZENANGLE"] = 0.0
self["AZANGLE"] = 0.0
self["SUNZENANGLE"] = 0.0
self["SUNAZANGLE"] = 0.0
self["LATITUDE"] = -47.333
self["GAS_UNITS"] = 2
self["BE"] = 0.0
self["COSBK"] = 0.0
self["DATE"] = numpy.array([2006, 8, 1])
self["TIME"] = numpy.array([0, 0, 0])
| true | true |
f725070abe59440c81ec609b73017feaae140853 | 4,547 | py | Python | mkt/home/tests/test_views.py | oremj/zamboni | a751dc6d22f7af947da327b0a091cbab0a999f49 | [
"BSD-3-Clause"
] | null | null | null | mkt/home/tests/test_views.py | oremj/zamboni | a751dc6d22f7af947da327b0a091cbab0a999f49 | [
"BSD-3-Clause"
] | null | null | null | mkt/home/tests/test_views.py | oremj/zamboni | a751dc6d22f7af947da327b0a091cbab0a999f49 | [
"BSD-3-Clause"
] | null | null | null | import datetime
from django.conf import settings
from nose.tools import eq_
from pyquery import PyQuery as pq
from amo.tests import app_factory, mock_es
from amo.urlresolvers import reverse
import mkt
from mkt.browse.tests.test_views import BrowseBase
from mkt.webapps.models import Webapp
from mkt.zadmin.models import FeaturedApp, FeaturedAppRegion
class TestHome(BrowseBase):
    """Integration tests for the Marketplace home page (the 'home' URL).

    Fixture helpers (setup_featured, get_pks, make_featured, the shared
    _test_* scenarios, ...) are inherited from BrowseBase.
    """

    def setUp(self):
        super(TestHome, self).setUp()
        self.url = reverse('home')
        # TODO: Remove log-in bit when we remove `request.can_view_consumer`.
        assert self.client.login(username='steamcube@mozilla.com',
                                 password='password')

    @mock_es
    def test_no_paypal_js(self):
        # With the waffle switch off, the PayPal JS lib must not be linked.
        self.create_switch('enabled-paypal', active=False)
        resp = self.client.get(self.url)
        assert not settings.PAYPAL_JS_URL in resp.content, (
            'When PayPal is disabled, its JS lib should not load')

    @mock_es
    def test_load_paypal_js(self):
        # With the waffle switch on, the PayPal JS lib must be linked.
        self.create_switch('enabled-paypal')
        resp = self.client.get(self.url)
        assert settings.PAYPAL_JS_URL in resp.content, (
            'When PayPal is enabled, its JS lib should load')

    @mock_es
    def test_page(self):
        # Home page renders successfully with the expected template.
        r = self.client.get(self.url)
        eq_(r.status_code, 200)
        self.assertTemplateUsed(r, 'home/home.html')

    @mock_es
    def test_featured_desktop(self):
        # setup_featured(4) presumably yields two home-featured apps (c, d) in
        # the US region -- confirm against BrowseBase.setup_featured.
        a, b, c, d = self.setup_featured(4)
        # Check that the Home featured app is shown only in US region.
        for region in mkt.regions.REGIONS_DICT:
            pks = self.get_pks('featured', self.url,
                               {'region': region})
            self.assertSetEqual(pks, [c.id, d.id] if region == 'us' else [])

    @mock_es
    def test_featured_mobile(self):
        # Same scenario as above, but with mobile=true only `d` remains.
        a, b, c, d = self.setup_featured(4)
        # Check that the Home featured app is shown only in US region.
        for region in mkt.regions.REGIONS_DICT:
            pks = self.get_pks('featured', self.url,
                               {'region': region, 'mobile': 'true'})
            self.assertSetEqual(pks, [d.id] if region == 'us' else [])

    def test_featured_src(self):
        # Featured tiles must link to the detail page with a src=mkt-home
        # tracking parameter.
        _, _, app = self.setup_featured()
        r = self.client.get(self.url)
        eq_(pq(r.content)('.mkt-tile').attr('href'),
            app.get_detail_url() + '?src=mkt-home')

    def test_tile_no_rating_link(self):
        # Home tiles must not render a rating link.
        r = self.client.get(self.url)
        assert not pq(r.content)('.mkt-tile .rating_link')

    @mock_es
    def test_featured_region_exclusions(self):
        # Shared scenario implemented in BrowseBase.
        self._test_featured_region_exclusions()

    @mock_es
    def test_featured_fallback_to_worldwide(self):
        a, b, c = self.setup_featured()
        worldwide_apps = [app_factory().id for x in xrange(5)]
        for app in worldwide_apps:
            fa = FeaturedApp.objects.create(app_id=app, category=None)
            FeaturedAppRegion.objects.create(featured_app=fa,
                                             region=mkt.regions.WORLDWIDE.id)
        # In US: 1 US-featured app + 5 Worldwide-featured apps.
        # Elsewhere: only the 5 Worldwide-featured apps.
        for region in mkt.regions.REGIONS_DICT:
            if region == 'us':
                expected = [c.id] + worldwide_apps[:5]
            else:
                expected = worldwide_apps
            eq_(self.get_pks('featured', self.url, {'region': region}),
                expected)

    def test_popular(self):
        # Shared scenario implemented in BrowseBase.
        self._test_popular()

    def test_popular_region_exclusions(self):
        # Shared scenario implemented in BrowseBase.
        self._test_popular_region_exclusions()

    def make_time_limited_feature(self):
        """Create an app featured (no category) from 2012-01-01 to 2012-02-01.

        Returns the app so tests can assert on its pk.
        """
        a = app_factory()
        fa = self.make_featured(app=a, category=None)
        fa.start_date = datetime.date(2012, 1, 1)
        fa.end_date = datetime.date(2012, 2, 1)
        fa.save()
        return a

    @mock_es
    def test_featured_time_excluded(self):
        # NOTE(review): despite the name, this asserts the feature IS shown for
        # dates inside the start/end window (boundaries inclusive). The names
        # of this test and the next appear swapped -- confirm intent.
        a = self.make_time_limited_feature()
        for d in [datetime.date(2012, 1, 1),
                  datetime.date(2012, 1, 15),
                  datetime.date(2012, 2, 1)]:
            Webapp.now = staticmethod(lambda: d)
            eq_(self.get_pks('featured', self.url, {'region': 'us'}),
                [a.id])

    @mock_es
    def test_featured_time_included(self):
        # Dates outside the feature window: nothing is featured (see the
        # naming note on the previous test).
        self.make_time_limited_feature()
        for d in [datetime.date(2011, 12, 15),
                  datetime.date(2012, 2, 2)]:
            Webapp.now = staticmethod(lambda: d)
            eq_(self.get_pks('featured', self.url, {'region': 'us'}), [])
| 35.248062 | 77 | 0.610073 | import datetime
from django.conf import settings
from nose.tools import eq_
from pyquery import PyQuery as pq
from amo.tests import app_factory, mock_es
from amo.urlresolvers import reverse
import mkt
from mkt.browse.tests.test_views import BrowseBase
from mkt.webapps.models import Webapp
from mkt.zadmin.models import FeaturedApp, FeaturedAppRegion
class TestHome(BrowseBase):
def setUp(self):
super(TestHome, self).setUp()
self.url = reverse('home')
assert self.client.login(username='steamcube@mozilla.com',
password='password')
@mock_es
def test_no_paypal_js(self):
self.create_switch('enabled-paypal', active=False)
resp = self.client.get(self.url)
assert not settings.PAYPAL_JS_URL in resp.content, (
'When PayPal is disabled, its JS lib should not load')
@mock_es
def test_load_paypal_js(self):
self.create_switch('enabled-paypal')
resp = self.client.get(self.url)
assert settings.PAYPAL_JS_URL in resp.content, (
'When PayPal is enabled, its JS lib should load')
@mock_es
def test_page(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
self.assertTemplateUsed(r, 'home/home.html')
@mock_es
def test_featured_desktop(self):
a, b, c, d = self.setup_featured(4)
for region in mkt.regions.REGIONS_DICT:
pks = self.get_pks('featured', self.url,
{'region': region})
self.assertSetEqual(pks, [c.id, d.id] if region == 'us' else [])
@mock_es
def test_featured_mobile(self):
a, b, c, d = self.setup_featured(4)
for region in mkt.regions.REGIONS_DICT:
pks = self.get_pks('featured', self.url,
{'region': region, 'mobile': 'true'})
self.assertSetEqual(pks, [d.id] if region == 'us' else [])
def test_featured_src(self):
_, _, app = self.setup_featured()
r = self.client.get(self.url)
eq_(pq(r.content)('.mkt-tile').attr('href'),
app.get_detail_url() + '?src=mkt-home')
def test_tile_no_rating_link(self):
r = self.client.get(self.url)
assert not pq(r.content)('.mkt-tile .rating_link')
@mock_es
def test_featured_region_exclusions(self):
self._test_featured_region_exclusions()
@mock_es
def test_featured_fallback_to_worldwide(self):
a, b, c = self.setup_featured()
worldwide_apps = [app_factory().id for x in xrange(5)]
for app in worldwide_apps:
fa = FeaturedApp.objects.create(app_id=app, category=None)
FeaturedAppRegion.objects.create(featured_app=fa,
region=mkt.regions.WORLDWIDE.id)
for region in mkt.regions.REGIONS_DICT:
if region == 'us':
expected = [c.id] + worldwide_apps[:5]
else:
expected = worldwide_apps
eq_(self.get_pks('featured', self.url, {'region': region}),
expected)
def test_popular(self):
self._test_popular()
def test_popular_region_exclusions(self):
self._test_popular_region_exclusions()
def make_time_limited_feature(self):
a = app_factory()
fa = self.make_featured(app=a, category=None)
fa.start_date = datetime.date(2012, 1, 1)
fa.end_date = datetime.date(2012, 2, 1)
fa.save()
return a
@mock_es
def test_featured_time_excluded(self):
a = self.make_time_limited_feature()
for d in [datetime.date(2012, 1, 1),
datetime.date(2012, 1, 15),
datetime.date(2012, 2, 1)]:
Webapp.now = staticmethod(lambda: d)
eq_(self.get_pks('featured', self.url, {'region': 'us'}),
[a.id])
@mock_es
def test_featured_time_included(self):
self.make_time_limited_feature()
for d in [datetime.date(2011, 12, 15),
datetime.date(2012, 2, 2)]:
Webapp.now = staticmethod(lambda: d)
eq_(self.get_pks('featured', self.url, {'region': 'us'}), [])
| true | true |
f725072ba5ab89efad25c3839e4eab5683dd5e8a | 9,159 | py | Python | epgbackup/src/plugin.py | builder08/enigma2-plugins_2 | f8f08b947e23c1c86b011492a7323125774c3482 | [
"OLDAP-2.3"
] | null | null | null | epgbackup/src/plugin.py | builder08/enigma2-plugins_2 | f8f08b947e23c1c86b011492a7323125774c3482 | [
"OLDAP-2.3"
] | null | null | null | epgbackup/src/plugin.py | builder08/enigma2-plugins_2 | f8f08b947e23c1c86b011492a7323125774c3482 | [
"OLDAP-2.3"
] | null | null | null | # for localized messages
from . import _
# Config
from Components.config import config, ConfigYesNo, ConfigNumber, ConfigSelection, \
ConfigSubsection, ConfigSelectionNumber, ConfigDirectory, NoSave
from Screens.MessageBox import MessageBox
from Screens.Standby import TryQuitMainloop
from Tools.BoundFunction import boundFunction
# Error-print
from EPGBackupTools import debugOut, PLUGIN_VERSION
from traceback import format_exc
extPrefix = _("EXTENSIONMENU_PREFIX")
config.plugins.epgbackup = ConfigSubsection()
# Do not change order of choices
config.plugins.epgbackup.show_setup_in = ConfigSelection(choices=[
("extension", _("extensions")),
("plugin", _("pluginmenue")),
("both", _("extensions") + "/" + _("pluginmenue")),
("system", _("systemmenue")),
], default="both")
config.plugins.epgbackup.show_make_backup_in_extmenu = ConfigYesNo(default=False)
config.plugins.epgbackup.show_backuprestore_in_extmenu = ConfigYesNo(default=False)
config.plugins.epgbackup.backup_enabled = ConfigYesNo(default=True)
config.plugins.epgbackup.make_backup_after_unsuccess_restore = ConfigYesNo(default=True)
config.plugins.epgbackup.callAfterEPGRefresh = ConfigYesNo(default=True)
config.plugins.epgbackup.backupSaveInterval = ConfigSelection(choices=[
("-1", _("backup timer disabled")),
("30", _("30 minutes")),
("60", _("1 hour")),
("300", _("6 hours")),
("1200", _("1 day")),
], default="-1")
config.plugins.epgbackup.show_messages_background = ConfigYesNo(default=True)
config.plugins.epgbackup.filesize_valid = ConfigSelectionNumber(min=1,
max=20, stepwidth=1, default=3, wraparound=True)
config.plugins.epgbackup.timespan_valid = ConfigNumber(default=7)
config.plugins.epgbackup.showadvancedoptions = NoSave(ConfigYesNo(default=False))
config.plugins.epgbackup.epgwrite_wait = ConfigNumber(default=3)
config.plugins.epgbackup.showin_usr_scripts = ConfigYesNo(default=True)
config.plugins.epgbackup.backup_strategy = ConfigSelection(choices=[
("youngest_before_biggest", _("Youngest before Biggest"), _("The youngest file from the saved backup-files will be restored.\nIf it is older than the current existing EPG-file and the EPG-file isn't valid then the biggest backup-file will be restored.")),
("biggest_before_youngest", _("Biggest before Youngest"), _("The biggest file from the saved backup-files will be restored.\nIf it is smaller than the current existing EPG-file and the EPG-file isn't valid then the youngest backup-file will be restored.")),
("youngest", _("Only younger"), _("The backup-file will only be restored if it is younger than the current existing EPG-file.")),
("biggest", _("Only bigger"), _("The backup-file will only be restored if it is greater than the current existing EPG-file.")),
], default="youngest_before_biggest"
)
config.plugins.epgbackup.enable_debug = ConfigYesNo(default=False)
config.plugins.epgbackup.plugin_debug_in_file = ConfigYesNo(default=False)
config.plugins.epgbackup.backup_log_dir = ConfigDirectory(default="/tmp")
config.plugins.epgbackup.max_boot_count = ConfigNumber(default=3)
try:
from Components.Language import language
from Plugins.SystemPlugins.MPHelp import registerHelp, XMLHelpReader
from Tools.Directories import resolveFilename, SCOPE_PLUGINS, fileExists
lang = language.getLanguage()[:2]
HELPPATH = resolveFilename(SCOPE_PLUGINS, "Extensions/EPGBackup")
if fileExists(HELPPATH + "/locale/" + str(lang) + "/mphelp.xml"):
helpfile = HELPPATH + "/locale/" + str(lang) + "/mphelp.xml"
else:
helpfile = HELPPATH + "/mphelp.xml"
reader = XMLHelpReader(helpfile)
epgBackuphHelp = registerHelp(*reader)
except:
debugOut("Help-Error:\n" + str(format_exc()), forced=True)
epgBackuphHelp = None
# Plugin
epgbackup = None
from Components.PluginComponent import plugins
from Plugins.Plugin import PluginDescriptor
gUserScriptExists = False
# Autostart
def autostart(reason, **kwargs):
global epgbackup
global gUserScriptExists
if reason == 0 and "session" in kwargs:
session = kwargs["session"]
from EPGBackupSupport import EPGBackupSupport
try:
epgbackup = EPGBackupSupport(session)
except:
debugOut("Error while initializing EPGBackupSupport:\n" + str(format_exc()), forced=True)
try:
from Plugins.Extensions.UserScripts.plugin import UserScriptsConfiguration
gUserScriptExists = True
del UserScriptsConfiguration
except:
pass
def openconfig(session, **kwargs):
try:
from EPGBackupConfig import EPGBackupConfig
session.openWithCallback(doneConfiguring, EPGBackupConfig)
except:
debugOut("Config-Import-Error:\n" + str(format_exc()), forced=True)
def showinSetup(menuid):
if menuid == "system":
return [(extPrefix + " " + _("EXTENSIONNAME_SETUP"), openconfig, "EPGBackupConfig", None)]
return []
def makeBackup(session, **kwargs):
epgbackup.makeBackup(interactive=True)
def restoreBackup(session, **kwargs):
epgbackup.forceDefaultRestore()
def doneConfiguring(session, needsRestart):
if needsRestart:
session.openWithCallback(boundFunction(restartGUICB, session), MessageBox,
_("To apply your Changes the GUI has to be restarted.\nDo you want to restart the GUI now?"),
MessageBox.TYPE_YESNO, title=_("EPGBackup Config V %s") % (PLUGIN_VERSION), timeout=30)
def restartGUICB(session, answer):
if answer is True:
session.open(TryQuitMainloop, 3)
SetupPlugDescExt = PluginDescriptor(name=extPrefix + " " + _("EXTENSIONNAME_SETUP"),
description=_("Backup and restore EPG Data, including integration of EPGRefresh-plugin"), where=PluginDescriptor.WHERE_EXTENSIONSMENU,
fnc=openconfig,
needsRestart=False)
SetupPlugDescPlug = PluginDescriptor(name=extPrefix + " " + _("EXTENSIONNAME_SETUP"),
description=_("Backup and restore EPG Data, including integration of EPGRefresh-plugin"), where=PluginDescriptor.WHERE_PLUGINMENU,
fnc=openconfig,
needsRestart=False)
MakePlugDescExt = PluginDescriptor(name=extPrefix + " " + _("Make Backup"),
description=_("Start making a Backup"), where=PluginDescriptor.WHERE_EXTENSIONSMENU,
fnc=makeBackup,
needsRestart=False)
RestorePlugDescExt = PluginDescriptor(name=extPrefix + " " + _("Restore Backup"),
description=_("Start a Restore of a Backup"), where=PluginDescriptor.WHERE_EXTENSIONSMENU,
fnc=restoreBackup,
needsRestart=False)
def AdjustPlugin(enable, PlugDescriptor):
try:
if enable:
plugins.addPlugin(PlugDescriptor)
else:
plugins.removePlugin(PlugDescriptor)
except ValueError:
pass
except:
debugOut("AdjustPlugin-Error:\n" + str(format_exc()), forced=True)
def PluginHousekeeping(configentry):
PlugDescInstall = []
PlugDescDeinstall = []
# value == extension: prior config-entry is both, so extension has not to be added
# value == both: prior config-entry is plugin, so only extension must be added
if configentry == config.plugins.epgbackup.show_setup_in:
# systemmenu don't have to be adjusted, because restart is required
if config.plugins.epgbackup.show_setup_in.value == "extension":
PlugDescDeinstall.append(SetupPlugDescPlug)
elif config.plugins.epgbackup.show_setup_in.value == "plugin":
PlugDescInstall.append(SetupPlugDescPlug)
PlugDescDeinstall.append(SetupPlugDescExt)
elif config.plugins.epgbackup.show_setup_in.value == "both":
PlugDescInstall.append(SetupPlugDescExt)
elif configentry == config.plugins.epgbackup.show_make_backup_in_extmenu:
if configentry.value:
PlugDescInstall.append(MakePlugDescExt)
else:
PlugDescDeinstall.append(MakePlugDescExt)
elif configentry == config.plugins.epgbackup.show_backuprestore_in_extmenu:
if configentry.value:
PlugDescInstall.append(RestorePlugDescExt)
else:
PlugDescDeinstall.append(RestorePlugDescExt)
for PlugDescriptor in PlugDescDeinstall:
AdjustPlugin(False, PlugDescriptor)
for PlugDescriptor in PlugDescInstall:
AdjustPlugin(True, PlugDescriptor)
config.plugins.epgbackup.show_setup_in.addNotifier(PluginHousekeeping, initial_call=False, immediate_feedback=True)
config.plugins.epgbackup.show_make_backup_in_extmenu.addNotifier(PluginHousekeeping, initial_call=False, immediate_feedback=True)
config.plugins.epgbackup.show_backuprestore_in_extmenu.addNotifier(PluginHousekeeping, initial_call=False, immediate_feedback=True)
def Plugins(**kwargs):
pluginList = [
PluginDescriptor(
where=[PluginDescriptor.WHERE_SESSIONSTART, PluginDescriptor.WHERE_AUTOSTART],
fnc=autostart)
]
if config.plugins.epgbackup.show_setup_in.value == "system":
pluginList.append(PluginDescriptor(
name=extPrefix + " " + _("EXTENSIONNAME_SETUP"),
description=_("Keep EPG-Data over Crashes"),
where=PluginDescriptor.WHERE_MENU,
fnc=showinSetup,
needsRestart=False)
)
else:
if config.plugins.epgbackup.show_setup_in.value in ("plugin", "both"):
pluginList.append(SetupPlugDescPlug)
if config.plugins.epgbackup.show_setup_in.value in ("extension", "both"):
pluginList.append(SetupPlugDescExt)
if config.plugins.epgbackup.show_make_backup_in_extmenu.value:
pluginList.append(MakePlugDescExt)
if config.plugins.epgbackup.show_backuprestore_in_extmenu.value:
pluginList.append(RestorePlugDescExt)
return pluginList
| 39.821739 | 259 | 0.780653 |
from . import _
from Components.config import config, ConfigYesNo, ConfigNumber, ConfigSelection, \
ConfigSubsection, ConfigSelectionNumber, ConfigDirectory, NoSave
from Screens.MessageBox import MessageBox
from Screens.Standby import TryQuitMainloop
from Tools.BoundFunction import boundFunction
from EPGBackupTools import debugOut, PLUGIN_VERSION
from traceback import format_exc
extPrefix = _("EXTENSIONMENU_PREFIX")
config.plugins.epgbackup = ConfigSubsection()
config.plugins.epgbackup.show_setup_in = ConfigSelection(choices=[
("extension", _("extensions")),
("plugin", _("pluginmenue")),
("both", _("extensions") + "/" + _("pluginmenue")),
("system", _("systemmenue")),
], default="both")
config.plugins.epgbackup.show_make_backup_in_extmenu = ConfigYesNo(default=False)
config.plugins.epgbackup.show_backuprestore_in_extmenu = ConfigYesNo(default=False)
config.plugins.epgbackup.backup_enabled = ConfigYesNo(default=True)
config.plugins.epgbackup.make_backup_after_unsuccess_restore = ConfigYesNo(default=True)
config.plugins.epgbackup.callAfterEPGRefresh = ConfigYesNo(default=True)
config.plugins.epgbackup.backupSaveInterval = ConfigSelection(choices=[
("-1", _("backup timer disabled")),
("30", _("30 minutes")),
("60", _("1 hour")),
("300", _("6 hours")),
("1200", _("1 day")),
], default="-1")
config.plugins.epgbackup.show_messages_background = ConfigYesNo(default=True)
config.plugins.epgbackup.filesize_valid = ConfigSelectionNumber(min=1,
max=20, stepwidth=1, default=3, wraparound=True)
config.plugins.epgbackup.timespan_valid = ConfigNumber(default=7)
config.plugins.epgbackup.showadvancedoptions = NoSave(ConfigYesNo(default=False))
config.plugins.epgbackup.epgwrite_wait = ConfigNumber(default=3)
config.plugins.epgbackup.showin_usr_scripts = ConfigYesNo(default=True)
config.plugins.epgbackup.backup_strategy = ConfigSelection(choices=[
("youngest_before_biggest", _("Youngest before Biggest"), _("The youngest file from the saved backup-files will be restored.\nIf it is older than the current existing EPG-file and the EPG-file isn't valid then the biggest backup-file will be restored.")),
("biggest_before_youngest", _("Biggest before Youngest"), _("The biggest file from the saved backup-files will be restored.\nIf it is smaller than the current existing EPG-file and the EPG-file isn't valid then the youngest backup-file will be restored.")),
("youngest", _("Only younger"), _("The backup-file will only be restored if it is younger than the current existing EPG-file.")),
("biggest", _("Only bigger"), _("The backup-file will only be restored if it is greater than the current existing EPG-file.")),
], default="youngest_before_biggest"
)
config.plugins.epgbackup.enable_debug = ConfigYesNo(default=False)
config.plugins.epgbackup.plugin_debug_in_file = ConfigYesNo(default=False)
config.plugins.epgbackup.backup_log_dir = ConfigDirectory(default="/tmp")
config.plugins.epgbackup.max_boot_count = ConfigNumber(default=3)
try:
from Components.Language import language
from Plugins.SystemPlugins.MPHelp import registerHelp, XMLHelpReader
from Tools.Directories import resolveFilename, SCOPE_PLUGINS, fileExists
lang = language.getLanguage()[:2]
HELPPATH = resolveFilename(SCOPE_PLUGINS, "Extensions/EPGBackup")
if fileExists(HELPPATH + "/locale/" + str(lang) + "/mphelp.xml"):
helpfile = HELPPATH + "/locale/" + str(lang) + "/mphelp.xml"
else:
helpfile = HELPPATH + "/mphelp.xml"
reader = XMLHelpReader(helpfile)
epgBackuphHelp = registerHelp(*reader)
except:
debugOut("Help-Error:\n" + str(format_exc()), forced=True)
epgBackuphHelp = None
epgbackup = None
from Components.PluginComponent import plugins
from Plugins.Plugin import PluginDescriptor
gUserScriptExists = False
def autostart(reason, **kwargs):
global epgbackup
global gUserScriptExists
if reason == 0 and "session" in kwargs:
session = kwargs["session"]
from EPGBackupSupport import EPGBackupSupport
try:
epgbackup = EPGBackupSupport(session)
except:
debugOut("Error while initializing EPGBackupSupport:\n" + str(format_exc()), forced=True)
try:
from Plugins.Extensions.UserScripts.plugin import UserScriptsConfiguration
gUserScriptExists = True
del UserScriptsConfiguration
except:
pass
def openconfig(session, **kwargs):
try:
from EPGBackupConfig import EPGBackupConfig
session.openWithCallback(doneConfiguring, EPGBackupConfig)
except:
debugOut("Config-Import-Error:\n" + str(format_exc()), forced=True)
def showinSetup(menuid):
if menuid == "system":
return [(extPrefix + " " + _("EXTENSIONNAME_SETUP"), openconfig, "EPGBackupConfig", None)]
return []
def makeBackup(session, **kwargs):
epgbackup.makeBackup(interactive=True)
def restoreBackup(session, **kwargs):
epgbackup.forceDefaultRestore()
def doneConfiguring(session, needsRestart):
if needsRestart:
session.openWithCallback(boundFunction(restartGUICB, session), MessageBox,
_("To apply your Changes the GUI has to be restarted.\nDo you want to restart the GUI now?"),
MessageBox.TYPE_YESNO, title=_("EPGBackup Config V %s") % (PLUGIN_VERSION), timeout=30)
def restartGUICB(session, answer):
if answer is True:
session.open(TryQuitMainloop, 3)
SetupPlugDescExt = PluginDescriptor(name=extPrefix + " " + _("EXTENSIONNAME_SETUP"),
description=_("Backup and restore EPG Data, including integration of EPGRefresh-plugin"), where=PluginDescriptor.WHERE_EXTENSIONSMENU,
fnc=openconfig,
needsRestart=False)
SetupPlugDescPlug = PluginDescriptor(name=extPrefix + " " + _("EXTENSIONNAME_SETUP"),
description=_("Backup and restore EPG Data, including integration of EPGRefresh-plugin"), where=PluginDescriptor.WHERE_PLUGINMENU,
fnc=openconfig,
needsRestart=False)
MakePlugDescExt = PluginDescriptor(name=extPrefix + " " + _("Make Backup"),
description=_("Start making a Backup"), where=PluginDescriptor.WHERE_EXTENSIONSMENU,
fnc=makeBackup,
needsRestart=False)
RestorePlugDescExt = PluginDescriptor(name=extPrefix + " " + _("Restore Backup"),
description=_("Start a Restore of a Backup"), where=PluginDescriptor.WHERE_EXTENSIONSMENU,
fnc=restoreBackup,
needsRestart=False)
def AdjustPlugin(enable, PlugDescriptor):
try:
if enable:
plugins.addPlugin(PlugDescriptor)
else:
plugins.removePlugin(PlugDescriptor)
except ValueError:
pass
except:
debugOut("AdjustPlugin-Error:\n" + str(format_exc()), forced=True)
def PluginHousekeeping(configentry):
PlugDescInstall = []
PlugDescDeinstall = []
if configentry == config.plugins.epgbackup.show_setup_in:
if config.plugins.epgbackup.show_setup_in.value == "extension":
PlugDescDeinstall.append(SetupPlugDescPlug)
elif config.plugins.epgbackup.show_setup_in.value == "plugin":
PlugDescInstall.append(SetupPlugDescPlug)
PlugDescDeinstall.append(SetupPlugDescExt)
elif config.plugins.epgbackup.show_setup_in.value == "both":
PlugDescInstall.append(SetupPlugDescExt)
elif configentry == config.plugins.epgbackup.show_make_backup_in_extmenu:
if configentry.value:
PlugDescInstall.append(MakePlugDescExt)
else:
PlugDescDeinstall.append(MakePlugDescExt)
elif configentry == config.plugins.epgbackup.show_backuprestore_in_extmenu:
if configentry.value:
PlugDescInstall.append(RestorePlugDescExt)
else:
PlugDescDeinstall.append(RestorePlugDescExt)
for PlugDescriptor in PlugDescDeinstall:
AdjustPlugin(False, PlugDescriptor)
for PlugDescriptor in PlugDescInstall:
AdjustPlugin(True, PlugDescriptor)
config.plugins.epgbackup.show_setup_in.addNotifier(PluginHousekeeping, initial_call=False, immediate_feedback=True)
config.plugins.epgbackup.show_make_backup_in_extmenu.addNotifier(PluginHousekeeping, initial_call=False, immediate_feedback=True)
config.plugins.epgbackup.show_backuprestore_in_extmenu.addNotifier(PluginHousekeeping, initial_call=False, immediate_feedback=True)
def Plugins(**kwargs):
pluginList = [
PluginDescriptor(
where=[PluginDescriptor.WHERE_SESSIONSTART, PluginDescriptor.WHERE_AUTOSTART],
fnc=autostart)
]
if config.plugins.epgbackup.show_setup_in.value == "system":
pluginList.append(PluginDescriptor(
name=extPrefix + " " + _("EXTENSIONNAME_SETUP"),
description=_("Keep EPG-Data over Crashes"),
where=PluginDescriptor.WHERE_MENU,
fnc=showinSetup,
needsRestart=False)
)
else:
if config.plugins.epgbackup.show_setup_in.value in ("plugin", "both"):
pluginList.append(SetupPlugDescPlug)
if config.plugins.epgbackup.show_setup_in.value in ("extension", "both"):
pluginList.append(SetupPlugDescExt)
if config.plugins.epgbackup.show_make_backup_in_extmenu.value:
pluginList.append(MakePlugDescExt)
if config.plugins.epgbackup.show_backuprestore_in_extmenu.value:
pluginList.append(RestorePlugDescExt)
return pluginList
| true | true |
f725089c2e562403e45979d33cb8bab9a94933e2 | 6,399 | py | Python | OMG/env/random_load.py | Webbah/sec-for-reinforcement-learning | 19db622dce4963d25cb1b6e4ae12ddf98b6d27d2 | [
"MIT"
] | 2 | 2021-12-16T12:49:26.000Z | 2022-01-28T19:18:43.000Z | OMG/env/random_load.py | Webbah/sec-for-reinforcement-learning | 19db622dce4963d25cb1b6e4ae12ddf98b6d27d2 | [
"MIT"
] | null | null | null | OMG/env/random_load.py | Webbah/sec-for-reinforcement-learning | 19db622dce4963d25cb1b6e4ae12ddf98b6d27d2 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
from openmodelica_microgrid_gym.util import RandProcess
class RandomLoad:
def __init__(self, train_episode_length: int, ts: float, rand_process: RandProcess, loadstep_time: int = None,
load_curve: pd.DataFrame = None, bounds=None, bounds_std=None):
"""
:param max_episode_steps: number of steps per training episode (can differ from env.max_episode_steps)
:param ts: sampletime of env
:param rand_pocess: Instance of random process defines noise added to load
:param loadstep_time: number of env step where load step should happen
:param load_curve: Stored load data to sample from instead of smaple from distribution
:param bounds: Bounds to clip the sampled load data
:param bounds_std: Chosen bounds are sampled from a distribution with std=bounds_std and mean=bounds
"""
self.train_episode_length = train_episode_length
self.ts = ts
self.rand_process = rand_process
if loadstep_time is None:
self.loadstep_time = np.random.randint(0, self.train_episode_length)
else:
self.loadstep_time = loadstep_time
self.load_curve = load_curve
if bounds is None:
self.bounds = (-np.inf, np.inf)
else:
self.bounds = bounds
if bounds_std is None:
self.bounds_std = (0, 0)
else:
self.bounds_std = bounds_std
self.lowerbound_std = 0
self.upperbound_std = 0
def reset(self, loadstep_time=None):
if loadstep_time is None:
self.loadstep_time = np.random.randint(0, self.train_episode_length)
else:
self.loadstep_time = loadstep_time
def load_step(self, t, gain):
"""
Changes the load parameters
:param t:
:param gain: device parameter
:return: Sample from SP
"""
# Defines a load step after 0.01 s
if self.loadstep_time * self.ts < t <= self.loadstep_time * self.ts + self.ts:
self.rand_process.proc.mean = gain * 0.55
self.rand_process.reserve = gain * 0.55
elif t <= self.ts:
self.rand_process.proc.mean = gain
return self.rand_process.sample(t)
def clipped_step(self, t):
return np.clip(self.rand_process.sample(t),
self.bounds[0] + self.lowerbound_std,
self.bounds[1] + self.upperbound_std
)
def one_random_loadstep_per_episode(self, t):
if self.loadstep_time * self.ts < t <= self.loadstep_time * self.ts + self.ts:
# do with 100 percent propability
self.do_change(1002, 102)
# else:
# with 2 permill change drift
# self.do_change(2, 0)
return np.clip(self.rand_process.sample(t),
self.bounds[0] + self.lowerbound_std,
self.bounds[1] + self.upperbound_std
)
def give_dataframe_value(self, t, col):
"""
Gives load values from a stored dataframe (self.load_curve)
:parma t: time - represents here the row of the dataframe
:param col: colon name of the dataframe (typically str)
"""
if t < 0:
# return None
return self.load_curve[col][0]
if self.load_curve is None:
raise ValueError('No dataframe given! Please feed load class (.load_curve) with data')
return self.load_curve[col][int(t / self.ts)]
def random_load_step(self, t, event_prob: int = 2, step_prob: int = 50):
"""
Changes the load parameters applying a loadstep with 0.2% probability which is a pure step with 50 %
probability otherwise a drift. In every event the random process variance is drawn randomly [1, 150].
:param t: time
:param event_prob: probability (in pre mill) that the step event is triggered in the current step
:param step_prob: probability (in pre cent) that event is a abrupt step (drift otherwise!, random process speed
not adjustable yet
:return: Sample from SP
"""
# Changes rand process data with probability of 5% and sets new value randomly
if np.random.randint(0, 1001) < 2:
gain = np.random.randint(self.rand_process.bounds[0], self.rand_process.bounds[1])
self.rand_process.proc.mean = gain
self.rand_process.proc.vol = np.random.randint(1, 150)
self.rand_process.proc.speed = np.random.randint(10, 1200)
# define sdt for clipping once every event
# np.maximum to not allow negative values
self.lowerbound_std = np.maximum(np.random.normal(scale=self.bounds_std[0]), 0.0001)
self.upperbound_std = np.random.normal(scale=self.bounds_std[1])
# With 50% probability do a step or a drift
if np.random.randint(0, 101) < 50:
# step
self.rand_process.reserve = gain
else:
# drift -> Lower speed to allow
self.rand_process.proc.speed = np.random.randint(10, 100)
return np.clip(self.rand_process.sample(t),
self.bounds[0] + self.lowerbound_std,
self.bounds[1] + self.upperbound_std
)
def do_change(self, event_prob_permill=2, step_prob_percent=50):
if np.random.randint(0, 1001) < event_prob_permill:
gain = np.random.randint(self.rand_process.bounds[0], self.rand_process.bounds[1])
self.rand_process.proc.mean = gain
self.rand_process.proc.vol = np.random.randint(1, 150)
self.rand_process.proc.speed = np.random.randint(10, 1200)
# define sdt for clipping once every event
self.lowerbound_std = np.random.normal(scale=self.bounds_std[0])
self.upperbound_std = np.random.normal(scale=self.bounds_std[1])
# With 50% probability do a step or a drift
if np.random.randint(0, 101) < step_prob_percent:
# step
self.rand_process.reserve = gain
else:
# drift -> Lower speed to allow
self.rand_process.proc.speed = np.random.randint(10, 100)
| 41.823529 | 119 | 0.609314 | import numpy as np
import pandas as pd
from openmodelica_microgrid_gym.util import RandProcess
class RandomLoad:
def __init__(self, train_episode_length: int, ts: float, rand_process: RandProcess, loadstep_time: int = None,
load_curve: pd.DataFrame = None, bounds=None, bounds_std=None):
self.train_episode_length = train_episode_length
self.ts = ts
self.rand_process = rand_process
if loadstep_time is None:
self.loadstep_time = np.random.randint(0, self.train_episode_length)
else:
self.loadstep_time = loadstep_time
self.load_curve = load_curve
if bounds is None:
self.bounds = (-np.inf, np.inf)
else:
self.bounds = bounds
if bounds_std is None:
self.bounds_std = (0, 0)
else:
self.bounds_std = bounds_std
self.lowerbound_std = 0
self.upperbound_std = 0
def reset(self, loadstep_time=None):
if loadstep_time is None:
self.loadstep_time = np.random.randint(0, self.train_episode_length)
else:
self.loadstep_time = loadstep_time
def load_step(self, t, gain):
if self.loadstep_time * self.ts < t <= self.loadstep_time * self.ts + self.ts:
self.rand_process.proc.mean = gain * 0.55
self.rand_process.reserve = gain * 0.55
elif t <= self.ts:
self.rand_process.proc.mean = gain
return self.rand_process.sample(t)
def clipped_step(self, t):
return np.clip(self.rand_process.sample(t),
self.bounds[0] + self.lowerbound_std,
self.bounds[1] + self.upperbound_std
)
def one_random_loadstep_per_episode(self, t):
if self.loadstep_time * self.ts < t <= self.loadstep_time * self.ts + self.ts:
self.do_change(1002, 102)
return np.clip(self.rand_process.sample(t),
self.bounds[0] + self.lowerbound_std,
self.bounds[1] + self.upperbound_std
)
def give_dataframe_value(self, t, col):
if t < 0:
return self.load_curve[col][0]
if self.load_curve is None:
raise ValueError('No dataframe given! Please feed load class (.load_curve) with data')
return self.load_curve[col][int(t / self.ts)]
def random_load_step(self, t, event_prob: int = 2, step_prob: int = 50):
if np.random.randint(0, 1001) < 2:
gain = np.random.randint(self.rand_process.bounds[0], self.rand_process.bounds[1])
self.rand_process.proc.mean = gain
self.rand_process.proc.vol = np.random.randint(1, 150)
self.rand_process.proc.speed = np.random.randint(10, 1200)
self.lowerbound_std = np.maximum(np.random.normal(scale=self.bounds_std[0]), 0.0001)
self.upperbound_std = np.random.normal(scale=self.bounds_std[1])
if np.random.randint(0, 101) < 50:
self.rand_process.reserve = gain
else:
self.rand_process.proc.speed = np.random.randint(10, 100)
return np.clip(self.rand_process.sample(t),
self.bounds[0] + self.lowerbound_std,
self.bounds[1] + self.upperbound_std
)
def do_change(self, event_prob_permill=2, step_prob_percent=50):
if np.random.randint(0, 1001) < event_prob_permill:
gain = np.random.randint(self.rand_process.bounds[0], self.rand_process.bounds[1])
self.rand_process.proc.mean = gain
self.rand_process.proc.vol = np.random.randint(1, 150)
self.rand_process.proc.speed = np.random.randint(10, 1200)
self.lowerbound_std = np.random.normal(scale=self.bounds_std[0])
self.upperbound_std = np.random.normal(scale=self.bounds_std[1])
if np.random.randint(0, 101) < step_prob_percent:
self.rand_process.reserve = gain
else:
self.rand_process.proc.speed = np.random.randint(10, 100)
| true | true |
f72508f773fd8c5c239a480ae2c67e066c971dd2 | 1,265 | py | Python | api/migrations/0022_auto_20150222_0024.py | eiling/SchoolIdolAPI | a05980fdb33b143dbe2febfc1ad6cf723f025c8d | [
"Apache-2.0"
] | 65 | 2017-12-29T12:28:11.000Z | 2022-03-15T06:42:26.000Z | api/migrations/0022_auto_20150222_0024.py | eiling/SchoolIdolAPI | a05980fdb33b143dbe2febfc1ad6cf723f025c8d | [
"Apache-2.0"
] | 31 | 2017-12-18T02:03:09.000Z | 2022-01-13T00:43:35.000Z | api/migrations/0022_auto_20150222_0024.py | eiling/SchoolIdolAPI | a05980fdb33b143dbe2febfc1ad6cf723f025c8d | [
"Apache-2.0"
] | 7 | 2018-08-27T15:11:01.000Z | 2021-08-16T05:15:13.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('api', '0021_card_video_story'),
]
operations = [
migrations.AddField(
model_name='userpreferences',
name='following',
field=models.ManyToManyField(related_name='followers', to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AlterField(
model_name='account',
name='owner',
field=models.ForeignKey(related_name='accounts_set', to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AlterField(
model_name='ownedcard',
name='card',
field=models.ForeignKey(related_name='ownedcards', to='api.Card'),
preserve_default=True,
),
migrations.AlterField(
model_name='ownedcard',
name='owner_account',
field=models.ForeignKey(related_name='ownedcards', to='api.Account'),
preserve_default=True,
),
]
| 30.853659 | 96 | 0.611858 |
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('api', '0021_card_video_story'),
]
operations = [
migrations.AddField(
model_name='userpreferences',
name='following',
field=models.ManyToManyField(related_name='followers', to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AlterField(
model_name='account',
name='owner',
field=models.ForeignKey(related_name='accounts_set', to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AlterField(
model_name='ownedcard',
name='card',
field=models.ForeignKey(related_name='ownedcards', to='api.Card'),
preserve_default=True,
),
migrations.AlterField(
model_name='ownedcard',
name='owner_account',
field=models.ForeignKey(related_name='ownedcards', to='api.Account'),
preserve_default=True,
),
]
| true | true |
f725091c50677d690c2ec6cbbf02012349ecebe0 | 109,596 | py | Python | numpy/core/tests/test_datetime.py | kurtamohler/numpy | 73157efcd17da95ce984d1595ac4907233b9dbf5 | [
"BSD-3-Clause"
] | 1 | 2022-02-27T15:07:29.000Z | 2022-02-27T15:07:29.000Z | numpy/core/tests/test_datetime.py | kurtamohler/numpy | 73157efcd17da95ce984d1595ac4907233b9dbf5 | [
"BSD-3-Clause"
] | 41 | 2019-04-01T15:52:29.000Z | 2021-09-07T00:15:51.000Z | numpy/core/tests/test_datetime.py | kurtamohler/numpy | 73157efcd17da95ce984d1595ac4907233b9dbf5 | [
"BSD-3-Clause"
] | 4 | 2021-06-25T08:40:39.000Z | 2021-08-08T09:52:42.000Z |
import numpy
import numpy as np
import datetime
import pytest
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_warns, suppress_warnings,
assert_raises_regex,
)
from numpy.compat import pickle
# pytz is optional; when importable, tests can exercise concrete time zones.
try:
    from pytz import timezone as tz
except ImportError:
    _has_pytz = False
else:
    _has_pytz = True
# Provide RecursionError on interpreters that predate it by aliasing it to
# RuntimeError; evaluating the bare name raises NameError when it is missing.
try:
    RecursionError
except NameError:
    RecursionError = RuntimeError  # python < 3.5
class TestDateTime:
def test_datetime_dtype_creation(self):
for unit in ['Y', 'M', 'W', 'D',
'h', 'm', 's', 'ms', 'us',
'μs', # alias for us
'ns', 'ps', 'fs', 'as']:
dt1 = np.dtype('M8[750%s]' % unit)
assert_(dt1 == np.dtype('datetime64[750%s]' % unit))
dt2 = np.dtype('m8[%s]' % unit)
assert_(dt2 == np.dtype('timedelta64[%s]' % unit))
# Generic units shouldn't add [] to the end
assert_equal(str(np.dtype("M8")), "datetime64")
# Should be possible to specify the endianness
assert_equal(np.dtype("=M8"), np.dtype("M8"))
assert_equal(np.dtype("=M8[s]"), np.dtype("M8[s]"))
assert_(np.dtype(">M8") == np.dtype("M8") or
np.dtype("<M8") == np.dtype("M8"))
assert_(np.dtype(">M8[D]") == np.dtype("M8[D]") or
np.dtype("<M8[D]") == np.dtype("M8[D]"))
assert_(np.dtype(">M8") != np.dtype("<M8"))
assert_equal(np.dtype("=m8"), np.dtype("m8"))
assert_equal(np.dtype("=m8[s]"), np.dtype("m8[s]"))
assert_(np.dtype(">m8") == np.dtype("m8") or
np.dtype("<m8") == np.dtype("m8"))
assert_(np.dtype(">m8[D]") == np.dtype("m8[D]") or
np.dtype("<m8[D]") == np.dtype("m8[D]"))
assert_(np.dtype(">m8") != np.dtype("<m8"))
# Check that the parser rejects bad datetime types
assert_raises(TypeError, np.dtype, 'M8[badunit]')
assert_raises(TypeError, np.dtype, 'm8[badunit]')
assert_raises(TypeError, np.dtype, 'M8[YY]')
assert_raises(TypeError, np.dtype, 'm8[YY]')
assert_raises(TypeError, np.dtype, 'm4')
assert_raises(TypeError, np.dtype, 'M7')
assert_raises(TypeError, np.dtype, 'm7')
assert_raises(TypeError, np.dtype, 'M16')
assert_raises(TypeError, np.dtype, 'm16')
def test_datetime_casting_rules(self):
    """np.can_cast rules among datetime64, timedelta64, ints, floats
    and bools, for both 'safe' and 'same_kind' casting."""
    # Cannot cast safely/same_kind between timedelta and datetime
    assert_(not np.can_cast('m8', 'M8', casting='same_kind'))
    assert_(not np.can_cast('M8', 'm8', casting='same_kind'))
    assert_(not np.can_cast('m8', 'M8', casting='safe'))
    assert_(not np.can_cast('M8', 'm8', casting='safe'))
    # Can cast safely/same_kind from integer to timedelta
    assert_(np.can_cast('i8', 'm8', casting='same_kind'))
    assert_(np.can_cast('i8', 'm8', casting='safe'))
    assert_(np.can_cast('i4', 'm8', casting='same_kind'))
    assert_(np.can_cast('i4', 'm8', casting='safe'))
    assert_(np.can_cast('u4', 'm8', casting='same_kind'))
    assert_(np.can_cast('u4', 'm8', casting='safe'))
    # Cannot cast safely from unsigned integer of the same size, which
    # could overflow
    assert_(np.can_cast('u8', 'm8', casting='same_kind'))
    assert_(not np.can_cast('u8', 'm8', casting='safe'))
    # Cannot cast safely/same_kind from float to timedelta
    assert_(not np.can_cast('f4', 'm8', casting='same_kind'))
    assert_(not np.can_cast('f4', 'm8', casting='safe'))
    # Cannot cast safely/same_kind from integer to datetime
    assert_(not np.can_cast('i8', 'M8', casting='same_kind'))
    assert_(not np.can_cast('i8', 'M8', casting='safe'))
    # Cannot cast safely/same_kind from bool to datetime
    assert_(not np.can_cast('b1', 'M8', casting='same_kind'))
    assert_(not np.can_cast('b1', 'M8', casting='safe'))
    # Can cast safely/same_kind from bool to timedelta
    assert_(np.can_cast('b1', 'm8', casting='same_kind'))
    assert_(np.can_cast('b1', 'm8', casting='safe'))
    # Can cast datetime safely from months/years to days
    assert_(np.can_cast('M8[M]', 'M8[D]', casting='safe'))
    assert_(np.can_cast('M8[Y]', 'M8[D]', casting='safe'))
    # Cannot cast timedelta safely from months/years to days
    # (months and years have no fixed length in days)
    assert_(not np.can_cast('m8[M]', 'm8[D]', casting='safe'))
    assert_(not np.can_cast('m8[Y]', 'm8[D]', casting='safe'))
    # Can cast datetime same_kind from months/years to days
    assert_(np.can_cast('M8[M]', 'M8[D]', casting='same_kind'))
    assert_(np.can_cast('M8[Y]', 'M8[D]', casting='same_kind'))
    # Can't cast timedelta same_kind from months/years to days
    assert_(not np.can_cast('m8[M]', 'm8[D]', casting='same_kind'))
    assert_(not np.can_cast('m8[Y]', 'm8[D]', casting='same_kind'))
    # Can cast datetime same_kind across the date/time boundary
    assert_(np.can_cast('M8[D]', 'M8[h]', casting='same_kind'))
    # Can cast timedelta same_kind across the date/time boundary
    assert_(np.can_cast('m8[D]', 'm8[h]', casting='same_kind'))
    assert_(np.can_cast('m8[h]', 'm8[D]', casting='same_kind'))
    # Cannot cast safely if the integer multiplier doesn't divide
    assert_(not np.can_cast('M8[7h]', 'M8[3h]', casting='safe'))
    assert_(not np.can_cast('M8[3h]', 'M8[6h]', casting='safe'))
    # But can cast same_kind
    assert_(np.can_cast('M8[7h]', 'M8[3h]', casting='same_kind'))
    # Can cast safely if the integer multiplier does divide
    assert_(np.can_cast('M8[6h]', 'M8[3h]', casting='safe'))
    # We can always cast types with generic units (corresponding to NaT) to
    # more specific types
    assert_(np.can_cast('m8', 'm8[h]', casting='same_kind'))
    assert_(np.can_cast('m8', 'm8[h]', casting='safe'))
    assert_(np.can_cast('M8', 'M8[h]', casting='same_kind'))
    assert_(np.can_cast('M8', 'M8[h]', casting='safe'))
    # but not the other way around
    assert_(not np.can_cast('m8[h]', 'm8', casting='same_kind'))
    assert_(not np.can_cast('m8[h]', 'm8', casting='safe'))
    assert_(not np.can_cast('M8[h]', 'M8', casting='same_kind'))
    assert_(not np.can_cast('M8[h]', 'M8', casting='safe'))
def test_compare_generic_nat(self):
# regression tests for gh-6452
assert_(np.datetime64('NaT') !=
np.datetime64('2000') + np.timedelta64('NaT'))
assert_(np.datetime64('NaT') != np.datetime64('NaT', 'us'))
assert_(np.datetime64('NaT', 'us') != np.datetime64('NaT'))
@pytest.mark.parametrize("size", [
3, 21, 217, 1000])
def test_datetime_nat_argsort_stability(self, size):
# NaT < NaT should be False internally for
# sort stability
expected = np.arange(size)
arr = np.tile(np.datetime64('NaT'), size)
assert_equal(np.argsort(arr, kind='mergesort'), expected)
@pytest.mark.parametrize("size", [
3, 21, 217, 1000])
def test_timedelta_nat_argsort_stability(self, size):
# NaT < NaT should be False internally for
# sort stability
expected = np.arange(size)
arr = np.tile(np.timedelta64('NaT'), size)
assert_equal(np.argsort(arr, kind='mergesort'), expected)
@pytest.mark.parametrize("arr, expected", [
# the example provided in gh-12629
(['NaT', 1, 2, 3],
[1, 2, 3, 'NaT']),
# multiple NaTs
(['NaT', 9, 'NaT', -707],
[-707, 9, 'NaT', 'NaT']),
# this sort explores another code path for NaT
([1, -2, 3, 'NaT'],
[-2, 1, 3, 'NaT']),
# 2-D array
([[51, -220, 'NaT'],
[-17, 'NaT', -90]],
[[-220, 51, 'NaT'],
[-90, -17, 'NaT']]),
])
@pytest.mark.parametrize("dtype", [
'M8[ns]', 'M8[us]',
'm8[ns]', 'm8[us]'])
def test_datetime_timedelta_sort_nat(self, arr, expected, dtype):
# fix for gh-12629 and gh-15063; NaT sorting to end of array
arr = np.array(arr, dtype=dtype)
expected = np.array(expected, dtype=dtype)
arr.sort()
assert_equal(arr, expected)
def test_datetime_scalar_construction(self):
    """np.datetime64 scalar construction from strings, None, scalars,
    zero-dimensional arrays, and datetime.date/datetime objects."""
    # Construct with different units
    assert_equal(np.datetime64('1950-03-12', 'D'),
                 np.datetime64('1950-03-12'))
    assert_equal(np.datetime64('1950-03-12T13', 's'),
                 np.datetime64('1950-03-12T13', 'm'))
    # Default construction means NaT
    assert_equal(np.datetime64(), np.datetime64('NaT'))
    # Some basic strings and repr
    assert_equal(str(np.datetime64('NaT')), 'NaT')
    assert_equal(repr(np.datetime64('NaT')),
                 "numpy.datetime64('NaT')")
    assert_equal(str(np.datetime64('2011-02')), '2011-02')
    assert_equal(repr(np.datetime64('2011-02')),
                 "numpy.datetime64('2011-02')")
    # None gets constructed as NaT
    assert_equal(np.datetime64(None), np.datetime64('NaT'))
    # Default construction of NaT is in generic units
    assert_equal(np.datetime64().dtype, np.dtype('M8'))
    assert_equal(np.datetime64('NaT').dtype, np.dtype('M8'))
    # Construction from integers requires a specified unit
    assert_raises(ValueError, np.datetime64, 17)
    # When constructing from a scalar or zero-dimensional array,
    # it either keeps the units or you can override them.
    a = np.datetime64('2000-03-18T16', 'h')
    b = np.array('2000-03-18T16', dtype='M8[h]')
    assert_equal(a.dtype, np.dtype('M8[h]'))
    assert_equal(b.dtype, np.dtype('M8[h]'))
    assert_equal(np.datetime64(a), a)
    assert_equal(np.datetime64(a).dtype, np.dtype('M8[h]'))
    assert_equal(np.datetime64(b), a)
    assert_equal(np.datetime64(b).dtype, np.dtype('M8[h]'))
    assert_equal(np.datetime64(a, 's'), a)
    assert_equal(np.datetime64(a, 's').dtype, np.dtype('M8[s]'))
    assert_equal(np.datetime64(b, 's'), a)
    assert_equal(np.datetime64(b, 's').dtype, np.dtype('M8[s]'))
    # Construction from datetime.date
    assert_equal(np.datetime64('1945-03-25'),
                 np.datetime64(datetime.date(1945, 3, 25)))
    assert_equal(np.datetime64('2045-03-25', 'D'),
                 np.datetime64(datetime.date(2045, 3, 25), 'D'))
    # Construction from datetime.datetime
    assert_equal(np.datetime64('1980-01-25T14:36:22.5'),
                 np.datetime64(datetime.datetime(1980, 1, 25,
                                                 14, 36, 22, 500000)))
    # Construction with time units from a date is okay
    assert_equal(np.datetime64('1920-03-13', 'h'),
                 np.datetime64('1920-03-13T00'))
    assert_equal(np.datetime64('1920-03', 'm'),
                 np.datetime64('1920-03-01T00:00'))
    assert_equal(np.datetime64('1920', 's'),
                 np.datetime64('1920-01-01T00:00:00'))
    assert_equal(np.datetime64(datetime.date(2045, 3, 25), 'ms'),
                 np.datetime64('2045-03-25T00:00:00.000'))
    # Construction with date units from a datetime is also okay;
    # sub-unit precision is truncated
    assert_equal(np.datetime64('1920-03-13T18', 'D'),
                 np.datetime64('1920-03-13'))
    assert_equal(np.datetime64('1920-03-13T18:33:12', 'M'),
                 np.datetime64('1920-03'))
    assert_equal(np.datetime64('1920-03-13T18:33:12.5', 'Y'),
                 np.datetime64('1920'))
def test_datetime_scalar_construction_timezone(self):
# verify that supplying an explicit timezone works, but is deprecated
with assert_warns(DeprecationWarning):
assert_equal(np.datetime64('2000-01-01T00Z'),
np.datetime64('2000-01-01T00'))
with assert_warns(DeprecationWarning):
assert_equal(np.datetime64('2000-01-01T00-08'),
np.datetime64('2000-01-01T08'))
def test_datetime_array_find_type(self):
    """dtype inference when building arrays from datetime-like objects."""
    dt = np.datetime64('1970-01-01', 'M')
    arr = np.array([dt])
    assert_equal(arr.dtype, np.dtype('M8[M]'))
    # at the moment, we don't automatically convert these to datetime64
    dt = datetime.date(1970, 1, 1)
    arr = np.array([dt])
    assert_equal(arr.dtype, np.dtype('O'))
    dt = datetime.datetime(1970, 1, 1, 12, 30, 40)
    arr = np.array([dt])
    assert_equal(arr.dtype, np.dtype('O'))
    # find "supertype" for non-dates and dates
    b = np.bool_(True)
    dm = np.datetime64('1970-01-01', 'M')
    d = datetime.date(1970, 1, 1)
    dt = datetime.datetime(1970, 1, 1, 12, 30, 40)
    # mixing bool with any datetime-like falls back to object dtype
    arr = np.array([b, dm])
    assert_equal(arr.dtype, np.dtype('O'))
    arr = np.array([b, d])
    assert_equal(arr.dtype, np.dtype('O'))
    arr = np.array([b, dt])
    assert_equal(arr.dtype, np.dtype('O'))
    # an explicit astype does convert Python date/datetime objects
    arr = np.array([d, d]).astype('datetime64')
    assert_equal(arr.dtype, np.dtype('M8[D]'))
    arr = np.array([dt, dt]).astype('datetime64')
    assert_equal(arr.dtype, np.dtype('M8[us]'))
@pytest.mark.parametrize("unit", [
# test all date / time units and use
# "generic" to select generic unit
("Y"), ("M"), ("W"), ("D"), ("h"), ("m"),
("s"), ("ms"), ("us"), ("ns"), ("ps"),
("fs"), ("as"), ("generic") ])
def test_timedelta_np_int_construction(self, unit):
# regression test for gh-7617
if unit != "generic":
assert_equal(np.timedelta64(np.int64(123), unit),
np.timedelta64(123, unit))
else:
assert_equal(np.timedelta64(np.int64(123)),
np.timedelta64(123))
def test_timedelta_scalar_construction(self):
    """np.timedelta64 scalar construction from ints, None, scalars,
    zero-dimensional arrays, and datetime.timedelta objects."""
    # Construct with different units
    assert_equal(np.timedelta64(7, 'D'),
                 np.timedelta64(1, 'W'))
    assert_equal(np.timedelta64(120, 's'),
                 np.timedelta64(2, 'm'))
    # Default construction means 0
    assert_equal(np.timedelta64(), np.timedelta64(0))
    # None gets constructed as NaT
    assert_equal(np.timedelta64(None), np.timedelta64('NaT'))
    # Some basic strings and repr
    assert_equal(str(np.timedelta64('NaT')), 'NaT')
    assert_equal(repr(np.timedelta64('NaT')),
                 "numpy.timedelta64('NaT')")
    assert_equal(str(np.timedelta64(3, 's')), '3 seconds')
    assert_equal(repr(np.timedelta64(-3, 's')),
                 "numpy.timedelta64(-3,'s')")
    assert_equal(repr(np.timedelta64(12)),
                 "numpy.timedelta64(12)")
    # Construction from an integer produces generic units
    assert_equal(np.timedelta64(12).dtype, np.dtype('m8'))
    # When constructing from a scalar or zero-dimensional array,
    # it either keeps the units or you can override them.
    a = np.timedelta64(2, 'h')
    b = np.array(2, dtype='m8[h]')
    assert_equal(a.dtype, np.dtype('m8[h]'))
    assert_equal(b.dtype, np.dtype('m8[h]'))
    assert_equal(np.timedelta64(a), a)
    assert_equal(np.timedelta64(a).dtype, np.dtype('m8[h]'))
    assert_equal(np.timedelta64(b), a)
    assert_equal(np.timedelta64(b).dtype, np.dtype('m8[h]'))
    assert_equal(np.timedelta64(a, 's'), a)
    assert_equal(np.timedelta64(a, 's').dtype, np.dtype('m8[s]'))
    assert_equal(np.timedelta64(b, 's'), a)
    assert_equal(np.timedelta64(b, 's').dtype, np.dtype('m8[s]'))
    # Construction from datetime.timedelta
    assert_equal(np.timedelta64(5, 'D'),
                 np.timedelta64(datetime.timedelta(days=5)))
    assert_equal(np.timedelta64(102347621, 's'),
                 np.timedelta64(datetime.timedelta(seconds=102347621)))
    assert_equal(np.timedelta64(-10234760000, 'us'),
                 np.timedelta64(datetime.timedelta(
                     microseconds=-10234760000)))
    assert_equal(np.timedelta64(10234760000, 'us'),
                 np.timedelta64(datetime.timedelta(
                     microseconds=10234760000)))
    assert_equal(np.timedelta64(1023476, 'ms'),
                 np.timedelta64(datetime.timedelta(milliseconds=1023476)))
    assert_equal(np.timedelta64(10, 'm'),
                 np.timedelta64(datetime.timedelta(minutes=10)))
    assert_equal(np.timedelta64(281, 'h'),
                 np.timedelta64(datetime.timedelta(hours=281)))
    assert_equal(np.timedelta64(28, 'W'),
                 np.timedelta64(datetime.timedelta(weeks=28)))
    # Cannot construct across nonlinear time unit boundaries
    # (months/years have no fixed length in days or seconds)
    a = np.timedelta64(3, 's')
    assert_raises(TypeError, np.timedelta64, a, 'M')
    assert_raises(TypeError, np.timedelta64, a, 'Y')
    a = np.timedelta64(6, 'M')
    assert_raises(TypeError, np.timedelta64, a, 'D')
    assert_raises(TypeError, np.timedelta64, a, 'h')
    a = np.timedelta64(1, 'Y')
    assert_raises(TypeError, np.timedelta64, a, 'D')
    assert_raises(TypeError, np.timedelta64, a, 'm')
    a = datetime.timedelta(seconds=3)
    assert_raises(TypeError, np.timedelta64, a, 'M')
    assert_raises(TypeError, np.timedelta64, a, 'Y')
    a = datetime.timedelta(weeks=3)
    assert_raises(TypeError, np.timedelta64, a, 'M')
    assert_raises(TypeError, np.timedelta64, a, 'Y')
    a = datetime.timedelta()
    assert_raises(TypeError, np.timedelta64, a, 'M')
    assert_raises(TypeError, np.timedelta64, a, 'Y')
def test_timedelta_object_array_conversion(self):
# Regression test for gh-11096
inputs = [datetime.timedelta(28),
datetime.timedelta(30),
datetime.timedelta(31)]
expected = np.array([28, 30, 31], dtype='timedelta64[D]')
actual = np.array(inputs, dtype='timedelta64[D]')
assert_equal(expected, actual)
def test_timedelta_0_dim_object_array_conversion(self):
# Regression test for gh-11151
test = np.array(datetime.timedelta(seconds=20))
actual = test.astype(np.timedelta64)
# expected value from the array constructor workaround
# described in above issue
expected = np.array(datetime.timedelta(seconds=20),
np.timedelta64)
assert_equal(actual, expected)
def test_timedelta_scalar_construction_units(self):
    """Unit detection from ISO strings and Python date/datetime objects.

    NOTE(review): despite the name, this exercises np.datetime64 unit
    detection (fractional-digit count selects ms/us/ns/ps/fs/as).
    """
    # String construction detecting units
    assert_equal(np.datetime64('2010').dtype,
                 np.dtype('M8[Y]'))
    assert_equal(np.datetime64('2010-03').dtype,
                 np.dtype('M8[M]'))
    assert_equal(np.datetime64('2010-03-12').dtype,
                 np.dtype('M8[D]'))
    assert_equal(np.datetime64('2010-03-12T17').dtype,
                 np.dtype('M8[h]'))
    assert_equal(np.datetime64('2010-03-12T17:15').dtype,
                 np.dtype('M8[m]'))
    assert_equal(np.datetime64('2010-03-12T17:15:08').dtype,
                 np.dtype('M8[s]'))
    # 1-3 fractional digits -> milliseconds
    assert_equal(np.datetime64('2010-03-12T17:15:08.1').dtype,
                 np.dtype('M8[ms]'))
    assert_equal(np.datetime64('2010-03-12T17:15:08.12').dtype,
                 np.dtype('M8[ms]'))
    assert_equal(np.datetime64('2010-03-12T17:15:08.123').dtype,
                 np.dtype('M8[ms]'))
    # 4-6 fractional digits -> microseconds
    assert_equal(np.datetime64('2010-03-12T17:15:08.1234').dtype,
                 np.dtype('M8[us]'))
    assert_equal(np.datetime64('2010-03-12T17:15:08.12345').dtype,
                 np.dtype('M8[us]'))
    assert_equal(np.datetime64('2010-03-12T17:15:08.123456').dtype,
                 np.dtype('M8[us]'))
    # 7-9 fractional digits -> nanoseconds
    assert_equal(np.datetime64('1970-01-01T00:00:02.1234567').dtype,
                 np.dtype('M8[ns]'))
    assert_equal(np.datetime64('1970-01-01T00:00:02.12345678').dtype,
                 np.dtype('M8[ns]'))
    assert_equal(np.datetime64('1970-01-01T00:00:02.123456789').dtype,
                 np.dtype('M8[ns]'))
    # 10-12 fractional digits -> picoseconds
    assert_equal(np.datetime64('1970-01-01T00:00:02.1234567890').dtype,
                 np.dtype('M8[ps]'))
    assert_equal(np.datetime64('1970-01-01T00:00:02.12345678901').dtype,
                 np.dtype('M8[ps]'))
    assert_equal(np.datetime64('1970-01-01T00:00:02.123456789012').dtype,
                 np.dtype('M8[ps]'))
    # 13-15 fractional digits -> femtoseconds
    assert_equal(np.datetime64(
        '1970-01-01T00:00:02.1234567890123').dtype,
        np.dtype('M8[fs]'))
    assert_equal(np.datetime64(
        '1970-01-01T00:00:02.12345678901234').dtype,
        np.dtype('M8[fs]'))
    assert_equal(np.datetime64(
        '1970-01-01T00:00:02.123456789012345').dtype,
        np.dtype('M8[fs]'))
    # 16-18 fractional digits -> attoseconds
    assert_equal(np.datetime64(
        '1970-01-01T00:00:02.1234567890123456').dtype,
        np.dtype('M8[as]'))
    assert_equal(np.datetime64(
        '1970-01-01T00:00:02.12345678901234567').dtype,
        np.dtype('M8[as]'))
    assert_equal(np.datetime64(
        '1970-01-01T00:00:02.123456789012345678').dtype,
        np.dtype('M8[as]'))
    # Python date object
    assert_equal(np.datetime64(datetime.date(2010, 4, 16)).dtype,
                 np.dtype('M8[D]'))
    # Python datetime object
    assert_equal(np.datetime64(
        datetime.datetime(2010, 4, 16, 13, 45, 18)).dtype,
        np.dtype('M8[us]'))
    # 'today' special value
    assert_equal(np.datetime64('today').dtype,
                 np.dtype('M8[D]'))
    # 'now' special value
    assert_equal(np.datetime64('now').dtype,
                 np.dtype('M8[s]'))
def test_datetime_nat_casting(self):
    """NaT survives casts between datetime units, and NaN floats and
    complexes cast to NaT for both datetime64 and timedelta64."""
    a = np.array('NaT', dtype='M8[D]')
    b = np.datetime64('NaT', '[D]')
    # Arrays
    assert_equal(a.astype('M8[s]'), np.array('NaT', dtype='M8[s]'))
    assert_equal(a.astype('M8[ms]'), np.array('NaT', dtype='M8[ms]'))
    assert_equal(a.astype('M8[M]'), np.array('NaT', dtype='M8[M]'))
    assert_equal(a.astype('M8[Y]'), np.array('NaT', dtype='M8[Y]'))
    assert_equal(a.astype('M8[W]'), np.array('NaT', dtype='M8[W]'))
    # Scalars -> Scalars
    assert_equal(np.datetime64(b, '[s]'), np.datetime64('NaT', '[s]'))
    assert_equal(np.datetime64(b, '[ms]'), np.datetime64('NaT', '[ms]'))
    assert_equal(np.datetime64(b, '[M]'), np.datetime64('NaT', '[M]'))
    assert_equal(np.datetime64(b, '[Y]'), np.datetime64('NaT', '[Y]'))
    assert_equal(np.datetime64(b, '[W]'), np.datetime64('NaT', '[W]'))
    # Arrays -> Scalars
    assert_equal(np.datetime64(a, '[s]'), np.datetime64('NaT', '[s]'))
    assert_equal(np.datetime64(a, '[ms]'), np.datetime64('NaT', '[ms]'))
    assert_equal(np.datetime64(a, '[M]'), np.datetime64('NaT', '[M]'))
    assert_equal(np.datetime64(a, '[Y]'), np.datetime64('NaT', '[Y]'))
    assert_equal(np.datetime64(a, '[W]'), np.datetime64('NaT', '[W]'))
    # NaN -> NaT, across every float and complex precision
    nan = np.array([np.nan] * 8)
    fnan = nan.astype('f')
    lnan = nan.astype('g')
    cnan = nan.astype('D')
    cfnan = nan.astype('F')
    clnan = nan.astype('G')
    nat = np.array([np.datetime64('NaT')] * 8)
    assert_equal(nan.astype('M8[ns]'), nat)
    assert_equal(fnan.astype('M8[ns]'), nat)
    assert_equal(lnan.astype('M8[ns]'), nat)
    assert_equal(cnan.astype('M8[ns]'), nat)
    assert_equal(cfnan.astype('M8[ns]'), nat)
    assert_equal(clnan.astype('M8[ns]'), nat)
    nat = np.array([np.timedelta64('NaT')] * 8)
    assert_equal(nan.astype('timedelta64[ns]'), nat)
    assert_equal(fnan.astype('timedelta64[ns]'), nat)
    assert_equal(lnan.astype('timedelta64[ns]'), nat)
    assert_equal(cnan.astype('timedelta64[ns]'), nat)
    assert_equal(cfnan.astype('timedelta64[ns]'), nat)
    assert_equal(clnan.astype('timedelta64[ns]'), nat)
def test_days_creation(self):
assert_equal(np.array('1599', dtype='M8[D]').astype('i8'),
(1600-1970)*365 - (1972-1600)/4 + 3 - 365)
assert_equal(np.array('1600', dtype='M8[D]').astype('i8'),
(1600-1970)*365 - (1972-1600)/4 + 3)
assert_equal(np.array('1601', dtype='M8[D]').astype('i8'),
(1600-1970)*365 - (1972-1600)/4 + 3 + 366)
assert_equal(np.array('1900', dtype='M8[D]').astype('i8'),
(1900-1970)*365 - (1970-1900)//4)
assert_equal(np.array('1901', dtype='M8[D]').astype('i8'),
(1900-1970)*365 - (1970-1900)//4 + 365)
assert_equal(np.array('1967', dtype='M8[D]').astype('i8'), -3*365 - 1)
assert_equal(np.array('1968', dtype='M8[D]').astype('i8'), -2*365 - 1)
assert_equal(np.array('1969', dtype='M8[D]').astype('i8'), -1*365)
assert_equal(np.array('1970', dtype='M8[D]').astype('i8'), 0*365)
assert_equal(np.array('1971', dtype='M8[D]').astype('i8'), 1*365)
assert_equal(np.array('1972', dtype='M8[D]').astype('i8'), 2*365)
assert_equal(np.array('1973', dtype='M8[D]').astype('i8'), 3*365 + 1)
assert_equal(np.array('1974', dtype='M8[D]').astype('i8'), 4*365 + 1)
assert_equal(np.array('2000', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4)
assert_equal(np.array('2001', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4 + 366)
assert_equal(np.array('2400', dtype='M8[D]').astype('i8'),
(2400 - 1970)*365 + (2400 - 1972)//4 - 3)
assert_equal(np.array('2401', dtype='M8[D]').astype('i8'),
(2400 - 1970)*365 + (2400 - 1972)//4 - 3 + 366)
assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('i8'),
(1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 28)
assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('i8'),
(1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 29)
assert_equal(np.array('2000-02-29', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 28)
assert_equal(np.array('2000-03-01', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 29)
assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4 + 366 + 31 + 28 + 21)
def test_days_to_pydate(self):
assert_equal(np.array('1599', dtype='M8[D]').astype('O'),
datetime.date(1599, 1, 1))
assert_equal(np.array('1600', dtype='M8[D]').astype('O'),
datetime.date(1600, 1, 1))
assert_equal(np.array('1601', dtype='M8[D]').astype('O'),
datetime.date(1601, 1, 1))
assert_equal(np.array('1900', dtype='M8[D]').astype('O'),
datetime.date(1900, 1, 1))
assert_equal(np.array('1901', dtype='M8[D]').astype('O'),
datetime.date(1901, 1, 1))
assert_equal(np.array('2000', dtype='M8[D]').astype('O'),
datetime.date(2000, 1, 1))
assert_equal(np.array('2001', dtype='M8[D]').astype('O'),
datetime.date(2001, 1, 1))
assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('O'),
datetime.date(1600, 2, 29))
assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('O'),
datetime.date(1600, 3, 1))
assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('O'),
datetime.date(2001, 3, 22))
def test_dtype_comparison(self):
assert_(not (np.dtype('M8[us]') == np.dtype('M8[ms]')))
assert_(np.dtype('M8[us]') != np.dtype('M8[ms]'))
assert_(np.dtype('M8[2D]') != np.dtype('M8[D]'))
assert_(np.dtype('M8[D]') != np.dtype('M8[2D]'))
def test_pydatetime_creation(self):
    """M8[D] arrays built from strings and datetime.date objects agree."""
    a = np.array(['1960-03-12', datetime.date(1960, 3, 12)], dtype='M8[D]')
    assert_equal(a[0], a[1])
    a = np.array(['1999-12-31', datetime.date(1999, 12, 31)], dtype='M8[D]')
    assert_equal(a[0], a[1])
    a = np.array(['2000-01-01', datetime.date(2000, 1, 1)], dtype='M8[D]')
    assert_equal(a[0], a[1])
    # Will fail if the date changes during the exact right moment
    a = np.array(['today', datetime.date.today()], dtype='M8[D]')
    assert_equal(a[0], a[1])
    # datetime.datetime.now() returns local time, not UTC
    #a = np.array(['now', datetime.datetime.now()], dtype='M8[s]')
    #assert_equal(a[0], a[1])
    # we can give a datetime.date time units
    assert_equal(np.array(datetime.date(1960, 3, 12), dtype='M8[s]'),
                 np.array(np.datetime64('1960-03-12T00:00:00')))
def test_datetime_string_conversion(self):
    """Conversion among bytes, unicode, and datetime64 arrays, both
    via astype and via broadcast assignment."""
    a = ['2011-03-16', '1920-01-01', '2013-05-19']
    str_a = np.array(a, dtype='S')
    uni_a = np.array(a, dtype='U')
    dt_a = np.array(a, dtype='M')
    # String to datetime
    assert_equal(dt_a, str_a.astype('M'))
    assert_equal(dt_a.dtype, str_a.astype('M').dtype)
    dt_b = np.empty_like(dt_a)
    dt_b[...] = str_a
    assert_equal(dt_a, dt_b)
    # Datetime to string ('S0' lets numpy pick the itemsize)
    assert_equal(str_a, dt_a.astype('S0'))
    str_b = np.empty_like(str_a)
    str_b[...] = dt_a
    assert_equal(str_a, str_b)
    # Unicode to datetime
    assert_equal(dt_a, uni_a.astype('M'))
    assert_equal(dt_a.dtype, uni_a.astype('M').dtype)
    dt_b = np.empty_like(dt_a)
    dt_b[...] = uni_a
    assert_equal(dt_a, dt_b)
    # Datetime to unicode
    assert_equal(uni_a, dt_a.astype('U'))
    uni_b = np.empty_like(uni_a)
    uni_b[...] = dt_a
    assert_equal(uni_a, uni_b)
    # Datetime to long string - gh-9712
    assert_equal(str_a, dt_a.astype((np.string_, 128)))
    str_b = np.empty(str_a.shape, dtype=(np.string_, 128))
    str_b[...] = dt_a
    assert_equal(str_a, str_b)
def test_datetime_array_str(self):
    """str() and array2string formatting of datetime64 arrays."""
    a = np.array(['2011-03-16', '1920-01-01', '2013-05-19'], dtype='M')
    assert_equal(str(a), "['2011-03-16' '1920-01-01' '2013-05-19']")
    # a custom 'datetime' formatter is honored by array2string
    a = np.array(['2011-03-16T13:55', '1920-01-01T03:12'], dtype='M')
    assert_equal(np.array2string(a, separator=', ',
                 formatter={'datetime': lambda x:
                            "'%s'" % np.datetime_as_string(x, timezone='UTC')}),
                 "['2011-03-16T13:55Z', '1920-01-01T03:12Z']")
    # Check that one NaT doesn't corrupt subsequent entries
    a = np.array(['2010', 'NaT', '2030']).astype('M')
    assert_equal(str(a), "['2010' 'NaT' '2030']")
def test_timedelta_array_str(self):
    """str() formatting of timedelta64 arrays, including NaT entries."""
    a = np.array([-1, 0, 100], dtype='m')
    assert_equal(str(a), "[ -1 0 100]")
    a = np.array(['NaT', 'NaT'], dtype='m')
    assert_equal(str(a), "['NaT' 'NaT']")
    # Check right-alignment with NaTs
    a = np.array([-1, 'NaT', 0], dtype='m')
    assert_equal(str(a), "[ -1 'NaT' 0]")
    a = np.array([-1, 'NaT', 1234567], dtype='m')
    assert_equal(str(a), "[ -1 'NaT' 1234567]")
    # Test with other byteorder:
    a = np.array([-1, 'NaT', 1234567], dtype='>m')
    assert_equal(str(a), "[ -1 'NaT' 1234567]")
    a = np.array([-1, 'NaT', 1234567], dtype='<m')
    assert_equal(str(a), "[ -1 'NaT' 1234567]")
def test_pickle(self):
    """Pickle round-trips for datetime dtypes and scalars, plus loading
    of pickles produced by numpy 1.6."""
    # Check that pickle roundtripping works
    for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
        dt = np.dtype('M8[7D]')
        assert_equal(pickle.loads(pickle.dumps(dt, protocol=proto)), dt)
        dt = np.dtype('M8[W]')
        assert_equal(pickle.loads(pickle.dumps(dt, protocol=proto)), dt)
        scalar = np.datetime64('2016-01-01T00:00:00.000000000')
        assert_equal(pickle.loads(pickle.dumps(scalar, protocol=proto)),
                     scalar)
        delta = scalar - np.datetime64('2015-01-01T00:00:00.000000000')
        assert_equal(pickle.loads(pickle.dumps(delta, protocol=proto)),
                     delta)
    # Check that loading pickles from 1.6 works
    # (hand-crafted protocol-0 payloads for <M8[7D], <M8[W], >M8[us])
    pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
          b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'D'\np6\n" + \
          b"I7\nI1\nI1\ntp7\ntp8\ntp9\nb."
    assert_equal(pickle.loads(pkl), np.dtype('<M8[7D]'))
    pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
          b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'W'\np6\n" + \
          b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb."
    assert_equal(pickle.loads(pkl), np.dtype('<M8[W]'))
    pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
          b"(I4\nS'>'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n" + \
          b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb."
    assert_equal(pickle.loads(pkl), np.dtype('>M8[us]'))
def test_setstate(self):
    "Verify that datetime dtype __setstate__ can handle bad arguments"
    dt = np.dtype('>M8[us]')
    # wrong metadata element type -> ValueError, dtype left unchanged
    assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1))
    assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
    # malformed datetime metadata tuple -> TypeError, dtype left unchanged
    assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx')))
    assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
def test_dtype_promotion(self):
    """promote_types on datetime/timedelta dtypes takes the metadata
    gcd, and raises where no reasonable common unit exists."""
    # datetime <op> datetime computes the metadata gcd
    # timedelta <op> timedelta computes the metadata gcd
    for mM in ['m', 'M']:
        assert_equal(
            np.promote_types(np.dtype(mM+'8[2Y]'), np.dtype(mM+'8[2Y]')),
            np.dtype(mM+'8[2Y]'))
        assert_equal(
            np.promote_types(np.dtype(mM+'8[12Y]'), np.dtype(mM+'8[15Y]')),
            np.dtype(mM+'8[3Y]'))
        assert_equal(
            np.promote_types(np.dtype(mM+'8[62M]'), np.dtype(mM+'8[24M]')),
            np.dtype(mM+'8[2M]'))
        assert_equal(
            np.promote_types(np.dtype(mM+'8[1W]'), np.dtype(mM+'8[2D]')),
            np.dtype(mM+'8[1D]'))
        assert_equal(
            np.promote_types(np.dtype(mM+'8[W]'), np.dtype(mM+'8[13s]')),
            np.dtype(mM+'8[s]'))
        assert_equal(
            np.promote_types(np.dtype(mM+'8[13W]'), np.dtype(mM+'8[49s]')),
            np.dtype(mM+'8[7s]'))
    # timedelta <op> timedelta raises when there is no reasonable gcd
    # (year/month vs day/week have no fixed conversion factor)
    assert_raises(TypeError, np.promote_types,
                  np.dtype('m8[Y]'), np.dtype('m8[D]'))
    assert_raises(TypeError, np.promote_types,
                  np.dtype('m8[M]'), np.dtype('m8[W]'))
    # timedelta and float cannot be safely cast with each other
    assert_raises(TypeError, np.promote_types, "float32", "m8")
    assert_raises(TypeError, np.promote_types, "m8", "float32")
    assert_raises(TypeError, np.promote_types, "uint64", "m8")
    assert_raises(TypeError, np.promote_types, "m8", "uint64")
    # timedelta <op> timedelta may overflow with big unit ranges
    assert_raises(OverflowError, np.promote_types,
                  np.dtype('m8[W]'), np.dtype('m8[fs]'))
    assert_raises(OverflowError, np.promote_types,
                  np.dtype('m8[s]'), np.dtype('m8[as]'))
def test_cast_overflow(self):
# gh-4486
def cast():
numpy.datetime64("1971-01-01 00:00:00.000000000000000").astype("<M8[D]")
assert_raises(OverflowError, cast)
def cast2():
numpy.datetime64("2014").astype("<M8[fs]")
assert_raises(OverflowError, cast2)
def test_pyobject_roundtrip(self):
    """Datetime arrays round-trip through an object-dtype intermediate."""
    # All datetime types should be able to roundtrip through object
    a = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0,
                  -1020040340, -2942398, -1, 0, 1, 234523453, 1199164176],
                 dtype=np.int64)
    # With date units
    for unit in ['M8[D]', 'M8[W]', 'M8[M]', 'M8[Y]']:
        b = a.copy().view(dtype=unit)
        # dates spanning negative years through year 10000, plus NaT
        b[0] = '-0001-01-01'
        b[1] = '-0001-12-31'
        b[2] = '0000-01-01'
        b[3] = '0001-01-01'
        b[4] = '1969-12-31'
        b[5] = '1970-01-01'
        b[6] = '9999-12-31'
        b[7] = '10000-01-01'
        b[8] = 'NaT'
        assert_equal(b.astype(object).astype(unit), b,
                     "Error roundtripping unit %s" % unit)
    # With time units (including non-unit multipliers)
    for unit in ['M8[as]', 'M8[16fs]', 'M8[ps]', 'M8[us]',
                 'M8[300as]', 'M8[20us]']:
        b = a.copy().view(dtype=unit)
        b[0] = '-0001-01-01T00'
        b[1] = '-0001-12-31T00'
        b[2] = '0000-01-01T00'
        b[3] = '0001-01-01T00'
        b[4] = '1969-12-31T23:59:59.999999'
        b[5] = '1970-01-01T00'
        b[6] = '9999-12-31T23:59:59.999999'
        b[7] = '10000-01-01T00'
        b[8] = 'NaT'
        assert_equal(b.astype(object).astype(unit), b,
                     "Error roundtripping unit %s" % unit)
def test_month_truncation(self):
# Make sure that months are truncating correctly
assert_equal(np.array('1945-03-01', dtype='M8[M]'),
np.array('1945-03-31', dtype='M8[M]'))
assert_equal(np.array('1969-11-01', dtype='M8[M]'),
np.array('1969-11-30T23:59:59.99999', dtype='M').astype('M8[M]'))
assert_equal(np.array('1969-12-01', dtype='M8[M]'),
np.array('1969-12-31T23:59:59.99999', dtype='M').astype('M8[M]'))
assert_equal(np.array('1970-01-01', dtype='M8[M]'),
np.array('1970-01-31T23:59:59.99999', dtype='M').astype('M8[M]'))
assert_equal(np.array('1980-02-01', dtype='M8[M]'),
np.array('1980-02-29T23:59:59.99999', dtype='M').astype('M8[M]'))
def test_different_unit_comparison(self):
    """The same moment expressed in different units compares equal."""
    # Check some years with date units
    for unit1 in ['Y', 'M', 'D']:
        dt1 = np.dtype('M8[%s]' % unit1)
        for unit2 in ['Y', 'M', 'D']:
            dt2 = np.dtype('M8[%s]' % unit2)
            assert_equal(np.array('1945', dtype=dt1),
                         np.array('1945', dtype=dt2))
            assert_equal(np.array('1970', dtype=dt1),
                         np.array('1970', dtype=dt2))
            assert_equal(np.array('9999', dtype=dt1),
                         np.array('9999', dtype=dt2))
            assert_equal(np.array('10000', dtype=dt1),
                         np.array('10000-01-01', dtype=dt2))
            assert_equal(np.datetime64('1945', unit1),
                         np.datetime64('1945', unit2))
            assert_equal(np.datetime64('1970', unit1),
                         np.datetime64('1970', unit2))
            assert_equal(np.datetime64('9999', unit1),
                         np.datetime64('9999', unit2))
            assert_equal(np.datetime64('10000', unit1),
                         np.datetime64('10000-01-01', unit2))
    # Check some datetimes with time units (incl. multiplied units)
    for unit1 in ['6h', 'h', 'm', 's', '10ms', 'ms', 'us']:
        dt1 = np.dtype('M8[%s]' % unit1)
        for unit2 in ['h', 'm', 's', 'ms', 'us']:
            dt2 = np.dtype('M8[%s]' % unit2)
            assert_equal(np.array('1945-03-12T18', dtype=dt1),
                         np.array('1945-03-12T18', dtype=dt2))
            assert_equal(np.array('1970-03-12T18', dtype=dt1),
                         np.array('1970-03-12T18', dtype=dt2))
            assert_equal(np.array('9999-03-12T18', dtype=dt1),
                         np.array('9999-03-12T18', dtype=dt2))
            assert_equal(np.array('10000-01-01T00', dtype=dt1),
                         np.array('10000-01-01T00', dtype=dt2))
            assert_equal(np.datetime64('1945-03-12T18', unit1),
                         np.datetime64('1945-03-12T18', unit2))
            assert_equal(np.datetime64('1970-03-12T18', unit1),
                         np.datetime64('1970-03-12T18', unit2))
            assert_equal(np.datetime64('9999-03-12T18', unit1),
                         np.datetime64('9999-03-12T18', unit2))
            assert_equal(np.datetime64('10000-01-01T00', unit1),
                         np.datetime64('10000-01-01T00', unit2))
    # Check some days with units that won't overflow
    for unit1 in ['D', '12h', 'h', 'm', 's', '4s', 'ms', 'us']:
        dt1 = np.dtype('M8[%s]' % unit1)
        for unit2 in ['D', 'h', 'm', 's', 'ms', 'us']:
            dt2 = np.dtype('M8[%s]' % unit2)
            assert_(np.equal(np.array('1932-02-17', dtype='M').astype(dt1),
                             np.array('1932-02-17T00:00:00', dtype='M').astype(dt2),
                             casting='unsafe'))
            assert_(np.equal(np.array('10000-04-27', dtype='M').astype(dt1),
                             np.array('10000-04-27T00:00:00', dtype='M').astype(dt2),
                             casting='unsafe'))
    # Shouldn't be able to compare datetime and timedelta
    # TODO: Changing to 'same_kind' or 'safe' casting in the ufuncs by
    #       default is needed to properly catch this kind of thing...
    a = np.array('2012-12-21', dtype='M8[D]')
    b = np.array(3, dtype='m8[D]')
    #assert_raises(TypeError, np.less, a, b)
    assert_raises(TypeError, np.less, a, b, casting='same_kind')
def test_datetime_like(self):
a = np.array([3], dtype='m8[4D]')
b = np.array(['2012-12-21'], dtype='M8[D]')
assert_equal(np.ones_like(a).dtype, a.dtype)
assert_equal(np.zeros_like(a).dtype, a.dtype)
assert_equal(np.empty_like(a).dtype, a.dtype)
assert_equal(np.ones_like(b).dtype, b.dtype)
assert_equal(np.zeros_like(b).dtype, b.dtype)
assert_equal(np.empty_like(b).dtype, b.dtype)
def test_datetime_unary(self):
for tda, tdb, tdzero, tdone, tdmone in \
[
# One-dimensional arrays
(np.array([3], dtype='m8[D]'),
np.array([-3], dtype='m8[D]'),
np.array([0], dtype='m8[D]'),
np.array([1], dtype='m8[D]'),
np.array([-1], dtype='m8[D]')),
# NumPy scalars
(np.timedelta64(3, '[D]'),
np.timedelta64(-3, '[D]'),
np.timedelta64(0, '[D]'),
np.timedelta64(1, '[D]'),
np.timedelta64(-1, '[D]'))]:
# negative ufunc
assert_equal(-tdb, tda)
assert_equal((-tdb).dtype, tda.dtype)
assert_equal(np.negative(tdb), tda)
assert_equal(np.negative(tdb).dtype, tda.dtype)
# positive ufunc
assert_equal(np.positive(tda), tda)
assert_equal(np.positive(tda).dtype, tda.dtype)
assert_equal(np.positive(tdb), tdb)
assert_equal(np.positive(tdb).dtype, tdb.dtype)
# absolute ufunc
assert_equal(np.absolute(tdb), tda)
assert_equal(np.absolute(tdb).dtype, tda.dtype)
# sign ufunc
assert_equal(np.sign(tda), tdone)
assert_equal(np.sign(tdb), tdmone)
assert_equal(np.sign(tdzero), tdzero)
assert_equal(np.sign(tda).dtype, tda.dtype)
# The ufuncs always produce native-endian results
assert_
    def test_datetime_add(self):
        """Addition semantics for datetime64/timedelta64 operands.

        Runs the same assertion set for one-dimensional arrays and NumPy
        scalars: m8+m8, m8 mixed with bool/int (both orders), M8 mixed
        with bool/int (with NaT propagation), M8+m8 in both orders with
        unit promotion, and the TypeError for M8+M8.
        """
        for dta, dtb, dtc, dtnat, tda, tdb, tdc in \
                    [
                     # One-dimensional arrays
                     (np.array(['2012-12-21'], dtype='M8[D]'),
                      np.array(['2012-12-24'], dtype='M8[D]'),
                      np.array(['2012-12-21T11'], dtype='M8[h]'),
                      np.array(['NaT'], dtype='M8[D]'),
                      np.array([3], dtype='m8[D]'),
                      np.array([11], dtype='m8[h]'),
                      np.array([3*24 + 11], dtype='m8[h]')),
                     # NumPy scalars
                     (np.datetime64('2012-12-21', '[D]'),
                      np.datetime64('2012-12-24', '[D]'),
                      np.datetime64('2012-12-21T11', '[h]'),
                      np.datetime64('NaT', '[D]'),
                      np.timedelta64(3, '[D]'),
                      np.timedelta64(11, '[h]'),
                      np.timedelta64(3*24 + 11, '[h]'))]:
            # m8 + m8
            assert_equal(tda + tdb, tdc)
            assert_equal((tda + tdb).dtype, np.dtype('m8[h]'))
            # m8 + bool
            assert_equal(tdb + True, tdb + 1)
            assert_equal((tdb + True).dtype, np.dtype('m8[h]'))
            # m8 + int
            assert_equal(tdb + 3*24, tdc)
            assert_equal((tdb + 3*24).dtype, np.dtype('m8[h]'))
            # bool + m8
            assert_equal(False + tdb, tdb)
            assert_equal((False + tdb).dtype, np.dtype('m8[h]'))
            # int + m8
            assert_equal(3*24 + tdb, tdc)
            assert_equal((3*24 + tdb).dtype, np.dtype('m8[h]'))
            # M8 + bool
            assert_equal(dta + True, dta + 1)
            assert_equal(dtnat + True, dtnat)
            assert_equal((dta + True).dtype, np.dtype('M8[D]'))
            # M8 + int
            assert_equal(dta + 3, dtb)
            assert_equal(dtnat + 3, dtnat)
            assert_equal((dta + 3).dtype, np.dtype('M8[D]'))
            # bool + M8
            assert_equal(False + dta, dta)
            assert_equal(False + dtnat, dtnat)
            assert_equal((False + dta).dtype, np.dtype('M8[D]'))
            # int + M8
            assert_equal(3 + dta, dtb)
            assert_equal(3 + dtnat, dtnat)
            assert_equal((3 + dta).dtype, np.dtype('M8[D]'))
            # M8 + m8
            assert_equal(dta + tda, dtb)
            assert_equal(dtnat + tda, dtnat)
            assert_equal((dta + tda).dtype, np.dtype('M8[D]'))
            # m8 + M8
            assert_equal(tda + dta, dtb)
            assert_equal(tda + dtnat, dtnat)
            assert_equal((tda + dta).dtype, np.dtype('M8[D]'))
            # In M8 + m8, the result goes to higher precision
            assert_equal(np.add(dta, tdb, casting='unsafe'), dtc)
            assert_equal(np.add(dta, tdb, casting='unsafe').dtype,
                         np.dtype('M8[h]'))
            assert_equal(np.add(tdb, dta, casting='unsafe'), dtc)
            assert_equal(np.add(tdb, dta, casting='unsafe').dtype,
                         np.dtype('M8[h]'))
            # M8 + M8
            assert_raises(TypeError, np.add, dta, dtb)
    def test_datetime_subtract(self):
        """Subtraction semantics for datetime64/timedelta64 operands.

        Runs the same assertion set for one-dimensional arrays and NumPy
        scalars: m8-m8 (both orders), m8 mixed with bool/int, M8 mixed
        with bool/int (with NaT propagation), M8-m8, unit promotion when
        mixing [D] with [h], and the rejected m8-M8 / bool-M8 / int-M8.
        """
        for dta, dtb, dtc, dtd, dte, dtnat, tda, tdb, tdc in \
                    [
                     # One-dimensional arrays
                     (np.array(['2012-12-21'], dtype='M8[D]'),
                      np.array(['2012-12-24'], dtype='M8[D]'),
                      np.array(['1940-12-24'], dtype='M8[D]'),
                      np.array(['1940-12-24T00'], dtype='M8[h]'),
                      np.array(['1940-12-23T13'], dtype='M8[h]'),
                      np.array(['NaT'], dtype='M8[D]'),
                      np.array([3], dtype='m8[D]'),
                      np.array([11], dtype='m8[h]'),
                      np.array([3*24 - 11], dtype='m8[h]')),
                     # NumPy scalars
                     (np.datetime64('2012-12-21', '[D]'),
                      np.datetime64('2012-12-24', '[D]'),
                      np.datetime64('1940-12-24', '[D]'),
                      np.datetime64('1940-12-24T00', '[h]'),
                      np.datetime64('1940-12-23T13', '[h]'),
                      np.datetime64('NaT', '[D]'),
                      np.timedelta64(3, '[D]'),
                      np.timedelta64(11, '[h]'),
                      np.timedelta64(3*24 - 11, '[h]'))]:
            # m8 - m8
            assert_equal(tda - tdb, tdc)
            assert_equal((tda - tdb).dtype, np.dtype('m8[h]'))
            assert_equal(tdb - tda, -tdc)
            assert_equal((tdb - tda).dtype, np.dtype('m8[h]'))
            # m8 - bool
            assert_equal(tdc - True, tdc - 1)
            assert_equal((tdc - True).dtype, np.dtype('m8[h]'))
            # m8 - int
            assert_equal(tdc - 3*24, -tdb)
            assert_equal((tdc - 3*24).dtype, np.dtype('m8[h]'))
            # bool - m8 (comment fixed: the operand below is False, not an int)
            assert_equal(False - tdb, -tdb)
            assert_equal((False - tdb).dtype, np.dtype('m8[h]'))
            # int - m8
            assert_equal(3*24 - tdb, tdc)
            assert_equal((3*24 - tdb).dtype, np.dtype('m8[h]'))
            # M8 - bool
            assert_equal(dtb - True, dtb - 1)
            assert_equal(dtnat - True, dtnat)
            assert_equal((dtb - True).dtype, np.dtype('M8[D]'))
            # M8 - int
            assert_equal(dtb - 3, dta)
            assert_equal(dtnat - 3, dtnat)
            assert_equal((dtb - 3).dtype, np.dtype('M8[D]'))
            # M8 - m8
            assert_equal(dtb - tda, dta)
            assert_equal(dtnat - tda, dtnat)
            assert_equal((dtb - tda).dtype, np.dtype('M8[D]'))
            # In M8 - m8, the result goes to higher precision
            assert_equal(np.subtract(dtc, tdb, casting='unsafe'), dte)
            assert_equal(np.subtract(dtc, tdb, casting='unsafe').dtype,
                         np.dtype('M8[h]'))
            # M8 - M8 with different goes to higher precision
            assert_equal(np.subtract(dtc, dtd, casting='unsafe'),
                         np.timedelta64(0, 'h'))
            assert_equal(np.subtract(dtc, dtd, casting='unsafe').dtype,
                         np.dtype('m8[h]'))
            assert_equal(np.subtract(dtd, dtc, casting='unsafe'),
                         np.timedelta64(0, 'h'))
            assert_equal(np.subtract(dtd, dtc, casting='unsafe').dtype,
                         np.dtype('m8[h]'))
            # m8 - M8
            assert_raises(TypeError, np.subtract, tda, dta)
            # bool - M8
            assert_raises(TypeError, np.subtract, False, dta)
            # int - M8
            assert_raises(TypeError, np.subtract, 3, dta)
    def test_datetime_multiply(self):
        """Multiplication semantics: m8*int/float works, everything with M8
        or m8*m8 is a TypeError, and NaT/inf/nan operands propagate NaT.

        Runs the same assertion set for one-dimensional arrays and NumPy
        scalars.
        """
        for dta, tda, tdb, tdc in \
                    [
                     # One-dimensional arrays
                     (np.array(['2012-12-21'], dtype='M8[D]'),
                      np.array([6], dtype='m8[h]'),
                      np.array([9], dtype='m8[h]'),
                      np.array([12], dtype='m8[h]')),
                     # NumPy scalars
                     (np.datetime64('2012-12-21', '[D]'),
                      np.timedelta64(6, '[h]'),
                      np.timedelta64(9, '[h]'),
                      np.timedelta64(12, '[h]'))]:
            # m8 * int
            assert_equal(tda * 2, tdc)
            assert_equal((tda * 2).dtype, np.dtype('m8[h]'))
            # int * m8
            assert_equal(2 * tda, tdc)
            assert_equal((2 * tda).dtype, np.dtype('m8[h]'))
            # m8 * float
            assert_equal(tda * 1.5, tdb)
            assert_equal((tda * 1.5).dtype, np.dtype('m8[h]'))
            # float * m8
            assert_equal(1.5 * tda, tdb)
            assert_equal((1.5 * tda).dtype, np.dtype('m8[h]'))
            # m8 * m8
            assert_raises(TypeError, np.multiply, tda, tdb)
            # M8 * m8 (comment fixed: args below are datetime, timedelta)
            assert_raises(TypeError, np.multiply, dta, tda)
            # m8 * M8 (comment fixed: args below are timedelta, datetime)
            assert_raises(TypeError, np.multiply, tda, dta)
            # M8 * int
            assert_raises(TypeError, np.multiply, dta, 2)
            # int * M8
            assert_raises(TypeError, np.multiply, 2, dta)
            # M8 * float
            assert_raises(TypeError, np.multiply, dta, 1.5)
            # float * M8
            assert_raises(TypeError, np.multiply, 1.5, dta)
        # NaTs: multiplication with NaT, inf or nan yields NaT in both orders
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in multiply")
            nat = np.timedelta64('NaT')
            def check(a, b, res):
                # Verify commutativity of the NaT result
                assert_equal(a * b, res)
                assert_equal(b * a, res)
            for tp in (int, float):
                check(nat, tp(2), nat)
                check(nat, tp(0), nat)
            for f in (float('inf'), float('nan')):
                check(np.timedelta64(1), f, nat)
                check(np.timedelta64(0), f, nat)
                check(nat, f, nat)
    @pytest.mark.parametrize("op1, op2, exp", [
        # m8 same units round down
        (np.timedelta64(7, 's'),
         np.timedelta64(4, 's'),
         1),
        # m8 same units round down with negative
        (np.timedelta64(7, 's'),
         np.timedelta64(-4, 's'),
         -2),
        # m8 same units negative no round down
        (np.timedelta64(8, 's'),
         np.timedelta64(-4, 's'),
         -2),
        # m8 different units
        (np.timedelta64(1, 'm'),
         np.timedelta64(31, 's'),
         1),
        # m8 generic units
        (np.timedelta64(1890),
         np.timedelta64(31),
         60),
        # Y // M works
        (np.timedelta64(2, 'Y'),
         np.timedelta64('13', 'M'),
         1),
        # handle 1D arrays
        (np.array([1, 2, 3], dtype='m8'),
         np.array([2], dtype='m8'),
         np.array([0, 1, 1], dtype=np.int64)),
        ])
    def test_timedelta_floor_divide(self, op1, op2, exp):
        """timedelta64 // rounds toward negative infinity (Python floor)."""
        assert_equal(op1 // op2, exp)
    @pytest.mark.parametrize("op1, op2", [
        # div by 0
        (np.timedelta64(10, 'us'),
         np.timedelta64(0, 'us')),
        # div with NaT
        (np.timedelta64('NaT'),
         np.timedelta64(50, 'us')),
        # special case for int64 min
        # in integer floor division
        (np.timedelta64(np.iinfo(np.int64).min),
         np.timedelta64(-1)),
        ])
    def test_timedelta_floor_div_warnings(self, op1, op2):
        """Degenerate floor divisions warn and produce 0 with int64 dtype."""
        with assert_warns(RuntimeWarning):
            actual = op1 // op2
            assert_equal(actual, 0)
            assert_equal(actual.dtype, np.int64)
    @pytest.mark.parametrize("val1, val2", [
        # the smallest integer that can't be represented
        # exactly in a double should be preserved if we avoid
        # casting to double in floordiv operation
        (9007199254740993, 1),
        # stress the alternate floordiv code path where
        # operand signs don't match and remainder isn't 0
        (9007199254740999, -2),
        ])
    def test_timedelta_floor_div_precision(self, val1, val2):
        """Floor division must stay in integer arithmetic (no float64 detour)."""
        op1 = np.timedelta64(val1)
        op2 = np.timedelta64(val2)
        actual = op1 // op2
        # Python reference integer floor
        expected = val1 // val2
        assert_equal(actual, expected)
    @pytest.mark.parametrize("val1, val2", [
        # years and months sometimes can't be unambiguously
        # divided for floor division operation
        (np.timedelta64(7, 'Y'),
         np.timedelta64(3, 's')),
        (np.timedelta64(7, 'M'),
         np.timedelta64(1, 'D')),
        ])
    def test_timedelta_floor_div_error(self, val1, val2):
        """Floor division across incommensurable units raises TypeError."""
        with assert_raises_regex(TypeError, "common metadata divisor"):
            val1 // val2
    @pytest.mark.parametrize("op1, op2", [
        # reuse the test cases from floordiv
        (np.timedelta64(7, 's'),
         np.timedelta64(4, 's')),
        # m8 same units round down with negative
        (np.timedelta64(7, 's'),
         np.timedelta64(-4, 's')),
        # m8 same units negative no round down
        (np.timedelta64(8, 's'),
         np.timedelta64(-4, 's')),
        # m8 different units
        (np.timedelta64(1, 'm'),
         np.timedelta64(31, 's')),
        # m8 generic units
        (np.timedelta64(1890),
         np.timedelta64(31)),
        # Y // M works
        (np.timedelta64(2, 'Y'),
         np.timedelta64('13', 'M')),
        # handle 1D arrays
        (np.array([1, 2, 3], dtype='m8'),
         np.array([2], dtype='m8')),
        ])
    def test_timedelta_divmod(self, op1, op2):
        """divmod on timedelta64 must agree with the (//, %) pair."""
        expected = (op1 // op2, op1 % op2)
        assert_equal(divmod(op1, op2), expected)
    @pytest.mark.parametrize("op1, op2", [
        # reuse cases from floordiv
        # div by 0
        (np.timedelta64(10, 'us'),
         np.timedelta64(0, 'us')),
        # div with NaT
        (np.timedelta64('NaT'),
         np.timedelta64(50, 'us')),
        # special case for int64 min
        # in integer floor division
        (np.timedelta64(np.iinfo(np.int64).min),
         np.timedelta64(-1)),
        ])
    def test_timedelta_divmod_warnings(self, op1, op2):
        """Degenerate divmod warns just like the individual // and % do."""
        with assert_warns(RuntimeWarning):
            expected = (op1 // op2, op1 % op2)
        with assert_warns(RuntimeWarning):
            actual = divmod(op1, op2)
        assert_equal(actual, expected)
    def test_datetime_divide(self):
        """Division semantics: m8/int, m8/float and m8/m8 work; any mix
        involving M8, or a number divided by m8, raises TypeError; NaT,
        inf and nan divisors propagate NaT / zero as appropriate.

        Runs the same assertion set for one-dimensional arrays and NumPy
        scalars.
        """
        for dta, tda, tdb, tdc, tdd in \
                    [
                     # One-dimensional arrays
                     (np.array(['2012-12-21'], dtype='M8[D]'),
                      np.array([6], dtype='m8[h]'),
                      np.array([9], dtype='m8[h]'),
                      np.array([12], dtype='m8[h]'),
                      np.array([6], dtype='m8[m]')),
                     # NumPy scalars
                     (np.datetime64('2012-12-21', '[D]'),
                      np.timedelta64(6, '[h]'),
                      np.timedelta64(9, '[h]'),
                      np.timedelta64(12, '[h]'),
                      np.timedelta64(6, '[m]'))]:
            # m8 / int
            assert_equal(tdc / 2, tda)
            assert_equal((tdc / 2).dtype, np.dtype('m8[h]'))
            # m8 / float
            assert_equal(tda / 0.5, tdc)
            assert_equal((tda / 0.5).dtype, np.dtype('m8[h]'))
            # m8 / m8 yields a plain float ratio
            assert_equal(tda / tdb, 6.0 / 9.0)
            assert_equal(np.divide(tda, tdb), 6.0 / 9.0)
            assert_equal(np.true_divide(tda, tdb), 6.0 / 9.0)
            assert_equal(tdb / tda, 9.0 / 6.0)
            assert_equal((tda / tdb).dtype, np.dtype('f8'))
            assert_equal(tda / tdd, 60.0)
            assert_equal(tdd / tda, 1.0 / 60.0)
            # int / m8
            assert_raises(TypeError, np.divide, 2, tdb)
            # float / m8
            assert_raises(TypeError, np.divide, 0.5, tdb)
            # M8 / m8 (comment fixed: args below are datetime, timedelta)
            assert_raises(TypeError, np.divide, dta, tda)
            # m8 / M8 (comment fixed: args below are timedelta, datetime)
            assert_raises(TypeError, np.divide, tda, dta)
            # M8 / int
            assert_raises(TypeError, np.divide, dta, 2)
            # int / M8
            assert_raises(TypeError, np.divide, 2, dta)
            # M8 / float
            assert_raises(TypeError, np.divide, dta, 1.5)
            # float / M8
            assert_raises(TypeError, np.divide, 1.5, dta)
        # NaTs
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, r".*encountered in true\_divide")
            nat = np.timedelta64('NaT')
            for tp in (int, float):
                assert_equal(np.timedelta64(1) / tp(0), nat)
                assert_equal(np.timedelta64(0) / tp(0), nat)
                assert_equal(nat / tp(0), nat)
                assert_equal(nat / tp(2), nat)
            # Division by inf
            assert_equal(np.timedelta64(1) / float('inf'), np.timedelta64(0))
            assert_equal(np.timedelta64(0) / float('inf'), np.timedelta64(0))
            assert_equal(nat / float('inf'), nat)
            # Division by nan
            assert_equal(np.timedelta64(1) / float('nan'), nat)
            assert_equal(np.timedelta64(0) / float('nan'), nat)
            assert_equal(nat / float('nan'), nat)
def test_datetime_compare(self):
# Test all the comparison operators
a = np.datetime64('2000-03-12T18:00:00.000000')
b = np.array(['2000-03-12T18:00:00.000000',
'2000-03-12T17:59:59.999999',
'2000-03-12T18:00:00.000001',
'1970-01-11T12:00:00.909090',
'2016-01-11T12:00:00.909090'],
dtype='datetime64[us]')
assert_equal(np.equal(a, b), [1, 0, 0, 0, 0])
assert_equal(np.not_equal(a, b), [0, 1, 1, 1, 1])
assert_equal(np.less(a, b), [0, 0, 1, 0, 1])
assert_equal(np.less_equal(a, b), [1, 0, 1, 0, 1])
assert_equal(np.greater(a, b), [0, 1, 0, 1, 0])
assert_equal(np.greater_equal(a, b), [1, 1, 0, 1, 0])
def test_datetime_compare_nat(self):
dt_nat = np.datetime64('NaT', 'D')
dt_other = np.datetime64('2000-01-01')
td_nat = np.timedelta64('NaT', 'h')
td_other = np.timedelta64(1, 'h')
for op in [np.equal, np.less, np.less_equal,
np.greater, np.greater_equal]:
assert_(not op(dt_nat, dt_nat))
assert_(not op(dt_nat, dt_other))
assert_(not op(dt_other, dt_nat))
assert_(not op(td_nat, td_nat))
assert_(not op(td_nat, td_other))
assert_(not op(td_other, td_nat))
assert_(np.not_equal(dt_nat, dt_nat))
assert_(np.not_equal(dt_nat, dt_other))
assert_(np.not_equal(dt_other, dt_nat))
assert_(np.not_equal(td_nat, td_nat))
assert_(np.not_equal(td_nat, td_other))
assert_(np.not_equal(td_other, td_nat))
    def test_datetime_minmax(self):
        """minimum/maximum/fmin/fmax on datetimes and timedeltas.

        The result unit becomes the GCD of the operand units; NaT wins
        under minimum/maximum but is ignored by fmin/fmax; comparing a
        datetime against a timedelta must be rejected (with same_kind
        casting, since the unsafe default still lets it through).
        """
        # The metadata of the result should become the GCD
        # of the operand metadata
        a = np.array('1999-03-12T13', dtype='M8[2m]')
        b = np.array('1999-03-12T12', dtype='M8[s]')
        assert_equal(np.minimum(a, b), b)
        assert_equal(np.minimum(a, b).dtype, np.dtype('M8[s]'))
        assert_equal(np.fmin(a, b), b)
        assert_equal(np.fmin(a, b).dtype, np.dtype('M8[s]'))
        assert_equal(np.maximum(a, b), a)
        assert_equal(np.maximum(a, b).dtype, np.dtype('M8[s]'))
        assert_equal(np.fmax(a, b), a)
        assert_equal(np.fmax(a, b).dtype, np.dtype('M8[s]'))
        # Viewed as integers, the comparison is opposite because
        # of the units chosen
        assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8'))
        # Interaction with NaT
        a = np.array('1999-03-12T13', dtype='M8[2m]')
        dtnat = np.array('NaT', dtype='M8[h]')
        assert_equal(np.minimum(a, dtnat), dtnat)
        assert_equal(np.minimum(dtnat, a), dtnat)
        assert_equal(np.maximum(a, dtnat), dtnat)
        assert_equal(np.maximum(dtnat, a), dtnat)
        assert_equal(np.fmin(dtnat, a), a)
        assert_equal(np.fmin(a, dtnat), a)
        assert_equal(np.fmax(dtnat, a), a)
        assert_equal(np.fmax(a, dtnat), a)
        # Also do timedelta
        a = np.array(3, dtype='m8[h]')
        b = np.array(3*3600 - 3, dtype='m8[s]')
        assert_equal(np.minimum(a, b), b)
        assert_equal(np.minimum(a, b).dtype, np.dtype('m8[s]'))
        assert_equal(np.fmin(a, b), b)
        assert_equal(np.fmin(a, b).dtype, np.dtype('m8[s]'))
        assert_equal(np.maximum(a, b), a)
        assert_equal(np.maximum(a, b).dtype, np.dtype('m8[s]'))
        assert_equal(np.fmax(a, b), a)
        assert_equal(np.fmax(a, b).dtype, np.dtype('m8[s]'))
        # Viewed as integers, the comparison is opposite because
        # of the units chosen
        assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8'))
        # should raise between datetime and timedelta
        #
        # TODO: Allowing unsafe casting by
        # default in ufuncs strikes again... :(
        a = np.array(3, dtype='m8[h]')
        b = np.array('1999-03-12T12', dtype='M8[s]')
        #assert_raises(TypeError, np.minimum, a, b)
        #assert_raises(TypeError, np.maximum, a, b)
        #assert_raises(TypeError, np.fmin, a, b)
        #assert_raises(TypeError, np.fmax, a, b)
        assert_raises(TypeError, np.minimum, a, b, casting='same_kind')
        assert_raises(TypeError, np.maximum, a, b, casting='same_kind')
        assert_raises(TypeError, np.fmin, a, b, casting='same_kind')
        assert_raises(TypeError, np.fmax, a, b, casting='same_kind')
def test_hours(self):
t = np.ones(3, dtype='M8[s]')
t[0] = 60*60*24 + 60*60*10
assert_(t[0].item().hour == 10)
def test_divisor_conversion_year(self):
assert_(np.dtype('M8[Y/4]') == np.dtype('M8[3M]'))
assert_(np.dtype('M8[Y/13]') == np.dtype('M8[4W]'))
assert_(np.dtype('M8[3Y/73]') == np.dtype('M8[15D]'))
def test_divisor_conversion_month(self):
assert_(np.dtype('M8[M/2]') == np.dtype('M8[2W]'))
assert_(np.dtype('M8[M/15]') == np.dtype('M8[2D]'))
assert_(np.dtype('M8[3M/40]') == np.dtype('M8[54h]'))
def test_divisor_conversion_week(self):
assert_(np.dtype('m8[W/7]') == np.dtype('m8[D]'))
assert_(np.dtype('m8[3W/14]') == np.dtype('m8[36h]'))
assert_(np.dtype('m8[5W/140]') == np.dtype('m8[360m]'))
def test_divisor_conversion_day(self):
assert_(np.dtype('M8[D/12]') == np.dtype('M8[2h]'))
assert_(np.dtype('M8[D/120]') == np.dtype('M8[12m]'))
assert_(np.dtype('M8[3D/960]') == np.dtype('M8[270s]'))
def test_divisor_conversion_hour(self):
assert_(np.dtype('m8[h/30]') == np.dtype('m8[2m]'))
assert_(np.dtype('m8[3h/300]') == np.dtype('m8[36s]'))
def test_divisor_conversion_minute(self):
assert_(np.dtype('m8[m/30]') == np.dtype('m8[2s]'))
assert_(np.dtype('m8[3m/300]') == np.dtype('m8[600ms]'))
def test_divisor_conversion_second(self):
assert_(np.dtype('m8[s/100]') == np.dtype('m8[10ms]'))
assert_(np.dtype('m8[3s/10000]') == np.dtype('m8[300us]'))
def test_divisor_conversion_fs(self):
assert_(np.dtype('M8[fs/100]') == np.dtype('M8[10as]'))
assert_raises(ValueError, lambda: np.dtype('M8[3fs/10000]'))
def test_divisor_conversion_as(self):
assert_raises(ValueError, lambda: np.dtype('M8[as/10]'))
    def test_string_parser_variants(self):
        """Accepted datetime string variants: space vs 'T' separator,
        signed years, and (deprecated) 'Z'/offset timezone suffixes.
        """
        # Allow space instead of 'T' between date and time
        assert_equal(np.array(['1980-02-29T01:02:03'], np.dtype('M8[s]')),
                     np.array(['1980-02-29 01:02:03'], np.dtype('M8[s]')))
        # Allow positive years
        assert_equal(np.array(['+1980-02-29T01:02:03'], np.dtype('M8[s]')),
                     np.array(['+1980-02-29 01:02:03'], np.dtype('M8[s]')))
        # Allow negative years
        assert_equal(np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')),
                     np.array(['-1980-02-29 01:02:03'], np.dtype('M8[s]')))
        # UTC specifier (timezone parsing is deprecated, hence the warning)
        with assert_warns(DeprecationWarning):
            assert_equal(
                np.array(['+1980-02-29T01:02:03'], np.dtype('M8[s]')),
                np.array(['+1980-02-29 01:02:03Z'], np.dtype('M8[s]')))
        with assert_warns(DeprecationWarning):
            assert_equal(
                np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')),
                np.array(['-1980-02-29 01:02:03Z'], np.dtype('M8[s]')))
        # Time zone offset
        with assert_warns(DeprecationWarning):
            assert_equal(
                np.array(['1980-02-29T02:02:03'], np.dtype('M8[s]')),
                np.array(['1980-02-29 00:32:03-0130'], np.dtype('M8[s]')))
        with assert_warns(DeprecationWarning):
            assert_equal(
                np.array(['1980-02-28T22:32:03'], np.dtype('M8[s]')),
                np.array(['1980-02-29 00:02:03+01:30'], np.dtype('M8[s]')))
        with assert_warns(DeprecationWarning):
            assert_equal(
                np.array(['1980-02-29T02:32:03.506'], np.dtype('M8[s]')),
                np.array(['1980-02-29 00:32:03.506-02'], np.dtype('M8[s]')))
        with assert_warns(DeprecationWarning):
            assert_equal(np.datetime64('1977-03-02T12:30-0230'),
                         np.datetime64('1977-03-02T15:00'))
def test_string_parser_error_check(self):
# Arbitrary bad string
assert_raises(ValueError, np.array, ['badvalue'], np.dtype('M8[us]'))
# Character after year must be '-'
assert_raises(ValueError, np.array, ['1980X'], np.dtype('M8[us]'))
# Cannot have trailing '-'
assert_raises(ValueError, np.array, ['1980-'], np.dtype('M8[us]'))
# Month must be in range [1,12]
assert_raises(ValueError, np.array, ['1980-00'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-13'], np.dtype('M8[us]'))
# Month must have two digits
assert_raises(ValueError, np.array, ['1980-1'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-1-02'], np.dtype('M8[us]'))
# 'Mor' is not a valid month
assert_raises(ValueError, np.array, ['1980-Mor'], np.dtype('M8[us]'))
# Cannot have trailing '-'
assert_raises(ValueError, np.array, ['1980-01-'], np.dtype('M8[us]'))
# Day must be in range [1,len(month)]
assert_raises(ValueError, np.array, ['1980-01-0'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-01-00'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-01-32'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1979-02-29'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-30'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-03-32'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-04-31'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-05-32'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-06-31'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-07-32'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-08-32'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-09-31'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-10-32'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-11-31'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-12-32'], np.dtype('M8[us]'))
# Cannot have trailing characters
assert_raises(ValueError, np.array, ['1980-02-03%'],
np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-03 q'],
np.dtype('M8[us]'))
# Hours must be in range [0, 23]
assert_raises(ValueError, np.array, ['1980-02-03 25'],
np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-03T25'],
np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-03 24:01'],
np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-03T24:01'],
np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-03 -1'],
np.dtype('M8[us]'))
# No trailing ':'
assert_raises(ValueError, np.array, ['1980-02-03 01:'],
np.dtype('M8[us]'))
# Minutes must be in range [0, 59]
assert_raises(ValueError, np.array, ['1980-02-03 01:-1'],
np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-03 01:60'],
np.dtype('M8[us]'))
# No trailing ':'
assert_raises(ValueError, np.array, ['1980-02-03 01:60:'],
np.dtype('M8[us]'))
# Seconds must be in range [0, 59]
assert_raises(ValueError, np.array, ['1980-02-03 01:10:-1'],
np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-03 01:01:60'],
np.dtype('M8[us]'))
# Timezone offset must within a reasonable range
with assert_warns(DeprecationWarning):
assert_raises(ValueError, np.array, ['1980-02-03 01:01:00+0661'],
np.dtype('M8[us]'))
with assert_warns(DeprecationWarning):
assert_raises(ValueError, np.array, ['1980-02-03 01:01:00+2500'],
np.dtype('M8[us]'))
with assert_warns(DeprecationWarning):
assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-0070'],
np.dtype('M8[us]'))
with assert_warns(DeprecationWarning):
assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-3000'],
np.dtype('M8[us]'))
with assert_warns(DeprecationWarning):
assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-25:00'],
np.dtype('M8[us]'))
    def test_creation_overflow(self):
        """Parsing into finer units must scale exactly, and values beyond
        int64 must raise OverflowError instead of wrapping (gh-13062).
        """
        date = '1980-03-23 20:00:00'
        # Reference count of seconds since the epoch for `date`.
        timesteps = np.array([date], dtype='datetime64[s]')[0].astype(np.int64)
        for unit in ['ms', 'us', 'ns']:
            # Each finer unit is exactly 1000x the previous one.
            timesteps *= 1000
            x = np.array([date], dtype='datetime64[%s]' % unit)
            assert_equal(timesteps, x[0].astype(np.int64),
                         err_msg='Datetime conversion error for unit %s' % unit)
        assert_equal(x[0].astype(np.int64), 322689600000000000)
        # gh-13062
        with pytest.raises(OverflowError):
            np.datetime64(2**64, 'D')
        with pytest.raises(OverflowError):
            np.timedelta64(2**64, 'D')
    def test_datetime_as_string(self):
        """np.datetime_as_string: default formatting for every unit,
        explicit unit= (with unsafe casting for coarser units), and the
        unit='auto' trimming behavior.
        """
        # Check all the units with default string conversion
        date = '1959-10-13'
        datetime = '1959-10-13T12:34:56.789012345678901234'
        assert_equal(np.datetime_as_string(np.datetime64(date, 'Y')),
                     '1959')
        assert_equal(np.datetime_as_string(np.datetime64(date, 'M')),
                     '1959-10')
        assert_equal(np.datetime_as_string(np.datetime64(date, 'D')),
                     '1959-10-13')
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'h')),
                     '1959-10-13T12')
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'm')),
                     '1959-10-13T12:34')
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 's')),
                     '1959-10-13T12:34:56')
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ms')),
                     '1959-10-13T12:34:56.789')
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'us')),
                     '1959-10-13T12:34:56.789012')
        # Sub-microsecond units only fit near the epoch; check both sides.
        datetime = '1969-12-31T23:34:56.789012345678901234'
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ns')),
                     '1969-12-31T23:34:56.789012345')
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ps')),
                     '1969-12-31T23:34:56.789012345678')
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'fs')),
                     '1969-12-31T23:34:56.789012345678901')
        datetime = '1969-12-31T23:59:57.789012345678901234'
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'as')),
                     datetime)
        datetime = '1970-01-01T00:34:56.789012345678901234'
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ns')),
                     '1970-01-01T00:34:56.789012345')
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ps')),
                     '1970-01-01T00:34:56.789012345678')
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'fs')),
                     '1970-01-01T00:34:56.789012345678901')
        datetime = '1970-01-01T00:00:05.789012345678901234'
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'as')),
                     datetime)
        # String conversion with the unit= parameter
        a = np.datetime64('2032-07-18T12:23:34.123456', 'us')
        assert_equal(np.datetime_as_string(a, unit='Y', casting='unsafe'),
                     '2032')
        assert_equal(np.datetime_as_string(a, unit='M', casting='unsafe'),
                     '2032-07')
        assert_equal(np.datetime_as_string(a, unit='W', casting='unsafe'),
                     '2032-07-18')
        assert_equal(np.datetime_as_string(a, unit='D', casting='unsafe'),
                     '2032-07-18')
        assert_equal(np.datetime_as_string(a, unit='h'), '2032-07-18T12')
        assert_equal(np.datetime_as_string(a, unit='m'),
                     '2032-07-18T12:23')
        assert_equal(np.datetime_as_string(a, unit='s'),
                     '2032-07-18T12:23:34')
        assert_equal(np.datetime_as_string(a, unit='ms'),
                     '2032-07-18T12:23:34.123')
        assert_equal(np.datetime_as_string(a, unit='us'),
                     '2032-07-18T12:23:34.123456')
        assert_equal(np.datetime_as_string(a, unit='ns'),
                     '2032-07-18T12:23:34.123456000')
        assert_equal(np.datetime_as_string(a, unit='ps'),
                     '2032-07-18T12:23:34.123456000000')
        assert_equal(np.datetime_as_string(a, unit='fs'),
                     '2032-07-18T12:23:34.123456000000000')
        assert_equal(np.datetime_as_string(a, unit='as'),
                     '2032-07-18T12:23:34.123456000000000000')
        # unit='auto' parameter
        assert_equal(np.datetime_as_string(
                np.datetime64('2032-07-18T12:23:34.123456', 'us'), unit='auto'),
                '2032-07-18T12:23:34.123456')
        assert_equal(np.datetime_as_string(
                np.datetime64('2032-07-18T12:23:34.12', 'us'), unit='auto'),
                '2032-07-18T12:23:34.120')
        assert_equal(np.datetime_as_string(
                np.datetime64('2032-07-18T12:23:34', 'us'), unit='auto'),
                '2032-07-18T12:23:34')
        assert_equal(np.datetime_as_string(
                np.datetime64('2032-07-18T12:23:00', 'us'), unit='auto'),
                '2032-07-18T12:23')
        # 'auto' doesn't split up hour and minute
        assert_equal(np.datetime_as_string(
                np.datetime64('2032-07-18T12:00:00', 'us'), unit='auto'),
                '2032-07-18T12:00')
        assert_equal(np.datetime_as_string(
                np.datetime64('2032-07-18T00:00:00', 'us'), unit='auto'),
                '2032-07-18')
        # 'auto' doesn't split up the date
        assert_equal(np.datetime_as_string(
                np.datetime64('2032-07-01T00:00:00', 'us'), unit='auto'),
                '2032-07-01')
        assert_equal(np.datetime_as_string(
                np.datetime64('2032-01-01T00:00:00', 'us'), unit='auto'),
                '2032-01-01')
    @pytest.mark.skipif(not _has_pytz, reason="The pytz module is not available.")
    def test_datetime_as_string_timezone(self):
        """datetime_as_string with timezone='naive'/'UTC'/'local' and with
        pytz timezone objects; printing dates with a timezone requires
        unsafe casting.
        """
        # timezone='local' vs 'UTC'
        a = np.datetime64('2010-03-15T06:30', 'm')
        assert_equal(np.datetime_as_string(a),
                     '2010-03-15T06:30')
        assert_equal(np.datetime_as_string(a, timezone='naive'),
                     '2010-03-15T06:30')
        assert_equal(np.datetime_as_string(a, timezone='UTC'),
                     '2010-03-15T06:30Z')
        assert_(np.datetime_as_string(a, timezone='local') !=
                '2010-03-15T06:30')
        # b falls in US winter (standard time), a in DST -- the expected
        # offsets below differ by one hour accordingly.
        b = np.datetime64('2010-02-15T06:30', 'm')
        assert_equal(np.datetime_as_string(a, timezone=tz('US/Central')),
                     '2010-03-15T01:30-0500')
        assert_equal(np.datetime_as_string(a, timezone=tz('US/Eastern')),
                     '2010-03-15T02:30-0400')
        assert_equal(np.datetime_as_string(a, timezone=tz('US/Pacific')),
                     '2010-03-14T23:30-0700')
        assert_equal(np.datetime_as_string(b, timezone=tz('US/Central')),
                     '2010-02-15T00:30-0600')
        assert_equal(np.datetime_as_string(b, timezone=tz('US/Eastern')),
                     '2010-02-15T01:30-0500')
        assert_equal(np.datetime_as_string(b, timezone=tz('US/Pacific')),
                     '2010-02-14T22:30-0800')
        # Dates to strings with a timezone attached is disabled by default
        assert_raises(TypeError, np.datetime_as_string, a, unit='D',
                      timezone=tz('US/Pacific'))
        # Check that we can print out the date in the specified time zone
        assert_equal(np.datetime_as_string(a, unit='D',
                     timezone=tz('US/Pacific'), casting='unsafe'),
                     '2010-03-14')
        assert_equal(np.datetime_as_string(b, unit='D',
                     timezone=tz('US/Central'), casting='unsafe'),
                     '2010-02-15')
    def test_datetime_arange(self):
        """np.arange over datetime64: string endpoints, unit inference,
        datetime + integer/timedelta stop, and the error cases (zero
        step, promotion across nonlinear unit boundaries).
        """
        # With two datetimes provided as strings
        a = np.arange('2010-01-05', '2010-01-10', dtype='M8[D]')
        assert_equal(a.dtype, np.dtype('M8[D]'))
        assert_equal(a,
            np.array(['2010-01-05', '2010-01-06', '2010-01-07',
                      '2010-01-08', '2010-01-09'], dtype='M8[D]'))
        a = np.arange('1950-02-10', '1950-02-06', -1, dtype='M8[D]')
        assert_equal(a.dtype, np.dtype('M8[D]'))
        assert_equal(a,
            np.array(['1950-02-10', '1950-02-09', '1950-02-08',
                      '1950-02-07'], dtype='M8[D]'))
        # Unit should be detected as months here
        a = np.arange('1969-05', '1970-05', 2, dtype='M8')
        assert_equal(a.dtype, np.dtype('M8[M]'))
        assert_equal(a,
            np.datetime64('1969-05') + np.arange(12, step=2))
        # datetime, integer|timedelta works as well
        # produces arange (start, start + stop) in this case
        a = np.arange('1969', 18, 3, dtype='M8')
        assert_equal(a.dtype, np.dtype('M8[Y]'))
        assert_equal(a,
            np.datetime64('1969') + np.arange(18, step=3))
        a = np.arange('1969-12-19', 22, np.timedelta64(2), dtype='M8')
        assert_equal(a.dtype, np.dtype('M8[D]'))
        assert_equal(a,
            np.datetime64('1969-12-19') + np.arange(22, step=2))
        # Step of 0 is disallowed
        assert_raises(ValueError, np.arange, np.datetime64('today'),
                      np.datetime64('today') + 3, 0)
        # Promotion across nonlinear unit boundaries is disallowed
        assert_raises(TypeError, np.arange, np.datetime64('2011-03-01', 'D'),
                      np.timedelta64(5, 'M'))
        assert_raises(TypeError, np.arange,
                      np.datetime64('2012-02-03T14', 's'),
                      np.timedelta64(5, 'Y'))
def test_datetime_arange_no_dtype(self):
d = np.array('2010-01-04', dtype="M8[D]")
assert_equal(np.arange(d, d + 1), d)
assert_raises(ValueError, np.arange, d)
    def test_timedelta_arange(self):
        """np.arange over timedelta64: unit inference, timedelta start, and
        the error cases (zero step, nonlinear unit promotion).
        """
        a = np.arange(3, 10, dtype='m8')
        assert_equal(a.dtype, np.dtype('m8'))
        assert_equal(a, np.timedelta64(0) + np.arange(3, 10))
        a = np.arange(np.timedelta64(3, 's'), 10, 2, dtype='m8')
        assert_equal(a.dtype, np.dtype('m8[s]'))
        assert_equal(a, np.timedelta64(0, 's') + np.arange(3, 10, 2))
        # Step of 0 is disallowed
        assert_raises(ValueError, np.arange, np.timedelta64(0),
                      np.timedelta64(5), 0)
        # Promotion across nonlinear unit boundaries is disallowed
        assert_raises(TypeError, np.arange, np.timedelta64(0, 'D'),
                      np.timedelta64(5, 'M'))
        assert_raises(TypeError, np.arange, np.timedelta64(0, 'Y'),
                      np.timedelta64(5, 'D'))
    @pytest.mark.parametrize("val1, val2, expected", [
        # case from gh-12092
        (np.timedelta64(7, 's'),
         np.timedelta64(3, 's'),
         np.timedelta64(1, 's')),
        # negative value cases
        (np.timedelta64(3, 's'),
         np.timedelta64(-2, 's'),
         np.timedelta64(-1, 's')),
        (np.timedelta64(-3, 's'),
         np.timedelta64(2, 's'),
         np.timedelta64(1, 's')),
        # larger value cases
        (np.timedelta64(17, 's'),
         np.timedelta64(22, 's'),
         np.timedelta64(17, 's')),
        (np.timedelta64(22, 's'),
         np.timedelta64(17, 's'),
         np.timedelta64(5, 's')),
        # different units
        (np.timedelta64(1, 'm'),
         np.timedelta64(57, 's'),
         np.timedelta64(3, 's')),
        (np.timedelta64(1, 'us'),
         np.timedelta64(727, 'ns'),
         np.timedelta64(273, 'ns')),
        # NaT is propagated
        (np.timedelta64('NaT'),
         np.timedelta64(50, 'ns'),
         np.timedelta64('NaT')),
        # Y % M works
        (np.timedelta64(2, 'Y'),
         np.timedelta64(22, 'M'),
         np.timedelta64(2, 'M')),
        ])
    def test_timedelta_modulus(self, val1, val2, expected):
        """timedelta64 % follows Python's sign-of-divisor remainder rule."""
        assert_equal(val1 % val2, expected)
    @pytest.mark.parametrize("val1, val2", [
        # years and months sometimes can't be unambiguously
        # divided for modulus operation
        (np.timedelta64(7, 'Y'),
         np.timedelta64(3, 's')),
        (np.timedelta64(7, 'M'),
         np.timedelta64(1, 'D')),
        ])
    def test_timedelta_modulus_error(self, val1, val2):
        """Modulus across incompatible (nonlinear) units raises TypeError."""
        with assert_raises_regex(TypeError, "common metadata divisor"):
            val1 % val2
def test_timedelta_modulus_div_by_zero(self):
with assert_warns(RuntimeWarning):
actual = np.timedelta64(10, 's') % np.timedelta64(0, 's')
assert_equal(actual, np.timedelta64('NaT'))
    @pytest.mark.parametrize("val1, val2", [
        # cases where one operand is not
        # timedelta64
        (np.timedelta64(7, 'Y'),
         15,),
        (7.5,
         np.timedelta64(1, 'D')),
        ])
    def test_timedelta_modulus_type_resolution(self, val1, val2):
        """Modulus mixing timedelta64 with a plain number is a TypeError."""
        # NOTE: some of the operations may be supported
        # in the future
        with assert_raises_regex(TypeError,
                "'remainder' cannot use operands with types"):
            val1 % val2
def test_timedelta_arange_no_dtype(self):
d = np.array(5, dtype="m8[D]")
assert_equal(np.arange(d, d + 1), d)
assert_equal(np.arange(d), np.arange(0, d))
def test_datetime_maximum_reduce(self):
a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='M8[D]')
assert_equal(np.maximum.reduce(a).dtype, np.dtype('M8[D]'))
assert_equal(np.maximum.reduce(a),
np.datetime64('2010-01-02'))
a = np.array([1, 4, 0, 7, 2], dtype='m8[s]')
assert_equal(np.maximum.reduce(a).dtype, np.dtype('m8[s]'))
assert_equal(np.maximum.reduce(a),
np.timedelta64(7, 's'))
    def test_datetime_busday_offset(self):
        """np.busday_offset: roll modes, custom weekmasks, NaT handling.

        Expected dates below are literal gold values; the 2010-08 cases
        exercise every roll mode from the same start date.
        """
        # First Monday in June
        assert_equal(
            np.busday_offset('2011-06', 0, roll='forward', weekmask='Mon'),
            np.datetime64('2011-06-06'))
        # Last Monday in June
        assert_equal(
            np.busday_offset('2011-07', -1, roll='forward', weekmask='Mon'),
            np.datetime64('2011-06-27'))
        assert_equal(
            np.busday_offset('2011-07', -1, roll='forward', weekmask='Mon'),
            np.datetime64('2011-06-27'))
        # Default M-F business days, different roll modes
        assert_equal(np.busday_offset('2010-08', 0, roll='backward'),
                     np.datetime64('2010-07-30'))
        assert_equal(np.busday_offset('2010-08', 0, roll='preceding'),
                     np.datetime64('2010-07-30'))
        assert_equal(np.busday_offset('2010-08', 0, roll='modifiedpreceding'),
                     np.datetime64('2010-08-02'))
        assert_equal(np.busday_offset('2010-08', 0, roll='modifiedfollowing'),
                     np.datetime64('2010-08-02'))
        assert_equal(np.busday_offset('2010-08', 0, roll='forward'),
                     np.datetime64('2010-08-02'))
        assert_equal(np.busday_offset('2010-08', 0, roll='following'),
                     np.datetime64('2010-08-02'))
        assert_equal(np.busday_offset('2010-10-30', 0, roll='following'),
                     np.datetime64('2010-11-01'))
        # "modified" rolls reverse direction rather than cross a month edge
        assert_equal(
            np.busday_offset('2010-10-30', 0, roll='modifiedfollowing'),
            np.datetime64('2010-10-29'))
        assert_equal(
            np.busday_offset('2010-10-30', 0, roll='modifiedpreceding'),
            np.datetime64('2010-10-29'))
        assert_equal(
            np.busday_offset('2010-10-16', 0, roll='modifiedfollowing'),
            np.datetime64('2010-10-18'))
        assert_equal(
            np.busday_offset('2010-10-16', 0, roll='modifiedpreceding'),
            np.datetime64('2010-10-15'))
        # roll='raise' by default
        assert_raises(ValueError, np.busday_offset, '2011-06-04', 0)
        # Bigger offset values
        assert_equal(np.busday_offset('2006-02-01', 25),
                     np.datetime64('2006-03-08'))
        assert_equal(np.busday_offset('2006-03-08', -25),
                     np.datetime64('2006-02-01'))
        assert_equal(np.busday_offset('2007-02-25', 11, weekmask='SatSun'),
                     np.datetime64('2007-04-07'))
        assert_equal(np.busday_offset('2007-04-07', -11, weekmask='SatSun'),
                     np.datetime64('2007-02-25'))
        # NaT values when roll is not raise
        assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='nat'),
                     np.datetime64('NaT'))
        assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='following'),
                     np.datetime64('NaT'))
        assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='preceding'),
                     np.datetime64('NaT'))
    def test_datetime_busdaycalendar(self):
        """np.busdaycalendar normalizes holidays and parses weekmask forms."""
        # Check that it removes NaT, duplicates, and weekends
        # and sorts the result.
        bdd = np.busdaycalendar(
            holidays=['NaT', '2011-01-17', '2011-03-06', 'NaT',
                      '2011-12-26', '2011-05-30', '2011-01-17'])
        assert_equal(bdd.holidays,
            np.array(['2011-01-17', '2011-05-30', '2011-12-26'], dtype='M8'))
        # Default M-F weekmask
        assert_equal(bdd.weekmask, np.array([1, 1, 1, 1, 1, 0, 0], dtype='?'))
        # Check string weekmask with varying whitespace.
        bdd = np.busdaycalendar(weekmask="Sun TueWed Thu\tFri")
        assert_equal(bdd.weekmask, np.array([0, 1, 1, 1, 1, 0, 1], dtype='?'))
        # Check length 7 0/1 string
        bdd = np.busdaycalendar(weekmask="0011001")
        assert_equal(bdd.weekmask, np.array([0, 0, 1, 1, 0, 0, 1], dtype='?'))
        # Check abbreviated weekday-name weekmask
        bdd = np.busdaycalendar(weekmask="Mon Tue")
        assert_equal(bdd.weekmask, np.array([1, 1, 0, 0, 0, 0, 0], dtype='?'))
        # All-zeros weekmask should raise
        assert_raises(ValueError, np.busdaycalendar, weekmask=[0, 0, 0, 0, 0, 0, 0])
        # weekday names must be correct case
        assert_raises(ValueError, np.busdaycalendar, weekmask="satsun")
        # Empty weekmask should raise
        assert_raises(ValueError, np.busdaycalendar, weekmask="")
        # Invalid weekday name codes should raise
        assert_raises(ValueError, np.busdaycalendar, weekmask="Mon Tue We")
        assert_raises(ValueError, np.busdaycalendar, weekmask="Max")
        assert_raises(ValueError, np.busdaycalendar, weekmask="Monday Tue")
    def test_datetime_busday_holidays_offset(self):
        """np.busday_offset with explicit holiday lists and calendars.

        The big forward/backward jump sections validate holiday-aware
        offsets against the plain (no-holiday) offsets shifted by the
        number of holidays crossed.
        """
        # With exactly one holiday
        assert_equal(
            np.busday_offset('2011-11-10', 1, holidays=['2011-11-11']),
            np.datetime64('2011-11-14'))
        assert_equal(
            np.busday_offset('2011-11-04', 5, holidays=['2011-11-11']),
            np.datetime64('2011-11-14'))
        assert_equal(
            np.busday_offset('2011-11-10', 5, holidays=['2011-11-11']),
            np.datetime64('2011-11-18'))
        assert_equal(
            np.busday_offset('2011-11-14', -1, holidays=['2011-11-11']),
            np.datetime64('2011-11-10'))
        assert_equal(
            np.busday_offset('2011-11-18', -5, holidays=['2011-11-11']),
            np.datetime64('2011-11-10'))
        assert_equal(
            np.busday_offset('2011-11-14', -5, holidays=['2011-11-11']),
            np.datetime64('2011-11-04'))
        # With the holiday appearing twice
        assert_equal(
            np.busday_offset('2011-11-10', 1,
                holidays=['2011-11-11', '2011-11-11']),
            np.datetime64('2011-11-14'))
        assert_equal(
            np.busday_offset('2011-11-14', -1,
                holidays=['2011-11-11', '2011-11-11']),
            np.datetime64('2011-11-10'))
        # With a NaT holiday
        assert_equal(
            np.busday_offset('2011-11-10', 1,
                holidays=['2011-11-11', 'NaT']),
            np.datetime64('2011-11-14'))
        assert_equal(
            np.busday_offset('2011-11-14', -1,
                holidays=['NaT', '2011-11-11']),
            np.datetime64('2011-11-10'))
        # With another holiday after
        assert_equal(
            np.busday_offset('2011-11-10', 1,
                holidays=['2011-11-11', '2011-11-24']),
            np.datetime64('2011-11-14'))
        assert_equal(
            np.busday_offset('2011-11-14', -1,
                holidays=['2011-11-11', '2011-11-24']),
            np.datetime64('2011-11-10'))
        # With another holiday before
        assert_equal(
            np.busday_offset('2011-11-10', 1,
                holidays=['2011-10-10', '2011-11-11']),
            np.datetime64('2011-11-14'))
        assert_equal(
            np.busday_offset('2011-11-14', -1,
                holidays=['2011-10-10', '2011-11-11']),
            np.datetime64('2011-11-10'))
        # With another holiday before and after
        assert_equal(
            np.busday_offset('2011-11-10', 1,
                holidays=['2011-10-10', '2011-11-11', '2011-11-24']),
            np.datetime64('2011-11-14'))
        assert_equal(
            np.busday_offset('2011-11-14', -1,
                holidays=['2011-10-10', '2011-11-11', '2011-11-24']),
            np.datetime64('2011-11-10'))
        # A bigger forward jump across more than one week/holiday
        holidays = ['2011-10-10', '2011-11-11', '2011-11-24',
                    '2011-12-25', '2011-05-30', '2011-02-21',
                    '2011-12-26', '2012-01-02']
        bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)
        assert_equal(
            np.busday_offset('2011-10-03', 4, holidays=holidays),
            np.busday_offset('2011-10-03', 4))
        assert_equal(
            np.busday_offset('2011-10-03', 5, holidays=holidays),
            np.busday_offset('2011-10-03', 5 + 1))
        assert_equal(
            np.busday_offset('2011-10-03', 27, holidays=holidays),
            np.busday_offset('2011-10-03', 27 + 1))
        assert_equal(
            np.busday_offset('2011-10-03', 28, holidays=holidays),
            np.busday_offset('2011-10-03', 28 + 2))
        assert_equal(
            np.busday_offset('2011-10-03', 35, holidays=holidays),
            np.busday_offset('2011-10-03', 35 + 2))
        assert_equal(
            np.busday_offset('2011-10-03', 36, holidays=holidays),
            np.busday_offset('2011-10-03', 36 + 3))
        assert_equal(
            np.busday_offset('2011-10-03', 56, holidays=holidays),
            np.busday_offset('2011-10-03', 56 + 3))
        assert_equal(
            np.busday_offset('2011-10-03', 57, holidays=holidays),
            np.busday_offset('2011-10-03', 57 + 4))
        assert_equal(
            np.busday_offset('2011-10-03', 60, holidays=holidays),
            np.busday_offset('2011-10-03', 60 + 4))
        assert_equal(
            np.busday_offset('2011-10-03', 61, holidays=holidays),
            np.busday_offset('2011-10-03', 61 + 5))
        assert_equal(
            np.busday_offset('2011-10-03', 61, busdaycal=bdd),
            np.busday_offset('2011-10-03', 61 + 5))
        # A bigger backward jump across more than one week/holiday
        assert_equal(
            np.busday_offset('2012-01-03', -1, holidays=holidays),
            np.busday_offset('2012-01-03', -1 - 1))
        assert_equal(
            np.busday_offset('2012-01-03', -4, holidays=holidays),
            np.busday_offset('2012-01-03', -4 - 1))
        assert_equal(
            np.busday_offset('2012-01-03', -5, holidays=holidays),
            np.busday_offset('2012-01-03', -5 - 2))
        assert_equal(
            np.busday_offset('2012-01-03', -25, holidays=holidays),
            np.busday_offset('2012-01-03', -25 - 2))
        assert_equal(
            np.busday_offset('2012-01-03', -26, holidays=holidays),
            np.busday_offset('2012-01-03', -26 - 3))
        assert_equal(
            np.busday_offset('2012-01-03', -33, holidays=holidays),
            np.busday_offset('2012-01-03', -33 - 3))
        assert_equal(
            np.busday_offset('2012-01-03', -34, holidays=holidays),
            np.busday_offset('2012-01-03', -34 - 4))
        assert_equal(
            np.busday_offset('2012-01-03', -56, holidays=holidays),
            np.busday_offset('2012-01-03', -56 - 4))
        assert_equal(
            np.busday_offset('2012-01-03', -57, holidays=holidays),
            np.busday_offset('2012-01-03', -57 - 5))
        assert_equal(
            np.busday_offset('2012-01-03', -57, busdaycal=bdd),
            np.busday_offset('2012-01-03', -57 - 5))
        # Can't supply both a weekmask/holidays and busdaycal
        assert_raises(ValueError, np.busday_offset, '2012-01-03', -15,
                      weekmask='1111100', busdaycal=bdd)
        assert_raises(ValueError, np.busday_offset, '2012-01-03', -15,
                      holidays=holidays, busdaycal=bdd)
        # Roll with the holidays
        assert_equal(
            np.busday_offset('2011-12-25', 0,
                roll='forward', holidays=holidays),
            np.datetime64('2011-12-27'))
        assert_equal(
            np.busday_offset('2011-12-26', 0,
                roll='forward', holidays=holidays),
            np.datetime64('2011-12-27'))
        assert_equal(
            np.busday_offset('2011-12-26', 0,
                roll='backward', holidays=holidays),
            np.datetime64('2011-12-23'))
        assert_equal(
            np.busday_offset('2012-02-27', 0,
                roll='modifiedfollowing',
                holidays=['2012-02-27', '2012-02-26', '2012-02-28',
                          '2012-03-01', '2012-02-29']),
            np.datetime64('2012-02-24'))
        assert_equal(
            np.busday_offset('2012-03-06', 0,
                roll='modifiedpreceding',
                holidays=['2012-03-02', '2012-03-03', '2012-03-01',
                          '2012-03-05', '2012-03-07', '2012-03-06']),
            np.datetime64('2012-03-08'))
    def test_datetime_busday_holidays_count(self):
        """np.busday_count with a holiday calendar, both argument orders."""
        holidays = ['2011-01-01', '2011-10-10', '2011-11-11', '2011-11-24',
                    '2011-12-25', '2011-05-30', '2011-02-21', '2011-01-17',
                    '2011-12-26', '2012-01-02', '2011-02-21', '2011-05-30',
                    '2011-07-01', '2011-07-04', '2011-09-05', '2011-10-10']
        bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)
        # Validate against busday_offset broadcast against
        # a range of offsets
        dates = np.busday_offset('2011-01-01', np.arange(366),
                        roll='forward', busdaycal=bdd)
        assert_equal(np.busday_count('2011-01-01', dates, busdaycal=bdd),
                     np.arange(366))
        # Returns negative value when reversed
        assert_equal(np.busday_count(dates, '2011-01-01', busdaycal=bdd),
                     -np.arange(366))
        # Same check walking backwards from the end of the year
        dates = np.busday_offset('2011-12-31', -np.arange(366),
                        roll='forward', busdaycal=bdd)
        assert_equal(np.busday_count(dates, '2011-12-31', busdaycal=bdd),
                     np.arange(366))
        # Returns negative value when reversed
        assert_equal(np.busday_count('2011-12-31', dates, busdaycal=bdd),
                     -np.arange(366))
        # Can't supply both a weekmask/holidays and busdaycal
        assert_raises(ValueError, np.busday_offset, '2012-01-03', '2012-02-03',
                      weekmask='1111100', busdaycal=bdd)
        assert_raises(ValueError, np.busday_offset, '2012-01-03', '2012-02-03',
                      holidays=holidays, busdaycal=bdd)
        # Number of Mondays in March 2011
        assert_equal(np.busday_count('2011-03', '2011-04', weekmask='Mon'), 4)
        # Returns negative value when reversed
        assert_equal(np.busday_count('2011-04', '2011-03', weekmask='Mon'), -4)
def test_datetime_is_busday(self):
holidays = ['2011-01-01', '2011-10-10', '2011-11-11', '2011-11-24',
'2011-12-25', '2011-05-30', '2011-02-21', '2011-01-17',
'2011-12-26', '2012-01-02', '2011-02-21', '2011-05-30',
'2011-07-01', '2011-07-04', '2011-09-05', '2011-10-10',
'NaT']
bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)
# Weekend/weekday tests
assert_equal(np.is_busday('2011-01-01'), False)
assert_equal(np.is_busday('2011-01-02'), False)
assert_equal(np.is_busday('2011-01-03'), True)
# All the holidays are not business days
assert_equal(np.is_busday(holidays, busdaycal=bdd),
np.zeros(len(holidays), dtype='?'))
def test_datetime_y2038(self):
# Test parsing on either side of the Y2038 boundary
a = np.datetime64('2038-01-19T03:14:07')
assert_equal(a.view(np.int64), 2**31 - 1)
a = np.datetime64('2038-01-19T03:14:08')
assert_equal(a.view(np.int64), 2**31)
# Test parsing on either side of the Y2038 boundary with
# a manually specified timezone offset
with assert_warns(DeprecationWarning):
a = np.datetime64('2038-01-19T04:14:07+0100')
assert_equal(a.view(np.int64), 2**31 - 1)
with assert_warns(DeprecationWarning):
a = np.datetime64('2038-01-19T04:14:08+0100')
assert_equal(a.view(np.int64), 2**31)
# Test parsing a date after Y2038
a = np.datetime64('2038-01-20T13:21:14')
assert_equal(str(a), '2038-01-20T13:21:14')
def test_isnat(self):
assert_(np.isnat(np.datetime64('NaT', 'ms')))
assert_(np.isnat(np.datetime64('NaT', 'ns')))
assert_(not np.isnat(np.datetime64('2038-01-19T03:14:07')))
assert_(np.isnat(np.timedelta64('NaT', "ms")))
assert_(not np.isnat(np.timedelta64(34, "ms")))
res = np.array([False, False, True])
for unit in ['Y', 'M', 'W', 'D',
'h', 'm', 's', 'ms', 'us',
'ns', 'ps', 'fs', 'as']:
arr = np.array([123, -321, "NaT"], dtype='<datetime64[%s]' % unit)
assert_equal(np.isnat(arr), res)
arr = np.array([123, -321, "NaT"], dtype='>datetime64[%s]' % unit)
assert_equal(np.isnat(arr), res)
arr = np.array([123, -321, "NaT"], dtype='<timedelta64[%s]' % unit)
assert_equal(np.isnat(arr), res)
arr = np.array([123, -321, "NaT"], dtype='>timedelta64[%s]' % unit)
assert_equal(np.isnat(arr), res)
def test_isnat_error(self):
# Test that only datetime dtype arrays are accepted
for t in np.typecodes["All"]:
if t in np.typecodes["Datetime"]:
continue
assert_raises(TypeError, np.isnat, np.zeros(10, t))
def test_isfinite_scalar(self):
assert_(not np.isfinite(np.datetime64('NaT', 'ms')))
assert_(not np.isfinite(np.datetime64('NaT', 'ns')))
assert_(np.isfinite(np.datetime64('2038-01-19T03:14:07')))
assert_(not np.isfinite(np.timedelta64('NaT', "ms")))
assert_(np.isfinite(np.timedelta64(34, "ms")))
    @pytest.mark.parametrize('unit', ['Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',
                                      'us', 'ns', 'ps', 'fs', 'as'])
    @pytest.mark.parametrize('dstr', ['<datetime64[%s]', '>datetime64[%s]',
                                      '<timedelta64[%s]', '>timedelta64[%s]'])
    def test_isfinite_isinf_isnan_units(self, unit, dstr):
        '''check isfinite, isinf, isnan for all units of <M, >M, <m, >m dtypes
        '''
        arr_val = [123, -321, "NaT"]
        arr = np.array(arr_val, dtype= dstr % unit)
        # NaT is not finite, is treated as nan, and is never inf.
        pos = np.array([True, True,  False])
        neg = np.array([False, False, True])
        false = np.array([False, False, False])
        assert_equal(np.isfinite(arr), pos)
        assert_equal(np.isinf(arr), false)
        assert_equal(np.isnan(arr), neg)
def test_assert_equal(self):
assert_raises(AssertionError, assert_equal,
np.datetime64('nat'), np.timedelta64('nat'))
def test_corecursive_input(self):
# construct a co-recursive list
a, b = [], []
a.append(b)
b.append(a)
obj_arr = np.array([None])
obj_arr[0] = a
# At some point this caused a stack overflow (gh-11154). Now raises
# ValueError since the nested list cannot be converted to a datetime.
assert_raises(ValueError, obj_arr.astype, 'M8')
assert_raises(ValueError, obj_arr.astype, 'm8')
    @pytest.mark.parametrize("shape", [(), (1,)])
    def test_discovery_from_object_array(self, shape):
        """Datetime strings in object arrays are discovered as M8[D]."""
        arr = np.array("2020-10-10", dtype=object).reshape(shape)
        res = np.array("2020-10-10", dtype="M8").reshape(shape)
        assert res.dtype == np.dtype("M8[D]")
        assert_equal(arr.astype("M8"), res)
        arr[...] = np.bytes_("2020-10-10")  # try a numpy string type
        assert_equal(arr.astype("M8"), res)
        arr = arr.astype("S")
        assert_equal(arr.astype("S").astype("M8"), res)
    @pytest.mark.parametrize("time_unit", [
        "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as",
        # compound units
        "10D", "2M",
    ])
    def test_limit_symmetry(self, time_unit):
        """
        Dates should have symmetric limits around the unix epoch at +/-np.int64
        """
        epoch = np.datetime64(0, time_unit)
        latest = np.datetime64(np.iinfo(np.int64).max, time_unit)
        # -max rather than min: presumably int64 min encodes NaT — the
        # usable range is therefore symmetric about the epoch.
        earliest = np.datetime64(-np.iinfo(np.int64).max, time_unit)
        # above should not have overflowed
        assert earliest < epoch < latest
    @pytest.mark.parametrize("time_unit", [
        "Y", "M",
        pytest.param("W", marks=pytest.mark.xfail(reason="gh-13197")),
        "D", "h", "m",
        "s", "ms", "us", "ns", "ps", "fs", "as",
        pytest.param("10D", marks=pytest.mark.xfail(reason="similar to gh-13197")),
    ])
    @pytest.mark.parametrize("sign", [-1, 1])
    def test_limit_str_roundtrip(self, time_unit, sign):
        """
        Limits should roundtrip when converted to strings.
        This tests the conversion to and from npy_datetimestruct.
        """
        # TODO: add absolute (gold standard) time span limit strings
        # sign=-1 exercises the earliest representable value, +1 the latest
        limit = np.datetime64(np.iinfo(np.int64).max * sign, time_unit)
        # Convert to string and back. Explicit unit needed since the day and
        # week reprs are not distinguishable.
        limit_via_str = np.datetime64(str(limit), time_unit)
        assert limit_via_str == limit
class TestDateTimeData:
    """Tests for np.datetime_data (unit, count) extraction from dtypes."""
    def test_basic(self):
        # datetime_data returns the (unit string, count) of the dtype
        a = np.array(['1980-03-23'], dtype=np.datetime64)
        assert_equal(np.datetime_data(a.dtype), ('D', 1))
    def test_bytes(self):
        # byte units are converted to unicode
        dt = np.datetime64('2000', (b'ms', 5))
        assert np.datetime_data(dt.dtype) == ('ms', 5)
        dt = np.datetime64('2000', b'5ms')
        assert np.datetime_data(dt.dtype) == ('ms', 5)
    def test_non_ascii(self):
        # the non-ASCII unit 'μs' is normalized to ASCII 'us'
        dt = np.datetime64('2000', ('μs', 5))
        assert np.datetime_data(dt.dtype) == ('us', 5)
        dt = np.datetime64('2000', '5μs')
        assert np.datetime_data(dt.dtype) == ('us', 5)
| 45.494396 | 101 | 0.53845 |
import numpy
import numpy as np
import datetime
import pytest
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_warns, suppress_warnings,
assert_raises_regex,
)
from numpy.compat import pickle
try:
    # pytz is optional; _has_pytz is presumably consulted by timezone
    # tests elsewhere in the file — not visible in this chunk.
    from pytz import timezone as tz
    _has_pytz = True
except ImportError:
    _has_pytz = False
try:
    # Python 2 compatibility: RecursionError appeared in Python 3.5;
    # fall back to its predecessor RuntimeError on older interpreters.
    RecursionError
except NameError:
    RecursionError = RuntimeError
class TestDateTime:
    def test_datetime_dtype_creation(self):
        """Datetime/timedelta dtype strings parse, compare, and reject junk."""
        for unit in ['Y', 'M', 'W', 'D',
                     'h', 'm', 's', 'ms', 'us',
                     'μs',  # non-ASCII alias for 'us'
                     'ns', 'ps', 'fs', 'as']:
            dt1 = np.dtype('M8[750%s]' % unit)
            assert_(dt1 == np.dtype('datetime64[750%s]' % unit))
            dt2 = np.dtype('m8[%s]' % unit)
            assert_(dt2 == np.dtype('timedelta64[%s]' % unit))
        assert_equal(str(np.dtype("M8")), "datetime64")
        # Should be possible to specify the endianness
        assert_equal(np.dtype("=M8"), np.dtype("M8"))
        assert_equal(np.dtype("=M8[s]"), np.dtype("M8[s]"))
        assert_(np.dtype(">M8") == np.dtype("M8") or
                np.dtype("<M8") == np.dtype("M8"))
        assert_(np.dtype(">M8[D]") == np.dtype("M8[D]") or
                np.dtype("<M8[D]") == np.dtype("M8[D]"))
        assert_(np.dtype(">M8") != np.dtype("<M8"))
        assert_equal(np.dtype("=m8"), np.dtype("m8"))
        assert_equal(np.dtype("=m8[s]"), np.dtype("m8[s]"))
        assert_(np.dtype(">m8") == np.dtype("m8") or
                np.dtype("<m8") == np.dtype("m8"))
        assert_(np.dtype(">m8[D]") == np.dtype("m8[D]") or
                np.dtype("<m8[D]") == np.dtype("m8[D]"))
        assert_(np.dtype(">m8") != np.dtype("<m8"))
        # Check that the parser rejects bad datetime types
        assert_raises(TypeError, np.dtype, 'M8[badunit]')
        assert_raises(TypeError, np.dtype, 'm8[badunit]')
        assert_raises(TypeError, np.dtype, 'M8[YY]')
        assert_raises(TypeError, np.dtype, 'm8[YY]')
        assert_raises(TypeError, np.dtype, 'm4')
        assert_raises(TypeError, np.dtype, 'M7')
        assert_raises(TypeError, np.dtype, 'm7')
        assert_raises(TypeError, np.dtype, 'M16')
        assert_raises(TypeError, np.dtype, 'm16')
    def test_datetime_casting_rules(self):
        """np.can_cast rules between M8/m8, integers, floats, and bools."""
        # Cannot cast safely/same_kind between timedelta and datetime
        assert_(not np.can_cast('m8', 'M8', casting='same_kind'))
        assert_(not np.can_cast('M8', 'm8', casting='same_kind'))
        assert_(not np.can_cast('m8', 'M8', casting='safe'))
        assert_(not np.can_cast('M8', 'm8', casting='safe'))
        # Can cast safely/same_kind from integer to timedelta
        assert_(np.can_cast('i8', 'm8', casting='same_kind'))
        assert_(np.can_cast('i8', 'm8', casting='safe'))
        assert_(np.can_cast('i4', 'm8', casting='same_kind'))
        assert_(np.can_cast('i4', 'm8', casting='safe'))
        assert_(np.can_cast('u4', 'm8', casting='same_kind'))
        assert_(np.can_cast('u4', 'm8', casting='safe'))
        # Cannot cast safely from unsigned integer of the same size, which
        # could overflow
        assert_(np.can_cast('u8', 'm8', casting='same_kind'))
        assert_(not np.can_cast('u8', 'm8', casting='safe'))
        # Cannot cast safely/same_kind from float to timedelta
        assert_(not np.can_cast('f4', 'm8', casting='same_kind'))
        assert_(not np.can_cast('f4', 'm8', casting='safe'))
        # Cannot cast safely/same_kind from integer to datetime
        assert_(not np.can_cast('i8', 'M8', casting='same_kind'))
        assert_(not np.can_cast('i8', 'M8', casting='safe'))
        # Cannot cast safely/same_kind from bool to datetime
        assert_(not np.can_cast('b1', 'M8', casting='same_kind'))
        assert_(not np.can_cast('b1', 'M8', casting='safe'))
        # Can cast safely/same_kind from bool to timedelta
        assert_(np.can_cast('b1', 'm8', casting='same_kind'))
        assert_(np.can_cast('b1', 'm8', casting='safe'))
        # Can cast datetime safely from months/years to days
        assert_(np.can_cast('M8[M]', 'M8[D]', casting='safe'))
        assert_(np.can_cast('M8[Y]', 'M8[D]', casting='safe'))
        # Cannot cast timedelta safely from months/years to days
        assert_(not np.can_cast('m8[M]', 'm8[D]', casting='safe'))
        assert_(not np.can_cast('m8[Y]', 'm8[D]', casting='safe'))
        # Can cast datetime same_kind from months/years to days
        assert_(np.can_cast('M8[M]', 'M8[D]', casting='same_kind'))
        assert_(np.can_cast('M8[Y]', 'M8[D]', casting='same_kind'))
        # Can't cast timedelta same_kind from months/years to days
        assert_(not np.can_cast('m8[M]', 'm8[D]', casting='same_kind'))
        assert_(not np.can_cast('m8[Y]', 'm8[D]', casting='same_kind'))
        # Finer time units are reachable via same_kind in both directions
        assert_(np.can_cast('M8[D]', 'M8[h]', casting='same_kind'))
        assert_(np.can_cast('m8[D]', 'm8[h]', casting='same_kind'))
        assert_(np.can_cast('m8[h]', 'm8[D]', casting='same_kind'))
        # Non-dividing unit multipliers are not a safe cast
        assert_(not np.can_cast('M8[7h]', 'M8[3h]', casting='safe'))
        assert_(not np.can_cast('M8[3h]', 'M8[6h]', casting='safe'))
        # But can cast same_kind
        assert_(np.can_cast('M8[7h]', 'M8[3h]', casting='same_kind'))
        # Can cast safely if the integer multiplier does divide
        assert_(np.can_cast('M8[6h]', 'M8[3h]', casting='safe'))
        # We can always cast types with generic units (corresponding to NaT) to
        # more specific types
        assert_(np.can_cast('m8', 'm8[h]', casting='same_kind'))
        assert_(np.can_cast('m8', 'm8[h]', casting='safe'))
        assert_(np.can_cast('M8', 'M8[h]', casting='same_kind'))
        assert_(np.can_cast('M8', 'M8[h]', casting='safe'))
        # but not the other way around
        assert_(not np.can_cast('m8[h]', 'm8', casting='same_kind'))
        assert_(not np.can_cast('m8[h]', 'm8', casting='safe'))
        assert_(not np.can_cast('M8[h]', 'M8', casting='same_kind'))
        assert_(not np.can_cast('M8[h]', 'M8', casting='safe'))
def test_compare_generic_nat(self):
# regression tests for gh-6452
assert_(np.datetime64('NaT') !=
np.datetime64('2000') + np.timedelta64('NaT'))
assert_(np.datetime64('NaT') != np.datetime64('NaT', 'us'))
assert_(np.datetime64('NaT', 'us') != np.datetime64('NaT'))
    @pytest.mark.parametrize("size", [
        3, 21, 217, 1000])
    def test_datetime_nat_argsort_stability(self, size):
        """A stable argsort of an all-NaT datetime array is the identity."""
        # NaT < NaT should be False internally for
        # sort stability
        expected = np.arange(size)
        arr = np.tile(np.datetime64('NaT'), size)
        assert_equal(np.argsort(arr, kind='mergesort'), expected)
    @pytest.mark.parametrize("size", [
        3, 21, 217, 1000])
    def test_timedelta_nat_argsort_stability(self, size):
        """A stable argsort of an all-NaT timedelta array is the identity."""
        # NaT < NaT should be False internally for
        # sort stability
        expected = np.arange(size)
        arr = np.tile(np.timedelta64('NaT'), size)
        assert_equal(np.argsort(arr, kind='mergesort'), expected)
    @pytest.mark.parametrize("arr, expected", [
        # the example provided in gh-12629
        (['NaT', 1, 2, 3],
         [1, 2, 3, 'NaT']),
        # multiple NaTs
        (['NaT', 9, 'NaT', -707],
         [-707, 9, 'NaT', 'NaT']),
        # this sort explores another code path for NaT
        ([1, -2, 3, 'NaT'],
         [-2, 1, 3, 'NaT']),
        # 2-D array
        ([[51, -220, 'NaT'],
          [-17, 'NaT', -90]],
         [[-220, 51, 'NaT'],
          [-90, -17, 'NaT']]),
        ])
    @pytest.mark.parametrize("dtype", [
        'M8[ns]', 'M8[us]',
        'm8[ns]', 'm8[us]'])
    def test_datetime_timedelta_sort_nat(self, arr, expected, dtype):
        """In-place sort places NaT at the end of each row."""
        # fix for gh-12629 and gh-15063; NaT sorting to end of array
        arr = np.array(arr, dtype=dtype)
        expected = np.array(expected, dtype=dtype)
        arr.sort()
        assert_equal(arr, expected)
    def test_datetime_scalar_construction(self):
        """Construction, str/repr, and unit handling of datetime64 scalars."""
        # Construct with different units
        assert_equal(np.datetime64('1950-03-12', 'D'),
                     np.datetime64('1950-03-12'))
        assert_equal(np.datetime64('1950-03-12T13', 's'),
                     np.datetime64('1950-03-12T13', 'm'))
        # Default construction means NaT
        assert_equal(np.datetime64(), np.datetime64('NaT'))
        # Some basic strings and repr
        assert_equal(str(np.datetime64('NaT')), 'NaT')
        assert_equal(repr(np.datetime64('NaT')),
                     "numpy.datetime64('NaT')")
        assert_equal(str(np.datetime64('2011-02')), '2011-02')
        assert_equal(repr(np.datetime64('2011-02')),
                     "numpy.datetime64('2011-02')")
        # None gets constructed as NaT
        assert_equal(np.datetime64(None), np.datetime64('NaT'))
        # Default construction of NaT is in generic units
        assert_equal(np.datetime64().dtype, np.dtype('M8'))
        assert_equal(np.datetime64('NaT').dtype, np.dtype('M8'))
        # Construction from integers requires a specified unit
        assert_raises(ValueError, np.datetime64, 17)
        # When constructing from a scalar or zero-dimensional array,
        # it either keeps the units or you can override them.
        a = np.datetime64('2000-03-18T16', 'h')
        b = np.array('2000-03-18T16', dtype='M8[h]')
        assert_equal(a.dtype, np.dtype('M8[h]'))
        assert_equal(b.dtype, np.dtype('M8[h]'))
        assert_equal(np.datetime64(a), a)
        assert_equal(np.datetime64(a).dtype, np.dtype('M8[h]'))
        assert_equal(np.datetime64(b), a)
        assert_equal(np.datetime64(b).dtype, np.dtype('M8[h]'))
        assert_equal(np.datetime64(a, 's'), a)
        assert_equal(np.datetime64(a, 's').dtype, np.dtype('M8[s]'))
        assert_equal(np.datetime64(b, 's'), a)
        assert_equal(np.datetime64(b, 's').dtype, np.dtype('M8[s]'))
        # Construction from datetime.date
        assert_equal(np.datetime64('1945-03-25'),
                     np.datetime64(datetime.date(1945, 3, 25)))
        assert_equal(np.datetime64('2045-03-25', 'D'),
                     np.datetime64(datetime.date(2045, 3, 25), 'D'))
        # Construction from datetime.datetime
        assert_equal(np.datetime64('1980-01-25T14:36:22.5'),
                     np.datetime64(datetime.datetime(1980, 1, 25,
                                                14, 36, 22, 500000)))
        # Construction with time units from a date is okay
        assert_equal(np.datetime64('1920-03-13', 'h'),
                     np.datetime64('1920-03-13T00'))
        assert_equal(np.datetime64('1920-03', 'm'),
                     np.datetime64('1920-03-01T00:00'))
        assert_equal(np.datetime64('1920', 's'),
                     np.datetime64('1920-01-01T00:00:00'))
        assert_equal(np.datetime64(datetime.date(2045, 3, 25), 'ms'),
                     np.datetime64('2045-03-25T00:00:00.000'))
        # Construction with date units from a datetime is also okay
        assert_equal(np.datetime64('1920-03-13T18', 'D'),
                     np.datetime64('1920-03-13'))
        assert_equal(np.datetime64('1920-03-13T18:33:12', 'M'),
                     np.datetime64('1920-03'))
        assert_equal(np.datetime64('1920-03-13T18:33:12.5', 'Y'),
                     np.datetime64('1920'))
def test_datetime_scalar_construction_timezone(self):
# verify that supplying an explicit timezone works, but is deprecated
with assert_warns(DeprecationWarning):
assert_equal(np.datetime64('2000-01-01T00Z'),
np.datetime64('2000-01-01T00'))
with assert_warns(DeprecationWarning):
assert_equal(np.datetime64('2000-01-01T00-08'),
np.datetime64('2000-01-01T08'))
    def test_datetime_array_find_type(self):
        """Dtype inference for arrays mixing datetimes with other objects."""
        dt = np.datetime64('1970-01-01', 'M')
        arr = np.array([dt])
        assert_equal(arr.dtype, np.dtype('M8[M]'))
        # at the moment, we don't automatically convert these to datetime64
        dt = datetime.date(1970, 1, 1)
        arr = np.array([dt])
        assert_equal(arr.dtype, np.dtype('O'))
        dt = datetime.datetime(1970, 1, 1, 12, 30, 40)
        arr = np.array([dt])
        assert_equal(arr.dtype, np.dtype('O'))
        # mixing a bool with any of the date types falls back to object
        b = np.bool_(True)
        dm = np.datetime64('1970-01-01', 'M')
        d = datetime.date(1970, 1, 1)
        dt = datetime.datetime(1970, 1, 1, 12, 30, 40)
        arr = np.array([b, dm])
        assert_equal(arr.dtype, np.dtype('O'))
        arr = np.array([b, d])
        assert_equal(arr.dtype, np.dtype('O'))
        arr = np.array([b, dt])
        assert_equal(arr.dtype, np.dtype('O'))
        # explicit astype discovers day resolution for dates,
        # microseconds for datetimes
        arr = np.array([d, d]).astype('datetime64')
        assert_equal(arr.dtype, np.dtype('M8[D]'))
        arr = np.array([dt, dt]).astype('datetime64')
        assert_equal(arr.dtype, np.dtype('M8[us]'))
    @pytest.mark.parametrize("unit", [
        ("Y"), ("M"), ("W"), ("D"), ("h"), ("m"),
        ("s"), ("ms"), ("us"), ("ns"), ("ps"),
        ("fs"), ("as"), ("generic") ])
    def test_timedelta_np_int_construction(self, unit):
        """np.int64 counts are accepted wherever a Python int is."""
        if unit != "generic":
            assert_equal(np.timedelta64(np.int64(123), unit),
                         np.timedelta64(123, unit))
        else:
            assert_equal(np.timedelta64(np.int64(123)),
                         np.timedelta64(123))
    def test_timedelta_scalar_construction(self):
        """Construction, repr, and unit handling of timedelta64 scalars."""
        # 1 week == 7 days; 2 minutes == 120 seconds
        assert_equal(np.timedelta64(7, 'D'),
                     np.timedelta64(1, 'W'))
        assert_equal(np.timedelta64(120, 's'),
                     np.timedelta64(2, 'm'))
        # Default construction is zero; None means NaT
        assert_equal(np.timedelta64(), np.timedelta64(0))
        assert_equal(np.timedelta64(None), np.timedelta64('NaT'))
        assert_equal(str(np.timedelta64('NaT')), 'NaT')
        assert_equal(repr(np.timedelta64('NaT')),
                     "numpy.timedelta64('NaT')")
        assert_equal(str(np.timedelta64(3, 's')), '3 seconds')
        assert_equal(repr(np.timedelta64(-3, 's')),
                     "numpy.timedelta64(-3,'s')")
        assert_equal(repr(np.timedelta64(12)),
                     "numpy.timedelta64(12)")
        assert_equal(np.timedelta64(12).dtype, np.dtype('m8'))
        # Construction from a scalar or 0-d array keeps the unit,
        # or lets an explicit unit override it.
        a = np.timedelta64(2, 'h')
        b = np.array(2, dtype='m8[h]')
        assert_equal(a.dtype, np.dtype('m8[h]'))
        assert_equal(b.dtype, np.dtype('m8[h]'))
        assert_equal(np.timedelta64(a), a)
        assert_equal(np.timedelta64(a).dtype, np.dtype('m8[h]'))
        assert_equal(np.timedelta64(b), a)
        assert_equal(np.timedelta64(b).dtype, np.dtype('m8[h]'))
        assert_equal(np.timedelta64(a, 's'), a)
        assert_equal(np.timedelta64(a, 's').dtype, np.dtype('m8[s]'))
        assert_equal(np.timedelta64(b, 's'), a)
        assert_equal(np.timedelta64(b, 's').dtype, np.dtype('m8[s]'))
        # Construction from datetime.timedelta, for every field
        assert_equal(np.timedelta64(5, 'D'),
                     np.timedelta64(datetime.timedelta(days=5)))
        assert_equal(np.timedelta64(102347621, 's'),
                     np.timedelta64(datetime.timedelta(seconds=102347621)))
        assert_equal(np.timedelta64(-10234760000, 'us'),
                     np.timedelta64(datetime.timedelta(
                                        microseconds=-10234760000)))
        assert_equal(np.timedelta64(10234760000, 'us'),
                     np.timedelta64(datetime.timedelta(
                                        microseconds=10234760000)))
        assert_equal(np.timedelta64(1023476, 'ms'),
                     np.timedelta64(datetime.timedelta(milliseconds=1023476)))
        assert_equal(np.timedelta64(10, 'm'),
                     np.timedelta64(datetime.timedelta(minutes=10)))
        assert_equal(np.timedelta64(281, 'h'),
                     np.timedelta64(datetime.timedelta(hours=281)))
        assert_equal(np.timedelta64(28, 'W'),
                     np.timedelta64(datetime.timedelta(weeks=28)))
        # Conversion across the nonlinear year/month boundary is rejected
        a = np.timedelta64(3, 's')
        assert_raises(TypeError, np.timedelta64, a, 'M')
        assert_raises(TypeError, np.timedelta64, a, 'Y')
        a = np.timedelta64(6, 'M')
        assert_raises(TypeError, np.timedelta64, a, 'D')
        assert_raises(TypeError, np.timedelta64, a, 'h')
        a = np.timedelta64(1, 'Y')
        assert_raises(TypeError, np.timedelta64, a, 'D')
        assert_raises(TypeError, np.timedelta64, a, 'm')
        # ...including from datetime.timedelta, even for the zero value
        a = datetime.timedelta(seconds=3)
        assert_raises(TypeError, np.timedelta64, a, 'M')
        assert_raises(TypeError, np.timedelta64, a, 'Y')
        a = datetime.timedelta(weeks=3)
        assert_raises(TypeError, np.timedelta64, a, 'M')
        assert_raises(TypeError, np.timedelta64, a, 'Y')
        a = datetime.timedelta()
        assert_raises(TypeError, np.timedelta64, a, 'M')
        assert_raises(TypeError, np.timedelta64, a, 'Y')
def test_timedelta_object_array_conversion(self):
inputs = [datetime.timedelta(28),
datetime.timedelta(30),
datetime.timedelta(31)]
expected = np.array([28, 30, 31], dtype='timedelta64[D]')
actual = np.array(inputs, dtype='timedelta64[D]')
assert_equal(expected, actual)
def test_timedelta_0_dim_object_array_conversion(self):
test = np.array(datetime.timedelta(seconds=20))
actual = test.astype(np.timedelta64)
expected = np.array(datetime.timedelta(seconds=20),
np.timedelta64)
assert_equal(actual, expected)
    def test_timedelta_scalar_construction_units(self):
        """Check the unit np.datetime64 infers from its input's precision.

        NOTE(review): despite the name, every assertion here exercises
        ``np.datetime64`` (datetime, not timedelta) construction -- the
        method looks misnamed; confirm against upstream before renaming,
        since the name is the test's public identifier.
        """
        # Date-only strings: the unit matches the finest field present.
        assert_equal(np.datetime64('2010').dtype,
                     np.dtype('M8[Y]'))
        assert_equal(np.datetime64('2010-03').dtype,
                     np.dtype('M8[M]'))
        assert_equal(np.datetime64('2010-03-12').dtype,
                     np.dtype('M8[D]'))
        # Time fields: hours, minutes, seconds.
        assert_equal(np.datetime64('2010-03-12T17').dtype,
                     np.dtype('M8[h]'))
        assert_equal(np.datetime64('2010-03-12T17:15').dtype,
                     np.dtype('M8[m]'))
        assert_equal(np.datetime64('2010-03-12T17:15:08').dtype,
                     np.dtype('M8[s]'))
        # 1-3 fractional second digits -> milliseconds.
        assert_equal(np.datetime64('2010-03-12T17:15:08.1').dtype,
                     np.dtype('M8[ms]'))
        assert_equal(np.datetime64('2010-03-12T17:15:08.12').dtype,
                     np.dtype('M8[ms]'))
        assert_equal(np.datetime64('2010-03-12T17:15:08.123').dtype,
                     np.dtype('M8[ms]'))
        # 4-6 fractional digits -> microseconds.
        assert_equal(np.datetime64('2010-03-12T17:15:08.1234').dtype,
                     np.dtype('M8[us]'))
        assert_equal(np.datetime64('2010-03-12T17:15:08.12345').dtype,
                     np.dtype('M8[us]'))
        assert_equal(np.datetime64('2010-03-12T17:15:08.123456').dtype,
                     np.dtype('M8[us]'))
        # 7-9 fractional digits -> nanoseconds.
        assert_equal(np.datetime64('1970-01-01T00:00:02.1234567').dtype,
                     np.dtype('M8[ns]'))
        assert_equal(np.datetime64('1970-01-01T00:00:02.12345678').dtype,
                     np.dtype('M8[ns]'))
        assert_equal(np.datetime64('1970-01-01T00:00:02.123456789').dtype,
                     np.dtype('M8[ns]'))
        # 10-12 fractional digits -> picoseconds.
        assert_equal(np.datetime64('1970-01-01T00:00:02.1234567890').dtype,
                     np.dtype('M8[ps]'))
        assert_equal(np.datetime64('1970-01-01T00:00:02.12345678901').dtype,
                     np.dtype('M8[ps]'))
        assert_equal(np.datetime64('1970-01-01T00:00:02.123456789012').dtype,
                     np.dtype('M8[ps]'))
        # 13-15 fractional digits -> femtoseconds.
        assert_equal(np.datetime64(
                     '1970-01-01T00:00:02.1234567890123').dtype,
                     np.dtype('M8[fs]'))
        assert_equal(np.datetime64(
                     '1970-01-01T00:00:02.12345678901234').dtype,
                     np.dtype('M8[fs]'))
        assert_equal(np.datetime64(
                     '1970-01-01T00:00:02.123456789012345').dtype,
                     np.dtype('M8[fs]'))
        # 16-18 fractional digits -> attoseconds.
        assert_equal(np.datetime64(
                     '1970-01-01T00:00:02.1234567890123456').dtype,
                     np.dtype('M8[as]'))
        assert_equal(np.datetime64(
                     '1970-01-01T00:00:02.12345678901234567').dtype,
                     np.dtype('M8[as]'))
        assert_equal(np.datetime64(
                     '1970-01-01T00:00:02.123456789012345678').dtype,
                     np.dtype('M8[as]'))
        # Python date -> day unit; Python datetime -> microsecond unit.
        assert_equal(np.datetime64(datetime.date(2010, 4, 16)).dtype,
                     np.dtype('M8[D]'))
        assert_equal(np.datetime64(
                        datetime.datetime(2010, 4, 16, 13, 45, 18)).dtype,
                     np.dtype('M8[us]'))
        # 'today' gives day resolution, 'now' gives second resolution.
        assert_equal(np.datetime64('today').dtype,
                     np.dtype('M8[D]'))
        assert_equal(np.datetime64('now').dtype,
                     np.dtype('M8[s]'))
    def test_datetime_nat_casting(self):
        """NaT survives casts between units and is produced from float NaN."""
        a = np.array('NaT', dtype='M8[D]')
        b = np.datetime64('NaT', '[D]')

        # Array NaT casts to NaT in any other datetime unit.
        assert_equal(a.astype('M8[s]'), np.array('NaT', dtype='M8[s]'))
        assert_equal(a.astype('M8[ms]'), np.array('NaT', dtype='M8[ms]'))
        assert_equal(a.astype('M8[M]'), np.array('NaT', dtype='M8[M]'))
        assert_equal(a.astype('M8[Y]'), np.array('NaT', dtype='M8[Y]'))
        assert_equal(a.astype('M8[W]'), np.array('NaT', dtype='M8[W]'))

        # Scalar NaT re-wrapped with another unit stays NaT.
        assert_equal(np.datetime64(b, '[s]'), np.datetime64('NaT', '[s]'))
        assert_equal(np.datetime64(b, '[ms]'), np.datetime64('NaT', '[ms]'))
        assert_equal(np.datetime64(b, '[M]'), np.datetime64('NaT', '[M]'))
        assert_equal(np.datetime64(b, '[Y]'), np.datetime64('NaT', '[Y]'))
        assert_equal(np.datetime64(b, '[W]'), np.datetime64('NaT', '[W]'))

        # The same works starting from a 0-d NaT array.
        assert_equal(np.datetime64(a, '[s]'), np.datetime64('NaT', '[s]'))
        assert_equal(np.datetime64(a, '[ms]'), np.datetime64('NaT', '[ms]'))
        assert_equal(np.datetime64(a, '[M]'), np.datetime64('NaT', '[M]'))
        assert_equal(np.datetime64(a, '[Y]'), np.datetime64('NaT', '[Y]'))
        assert_equal(np.datetime64(a, '[W]'), np.datetime64('NaT', '[W]'))

        # NaN in every float/complex width converts to NaT.
        nan = np.array([np.nan] * 8)
        fnan = nan.astype('f')
        lnan = nan.astype('g')
        cnan = nan.astype('D')
        cfnan = nan.astype('F')
        clnan = nan.astype('G')

        nat = np.array([np.datetime64('NaT')] * 8)
        assert_equal(nan.astype('M8[ns]'), nat)
        assert_equal(fnan.astype('M8[ns]'), nat)
        assert_equal(lnan.astype('M8[ns]'), nat)
        assert_equal(cnan.astype('M8[ns]'), nat)
        assert_equal(cfnan.astype('M8[ns]'), nat)
        assert_equal(clnan.astype('M8[ns]'), nat)

        # Likewise for the timedelta NaT.
        nat = np.array([np.timedelta64('NaT')] * 8)
        assert_equal(nan.astype('timedelta64[ns]'), nat)
        assert_equal(fnan.astype('timedelta64[ns]'), nat)
        assert_equal(lnan.astype('timedelta64[ns]'), nat)
        assert_equal(cnan.astype('timedelta64[ns]'), nat)
        assert_equal(cfnan.astype('timedelta64[ns]'), nat)
        assert_equal(clnan.astype('timedelta64[ns]'), nat)
def test_days_creation(self):
assert_equal(np.array('1599', dtype='M8[D]').astype('i8'),
(1600-1970)*365 - (1972-1600)/4 + 3 - 365)
assert_equal(np.array('1600', dtype='M8[D]').astype('i8'),
(1600-1970)*365 - (1972-1600)/4 + 3)
assert_equal(np.array('1601', dtype='M8[D]').astype('i8'),
(1600-1970)*365 - (1972-1600)/4 + 3 + 366)
assert_equal(np.array('1900', dtype='M8[D]').astype('i8'),
(1900-1970)*365 - (1970-1900)//4)
assert_equal(np.array('1901', dtype='M8[D]').astype('i8'),
(1900-1970)*365 - (1970-1900)//4 + 365)
assert_equal(np.array('1967', dtype='M8[D]').astype('i8'), -3*365 - 1)
assert_equal(np.array('1968', dtype='M8[D]').astype('i8'), -2*365 - 1)
assert_equal(np.array('1969', dtype='M8[D]').astype('i8'), -1*365)
assert_equal(np.array('1970', dtype='M8[D]').astype('i8'), 0*365)
assert_equal(np.array('1971', dtype='M8[D]').astype('i8'), 1*365)
assert_equal(np.array('1972', dtype='M8[D]').astype('i8'), 2*365)
assert_equal(np.array('1973', dtype='M8[D]').astype('i8'), 3*365 + 1)
assert_equal(np.array('1974', dtype='M8[D]').astype('i8'), 4*365 + 1)
assert_equal(np.array('2000', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4)
assert_equal(np.array('2001', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4 + 366)
assert_equal(np.array('2400', dtype='M8[D]').astype('i8'),
(2400 - 1970)*365 + (2400 - 1972)//4 - 3)
assert_equal(np.array('2401', dtype='M8[D]').astype('i8'),
(2400 - 1970)*365 + (2400 - 1972)//4 - 3 + 366)
assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('i8'),
(1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 28)
assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('i8'),
(1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 29)
assert_equal(np.array('2000-02-29', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 28)
assert_equal(np.array('2000-03-01', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 29)
assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4 + 366 + 31 + 28 + 21)
def test_days_to_pydate(self):
assert_equal(np.array('1599', dtype='M8[D]').astype('O'),
datetime.date(1599, 1, 1))
assert_equal(np.array('1600', dtype='M8[D]').astype('O'),
datetime.date(1600, 1, 1))
assert_equal(np.array('1601', dtype='M8[D]').astype('O'),
datetime.date(1601, 1, 1))
assert_equal(np.array('1900', dtype='M8[D]').astype('O'),
datetime.date(1900, 1, 1))
assert_equal(np.array('1901', dtype='M8[D]').astype('O'),
datetime.date(1901, 1, 1))
assert_equal(np.array('2000', dtype='M8[D]').astype('O'),
datetime.date(2000, 1, 1))
assert_equal(np.array('2001', dtype='M8[D]').astype('O'),
datetime.date(2001, 1, 1))
assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('O'),
datetime.date(1600, 2, 29))
assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('O'),
datetime.date(1600, 3, 1))
assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('O'),
datetime.date(2001, 3, 22))
def test_dtype_comparison(self):
assert_(not (np.dtype('M8[us]') == np.dtype('M8[ms]')))
assert_(np.dtype('M8[us]') != np.dtype('M8[ms]'))
assert_(np.dtype('M8[2D]') != np.dtype('M8[D]'))
assert_(np.dtype('M8[D]') != np.dtype('M8[2D]'))
def test_pydatetime_creation(self):
a = np.array(['1960-03-12', datetime.date(1960, 3, 12)], dtype='M8[D]')
assert_equal(a[0], a[1])
a = np.array(['1999-12-31', datetime.date(1999, 12, 31)], dtype='M8[D]')
assert_equal(a[0], a[1])
a = np.array(['2000-01-01', datetime.date(2000, 1, 1)], dtype='M8[D]')
assert_equal(a[0], a[1])
a = np.array(['today', datetime.date.today()], dtype='M8[D]')
assert_equal(a[0], a[1])
assert_equal(np.array(datetime.date(1960, 3, 12), dtype='M8[s]'),
np.array(np.datetime64('1960-03-12T00:00:00')))
    def test_datetime_string_conversion(self):
        """Round-trip conversions between datetime64 and S/U string arrays."""
        a = ['2011-03-16', '1920-01-01', '2013-05-19']
        str_a = np.array(a, dtype='S')
        uni_a = np.array(a, dtype='U')
        dt_a = np.array(a, dtype='M')

        # Bytes-string -> datetime, via astype and via assignment.
        assert_equal(dt_a, str_a.astype('M'))
        assert_equal(dt_a.dtype, str_a.astype('M').dtype)
        dt_b = np.empty_like(dt_a)
        dt_b[...] = str_a
        assert_equal(dt_a, dt_b)
        # Datetime -> bytes-string ('S0' lets NumPy pick the width).
        assert_equal(str_a, dt_a.astype('S0'))
        str_b = np.empty_like(str_a)
        str_b[...] = dt_a
        assert_equal(str_a, str_b)
        # Unicode -> datetime.
        assert_equal(dt_a, uni_a.astype('M'))
        assert_equal(dt_a.dtype, uni_a.astype('M').dtype)
        dt_b = np.empty_like(dt_a)
        dt_b[...] = uni_a
        assert_equal(dt_a, dt_b)
        # Datetime -> unicode.
        assert_equal(uni_a, dt_a.astype('U'))
        uni_b = np.empty_like(uni_a)
        uni_b[...] = dt_a
        assert_equal(uni_a, uni_b)
        # Datetime -> fixed-width 128-char bytes strings (padded, not clipped).
        assert_equal(str_a, dt_a.astype((np.string_, 128)))
        str_b = np.empty(str_a.shape, dtype=(np.string_, 128))
        str_b[...] = dt_a
        assert_equal(str_a, str_b)
    def test_datetime_array_str(self):
        """str() and array2string formatting of datetime64 arrays."""
        a = np.array(['2011-03-16', '1920-01-01', '2013-05-19'], dtype='M')
        assert_equal(str(a), "['2011-03-16' '1920-01-01' '2013-05-19']")

        # A custom 'datetime' formatter is honored by array2string.
        a = np.array(['2011-03-16T13:55', '1920-01-01T03:12'], dtype='M')
        assert_equal(np.array2string(a, separator=', ',
                    formatter={'datetime': lambda x:
                        "'%s'" % np.datetime_as_string(x, timezone='UTC')}),
                     "['2011-03-16T13:55Z', '1920-01-01T03:12Z']")

        # NaT renders as the bare string 'NaT'.
        a = np.array(['2010', 'NaT', '2030']).astype('M')
        assert_equal(str(a), "['2010' 'NaT' '2030']")
    def test_timedelta_array_str(self):
        """str() formatting of timedelta64 arrays, including NaT alignment."""
        a = np.array([-1, 0, 100], dtype='m')
        assert_equal(str(a), "[ -1   0 100]")
        a = np.array(['NaT', 'NaT'], dtype='m')
        assert_equal(str(a), "['NaT' 'NaT']")
        # Check right-alignment with NaTs
        a = np.array([-1, 'NaT', 0], dtype='m')
        assert_equal(str(a), "[   -1 'NaT'     0]")
        a = np.array([-1, 'NaT', 1234567], dtype='m')
        assert_equal(str(a), "[     -1   'NaT' 1234567]")

        # Test with other byteorder:
        a = np.array([-1, 'NaT', 1234567], dtype='>m')
        assert_equal(str(a), "[     -1   'NaT' 1234567]")
        a = np.array([-1, 'NaT', 1234567], dtype='<m')
        assert_equal(str(a), "[     -1   'NaT' 1234567]")
    def test_pickle(self):
        """Datetime dtypes and scalars pickle-roundtrip on every protocol."""
        # Check that pickle roundtripping works
        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
            # Dtypes with a unit multiple and a plain unit.
            dt = np.dtype('M8[7D]')
            assert_equal(pickle.loads(pickle.dumps(dt, protocol=proto)), dt)
            dt = np.dtype('M8[W]')
            assert_equal(pickle.loads(pickle.dumps(dt, protocol=proto)), dt)
            # A datetime scalar and a timedelta derived from it.
            scalar = np.datetime64('2016-01-01T00:00:00.000000000')
            assert_equal(pickle.loads(pickle.dumps(scalar, protocol=proto)),
                         scalar)
            delta = scalar - np.datetime64('2015-01-01T00:00:00.000000000')
            assert_equal(pickle.loads(pickle.dumps(delta, protocol=proto)),
                         delta)

        # Check that loading pickles from 1.6 works
        # (hard-coded protocol-0 payloads produced by NumPy 1.6).
        pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
              b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'D'\np6\n" + \
              b"I7\nI1\nI1\ntp7\ntp8\ntp9\nb."
        assert_equal(pickle.loads(pkl), np.dtype('<M8[7D]'))
        pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
              b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'W'\np6\n" + \
              b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb."
        assert_equal(pickle.loads(pkl), np.dtype('<M8[W]'))
        pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
              b"(I4\nS'>'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n" + \
              b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb."
        assert_equal(pickle.loads(pkl), np.dtype('>M8[us]'))
    def test_setstate(self):
        """Invalid __setstate__ tuples raise and leave the dtype intact."""
        dt = np.dtype('>M8[us]')
        # Bad final element (plain int instead of metadata tuple).
        assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1))
        # The failed call must not have corrupted the dtype's state.
        assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
        # Malformed metadata tuple.
        assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx')))
        assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
    def test_dtype_promotion(self):
        """promote_types on datetime/timedelta computes the metadata GCD."""
        # datetime <op> datetime computes the metadata gcd
        # timedelta <op> timedelta computes the metadata gcd
        for mM in ['m', 'M']:
            assert_equal(
                np.promote_types(np.dtype(mM+'8[2Y]'), np.dtype(mM+'8[2Y]')),
                np.dtype(mM+'8[2Y]'))
            assert_equal(
                np.promote_types(np.dtype(mM+'8[12Y]'), np.dtype(mM+'8[15Y]')),
                np.dtype(mM+'8[3Y]'))
            assert_equal(
                np.promote_types(np.dtype(mM+'8[62M]'), np.dtype(mM+'8[24M]')),
                np.dtype(mM+'8[2M]'))
            # GCD across different units: 1 week and 2 days -> 1 day, etc.
            assert_equal(
                np.promote_types(np.dtype(mM+'8[1W]'), np.dtype(mM+'8[2D]')),
                np.dtype(mM+'8[1D]'))
            assert_equal(
                np.promote_types(np.dtype(mM+'8[W]'), np.dtype(mM+'8[13s]')),
                np.dtype(mM+'8[s]'))
            assert_equal(
                np.promote_types(np.dtype(mM+'8[13W]'), np.dtype(mM+'8[49s]')),
                np.dtype(mM+'8[7s]'))
        # timedelta <op> timedelta raises when there is no reasonable gcd
        # (years/months have no fixed length in days/weeks).
        assert_raises(TypeError, np.promote_types,
                      np.dtype('m8[Y]'), np.dtype('m8[D]'))
        assert_raises(TypeError, np.promote_types,
                      np.dtype('m8[M]'), np.dtype('m8[W]'))
        # timedelta and float cannot be safely cast with each other
        assert_raises(TypeError, np.promote_types, "float32", "m8")
        assert_raises(TypeError, np.promote_types, "m8", "float32")
        assert_raises(TypeError, np.promote_types, "uint64", "m8")
        assert_raises(TypeError, np.promote_types, "m8", "uint64")
        # timedelta <op> timedelta may overflow with big unit ranges
        assert_raises(OverflowError, np.promote_types,
                      np.dtype('m8[W]'), np.dtype('m8[fs]'))
        assert_raises(OverflowError, np.promote_types,
                      np.dtype('m8[s]'), np.dtype('m8[as]'))
def test_cast_overflow(self):
# gh-4486
def cast():
numpy.datetime64("1971-01-01 00:00:00.000000000000000").astype("<M8[D]")
assert_raises(OverflowError, cast)
def cast2():
numpy.datetime64("2014").astype("<M8[fs]")
assert_raises(OverflowError, cast2)
    def test_pyobject_roundtrip(self):
        """datetime64 arrays survive astype(object).astype(unit) unchanged."""
        # All datetime types should be able to roundtrip through object
        a = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0,
                      -1020040340, -2942398, -1, 0, 1, 234523453, 1199164176],
                     dtype=np.int64)
        # With date units
        for unit in ['M8[D]', 'M8[W]', 'M8[M]', 'M8[Y]']:
            b = a.copy().view(dtype=unit)
            # Overwrite the leading slots with boundary dates plus NaT;
            # the raw int64 tail exercises arbitrary values.
            b[0] = '-0001-01-01'
            b[1] = '-0001-12-31'
            b[2] = '0000-01-01'
            b[3] = '0001-01-01'
            b[4] = '1969-12-31'
            b[5] = '1970-01-01'
            b[6] = '9999-12-31'
            b[7] = '10000-01-01'
            b[8] = 'NaT'

            assert_equal(b.astype(object).astype(unit), b,
                         "Error roundtripping unit %s" % unit)
        # With time units (including non-unit multiples like 16fs, 20us)
        for unit in ['M8[as]', 'M8[16fs]', 'M8[ps]', 'M8[us]',
                     'M8[300as]', 'M8[20us]']:
            b = a.copy().view(dtype=unit)
            b[0] = '-0001-01-01T00'
            b[1] = '-0001-12-31T00'
            b[2] = '0000-01-01T00'
            b[3] = '0001-01-01T00'
            b[4] = '1969-12-31T23:59:59.999999'
            b[5] = '1970-01-01T00'
            b[6] = '9999-12-31T23:59:59.999999'
            b[7] = '10000-01-01T00'
            b[8] = 'NaT'

            assert_equal(b.astype(object).astype(unit), b,
                         "Error roundtripping unit %s" % unit)
def test_month_truncation(self):
# Make sure that months are truncating correctly
assert_equal(np.array('1945-03-01', dtype='M8[M]'),
np.array('1945-03-31', dtype='M8[M]'))
assert_equal(np.array('1969-11-01', dtype='M8[M]'),
np.array('1969-11-30T23:59:59.99999', dtype='M').astype('M8[M]'))
assert_equal(np.array('1969-12-01', dtype='M8[M]'),
np.array('1969-12-31T23:59:59.99999', dtype='M').astype('M8[M]'))
assert_equal(np.array('1970-01-01', dtype='M8[M]'),
np.array('1970-01-31T23:59:59.99999', dtype='M').astype('M8[M]'))
assert_equal(np.array('1980-02-01', dtype='M8[M]'),
np.array('1980-02-29T23:59:59.99999', dtype='M').astype('M8[M]'))
    def test_different_unit_comparison(self):
        """Values expressed in different units still compare as equal."""
        # Check some years with date units
        for unit1 in ['Y', 'M', 'D']:
            dt1 = np.dtype('M8[%s]' % unit1)
            for unit2 in ['Y', 'M', 'D']:
                dt2 = np.dtype('M8[%s]' % unit2)
                assert_equal(np.array('1945', dtype=dt1),
                             np.array('1945', dtype=dt2))
                assert_equal(np.array('1970', dtype=dt1),
                             np.array('1970', dtype=dt2))
                assert_equal(np.array('9999', dtype=dt1),
                             np.array('9999', dtype=dt2))
                assert_equal(np.array('10000', dtype=dt1),
                             np.array('10000-01-01', dtype=dt2))
                assert_equal(np.datetime64('1945', unit1),
                             np.datetime64('1945', unit2))
                assert_equal(np.datetime64('1970', unit1),
                             np.datetime64('1970', unit2))
                assert_equal(np.datetime64('9999', unit1),
                             np.datetime64('9999', unit2))
                assert_equal(np.datetime64('10000', unit1),
                             np.datetime64('10000-01-01', unit2))
        # Check some datetimes with time units
        for unit1 in ['6h', 'h', 'm', 's', '10ms', 'ms', 'us']:
            dt1 = np.dtype('M8[%s]' % unit1)
            for unit2 in ['h', 'm', 's', 'ms', 'us']:
                dt2 = np.dtype('M8[%s]' % unit2)
                assert_equal(np.array('1945-03-12T18', dtype=dt1),
                             np.array('1945-03-12T18', dtype=dt2))
                assert_equal(np.array('1970-03-12T18', dtype=dt1),
                             np.array('1970-03-12T18', dtype=dt2))
                assert_equal(np.array('9999-03-12T18', dtype=dt1),
                             np.array('9999-03-12T18', dtype=dt2))
                assert_equal(np.array('10000-01-01T00', dtype=dt1),
                             np.array('10000-01-01T00', dtype=dt2))
                assert_equal(np.datetime64('1945-03-12T18', unit1),
                             np.datetime64('1945-03-12T18', unit2))
                assert_equal(np.datetime64('1970-03-12T18', unit1),
                             np.datetime64('1970-03-12T18', unit2))
                assert_equal(np.datetime64('9999-03-12T18', unit1),
                             np.datetime64('9999-03-12T18', unit2))
                assert_equal(np.datetime64('10000-01-01T00', unit1),
                             np.datetime64('10000-01-01T00', unit2))
        # Check some days with units that won't overflow
        for unit1 in ['D', '12h', 'h', 'm', 's', '4s', 'ms', 'us']:
            dt1 = np.dtype('M8[%s]' % unit1)
            for unit2 in ['D', 'h', 'm', 's', 'ms', 'us']:
                dt2 = np.dtype('M8[%s]' % unit2)
                assert_(np.equal(np.array('1932-02-17', dtype='M').astype(dt1),
                     np.array('1932-02-17T00:00:00', dtype='M').astype(dt2),
                     casting='unsafe'))
                assert_(np.equal(np.array('10000-04-27', dtype='M').astype(dt1),
                     np.array('10000-04-27T00:00:00', dtype='M').astype(dt2),
                     casting='unsafe'))

        # TODO: Changing to 'same_kind' or 'safe' casting in the ufuncs by
        #       default is needed to properly catch this kind of thing...
        # Comparing a datetime against a timedelta only raises under the
        # stricter 'same_kind' casting; the default-cast case stays disabled.
        a = np.array('2012-12-21', dtype='M8[D]')
        b = np.array(3, dtype='m8[D]')
        #assert_raises(TypeError, np.less, a, b)
        assert_raises(TypeError, np.less, a, b, casting='same_kind')
def test_datetime_like(self):
a = np.array([3], dtype='m8[4D]')
b = np.array(['2012-12-21'], dtype='M8[D]')
assert_equal(np.ones_like(a).dtype, a.dtype)
assert_equal(np.zeros_like(a).dtype, a.dtype)
assert_equal(np.empty_like(a).dtype, a.dtype)
assert_equal(np.ones_like(b).dtype, b.dtype)
assert_equal(np.zeros_like(b).dtype, b.dtype)
assert_equal(np.empty_like(b).dtype, b.dtype)
def test_datetime_unary(self):
for tda, tdb, tdzero, tdone, tdmone in \
[
# One-dimensional arrays
(np.array([3], dtype='m8[D]'),
np.array([-3], dtype='m8[D]'),
np.array([0], dtype='m8[D]'),
np.array([1], dtype='m8[D]'),
np.array([-1], dtype='m8[D]')),
# NumPy scalars
(np.timedelta64(3, '[D]'),
np.timedelta64(-3, '[D]'),
np.timedelta64(0, '[D]'),
np.timedelta64(1, '[D]'),
np.timedelta64(-1, '[D]'))]:
# negative ufunc
assert_equal(-tdb, tda)
assert_equal((-tdb).dtype, tda.dtype)
assert_equal(np.negative(tdb), tda)
assert_equal(np.negative(tdb).dtype, tda.dtype)
# positive ufunc
assert_equal(np.positive(tda), tda)
assert_equal(np.positive(tda).dtype, tda.dtype)
assert_equal(np.positive(tdb), tdb)
assert_equal(np.positive(tdb).dtype, tdb.dtype)
# absolute ufunc
assert_equal(np.absolute(tdb), tda)
assert_equal(np.absolute(tdb).dtype, tda.dtype)
# sign ufunc
assert_equal(np.sign(tda), tdone)
assert_equal(np.sign(tdb), tdmone)
assert_equal(np.sign(tdzero), tdzero)
assert_equal(np.sign(tda).dtype, tda.dtype)
# The ufuncs always produce native-endian results
assert_
    def test_datetime_add(self):
        """Addition between M8, m8, bool and int operands, arrays and scalars."""
        for dta, dtb, dtc, dtnat, tda, tdb, tdc in \
                [
                 # One-dimensional arrays
                 (np.array(['2012-12-21'], dtype='M8[D]'),
                  np.array(['2012-12-24'], dtype='M8[D]'),
                  np.array(['2012-12-21T11'], dtype='M8[h]'),
                  np.array(['NaT'], dtype='M8[D]'),
                  np.array([3], dtype='m8[D]'),
                  np.array([11], dtype='m8[h]'),
                  np.array([3*24 + 11], dtype='m8[h]')),
                 # NumPy scalars
                 (np.datetime64('2012-12-21', '[D]'),
                  np.datetime64('2012-12-24', '[D]'),
                  np.datetime64('2012-12-21T11', '[h]'),
                  np.datetime64('NaT', '[D]'),
                  np.timedelta64(3, '[D]'),
                  np.timedelta64(11, '[h]'),
                  np.timedelta64(3*24 + 11, '[h]'))]:
            # m8 + m8
            assert_equal(tda + tdb, tdc)
            assert_equal((tda + tdb).dtype, np.dtype('m8[h]'))
            # m8 + bool
            assert_equal(tdb + True, tdb + 1)
            assert_equal((tdb + True).dtype, np.dtype('m8[h]'))
            # m8 + int
            assert_equal(tdb + 3*24, tdc)
            assert_equal((tdb + 3*24).dtype, np.dtype('m8[h]'))
            # bool + m8
            assert_equal(False + tdb, tdb)
            assert_equal((False + tdb).dtype, np.dtype('m8[h]'))
            # int + m8
            assert_equal(3*24 + tdb, tdc)
            assert_equal((3*24 + tdb).dtype, np.dtype('m8[h]'))
            # M8 + bool
            assert_equal(dta + True, dta + 1)
            assert_equal(dtnat + True, dtnat)
            assert_equal((dta + True).dtype, np.dtype('M8[D]'))
            # M8 + int
            assert_equal(dta + 3, dtb)
            assert_equal(dtnat + 3, dtnat)
            assert_equal((dta + 3).dtype, np.dtype('M8[D]'))
            # bool + M8
            assert_equal(False + dta, dta)
            assert_equal(False + dtnat, dtnat)
            assert_equal((False + dta).dtype, np.dtype('M8[D]'))
            # int + M8
            assert_equal(3 + dta, dtb)
            assert_equal(3 + dtnat, dtnat)
            assert_equal((3 + dta).dtype, np.dtype('M8[D]'))
            # M8 + m8
            assert_equal(dta + tda, dtb)
            assert_equal(dtnat + tda, dtnat)
            assert_equal((dta + tda).dtype, np.dtype('M8[D]'))
            # m8 + M8
            assert_equal(tda + dta, dtb)
            assert_equal(tda + dtnat, dtnat)
            assert_equal((tda + dta).dtype, np.dtype('M8[D]'))

            # In M8 + m8, the result goes to higher precision
            # (days + hours yields an hour-resolution datetime).
            assert_equal(np.add(dta, tdb, casting='unsafe'), dtc)
            assert_equal(np.add(dta, tdb, casting='unsafe').dtype,
                         np.dtype('M8[h]'))
            assert_equal(np.add(tdb, dta, casting='unsafe'), dtc)
            assert_equal(np.add(tdb, dta, casting='unsafe').dtype,
                         np.dtype('M8[h]'))
            # M8 + M8 (adding two absolute dates is meaningless)
            assert_raises(TypeError, np.add, dta, dtb)
    def test_datetime_subtract(self):
        """Subtraction between M8, m8, bool and int operands."""
        for dta, dtb, dtc, dtd, dte, dtnat, tda, tdb, tdc in \
                [
                 # One-dimensional arrays
                 (np.array(['2012-12-21'], dtype='M8[D]'),
                  np.array(['2012-12-24'], dtype='M8[D]'),
                  np.array(['1940-12-24'], dtype='M8[D]'),
                  np.array(['1940-12-24T00'], dtype='M8[h]'),
                  np.array(['1940-12-23T13'], dtype='M8[h]'),
                  np.array(['NaT'], dtype='M8[D]'),
                  np.array([3], dtype='m8[D]'),
                  np.array([11], dtype='m8[h]'),
                  np.array([3*24 - 11], dtype='m8[h]')),
                 # NumPy scalars
                 (np.datetime64('2012-12-21', '[D]'),
                  np.datetime64('2012-12-24', '[D]'),
                  np.datetime64('1940-12-24', '[D]'),
                  np.datetime64('1940-12-24T00', '[h]'),
                  np.datetime64('1940-12-23T13', '[h]'),
                  np.datetime64('NaT', '[D]'),
                  np.timedelta64(3, '[D]'),
                  np.timedelta64(11, '[h]'),
                  np.timedelta64(3*24 - 11, '[h]'))]:
            # m8 - m8
            assert_equal(tda - tdb, tdc)
            assert_equal((tda - tdb).dtype, np.dtype('m8[h]'))
            assert_equal(tdb - tda, -tdc)
            assert_equal((tdb - tda).dtype, np.dtype('m8[h]'))
            # m8 - bool
            assert_equal(tdc - True, tdc - 1)
            assert_equal((tdc - True).dtype, np.dtype('m8[h]'))
            # m8 - int
            assert_equal(tdc - 3*24, -tdb)
            assert_equal((tdc - 3*24).dtype, np.dtype('m8[h]'))
            # bool - m8
            assert_equal(False - tdb, -tdb)
            assert_equal((False - tdb).dtype, np.dtype('m8[h]'))
            # int - m8
            assert_equal(3*24 - tdb, tdc)
            assert_equal((3*24 - tdb).dtype, np.dtype('m8[h]'))
            # M8 - bool
            assert_equal(dtb - True, dtb - 1)
            assert_equal(dtnat - True, dtnat)
            assert_equal((dtb - True).dtype, np.dtype('M8[D]'))
            # M8 - int
            assert_equal(dtb - 3, dta)
            assert_equal(dtnat - 3, dtnat)
            assert_equal((dtb - 3).dtype, np.dtype('M8[D]'))
            # M8 - m8
            assert_equal(dtb - tda, dta)
            assert_equal(dtnat - tda, dtnat)
            assert_equal((dtb - tda).dtype, np.dtype('M8[D]'))

            # In M8 - m8, the result goes to higher precision
            # (day datetime minus hour timedelta -> hour datetime).
            assert_equal(np.subtract(dtc, tdb, casting='unsafe'), dte)
            assert_equal(np.subtract(dtc, tdb, casting='unsafe').dtype,
                         np.dtype('M8[h]'))

            # M8 - M8 with different goes to higher precision
            # (the difference of two datetimes is a timedelta).
            assert_equal(np.subtract(dtc, dtd, casting='unsafe'),
                         np.timedelta64(0, 'h'))
            assert_equal(np.subtract(dtc, dtd, casting='unsafe').dtype,
                         np.dtype('m8[h]'))
            assert_equal(np.subtract(dtd, dtc, casting='unsafe'),
                         np.timedelta64(0, 'h'))
            assert_equal(np.subtract(dtd, dtc, casting='unsafe').dtype,
                         np.dtype('m8[h]'))

            # Subtracting a datetime from a timedelta/bool/int is undefined.
            # m8 - M8
            assert_raises(TypeError, np.subtract, tda, dta)
            # bool - M8
            assert_raises(TypeError, np.subtract, False, dta)
            # int - M8
            assert_raises(TypeError, np.subtract, 3, dta)
    def test_datetime_multiply(self):
        """Multiplication: m8 * numeric is allowed; everything else raises."""
        for dta, tda, tdb, tdc in \
                [
                 # One-dimensional arrays
                 (np.array(['2012-12-21'], dtype='M8[D]'),
                  np.array([6], dtype='m8[h]'),
                  np.array([9], dtype='m8[h]'),
                  np.array([12], dtype='m8[h]')),
                 # NumPy scalars
                 (np.datetime64('2012-12-21', '[D]'),
                  np.timedelta64(6, '[h]'),
                  np.timedelta64(9, '[h]'),
                  np.timedelta64(12, '[h]'))]:
            # m8 * int
            assert_equal(tda * 2, tdc)
            assert_equal((tda * 2).dtype, np.dtype('m8[h]'))
            # int * m8
            assert_equal(2 * tda, tdc)
            assert_equal((2 * tda).dtype, np.dtype('m8[h]'))
            # m8 * float
            assert_equal(tda * 1.5, tdb)
            assert_equal((tda * 1.5).dtype, np.dtype('m8[h]'))
            # float * m8
            assert_equal(1.5 * tda, tdb)
            assert_equal((1.5 * tda).dtype, np.dtype('m8[h]'))

            # Products involving two timedeltas or any datetime are undefined.
            # m8 * m8
            assert_raises(TypeError, np.multiply, tda, tdb)
            # m8 * M8
            assert_raises(TypeError, np.multiply, dta, tda)
            # M8 * m8
            assert_raises(TypeError, np.multiply, tda, dta)
            # M8 * int
            assert_raises(TypeError, np.multiply, dta, 2)
            # int * M8
            assert_raises(TypeError, np.multiply, 2, dta)
            # M8 * float
            assert_raises(TypeError, np.multiply, dta, 1.5)
            # float * M8
            assert_raises(TypeError, np.multiply, 1.5, dta)

        # NaTs: NaT times anything, and anything times inf/nan, stays NaT.
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in multiply")
            nat = np.timedelta64('NaT')

            def check(a, b, res):
                # Multiplication must be commutative in all these cases.
                assert_equal(a * b, res)
                assert_equal(b * a, res)
            for tp in (int, float):
                check(nat, tp(2), nat)
                check(nat, tp(0), nat)
            for f in (float('inf'), float('nan')):
                check(np.timedelta64(1), f, nat)
                check(np.timedelta64(0), f, nat)
                check(nat, f, nat)
    @pytest.mark.parametrize("op1, op2, exp", [
        # m8 same units round down
        (np.timedelta64(7, 's'),
         np.timedelta64(4, 's'),
         1),
        # m8 same units round down with negative
        (np.timedelta64(7, 's'),
         np.timedelta64(-4, 's'),
         -2),
        # m8 same units negative no round down
        (np.timedelta64(8, 's'),
         np.timedelta64(-4, 's'),
         -2),
        # m8 different units
        (np.timedelta64(1, 'm'),
         np.timedelta64(31, 's'),
         1),
        # m8 generic units
        (np.timedelta64(1890),
         np.timedelta64(31),
         60),
        # Y // M works
        (np.timedelta64(2, 'Y'),
         np.timedelta64('13', 'M'),
         1),
        # handle 1D arrays
        (np.array([1, 2, 3], dtype='m8'),
         np.array([2], dtype='m8'),
         np.array([0, 1, 1], dtype=np.int64)),
        ])
    def test_timedelta_floor_divide(self, op1, op2, exp):
        """timedelta64 // timedelta64 floor-divides to a plain integer."""
        assert_equal(op1 // op2, exp)
    @pytest.mark.parametrize("op1, op2", [
        # div by 0
        (np.timedelta64(10, 'us'),
         np.timedelta64(0, 'us')),
        # div with NaT
        (np.timedelta64('NaT'),
         np.timedelta64(50, 'us')),
        # special case for int64 min
        # in integer floor division
        (np.timedelta64(np.iinfo(np.int64).min),
         np.timedelta64(-1)),
        ])
    def test_timedelta_floor_div_warnings(self, op1, op2):
        """Degenerate floor divisions warn and yield an int64 zero."""
        with assert_warns(RuntimeWarning):
            actual = op1 // op2
            assert_equal(actual, 0)
            assert_equal(actual.dtype, np.int64)
    @pytest.mark.parametrize("val1, val2", [
        # the smallest integer that can't be represented
        # exactly in a double (2**53 + 1) -- guards against a
        # float-based floor-division implementation losing precision
        (9007199254740993, 1),
        (9007199254740999, -2),
        ])
    def test_timedelta_floor_div_precision(self, val1, val2):
        """timedelta64 floor division matches exact Python int division."""
        op1 = np.timedelta64(val1)
        op2 = np.timedelta64(val2)
        actual = op1 // op2
        # Python's arbitrary-precision // is the reference result.
        expected = val1 // val2
        assert_equal(actual, expected)
    @pytest.mark.parametrize("val1, val2", [
        # unit pairs with no common metadata divisor
        # (calendar units vs. fixed-length time units)
        (np.timedelta64(7, 'Y'),
         np.timedelta64(3, 's')),
        (np.timedelta64(7, 'M'),
         np.timedelta64(1, 'D')),
        ])
    def test_timedelta_floor_div_error(self, val1, val2):
        """Floor division of incompatible units raises TypeError."""
        with assert_raises_regex(TypeError, "common metadata divisor"):
            val1 // val2
    @pytest.mark.parametrize("op1, op2", [
        # reuse the test cases from floordiv
        (np.timedelta64(7, 's'),
         np.timedelta64(4, 's')),
        # m8 same units round down with negative
        (np.timedelta64(7, 's'),
         np.timedelta64(-4, 's')),
        # m8 same units negative no round down
        (np.timedelta64(8, 's'),
         np.timedelta64(-4, 's')),
        # m8 different units
        (np.timedelta64(1, 'm'),
         np.timedelta64(31, 's')),
        # m8 generic units
        (np.timedelta64(1890),
         np.timedelta64(31)),
        # Y // M works
        (np.timedelta64(2, 'Y'),
         np.timedelta64('13', 'M')),
        # handle 1D arrays
        (np.array([1, 2, 3], dtype='m8'),
         np.array([2], dtype='m8')),
        ])
    def test_timedelta_divmod(self, op1, op2):
        """divmod on timedelta64 agrees with the (//, %) operator pair."""
        expected = (op1 // op2, op1 % op2)
        assert_equal(divmod(op1, op2), expected)
    @pytest.mark.parametrize("op1, op2", [
        # reuse cases from floordiv
        # div by 0
        (np.timedelta64(10, 'us'),
         np.timedelta64(0, 'us')),
        # div with NaT
        (np.timedelta64('NaT'),
         np.timedelta64(50, 'us')),
        # special case for int64 min
        # in integer floor division
        (np.timedelta64(np.iinfo(np.int64).min),
         np.timedelta64(-1)),
        ])
    def test_timedelta_divmod_warnings(self, op1, op2):
        """Degenerate divmod warns like // and matches the (//, %) pair."""
        with assert_warns(RuntimeWarning):
            expected = (op1 // op2, op1 % op2)
        with assert_warns(RuntimeWarning):
            actual = divmod(op1, op2)
        assert_equal(actual, expected)
    def test_datetime_divide(self):
        """True division: m8 / numeric scales, m8 / m8 yields float ratios."""
        for dta, tda, tdb, tdc, tdd in \
                [
                 # One-dimensional arrays
                 (np.array(['2012-12-21'], dtype='M8[D]'),
                  np.array([6], dtype='m8[h]'),
                  np.array([9], dtype='m8[h]'),
                  np.array([12], dtype='m8[h]'),
                  np.array([6], dtype='m8[m]')),
                 # NumPy scalars
                 (np.datetime64('2012-12-21', '[D]'),
                  np.timedelta64(6, '[h]'),
                  np.timedelta64(9, '[h]'),
                  np.timedelta64(12, '[h]'),
                  np.timedelta64(6, '[m]'))]:
            # m8 / int
            assert_equal(tdc / 2, tda)
            assert_equal((tdc / 2).dtype, np.dtype('m8[h]'))
            # m8 / float
            assert_equal(tda / 0.5, tdc)
            assert_equal((tda / 0.5).dtype, np.dtype('m8[h]'))
            # m8 / m8 (unit-aware: 6h / 6m == 60.0)
            assert_equal(tda / tdb, 6.0 / 9.0)
            assert_equal(np.divide(tda, tdb), 6.0 / 9.0)
            assert_equal(np.true_divide(tda, tdb), 6.0 / 9.0)
            assert_equal(tdb / tda, 9.0 / 6.0)
            assert_equal((tda / tdb).dtype, np.dtype('f8'))
            assert_equal(tda / tdd, 60.0)
            assert_equal(tdd / tda, 1.0 / 60.0)

            # Divisions involving a datetime, or a number by a timedelta,
            # are undefined.
            # int / m8
            assert_raises(TypeError, np.divide, 2, tdb)
            # float / m8
            assert_raises(TypeError, np.divide, 0.5, tdb)
            # m8 / M8
            assert_raises(TypeError, np.divide, dta, tda)
            # M8 / m8
            assert_raises(TypeError, np.divide, tda, dta)
            # M8 / int
            assert_raises(TypeError, np.divide, dta, 2)
            # int / M8
            assert_raises(TypeError, np.divide, 2, dta)
            # M8 / float
            assert_raises(TypeError, np.divide, dta, 1.5)
            # float / M8
            assert_raises(TypeError, np.divide, 1.5, dta)

        # NaTs: division by zero or nan produces NaT; inf collapses to zero.
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning,  r".*encountered in true\_divide")
            nat = np.timedelta64('NaT')
            for tp in (int, float):
                assert_equal(np.timedelta64(1) / tp(0), nat)
                assert_equal(np.timedelta64(0) / tp(0), nat)
                assert_equal(nat / tp(0), nat)
                assert_equal(nat / tp(2), nat)
            # Division by inf
            assert_equal(np.timedelta64(1) / float('inf'), np.timedelta64(0))
            assert_equal(np.timedelta64(0) / float('inf'), np.timedelta64(0))
            assert_equal(nat / float('inf'), nat)
            # Division by nan
            assert_equal(np.timedelta64(1) / float('nan'), nat)
            assert_equal(np.timedelta64(0) / float('nan'), nat)
            assert_equal(nat / float('nan'), nat)
def test_datetime_compare(self):
# Test all the comparison operators
a = np.datetime64('2000-03-12T18:00:00.000000')
b = np.array(['2000-03-12T18:00:00.000000',
'2000-03-12T17:59:59.999999',
'2000-03-12T18:00:00.000001',
'1970-01-11T12:00:00.909090',
'2016-01-11T12:00:00.909090'],
dtype='datetime64[us]')
assert_equal(np.equal(a, b), [1, 0, 0, 0, 0])
assert_equal(np.not_equal(a, b), [0, 1, 1, 1, 1])
assert_equal(np.less(a, b), [0, 0, 1, 0, 1])
assert_equal(np.less_equal(a, b), [1, 0, 1, 0, 1])
assert_equal(np.greater(a, b), [0, 1, 0, 1, 0])
assert_equal(np.greater_equal(a, b), [1, 1, 0, 1, 0])
    def test_datetime_compare_nat(self):
        """Every ordered comparison with NaT is False; only != is True."""
        dt_nat = np.datetime64('NaT', 'D')
        dt_other = np.datetime64('2000-01-01')
        td_nat = np.timedelta64('NaT', 'h')
        td_other = np.timedelta64(1, 'h')

        # NaT compares false against everything -- including itself.
        for op in [np.equal, np.less, np.less_equal,
                   np.greater, np.greater_equal]:
            assert_(not op(dt_nat, dt_nat))
            assert_(not op(dt_nat, dt_other))
            assert_(not op(dt_other, dt_nat))

            assert_(not op(td_nat, td_nat))
            assert_(not op(td_nat, td_other))
            assert_(not op(td_other, td_nat))

        # not_equal is the one comparison that holds with NaT.
        assert_(np.not_equal(dt_nat, dt_nat))
        assert_(np.not_equal(dt_nat, dt_other))
        assert_(np.not_equal(dt_other, dt_nat))

        assert_(np.not_equal(td_nat, td_nat))
        assert_(np.not_equal(td_nat, td_other))
        assert_(np.not_equal(td_other, td_nat))
    def test_datetime_minmax(self):
        """minimum/maximum/fmin/fmax on datetimes and timedeltas.

        The unit of the result is the GCD of the operand units;
        minimum/maximum propagate NaT while fmin/fmax ignore it.
        """
        # The metadata of the result should become the GCD
        # of the operand metadata
        a = np.array('1999-03-12T13', dtype='M8[2m]')
        b = np.array('1999-03-12T12', dtype='M8[s]')
        assert_equal(np.minimum(a, b), b)
        assert_equal(np.minimum(a, b).dtype, np.dtype('M8[s]'))
        assert_equal(np.fmin(a, b), b)
        assert_equal(np.fmin(a, b).dtype, np.dtype('M8[s]'))
        assert_equal(np.maximum(a, b), a)
        assert_equal(np.maximum(a, b).dtype, np.dtype('M8[s]'))
        assert_equal(np.fmax(a, b), a)
        assert_equal(np.fmax(a, b).dtype, np.dtype('M8[s]'))
        # Viewed as integers, the comparison is opposite because
        # of the units chosen
        assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8'))
        # Interaction with NaT: minimum/maximum propagate it ...
        a = np.array('1999-03-12T13', dtype='M8[2m]')
        dtnat = np.array('NaT', dtype='M8[h]')
        assert_equal(np.minimum(a, dtnat), dtnat)
        assert_equal(np.minimum(dtnat, a), dtnat)
        assert_equal(np.maximum(a, dtnat), dtnat)
        assert_equal(np.maximum(dtnat, a), dtnat)
        # ... while fmin/fmax return the non-NaT operand
        assert_equal(np.fmin(dtnat, a), a)
        assert_equal(np.fmin(a, dtnat), a)
        assert_equal(np.fmax(dtnat, a), a)
        assert_equal(np.fmax(a, dtnat), a)
        # Also do timedelta
        a = np.array(3, dtype='m8[h]')
        b = np.array(3*3600 - 3, dtype='m8[s]')
        assert_equal(np.minimum(a, b), b)
        assert_equal(np.minimum(a, b).dtype, np.dtype('m8[s]'))
        assert_equal(np.fmin(a, b), b)
        assert_equal(np.fmin(a, b).dtype, np.dtype('m8[s]'))
        assert_equal(np.maximum(a, b), a)
        assert_equal(np.maximum(a, b).dtype, np.dtype('m8[s]'))
        assert_equal(np.fmax(a, b), a)
        assert_equal(np.fmax(a, b).dtype, np.dtype('m8[s]'))
        # Viewed as integers, the comparison is opposite because
        # of the units chosen
        assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8'))
        # should raise between datetime and timedelta
        #
        # TODO: Allowing unsafe casting by
        # default in ufuncs strikes again... :(
        a = np.array(3, dtype='m8[h]')
        b = np.array('1999-03-12T12', dtype='M8[s]')
        #assert_raises(TypeError, np.minimum, a, b)
        #assert_raises(TypeError, np.maximum, a, b)
        #assert_raises(TypeError, np.fmin, a, b)
        #assert_raises(TypeError, np.fmax, a, b)
        assert_raises(TypeError, np.minimum, a, b, casting='same_kind')
        assert_raises(TypeError, np.maximum, a, b, casting='same_kind')
        assert_raises(TypeError, np.fmin, a, b, casting='same_kind')
        assert_raises(TypeError, np.fmax, a, b, casting='same_kind')
def test_hours(self):
t = np.ones(3, dtype='M8[s]')
t[0] = 60*60*24 + 60*60*10
assert_(t[0].item().hour == 10)
def test_divisor_conversion_year(self):
assert_(np.dtype('M8[Y/4]') == np.dtype('M8[3M]'))
assert_(np.dtype('M8[Y/13]') == np.dtype('M8[4W]'))
assert_(np.dtype('M8[3Y/73]') == np.dtype('M8[15D]'))
def test_divisor_conversion_month(self):
assert_(np.dtype('M8[M/2]') == np.dtype('M8[2W]'))
assert_(np.dtype('M8[M/15]') == np.dtype('M8[2D]'))
assert_(np.dtype('M8[3M/40]') == np.dtype('M8[54h]'))
def test_divisor_conversion_week(self):
assert_(np.dtype('m8[W/7]') == np.dtype('m8[D]'))
assert_(np.dtype('m8[3W/14]') == np.dtype('m8[36h]'))
assert_(np.dtype('m8[5W/140]') == np.dtype('m8[360m]'))
def test_divisor_conversion_day(self):
assert_(np.dtype('M8[D/12]') == np.dtype('M8[2h]'))
assert_(np.dtype('M8[D/120]') == np.dtype('M8[12m]'))
assert_(np.dtype('M8[3D/960]') == np.dtype('M8[270s]'))
def test_divisor_conversion_hour(self):
assert_(np.dtype('m8[h/30]') == np.dtype('m8[2m]'))
assert_(np.dtype('m8[3h/300]') == np.dtype('m8[36s]'))
def test_divisor_conversion_minute(self):
assert_(np.dtype('m8[m/30]') == np.dtype('m8[2s]'))
assert_(np.dtype('m8[3m/300]') == np.dtype('m8[600ms]'))
def test_divisor_conversion_second(self):
assert_(np.dtype('m8[s/100]') == np.dtype('m8[10ms]'))
assert_(np.dtype('m8[3s/10000]') == np.dtype('m8[300us]'))
def test_divisor_conversion_fs(self):
assert_(np.dtype('M8[fs/100]') == np.dtype('M8[10as]'))
assert_raises(ValueError, lambda: np.dtype('M8[3fs/10000]'))
def test_divisor_conversion_as(self):
assert_raises(ValueError, lambda: np.dtype('M8[as/10]'))
    def test_string_parser_variants(self):
        """Accepted variations of the ISO 8601 datetime string format.

        Timezone-aware forms ('Z' suffix and numeric offsets) still
        parse but emit a DeprecationWarning.
        """
        # Allow space instead of 'T' between date and time
        assert_equal(np.array(['1980-02-29T01:02:03'], np.dtype('M8[s]')),
                     np.array(['1980-02-29 01:02:03'], np.dtype('M8[s]')))
        # Allow positive years
        assert_equal(np.array(['+1980-02-29T01:02:03'], np.dtype('M8[s]')),
                     np.array(['+1980-02-29 01:02:03'], np.dtype('M8[s]')))
        # Allow negative years
        assert_equal(np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')),
                     np.array(['-1980-02-29 01:02:03'], np.dtype('M8[s]')))
        # UTC specifier
        with assert_warns(DeprecationWarning):
            assert_equal(
                np.array(['+1980-02-29T01:02:03'], np.dtype('M8[s]')),
                np.array(['+1980-02-29 01:02:03Z'], np.dtype('M8[s]')))
        with assert_warns(DeprecationWarning):
            assert_equal(
                np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')),
                np.array(['-1980-02-29 01:02:03Z'], np.dtype('M8[s]')))
        # Time zone offset
        with assert_warns(DeprecationWarning):
            assert_equal(
                np.array(['1980-02-29T02:02:03'], np.dtype('M8[s]')),
                np.array(['1980-02-29 00:32:03-0130'], np.dtype('M8[s]')))
        with assert_warns(DeprecationWarning):
            assert_equal(
                np.array(['1980-02-28T22:32:03'], np.dtype('M8[s]')),
                np.array(['1980-02-29 00:02:03+01:30'], np.dtype('M8[s]')))
        with assert_warns(DeprecationWarning):
            assert_equal(
                np.array(['1980-02-29T02:32:03.506'], np.dtype('M8[s]')),
                np.array(['1980-02-29 00:32:03.506-02'], np.dtype('M8[s]')))
        with assert_warns(DeprecationWarning):
            assert_equal(np.datetime64('1977-03-02T12:30-0230'),
                         np.datetime64('1977-03-02T15:00'))
    def test_string_parser_error_check(self):
        """Malformed or out-of-range datetime strings must raise ValueError."""
        # Arbitrary bad string
        assert_raises(ValueError, np.array, ['badvalue'], np.dtype('M8[us]'))
        # Character after year must be '-'
        assert_raises(ValueError, np.array, ['1980X'], np.dtype('M8[us]'))
        # Cannot have trailing '-'
        assert_raises(ValueError, np.array, ['1980-'], np.dtype('M8[us]'))
        # Month must be in range [1,12]
        assert_raises(ValueError, np.array, ['1980-00'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-13'], np.dtype('M8[us]'))
        # Month must have two digits
        assert_raises(ValueError, np.array, ['1980-1'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-1-02'], np.dtype('M8[us]'))
        # 'Mor' is not a valid month
        assert_raises(ValueError, np.array, ['1980-Mor'], np.dtype('M8[us]'))
        # Cannot have trailing '-'
        assert_raises(ValueError, np.array, ['1980-01-'], np.dtype('M8[us]'))
        # Day must be in range [1,len(month)]
        assert_raises(ValueError, np.array, ['1980-01-0'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-01-00'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-01-32'], np.dtype('M8[us]'))
        # 1979 is not a leap year, 1980 is — and per-month day limits hold
        assert_raises(ValueError, np.array, ['1979-02-29'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-02-30'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-03-32'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-04-31'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-05-32'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-06-31'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-07-32'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-08-32'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-09-31'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-10-32'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-11-31'], np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-12-32'], np.dtype('M8[us]'))
        # Cannot have trailing characters
        assert_raises(ValueError, np.array, ['1980-02-03%'],
                      np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-02-03 q'],
                      np.dtype('M8[us]'))
        # Hours must be in range [0, 23]
        assert_raises(ValueError, np.array, ['1980-02-03 25'],
                      np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-02-03T25'],
                      np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-02-03 24:01'],
                      np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-02-03T24:01'],
                      np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-02-03 -1'],
                      np.dtype('M8[us]'))
        # No trailing ':'
        assert_raises(ValueError, np.array, ['1980-02-03 01:'],
                      np.dtype('M8[us]'))
        # Minutes must be in range [0, 59]
        assert_raises(ValueError, np.array, ['1980-02-03 01:-1'],
                      np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-02-03 01:60'],
                      np.dtype('M8[us]'))
        # No trailing ':'
        assert_raises(ValueError, np.array, ['1980-02-03 01:60:'],
                      np.dtype('M8[us]'))
        # Seconds must be in range [0, 59]
        assert_raises(ValueError, np.array, ['1980-02-03 01:10:-1'],
                      np.dtype('M8[us]'))
        assert_raises(ValueError, np.array, ['1980-02-03 01:01:60'],
                      np.dtype('M8[us]'))
        # Timezone offset must within a reasonable range
        # (parsing them also warns about deprecation)
        with assert_warns(DeprecationWarning):
            assert_raises(ValueError, np.array, ['1980-02-03 01:01:00+0661'],
                          np.dtype('M8[us]'))
        with assert_warns(DeprecationWarning):
            assert_raises(ValueError, np.array, ['1980-02-03 01:01:00+2500'],
                          np.dtype('M8[us]'))
        with assert_warns(DeprecationWarning):
            assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-0070'],
                          np.dtype('M8[us]'))
        with assert_warns(DeprecationWarning):
            assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-3000'],
                          np.dtype('M8[us]'))
        with assert_warns(DeprecationWarning):
            assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-25:00'],
                          np.dtype('M8[us]'))
def test_creation_overflow(self):
date = '1980-03-23 20:00:00'
timesteps = np.array([date], dtype='datetime64[s]')[0].astype(np.int64)
for unit in ['ms', 'us', 'ns']:
timesteps *= 1000
x = np.array([date], dtype='datetime64[%s]' % unit)
assert_equal(timesteps, x[0].astype(np.int64),
err_msg='Datetime conversion error for unit %s' % unit)
assert_equal(x[0].astype(np.int64), 322689600000000000)
# gh-13062
with pytest.raises(OverflowError):
np.datetime64(2**64, 'D')
with pytest.raises(OverflowError):
np.timedelta64(2**64, 'D')
    def test_datetime_as_string(self):
        """np.datetime_as_string output for every unit.

        Covers the default conversion, the explicit ``unit=`` parameter
        (including truncation with ``casting='unsafe'``), and
        ``unit='auto'`` which trims trailing zero components.
        """
        # Check all the units with default string conversion
        date = '1959-10-13'
        datetime = '1959-10-13T12:34:56.789012345678901234'
        assert_equal(np.datetime_as_string(np.datetime64(date, 'Y')),
                     '1959')
        assert_equal(np.datetime_as_string(np.datetime64(date, 'M')),
                     '1959-10')
        assert_equal(np.datetime_as_string(np.datetime64(date, 'D')),
                     '1959-10-13')
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'h')),
                     '1959-10-13T12')
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'm')),
                     '1959-10-13T12:34')
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 's')),
                     '1959-10-13T12:34:56')
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ms')),
                     '1959-10-13T12:34:56.789')
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'us')),
                     '1959-10-13T12:34:56.789012')
        # Sub-microsecond units close to the epoch (pre-epoch side)
        datetime = '1969-12-31T23:34:56.789012345678901234'
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ns')),
                     '1969-12-31T23:34:56.789012345')
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ps')),
                     '1969-12-31T23:34:56.789012345678')
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'fs')),
                     '1969-12-31T23:34:56.789012345678901')
        datetime = '1969-12-31T23:59:57.789012345678901234'
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'as')),
                     datetime)
        # Sub-microsecond units just after the epoch
        datetime = '1970-01-01T00:34:56.789012345678901234'
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ns')),
                     '1970-01-01T00:34:56.789012345')
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ps')),
                     '1970-01-01T00:34:56.789012345678')
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'fs')),
                     '1970-01-01T00:34:56.789012345678901')
        datetime = '1970-01-01T00:00:05.789012345678901234'
        assert_equal(np.datetime_as_string(np.datetime64(datetime, 'as')),
                     datetime)
        # String conversion with the unit= parameter
        a = np.datetime64('2032-07-18T12:23:34.123456', 'us')
        assert_equal(np.datetime_as_string(a, unit='Y', casting='unsafe'),
                     '2032')
        assert_equal(np.datetime_as_string(a, unit='M', casting='unsafe'),
                     '2032-07')
        assert_equal(np.datetime_as_string(a, unit='W', casting='unsafe'),
                     '2032-07-18')
        assert_equal(np.datetime_as_string(a, unit='D', casting='unsafe'),
                     '2032-07-18')
        assert_equal(np.datetime_as_string(a, unit='h'), '2032-07-18T12')
        assert_equal(np.datetime_as_string(a, unit='m'),
                     '2032-07-18T12:23')
        assert_equal(np.datetime_as_string(a, unit='s'),
                     '2032-07-18T12:23:34')
        assert_equal(np.datetime_as_string(a, unit='ms'),
                     '2032-07-18T12:23:34.123')
        assert_equal(np.datetime_as_string(a, unit='us'),
                     '2032-07-18T12:23:34.123456')
        assert_equal(np.datetime_as_string(a, unit='ns'),
                     '2032-07-18T12:23:34.123456000')
        assert_equal(np.datetime_as_string(a, unit='ps'),
                     '2032-07-18T12:23:34.123456000000')
        assert_equal(np.datetime_as_string(a, unit='fs'),
                     '2032-07-18T12:23:34.123456000000000')
        assert_equal(np.datetime_as_string(a, unit='as'),
                     '2032-07-18T12:23:34.123456000000000000')
        # unit='auto' parameter
        assert_equal(np.datetime_as_string(
                np.datetime64('2032-07-18T12:23:34.123456', 'us'), unit='auto'),
                '2032-07-18T12:23:34.123456')
        assert_equal(np.datetime_as_string(
                np.datetime64('2032-07-18T12:23:34.12', 'us'), unit='auto'),
                '2032-07-18T12:23:34.120')
        assert_equal(np.datetime_as_string(
                np.datetime64('2032-07-18T12:23:34', 'us'), unit='auto'),
                '2032-07-18T12:23:34')
        assert_equal(np.datetime_as_string(
                np.datetime64('2032-07-18T12:23:00', 'us'), unit='auto'),
                '2032-07-18T12:23')
        # 'auto' doesn't split up hour and minute
        assert_equal(np.datetime_as_string(
                np.datetime64('2032-07-18T12:00:00', 'us'), unit='auto'),
                '2032-07-18T12:00')
        assert_equal(np.datetime_as_string(
                np.datetime64('2032-07-18T00:00:00', 'us'), unit='auto'),
                '2032-07-18')
        assert_equal(np.datetime_as_string(
                np.datetime64('2032-07-01T00:00:00', 'us'), unit='auto'),
                '2032-07-01')
        assert_equal(np.datetime_as_string(
                np.datetime64('2032-01-01T00:00:00', 'us'), unit='auto'),
                '2032-01-01')
    @pytest.mark.skipif(not _has_pytz, reason="The pytz module is not available.")
    def test_datetime_as_string_timezone(self):
        """datetime_as_string with the timezone= parameter (needs pytz).

        NOTE(review): ``tz`` is presumably ``pytz.timezone`` imported at
        module level — confirm against the file header.
        """
        # timezone='local' vs 'UTC'
        a = np.datetime64('2010-03-15T06:30', 'm')
        assert_equal(np.datetime_as_string(a),
                     '2010-03-15T06:30')
        assert_equal(np.datetime_as_string(a, timezone='naive'),
                     '2010-03-15T06:30')
        assert_equal(np.datetime_as_string(a, timezone='UTC'),
                     '2010-03-15T06:30Z')
        assert_(np.datetime_as_string(a, timezone='local') !=
                '2010-03-15T06:30')
        # 'a' is during US DST, 'b' is outside it, so offsets differ
        b = np.datetime64('2010-02-15T06:30', 'm')
        assert_equal(np.datetime_as_string(a, timezone=tz('US/Central')),
                     '2010-03-15T01:30-0500')
        assert_equal(np.datetime_as_string(a, timezone=tz('US/Eastern')),
                     '2010-03-15T02:30-0400')
        assert_equal(np.datetime_as_string(a, timezone=tz('US/Pacific')),
                     '2010-03-14T23:30-0700')
        assert_equal(np.datetime_as_string(b, timezone=tz('US/Central')),
                     '2010-02-15T00:30-0600')
        assert_equal(np.datetime_as_string(b, timezone=tz('US/Eastern')),
                     '2010-02-15T01:30-0500')
        assert_equal(np.datetime_as_string(b, timezone=tz('US/Pacific')),
                     '2010-02-14T22:30-0800')
        # Dates to strings with a timezone attached is disabled by default
        assert_raises(TypeError, np.datetime_as_string, a, unit='D',
                      timezone=tz('US/Pacific'))
        # Check that we can print out the date in the specified time zone
        assert_equal(np.datetime_as_string(a, unit='D',
                     timezone=tz('US/Pacific'), casting='unsafe'),
                     '2010-03-14')
        assert_equal(np.datetime_as_string(b, unit='D',
                     timezone=tz('US/Central'), casting='unsafe'),
                     '2010-02-15')
    def test_datetime_arange(self):
        """np.arange with datetime endpoints: unit inference and errors."""
        # With two datetimes provided as strings
        a = np.arange('2010-01-05', '2010-01-10', dtype='M8[D]')
        assert_equal(a.dtype, np.dtype('M8[D]'))
        assert_equal(a,
            np.array(['2010-01-05', '2010-01-06', '2010-01-07',
                      '2010-01-08', '2010-01-09'], dtype='M8[D]'))
        # Negative step counts down
        a = np.arange('1950-02-10', '1950-02-06', -1, dtype='M8[D]')
        assert_equal(a.dtype, np.dtype('M8[D]'))
        assert_equal(a,
            np.array(['1950-02-10', '1950-02-09', '1950-02-08',
                      '1950-02-07'], dtype='M8[D]'))
        # Unit should be detected as months here
        a = np.arange('1969-05', '1970-05', 2, dtype='M8')
        assert_equal(a.dtype, np.dtype('M8[M]'))
        assert_equal(a,
            np.datetime64('1969-05') + np.arange(12, step=2))
        # datetime, integer|timedelta works as well
        # produces arange (start, start + stop) in this case
        a = np.arange('1969', 18, 3, dtype='M8')
        assert_equal(a.dtype, np.dtype('M8[Y]'))
        assert_equal(a,
            np.datetime64('1969') + np.arange(18, step=3))
        a = np.arange('1969-12-19', 22, np.timedelta64(2), dtype='M8')
        assert_equal(a.dtype, np.dtype('M8[D]'))
        assert_equal(a,
            np.datetime64('1969-12-19') + np.arange(22, step=2))
        # Step of 0 is disallowed
        assert_raises(ValueError, np.arange, np.datetime64('today'),
                      np.datetime64('today') + 3, 0)
        # Promotion across nonlinear unit boundaries is disallowed
        assert_raises(TypeError, np.arange, np.datetime64('2011-03-01', 'D'),
                      np.timedelta64(5, 'M'))
        assert_raises(TypeError, np.arange,
                      np.datetime64('2012-02-03T14', 's'),
                      np.timedelta64(5, 'Y'))
def test_datetime_arange_no_dtype(self):
d = np.array('2010-01-04', dtype="M8[D]")
assert_equal(np.arange(d, d + 1), d)
assert_raises(ValueError, np.arange, d)
def test_timedelta_arange(self):
a = np.arange(3, 10, dtype='m8')
assert_equal(a.dtype, np.dtype('m8'))
assert_equal(a, np.timedelta64(0) + np.arange(3, 10))
a = np.arange(np.timedelta64(3, 's'), 10, 2, dtype='m8')
assert_equal(a.dtype, np.dtype('m8[s]'))
assert_equal(a, np.timedelta64(0, 's') + np.arange(3, 10, 2))
# Step of 0 is disallowed
assert_raises(ValueError, np.arange, np.timedelta64(0),
np.timedelta64(5), 0)
# Promotion across nonlinear unit boundaries is disallowed
assert_raises(TypeError, np.arange, np.timedelta64(0, 'D'),
np.timedelta64(5, 'M'))
assert_raises(TypeError, np.arange, np.timedelta64(0, 'Y'),
np.timedelta64(5, 'D'))
    @pytest.mark.parametrize("val1, val2, expected", [
        # case from gh-12092
        (np.timedelta64(7, 's'),
         np.timedelta64(3, 's'),
         np.timedelta64(1, 's')),
        # negative value cases
        (np.timedelta64(3, 's'),
         np.timedelta64(-2, 's'),
         np.timedelta64(-1, 's')),
        (np.timedelta64(-3, 's'),
         np.timedelta64(2, 's'),
         np.timedelta64(1, 's')),
        # larger value cases
        (np.timedelta64(17, 's'),
         np.timedelta64(22, 's'),
         np.timedelta64(17, 's')),
        (np.timedelta64(22, 's'),
         np.timedelta64(17, 's'),
         np.timedelta64(5, 's')),
        # different units
        (np.timedelta64(1, 'm'),
         np.timedelta64(57, 's'),
         np.timedelta64(3, 's')),
        (np.timedelta64(1, 'us'),
         np.timedelta64(727, 'ns'),
         np.timedelta64(273, 'ns')),
        # NaT is propagated
        (np.timedelta64('NaT'),
         np.timedelta64(50, 'ns'),
         np.timedelta64('NaT')),
        # Y % M works
        (np.timedelta64(2, 'Y'),
         np.timedelta64(22, 'M'),
         np.timedelta64(2, 'M')),
        ])
    def test_timedelta_modulus(self, val1, val2, expected):
        """The % operator on timedeltas follows Python's sign convention."""
        assert_equal(val1 % val2, expected)
    @pytest.mark.parametrize("val1, val2", [
        # years and months sometimes can't be unambiguously
        # converted to a common unit, so % must fail
        (np.timedelta64(7, 'Y'),
         np.timedelta64(3, 's')),
        (np.timedelta64(7, 'M'),
         np.timedelta64(1, 'D')),
        ])
    def test_timedelta_modulus_error(self, val1, val2):
        """Modulo across incompatible timedelta units raises TypeError."""
        with assert_raises_regex(TypeError, "common metadata divisor"):
            val1 % val2
def test_timedelta_modulus_div_by_zero(self):
with assert_warns(RuntimeWarning):
actual = np.timedelta64(10, 's') % np.timedelta64(0, 's')
assert_equal(actual, np.timedelta64('NaT'))
    @pytest.mark.parametrize("val1, val2", [
        # timedelta % int and float % timedelta have no type resolution
        (np.timedelta64(7, 'Y'),
         15,),
        (7.5,
         np.timedelta64(1, 'D')),
        ])
    def test_timedelta_modulus_type_resolution(self, val1, val2):
        # NOTE: some of these operations may be supported in the future
        with assert_raises_regex(TypeError,
                                 "'remainder' cannot use operands with types"):
            val1 % val2
def test_timedelta_arange_no_dtype(self):
d = np.array(5, dtype="m8[D]")
assert_equal(np.arange(d, d + 1), d)
assert_equal(np.arange(d), np.arange(0, d))
def test_datetime_maximum_reduce(self):
a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='M8[D]')
assert_equal(np.maximum.reduce(a).dtype, np.dtype('M8[D]'))
assert_equal(np.maximum.reduce(a),
np.datetime64('2010-01-02'))
a = np.array([1, 4, 0, 7, 2], dtype='m8[s]')
assert_equal(np.maximum.reduce(a).dtype, np.dtype('m8[s]'))
assert_equal(np.maximum.reduce(a),
np.timedelta64(7, 's'))
    def test_datetime_busday_offset(self):
        """np.busday_offset: roll modes, custom weekmasks, and NaT input."""
        # Single business day per week (Monday), rolling forward
        assert_equal(
            np.busday_offset('2011-06', 0, roll='forward', weekmask='Mon'),
            np.datetime64('2011-06-06'))
        assert_equal(
            np.busday_offset('2011-07', -1, roll='forward', weekmask='Mon'),
            np.datetime64('2011-06-27'))
        assert_equal(
            np.busday_offset('2011-07', -1, roll='forward', weekmask='Mon'),
            np.datetime64('2011-06-27'))
        # All the roll modes starting from a weekend (2010-08-01 is a Sunday)
        assert_equal(np.busday_offset('2010-08', 0, roll='backward'),
                     np.datetime64('2010-07-30'))
        assert_equal(np.busday_offset('2010-08', 0, roll='preceding'),
                     np.datetime64('2010-07-30'))
        assert_equal(np.busday_offset('2010-08', 0, roll='modifiedpreceding'),
                     np.datetime64('2010-08-02'))
        assert_equal(np.busday_offset('2010-08', 0, roll='modifiedfollowing'),
                     np.datetime64('2010-08-02'))
        assert_equal(np.busday_offset('2010-08', 0, roll='forward'),
                     np.datetime64('2010-08-02'))
        assert_equal(np.busday_offset('2010-08', 0, roll='following'),
                     np.datetime64('2010-08-02'))
        # 'modified*' rolls reverse direction rather than cross a month edge
        assert_equal(np.busday_offset('2010-10-30', 0, roll='following'),
                     np.datetime64('2010-11-01'))
        assert_equal(
            np.busday_offset('2010-10-30', 0, roll='modifiedfollowing'),
            np.datetime64('2010-10-29'))
        assert_equal(
            np.busday_offset('2010-10-30', 0, roll='modifiedpreceding'),
            np.datetime64('2010-10-29'))
        assert_equal(
            np.busday_offset('2010-10-16', 0, roll='modifiedfollowing'),
            np.datetime64('2010-10-18'))
        assert_equal(
            np.busday_offset('2010-10-16', 0, roll='modifiedpreceding'),
            np.datetime64('2010-10-15'))
        # Default roll='raise' rejects a non-business starting day
        assert_raises(ValueError, np.busday_offset, '2011-06-04', 0)
        # Larger offsets, default and inverted weekmasks
        assert_equal(np.busday_offset('2006-02-01', 25),
                     np.datetime64('2006-03-08'))
        assert_equal(np.busday_offset('2006-03-08', -25),
                     np.datetime64('2006-02-01'))
        assert_equal(np.busday_offset('2007-02-25', 11, weekmask='SatSun'),
                     np.datetime64('2007-04-07'))
        assert_equal(np.busday_offset('2007-04-07', -11, weekmask='SatSun'),
                     np.datetime64('2007-02-25'))
        # NaT input propagates regardless of roll mode
        assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='nat'),
                     np.datetime64('NaT'))
        assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='following'),
                     np.datetime64('NaT'))
        assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='preceding'),
                     np.datetime64('NaT'))
    def test_datetime_busdaycalendar(self):
        """np.busdaycalendar: holiday normalization and weekmask parsing."""
        # Holidays: NaT and duplicates are dropped, the result is sorted
        # (weekend holidays would be dropped too, per default weekmask).
        bdd = np.busdaycalendar(
            holidays=['NaT', '2011-01-17', '2011-03-06', 'NaT',
                      '2011-12-26', '2011-05-30', '2011-01-17'])
        assert_equal(bdd.holidays,
            np.array(['2011-01-17', '2011-05-30', '2011-12-26'], dtype='M8'))
        # Default weekmask is Monday through Friday
        assert_equal(bdd.weekmask, np.array([1, 1, 1, 1, 1, 0, 0], dtype='?'))
        # Weekmask accepts abbreviated day names with flexible separators
        bdd = np.busdaycalendar(weekmask="Sun TueWed Thu\tFri")
        assert_equal(bdd.weekmask, np.array([0, 1, 1, 1, 1, 0, 1], dtype='?'))
        # ... or a string of 0/1 flags, Monday first
        bdd = np.busdaycalendar(weekmask="0011001")
        assert_equal(bdd.weekmask, np.array([0, 0, 1, 1, 0, 0, 1], dtype='?'))
        bdd = np.busdaycalendar(weekmask="Mon Tue")
        assert_equal(bdd.weekmask, np.array([1, 1, 0, 0, 0, 0, 0], dtype='?'))
        # Invalid weekmasks must raise
        assert_raises(ValueError, np.busdaycalendar, weekmask=[0, 0, 0, 0, 0, 0, 0])
        assert_raises(ValueError, np.busdaycalendar, weekmask="satsun")
        assert_raises(ValueError, np.busdaycalendar, weekmask="")
        assert_raises(ValueError, np.busdaycalendar, weekmask="Mon Tue We")
        assert_raises(ValueError, np.busdaycalendar, weekmask="Max")
        assert_raises(ValueError, np.busdaycalendar, weekmask="Monday Tue")
    def test_datetime_busday_holidays_offset(self):
        """np.busday_offset with holidays= and busdaycal= parameters."""
        # With exactly one holiday (2011-11-11, a Friday)
        assert_equal(
            np.busday_offset('2011-11-10', 1, holidays=['2011-11-11']),
            np.datetime64('2011-11-14'))
        assert_equal(
            np.busday_offset('2011-11-04', 5, holidays=['2011-11-11']),
            np.datetime64('2011-11-14'))
        assert_equal(
            np.busday_offset('2011-11-10', 5, holidays=['2011-11-11']),
            np.datetime64('2011-11-18'))
        assert_equal(
            np.busday_offset('2011-11-14', -1, holidays=['2011-11-11']),
            np.datetime64('2011-11-10'))
        assert_equal(
            np.busday_offset('2011-11-18', -5, holidays=['2011-11-11']),
            np.datetime64('2011-11-10'))
        assert_equal(
            np.busday_offset('2011-11-14', -5, holidays=['2011-11-11']),
            np.datetime64('2011-11-04'))
        # Duplicate holidays count only once
        assert_equal(
            np.busday_offset('2011-11-10', 1,
                             holidays=['2011-11-11', '2011-11-11']),
            np.datetime64('2011-11-14'))
        assert_equal(
            np.busday_offset('2011-11-14', -1,
                             holidays=['2011-11-11', '2011-11-11']),
            np.datetime64('2011-11-10'))
        # NaT entries in the holiday list are ignored
        assert_equal(
            np.busday_offset('2011-11-10', 1,
                             holidays=['2011-11-11', 'NaT']),
            np.datetime64('2011-11-14'))
        assert_equal(
            np.busday_offset('2011-11-14', -1,
                             holidays=['NaT', '2011-11-11']),
            np.datetime64('2011-11-10'))
        # Holidays outside the traversed range have no effect
        assert_equal(
            np.busday_offset('2011-11-10', 1,
                             holidays=['2011-11-11', '2011-11-24']),
            np.datetime64('2011-11-14'))
        assert_equal(
            np.busday_offset('2011-11-14', -1,
                             holidays=['2011-11-11', '2011-11-24']),
            np.datetime64('2011-11-10'))
        assert_equal(
            np.busday_offset('2011-11-10', 1,
                             holidays=['2011-10-10', '2011-11-11']),
            np.datetime64('2011-11-14'))
        assert_equal(
            np.busday_offset('2011-11-14', -1,
                             holidays=['2011-10-10', '2011-11-11']),
            np.datetime64('2011-11-10'))
        assert_equal(
            np.busday_offset('2011-11-10', 1,
                             holidays=['2011-10-10', '2011-11-11', '2011-11-24']),
            np.datetime64('2011-11-14'))
        assert_equal(
            np.busday_offset('2011-11-14', -1,
                             holidays=['2011-10-10', '2011-11-11', '2011-11-24']),
            np.datetime64('2011-11-10'))
        # A bigger holiday list: each holiday crossed adds one extra
        # plain business day to an offset without holidays
        holidays = ['2011-10-10', '2011-11-11', '2011-11-24',
                    '2011-12-25', '2011-05-30', '2011-02-21',
                    '2011-12-26', '2012-01-02']
        bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)
        assert_equal(
            np.busday_offset('2011-10-03', 4, holidays=holidays),
            np.busday_offset('2011-10-03', 4))
        assert_equal(
            np.busday_offset('2011-10-03', 5, holidays=holidays),
            np.busday_offset('2011-10-03', 5 + 1))
        assert_equal(
            np.busday_offset('2011-10-03', 27, holidays=holidays),
            np.busday_offset('2011-10-03', 27 + 1))
        assert_equal(
            np.busday_offset('2011-10-03', 28, holidays=holidays),
            np.busday_offset('2011-10-03', 28 + 2))
        assert_equal(
            np.busday_offset('2011-10-03', 35, holidays=holidays),
            np.busday_offset('2011-10-03', 35 + 2))
        assert_equal(
            np.busday_offset('2011-10-03', 36, holidays=holidays),
            np.busday_offset('2011-10-03', 36 + 3))
        assert_equal(
            np.busday_offset('2011-10-03', 56, holidays=holidays),
            np.busday_offset('2011-10-03', 56 + 3))
        assert_equal(
            np.busday_offset('2011-10-03', 57, holidays=holidays),
            np.busday_offset('2011-10-03', 57 + 4))
        assert_equal(
            np.busday_offset('2011-10-03', 60, holidays=holidays),
            np.busday_offset('2011-10-03', 60 + 4))
        assert_equal(
            np.busday_offset('2011-10-03', 61, holidays=holidays),
            np.busday_offset('2011-10-03', 61 + 5))
        assert_equal(
            np.busday_offset('2011-10-03', 61, busdaycal=bdd),
            np.busday_offset('2011-10-03', 61 + 5))
        # Same thing going backwards
        assert_equal(
            np.busday_offset('2012-01-03', -1, holidays=holidays),
            np.busday_offset('2012-01-03', -1 - 1))
        assert_equal(
            np.busday_offset('2012-01-03', -4, holidays=holidays),
            np.busday_offset('2012-01-03', -4 - 1))
        assert_equal(
            np.busday_offset('2012-01-03', -5, holidays=holidays),
            np.busday_offset('2012-01-03', -5 - 2))
        assert_equal(
            np.busday_offset('2012-01-03', -25, holidays=holidays),
            np.busday_offset('2012-01-03', -25 - 2))
        assert_equal(
            np.busday_offset('2012-01-03', -26, holidays=holidays),
            np.busday_offset('2012-01-03', -26 - 3))
        assert_equal(
            np.busday_offset('2012-01-03', -33, holidays=holidays),
            np.busday_offset('2012-01-03', -33 - 3))
        assert_equal(
            np.busday_offset('2012-01-03', -34, holidays=holidays),
            np.busday_offset('2012-01-03', -34 - 4))
        assert_equal(
            np.busday_offset('2012-01-03', -56, holidays=holidays),
            np.busday_offset('2012-01-03', -56 - 4))
        assert_equal(
            np.busday_offset('2012-01-03', -57, holidays=holidays),
            np.busday_offset('2012-01-03', -57 - 5))
        assert_equal(
            np.busday_offset('2012-01-03', -57, busdaycal=bdd),
            np.busday_offset('2012-01-03', -57 - 5))
        # Can't supply both a weekmask/holidays and a busdaycal
        assert_raises(ValueError, np.busday_offset, '2012-01-03', -15,
                      weekmask='1111100', busdaycal=bdd)
        assert_raises(ValueError, np.busday_offset, '2012-01-03', -15,
                      holidays=holidays, busdaycal=bdd)
        # Roll with the holidays
        assert_equal(
            np.busday_offset('2011-12-25', 0,
                             roll='forward', holidays=holidays),
            np.datetime64('2011-12-27'))
        assert_equal(
            np.busday_offset('2011-12-26', 0,
                             roll='forward', holidays=holidays),
            np.datetime64('2011-12-27'))
        assert_equal(
            np.busday_offset('2011-12-26', 0,
                             roll='backward', holidays=holidays),
            np.datetime64('2011-12-23'))
        assert_equal(
            np.busday_offset('2012-02-27', 0,
                             roll='modifiedfollowing',
                             holidays=['2012-02-27', '2012-02-26', '2012-02-28',
                                       '2012-03-01', '2012-02-29']),
            np.datetime64('2012-02-24'))
        assert_equal(
            np.busday_offset('2012-03-06', 0,
                             roll='modifiedpreceding',
                             holidays=['2012-03-02', '2012-03-03', '2012-03-01',
                                       '2012-03-05', '2012-03-07', '2012-03-06']),
            np.datetime64('2012-03-08'))
    def test_datetime_busday_holidays_count(self):
        """np.busday_count with holidays, checked against busday_offset."""
        holidays = ['2011-01-01', '2011-10-10', '2011-11-11', '2011-11-24',
                    '2011-12-25', '2011-05-30', '2011-02-21', '2011-01-17',
                    '2011-12-26', '2012-01-02', '2011-02-21', '2011-05-30',
                    '2011-07-01', '2011-07-04', '2011-09-05', '2011-10-10']
        bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)
        # Validate against busday_offset broadcast against
        # a range of offsets
        dates = np.busday_offset('2011-01-01', np.arange(366),
                                 roll='forward', busdaycal=bdd)
        assert_equal(np.busday_count('2011-01-01', dates, busdaycal=bdd),
                     np.arange(366))
        # Returns negative value when reversed
        assert_equal(np.busday_count(dates, '2011-01-01', busdaycal=bdd),
                     -np.arange(366))
        # Same validation counting backwards from year end
        dates = np.busday_offset('2011-12-31', -np.arange(366),
                                 roll='forward', busdaycal=bdd)
        assert_equal(np.busday_count(dates, '2011-12-31', busdaycal=bdd),
                     np.arange(366))
        # Returns negative value when reversed
        assert_equal(np.busday_count('2011-12-31', dates, busdaycal=bdd),
                     -np.arange(366))
        # Can't supply both a weekmask/holidays and busdaycal
        assert_raises(ValueError, np.busday_offset, '2012-01-03', '2012-02-03',
                      weekmask='1111100', busdaycal=bdd)
        assert_raises(ValueError, np.busday_offset, '2012-01-03', '2012-02-03',
                      holidays=holidays, busdaycal=bdd)
        # Number of Mondays in March 2011, and the negated reverse count
        assert_equal(np.busday_count('2011-03', '2011-04', weekmask='Mon'), 4)
        assert_equal(np.busday_count('2011-04', '2011-03', weekmask='Mon'), -4)
def test_datetime_is_busday(self):
holidays = ['2011-01-01', '2011-10-10', '2011-11-11', '2011-11-24',
'2011-12-25', '2011-05-30', '2011-02-21', '2011-01-17',
'2011-12-26', '2012-01-02', '2011-02-21', '2011-05-30',
'2011-07-01', '2011-07-04', '2011-09-05', '2011-10-10',
'NaT']
bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)
assert_equal(np.is_busday('2011-01-01'), False)
assert_equal(np.is_busday('2011-01-02'), False)
assert_equal(np.is_busday('2011-01-03'), True)
assert_equal(np.is_busday(holidays, busdaycal=bdd),
np.zeros(len(holidays), dtype='?'))
def test_datetime_y2038(self):
a = np.datetime64('2038-01-19T03:14:07')
assert_equal(a.view(np.int64), 2**31 - 1)
a = np.datetime64('2038-01-19T03:14:08')
assert_equal(a.view(np.int64), 2**31)
with assert_warns(DeprecationWarning):
a = np.datetime64('2038-01-19T04:14:07+0100')
assert_equal(a.view(np.int64), 2**31 - 1)
with assert_warns(DeprecationWarning):
a = np.datetime64('2038-01-19T04:14:08+0100')
assert_equal(a.view(np.int64), 2**31)
a = np.datetime64('2038-01-20T13:21:14')
assert_equal(str(a), '2038-01-20T13:21:14')
def test_isnat(self):
assert_(np.isnat(np.datetime64('NaT', 'ms')))
assert_(np.isnat(np.datetime64('NaT', 'ns')))
assert_(not np.isnat(np.datetime64('2038-01-19T03:14:07')))
assert_(np.isnat(np.timedelta64('NaT', "ms")))
assert_(not np.isnat(np.timedelta64(34, "ms")))
res = np.array([False, False, True])
for unit in ['Y', 'M', 'W', 'D',
'h', 'm', 's', 'ms', 'us',
'ns', 'ps', 'fs', 'as']:
arr = np.array([123, -321, "NaT"], dtype='<datetime64[%s]' % unit)
assert_equal(np.isnat(arr), res)
arr = np.array([123, -321, "NaT"], dtype='>datetime64[%s]' % unit)
assert_equal(np.isnat(arr), res)
arr = np.array([123, -321, "NaT"], dtype='<timedelta64[%s]' % unit)
assert_equal(np.isnat(arr), res)
arr = np.array([123, -321, "NaT"], dtype='>timedelta64[%s]' % unit)
assert_equal(np.isnat(arr), res)
def test_isnat_error(self):
for t in np.typecodes["All"]:
if t in np.typecodes["Datetime"]:
continue
assert_raises(TypeError, np.isnat, np.zeros(10, t))
def test_isfinite_scalar(self):
assert_(not np.isfinite(np.datetime64('NaT', 'ms')))
assert_(not np.isfinite(np.datetime64('NaT', 'ns')))
assert_(np.isfinite(np.datetime64('2038-01-19T03:14:07')))
assert_(not np.isfinite(np.timedelta64('NaT', "ms")))
assert_(np.isfinite(np.timedelta64(34, "ms")))
    @pytest.mark.parametrize('unit', ['Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',
                                      'us', 'ns', 'ps', 'fs', 'as'])
    @pytest.mark.parametrize('dstr', ['<datetime64[%s]', '>datetime64[%s]',
                                      '<timedelta64[%s]', '>timedelta64[%s]'])
    def test_isfinite_isinf_isnan_units(self, unit, dstr):
        """Check isfinite/isinf/isnan for all units of <M, >M, <m, >m dtypes."""
        arr_val = [123, -321, "NaT"]
        arr = np.array(arr_val, dtype= dstr % unit)
        # Expected masks: only the NaT element is non-finite / nan;
        # datetimes and timedeltas are never infinite.
        pos = np.array([True, True, False])
        neg = np.array([False, False, True])
        false = np.array([False, False, False])
        assert_equal(np.isfinite(arr), pos)
        assert_equal(np.isinf(arr), false)
        assert_equal(np.isnan(arr), neg)
def test_assert_equal(self):
assert_raises(AssertionError, assert_equal,
np.datetime64('nat'), np.timedelta64('nat'))
def test_corecursive_input(self):
a, b = [], []
a.append(b)
b.append(a)
obj_arr = np.array([None])
obj_arr[0] = a
assert_raises(ValueError, obj_arr.astype, 'M8')
assert_raises(ValueError, obj_arr.astype, 'm8')
@pytest.mark.parametrize("shape", [(), (1,)])
def test_discovery_from_object_array(self, shape):
arr = np.array("2020-10-10", dtype=object).reshape(shape)
res = np.array("2020-10-10", dtype="M8").reshape(shape)
assert res.dtype == np.dtype("M8[D]")
assert_equal(arr.astype("M8"), res)
arr[...] = np.bytes_("2020-10-10")
assert_equal(arr.astype("M8"), res)
arr = arr.astype("S")
assert_equal(arr.astype("S").astype("M8"), res)
@pytest.mark.parametrize("time_unit", [
"Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as",
"10D", "2M",
])
def test_limit_symmetry(self, time_unit):
epoch = np.datetime64(0, time_unit)
latest = np.datetime64(np.iinfo(np.int64).max, time_unit)
earliest = np.datetime64(-np.iinfo(np.int64).max, time_unit)
assert earliest < epoch < latest
@pytest.mark.parametrize("time_unit", [
"Y", "M",
pytest.param("W", marks=pytest.mark.xfail(reason="gh-13197")),
"D", "h", "m",
"s", "ms", "us", "ns", "ps", "fs", "as",
pytest.param("10D", marks=pytest.mark.xfail(reason="similar to gh-13197")),
])
@pytest.mark.parametrize("sign", [-1, 1])
def test_limit_str_roundtrip(self, time_unit, sign):
limit = np.datetime64(np.iinfo(np.int64).max * sign, time_unit)
limit_via_str = np.datetime64(str(limit), time_unit)
assert limit_via_str == limit
class TestDateTimeData:
    """np.datetime_data must report a (unit, count) pair for datetime dtypes."""

    def test_basic(self):
        arr = np.array(['1980-03-23'], dtype=np.datetime64)
        assert_equal(np.datetime_data(arr.dtype), ('D', 1))

    def test_bytes(self):
        # Bytes unit specs (tuple and "5ms" string form) are decoded to unicode.
        for unit_spec in ((b'ms', 5), b'5ms'):
            stamp = np.datetime64('2000', unit_spec)
            assert np.datetime_data(stamp.dtype) == ('ms', 5)

    def test_non_ascii(self):
        # The micro sign spelling 'μs' is normalized to ASCII 'us'.
        for unit_spec in (('μs', 5), '5μs'):
            stamp = np.datetime64('2000', unit_spec)
            assert np.datetime_data(stamp.dtype) == ('us', 5)
| true | true |
f7250920aa8cce465657186e7f5d41dd1494786a | 2,066 | py | Python | azure-mgmt-datamigration/azure/mgmt/datamigration/models/project_file_properties_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2019-05-10T19:58:09.000Z | 2019-05-10T19:58:09.000Z | azure-mgmt-datamigration/azure/mgmt/datamigration/models/project_file_properties_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-11-29T14:46:42.000Z | 2018-11-29T14:46:42.000Z | azure-mgmt-datamigration/azure/mgmt/datamigration/models/project_file_properties_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2021-07-28T14:50:54.000Z | 2021-07-28T14:50:54.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ProjectFileProperties(Model):
    """Common properties shared by project file resources.

    ``last_modified`` and ``size`` are read-only: they are populated by the
    server and ignored when sent in a request.

    :param extension: Optional file extension.  If submitted it should not
     have a leading period and must match the extension from file_path.
    :type extension: str
    :param file_path: Relative path of this file resource; settable when
     creating or updating the resource.
    :type file_path: str
    :ivar last_modified: Modification DateTime (server-populated).
    :vartype last_modified: datetime
    :param media_type: File content type; may be modified to reflect the
     actual file content.
    :type media_type: str
    :ivar size: File size (server-populated).
    :vartype size: long
    """

    _validation = {
        'last_modified': {'readonly': True},
        'size': {'readonly': True},
    }

    _attribute_map = {
        'extension': {'key': 'extension', 'type': 'str'},
        'file_path': {'key': 'filePath', 'type': 'str'},
        'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
        'media_type': {'key': 'mediaType', 'type': 'str'},
        'size': {'key': 'size', 'type': 'long'},
    }

    def __init__(self, *, extension: str=None, file_path: str=None, media_type: str=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.extension = extension
        self.file_path = file_path
        # Read-only fields start unset; the server fills them in on responses.
        self.last_modified = None
        self.media_type = media_type
        self.size = None
| 36.892857 | 108 | 0.616167 |
from msrest.serialization import Model
class ProjectFileProperties(Model):
    """Base class for file properties; last_modified and size are read-only
    (server-populated, ignored on requests)."""
    _validation = {
        'last_modified': {'readonly': True},
        'size': {'readonly': True},
    }
    # Maps Python attribute names to wire keys and msrest type strings.
    _attribute_map = {
        'extension': {'key': 'extension', 'type': 'str'},
        'file_path': {'key': 'filePath', 'type': 'str'},
        'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
        'media_type': {'key': 'mediaType', 'type': 'str'},
        'size': {'key': 'size', 'type': 'long'},
    }
    def __init__(self, *, extension: str=None, file_path: str=None, media_type: str=None, **kwargs) -> None:
        super(ProjectFileProperties, self).__init__(**kwargs)
        self.extension = extension
        self.file_path = file_path
        # Server-populated fields start as None.
        self.last_modified = None
        self.media_type = media_type
        self.size = None
| true | true |
f72509bc9ac00b2ac21743261ec417182f6782d1 | 6,683 | py | Python | code/gtp/gtp.py | Go-Trojans/trojan-go | bf1160120e79fbb1cdd37fa08f17160fb133aa40 | [
"Artistic-1.0-cl8"
] | null | null | null | code/gtp/gtp.py | Go-Trojans/trojan-go | bf1160120e79fbb1cdd37fa08f17160fb133aa40 | [
"Artistic-1.0-cl8"
] | null | null | null | code/gtp/gtp.py | Go-Trojans/trojan-go | bf1160120e79fbb1cdd37fa08f17160fb133aa40 | [
"Artistic-1.0-cl8"
] | 1 | 2021-08-28T20:31:01.000Z | 2021-08-28T20:31:01.000Z | # GTP for Trojan-go
# Helper file
import re
def pre_engine(s):
    """Sanitize a raw line sent to the engine.

    Strips characters outside printable-ASCII/tab/newline, drops any '#'
    comment suffix, and turns tabs into spaces (in that order).
    """
    cleaned = re.sub("[^\t\n -~]", "", s)
    cleaned = cleaned.split("#")[0]
    return cleaned.replace("\t", " ")
def pre_controller(s):
    """Sanitize a raw line from the controller.

    Like pre_engine but without comment stripping: '#' is kept verbatim.
    """
    return re.sub("[^\t\n -~]", "", s).replace("\t", " ")
def gtp_boolean(b):
    """Render a truthy value as the GTP boolean literal "true"/"false"."""
    if b:
        return "true"
    return "false"
def gtp_list(l):
    # GTP multi-entry responses (e.g. list_commands) are newline-separated.
    return "\n".join(l)
def gtp_color(color):
    """Map an internal color constant (BLACK/WHITE) to its GTP letter."""
    letters = {BLACK: "B", WHITE: "W"}
    return letters[color]
def gtp_vertex(vertex):
    """Convert an internal vertex to its GTP string form.

    PASS and RESIGN map to the literal words; a (column, row) pair maps to
    letter+number coordinates.  Per the GTP convention the column letters
    skip "I", matching parse_vertex's alphabet.
    """
    if vertex == PASS:
        return "pass"
    elif vertex == RESIGN:
        return "resign"
    else:
        x, y = vertex
        # Bug fix: the table previously read "...RSTYVWYZ" (duplicate "Y",
        # missing "U"/"X"); it must mirror parse_vertex's
        # "abcdefghjklmnopqrstuvwxyz" so coordinates round-trip.
        return "{}{}".format("ABCDEFGHJKLMNOPQRSTUVWXYZ"[x - 1], y)
def gtp_move(color, vertex):
    """Render the "<color> <vertex>" form used in GTP move commands."""
    return "{} {}".format(gtp_color(color), gtp_vertex(vertex))
def parse_message(message):
    """Split one GTP line into (message_id, command, arguments).

    The optional leading integer id, the command word, and the argument
    string may each be absent; absent slots come back as None.
    """
    message = pre_engine(message).strip()
    first, _, tail = message.partition(" ")
    rest = tail or None
    if not first.isdigit():
        return None, first, rest
    message_id = int(first)
    if rest is None:
        return message_id, None, None
    command, _, arg_tail = rest.partition(" ")
    return message_id, command, arg_tail or None
# Color/point constants; signs chosen so negating a color gives its opponent.
WHITE = -1
BLACK = +1
EMPTY = 0
# Sentinel vertices: PASS uses the impossible 1-based board coordinate (0, 0).
PASS = (0, 0)
RESIGN = "resign"
def parse_color(color):
    """Translate a GTP color word ("b"/"black"/"w"/"white", any case).

    Returns BLACK or WHITE, or False when the word is not a color.
    """
    lowered = color.lower()
    if lowered in ("b", "black"):
        return BLACK
    if lowered in ("w", "white"):
        return WHITE
    return False
def parse_vertex(vertex_string):
    """Parse a GTP vertex string ("pass" or e.g. "d4") to internal form.

    Returns PASS, a 1-based (x, y) pair, or False on malformed input.
    Column letters skip "i" per the GTP coordinate convention.
    """
    if vertex_string is None:
        return False
    if vertex_string.lower() == "pass":
        return PASS
    if len(vertex_string) <= 1:
        return False
    column = "abcdefghjklmnopqrstuvwxyz".find(vertex_string[0].lower()) + 1
    if column == 0:
        return False
    if not vertex_string[1:].isdigit():
        return False
    return (column, int(vertex_string[1:]))
def parse_move(move_string):
    """Parse "<color> <vertex>" into (color, vertex), or False if invalid.

    Tokens beyond the first two are ignored, matching the original lenient
    split-based parser.
    """
    parts = move_string.split(" ")
    color = parse_color(parts[0])
    if color is False:
        return False
    vertex = parse_vertex(parts[1] if len(parts) > 1 else None)
    if vertex is False:
        return False
    return color, vertex
# Board sizes accepted by the GTP boardsize command (see Engine.cmd_boardsize).
MIN_BOARD_SIZE = 7
MAX_BOARD_SIZE = 19
def format_success(message_id, response=None):
    """Format a GTP success reply: "=[id][ response]" plus a blank line."""
    body = "" if response is None else " {}".format(response)
    id_part = message_id if message_id else ""
    return "={}{}\n\n".format(id_part, body)
def format_error(message_id, response):
    """Format a GTP failure reply: "?[id][ response]" plus a blank line."""
    body = " {}".format(response) if response else response
    id_part = message_id if message_id else ""
    return "?{}{}\n\n".format(id_part, body)
# Not wired up by default: generic GTP engine front-end that dispatches
# protocol commands to a game object (clear/set_size/set_komi/make_move/get_move).
class Engine(object):
    """Engine side of the Go Text Protocol (GTP), version 2."""

    def __init__(self, game_obj, name="gtp (python library)", version="0.2"):
        self.size = 19
        self.komi = 6.5
        self._game = game_obj
        self._game.clear()
        self._name = name
        self._version = version
        self.disconnect = False
        # Auto-discover supported commands from cmd_* method names.
        self.known_commands = [
            field[4:] for field in dir(self) if field.startswith("cmd_")]

    def send(self, message):
        """Dispatch one raw GTP message and return the formatted reply."""
        message_id, command, arguments = parse_message(message)
        if command in self.known_commands:
            try:
                return format_success(
                    message_id, getattr(self, "cmd_" + command)(arguments))
            except ValueError as exception:
                # cmd_* handlers signal protocol-level errors with ValueError.
                return format_error(message_id, exception.args[0])
        else:
            return format_error(message_id, "unknown command")

    def vertex_in_range(self, vertex):
        """True when vertex is PASS or lies on the current board."""
        if vertex == PASS:
            return True
        if 1 <= vertex[0] <= self.size and 1 <= vertex[1] <= self.size:
            return True
        else:
            return False

    # commands -- one method per GTP command, named cmd_<command>
    def cmd_protocol_version(self, arguments):
        return 2

    def cmd_name(self, arguments):
        return self._name

    def cmd_version(self, arguments):
        return self._version

    def cmd_known_command(self, arguments):
        return gtp_boolean(arguments in self.known_commands)

    def cmd_list_commands(self, arguments):
        return gtp_list(self.known_commands)

    def cmd_quit(self, arguments):
        # Caller is expected to poll self.disconnect and close the session.
        self.disconnect = True

    def cmd_boardsize(self, arguments):
        if arguments.isdigit():
            size = int(arguments)
            if MIN_BOARD_SIZE <= size <= MAX_BOARD_SIZE:
                self.size = size
                self._game.set_size(size)
            else:
                raise ValueError("unacceptable size")
        else:
            raise ValueError("non digit size")

    def cmd_clear_board(self, arguments):
        self._game.clear()

    def cmd_komi(self, arguments):
        try:
            komi = float(arguments)
            self.komi = komi
            self._game.set_komi(komi)
        except ValueError:
            raise ValueError("syntax error")

    def cmd_play(self, arguments):
        # Falls through to "illegal move" unless parsing, range check, and
        # the game object all accept the move.
        move = parse_move(arguments)
        if move:
            color, vertex = move
            if self.vertex_in_range(vertex):
                if self._game.make_move(color, vertex):
                    return
        raise ValueError("illegal move")

    def cmd_genmove(self, arguments):
        c = parse_color(arguments)
        if c:
            move = self._game.get_move(c)
            self._game.make_move(c, move)
            return gtp_vertex(move)
        else:
            raise ValueError("unknown player: {}".format(arguments))
# Not wired up by default: a stub game model useful for exercising Engine.
class MinimalGame(object):
    """Bare-bones game state: places stones with no capture/legality rules."""

    def __init__(self, size=19, komi=6.5):
        self.size = size
        # Bug fix: the komi argument was previously ignored (always 6.5).
        self.komi = komi
        self.board = [EMPTY] * (self.size * self.size)

    def _flatten(self, vertex):
        """Map a 1-based (x, y) vertex to a flat board index."""
        (x, y) = vertex
        return (x - 1) * self.size + (y - 1)

    def clear(self):
        """Reset the board to all-empty."""
        self.board = [EMPTY] * (self.size * self.size)

    def make_move(self, color, vertex):
        """Place a stone if the point is empty; PASS is always accepted.

        No legality check beyond occupancy and no side effects (captures)
        beyond placing the stone.
        """
        if vertex == PASS:
            return True  # noop
        idx = self._flatten(vertex)
        if self.board[idx] != EMPTY:
            return False
        self.board[idx] = color
        return True

    def set_size(self, n):
        """Resize the board; clears all stones."""
        self.size = n
        self.clear()

    def set_komi(self, k):
        self.komi = k

    def get_move(self, color):
        # Pass every time -- at least it's legal.
        return (0, 0)
| 25.029963 | 77 | 0.572497 |
import re
def pre_engine(s):
    """Sanitize engine input: strip junk chars, '#' comments, then tabs."""
    s = re.sub("[^\t\n -~]", "", s)
    s = s.split("#")[0]
    s = s.replace("\t", " ")
    return s
def pre_controller(s):
    """Sanitize controller output: drop non-printables, tabs become spaces."""
    s = re.sub("[^\t\n -~]", "", s)
    s = s.replace("\t", " ")
    return s
def gtp_boolean(b):
    """GTP boolean literal for a truthy value."""
    return "true" if b else "false"
def gtp_list(l):
    """Newline-join a multi-entry GTP response."""
    return "\n".join(l)
def gtp_color(color):
    """Internal color constant -> GTP letter."""
    return {BLACK: "B", WHITE: "W"}[color]
def gtp_vertex(vertex):
    """Internal vertex -> GTP string ("pass"/"resign" or letter+number)."""
    if vertex == PASS:
        return "pass"
    elif vertex == RESIGN:
        return "resign"
    else:
        x, y = vertex
        # NOTE(review): this letter table ends "...RSTYVWYZ" (duplicate "Y",
        # missing "U"/"X").  Unreachable for boards <= 19, but it should
        # mirror parse_vertex's "abcdefghjklmnopqrstuvwxyz" alphabet.
        return "{}{}".format("ABCDEFGHJKLMNOPQRSTYVWYZ"[x - 1], y)
def gtp_move(color, vertex):
    """Render "<color> <vertex>"."""
    return " ".join([gtp_color(color), gtp_vertex(vertex)])
def parse_message(message):
    """Split a GTP line into (message_id, command, arguments); None if absent."""
    message = pre_engine(message).strip()
    first, rest = (message.split(" ", 1) + [None])[:2]
    if first.isdigit():
        message_id = int(first)
        if rest is not None:
            command, arguments = (rest.split(" ", 1) + [None])[:2]
        else:
            command, arguments = None, None
    else:
        message_id = None
        command, arguments = first, rest
    return message_id, command, arguments
# Color/point constants; PASS uses the impossible board coordinate (0, 0).
WHITE = -1
BLACK = +1
EMPTY = 0
PASS = (0, 0)
RESIGN = "resign"
def parse_color(color):
    """GTP color word -> BLACK/WHITE constant, or False if unrecognized."""
    if color.lower() in ["b", "black"]:
        return BLACK
    elif color.lower() in ["w", "white"]:
        return WHITE
    else:
        return False
def parse_vertex(vertex_string):
    """GTP vertex string -> PASS, (x, y), or False on malformed input."""
    if vertex_string is None:
        return False
    elif vertex_string.lower() == "pass":
        return PASS
    elif len(vertex_string) > 1:
        # Column letters skip "i", per the GTP coordinate convention.
        x = "abcdefghjklmnopqrstuvwxyz".find(vertex_string[0].lower()) + 1
        if x == 0:
            return False
        if vertex_string[1:].isdigit():
            y = int(vertex_string[1:])
        else:
            return False
    else:
        return False
    return (x, y)
def parse_move(move_string):
    """"<color> <vertex>" -> (color, vertex), or False if either part is bad."""
    color_string, vertex_string = (move_string.split(" ") + [None])[:2]
    color = parse_color(color_string)
    if color is False:
        return False
    vertex = parse_vertex(vertex_string)
    if vertex is False:
        return False
    return color, vertex
# Board sizes accepted by the boardsize command.
MIN_BOARD_SIZE = 7
MAX_BOARD_SIZE = 19
def format_success(message_id, response=None):
    """GTP success reply: "=[id][ response]" terminated by a blank line."""
    if response is None:
        response = ""
    else:
        response = " {}".format(response)
    if message_id:
        return "={}{}\n\n".format(message_id, response)
    else:
        return "={}\n\n".format(response)
def format_error(message_id, response):
    """GTP failure reply: "?[id][ response]" terminated by a blank line."""
    if response:
        response = " {}".format(response)
    if message_id:
        return "?{}{}\n\n".format(message_id, response)
    else:
        return "?{}\n\n".format(response)
class Engine(object):
    """Engine side of the Go Text Protocol; dispatches to a game object."""
    def __init__(self, game_obj, name="gtp (python library)", version="0.2"):
        self.size = 19
        self.komi = 6.5
        self._game = game_obj
        self._game.clear()
        self._name = name
        self._version = version
        self.disconnect = False
        # Auto-discover supported commands from cmd_* method names.
        self.known_commands = [
            field[4:] for field in dir(self) if field.startswith("cmd_")]
    def send(self, message):
        """Dispatch one raw GTP message and return the formatted reply."""
        message_id, command, arguments = parse_message(message)
        if command in self.known_commands:
            try:
                return format_success(
                    message_id, getattr(self, "cmd_" + command)(arguments))
            except ValueError as exception:
                # cmd_* handlers signal protocol errors with ValueError.
                return format_error(message_id, exception.args[0])
        else:
            return format_error(message_id, "unknown command")
    def vertex_in_range(self, vertex):
        """True when vertex is PASS or lies on the current board."""
        if vertex == PASS:
            return True
        if 1 <= vertex[0] <= self.size and 1 <= vertex[1] <= self.size:
            return True
        else:
            return False
    def cmd_protocol_version(self, arguments):
        return 2
    def cmd_name(self, arguments):
        return self._name
    def cmd_version(self, arguments):
        return self._version
    def cmd_known_command(self, arguments):
        return gtp_boolean(arguments in self.known_commands)
    def cmd_list_commands(self, arguments):
        return gtp_list(self.known_commands)
    def cmd_quit(self, arguments):
        # Caller is expected to poll self.disconnect and close the session.
        self.disconnect = True
    def cmd_boardsize(self, arguments):
        if arguments.isdigit():
            size = int(arguments)
            if MIN_BOARD_SIZE <= size <= MAX_BOARD_SIZE:
                self.size = size
                self._game.set_size(size)
            else:
                raise ValueError("unacceptable size")
        else:
            raise ValueError("non digit size")
    def cmd_clear_board(self, arguments):
        self._game.clear()
    def cmd_komi(self, arguments):
        try:
            komi = float(arguments)
            self.komi = komi
            self._game.set_komi(komi)
        except ValueError:
            raise ValueError("syntax error")
    def cmd_play(self, arguments):
        # Falls through to "illegal move" unless parsing, range check, and
        # the game object all accept the move.
        move = parse_move(arguments)
        if move:
            color, vertex = move
            if self.vertex_in_range(vertex):
                if self._game.make_move(color, vertex):
                    return
        raise ValueError("illegal move")
    def cmd_genmove(self, arguments):
        c = parse_color(arguments)
        if c:
            move = self._game.get_move(c)
            self._game.make_move(c, move)
            return gtp_vertex(move)
        else:
            raise ValueError("unknown player: {}".format(arguments))
class MinimalGame(object):
    """Bare-bones game state: places stones with no capture/legality rules."""
    def __init__(self, size=19, komi=6.5):
        self.size = size
        # NOTE(review): the komi argument is ignored here (always 6.5) --
        # looks like a bug; should presumably be `self.komi = komi`.
        self.komi = 6.5
        self.board = [EMPTY] * (self.size * self.size)
    def _flatten(self, vertex):
        """Map a 1-based (x, y) vertex to a flat board index."""
        (x, y) = vertex
        return (x - 1) * self.size + (y - 1)
    def clear(self):
        """Reset the board to all-empty."""
        self.board = [EMPTY] * (self.size * self.size)
    def make_move(self, color, vertex):
        """Place a stone if the point is empty; PASS always succeeds."""
        if vertex == PASS:
            return True
        idx = self._flatten(vertex)
        if self.board[idx] == EMPTY:
            self.board[idx] = color
            return True
        else:
            return False
    def set_size(self, n):
        """Resize the board; clears all stones."""
        self.size = n
        self.clear()
    def set_komi(self, k):
        self.komi = k
    def get_move(self, color):
        # Always pass -- at least it's legal.
        return (0, 0)
| true | true |
f7250ab88692dbc04fa8a5fc7d974f0ae2eb1e02 | 2,352 | py | Python | model-optimizer/mo/front/kaldi/extractors/normalize_component_ext.py | Andruxin52rus/openvino | d824e371fe7dffb90e6d3d58e4e34adecfce4606 | [
"Apache-2.0"
] | 2 | 2020-11-18T14:14:06.000Z | 2020-11-28T04:55:57.000Z | model-optimizer/mo/front/kaldi/extractors/normalize_component_ext.py | Andruxin52rus/openvino | d824e371fe7dffb90e6d3d58e4e34adecfce4606 | [
"Apache-2.0"
] | 30 | 2020-11-13T11:44:07.000Z | 2022-02-21T13:03:16.000Z | model-optimizer/mo/front/kaldi/extractors/normalize_component_ext.py | mmakridi/openvino | 769bb7709597c14debdaa356dd60c5a78bdfa97e | [
"Apache-2.0"
] | 3 | 2021-03-09T08:27:29.000Z | 2021-04-07T04:58:54.000Z | """
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from extensions.ops.normalize import NormalizeOp
from mo.front.caffe.extractors.utils import embed_input
from mo.front.extractor import FrontExtractorOp
from mo.front.kaldi.loader.utils import collect_until_token, read_binary_bool_token, read_binary_integer32_token, \
read_binary_float_token
from mo.utils.error import Error
class NormalizeComponentFrontExtractor(FrontExtractorOp):
    """Extract Kaldi NormalizeComponent parameters into a Normalize op.

    Reads <Dim>/<InputDim>, optional <TargetRms> and <AddLogStddev> tokens
    from the binary component descriptor and embeds the derived scale as
    the op's 'weights' input.
    """
    op = 'normalizecomponent'
    enabled = True
    @classmethod
    def extract(cls, node):
        pb = node.parameters  # binary token stream of the component
        # Some models use <Dim>, others <InputDim>; rewind and retry before
        # giving up.
        try:
            collect_until_token(pb, b'<Dim>')
        except Error:
            try:
                pb.seek(0)
                collect_until_token(pb, b'<InputDim>')
            except Error:
                raise Error("Neither <Dim> nor <InputDim> were found")
        in_dim = read_binary_integer32_token(pb)
        try:
            collect_until_token(pb, b'<TargetRms>')
            target_rms = read_binary_float_token(pb)
        except Error:
            # model does not contain TargetRms; fall back to 1.0
            target_rms = 1.0
        try:
            collect_until_token(pb, b'<AddLogStddev>')
            add_log = read_binary_bool_token(pb)
        except Error:
            # model does not contain AddLogStddev
            add_log = False
        if add_log is not False:
            raise Error("AddLogStddev True in Normalize component is not supported")
        # scale = target_rms * sqrt(dim) -- presumably mirrors Kaldi's
        # NormalizeComponent scaling; confirm against Kaldi documentation.
        scale = target_rms * np.sqrt(in_dim)
        attrs = {
            'eps': 0.00000001,
            'across_spatial': 0,
            'channel_shared': 1,
            'in_dim': in_dim,
        }
        embed_input(attrs, 1, 'weights', [scale])
        NormalizeOp.update_node_stat(node, attrs)
        return cls.enabled
| 32.219178 | 115 | 0.65051 |
import numpy as np
from extensions.ops.normalize import NormalizeOp
from mo.front.caffe.extractors.utils import embed_input
from mo.front.extractor import FrontExtractorOp
from mo.front.kaldi.loader.utils import collect_until_token, read_binary_bool_token, read_binary_integer32_token, \
read_binary_float_token
from mo.utils.error import Error
class NormalizeComponentFrontExtractor(FrontExtractorOp):
    """Extract Kaldi NormalizeComponent parameters into a Normalize op."""
    op = 'normalizecomponent'
    enabled = True
    @classmethod
    def extract(cls, node):
        pb = node.parameters  # binary token stream of the component
        # Some models use <Dim>, others <InputDim>; rewind and retry.
        try:
            collect_until_token(pb, b'<Dim>')
        except Error:
            try:
                pb.seek(0)
                collect_until_token(pb, b'<InputDim>')
            except Error:
                raise Error("Neither <Dim> nor <InputDim> were found")
        in_dim = read_binary_integer32_token(pb)
        try:
            collect_until_token(pb, b'<TargetRms>')
            target_rms = read_binary_float_token(pb)
        except Error:
            # Token absent from the model; fall back to 1.0.
            target_rms = 1.0
        try:
            collect_until_token(pb, b'<AddLogStddev>')
            add_log = read_binary_bool_token(pb)
        except Error:
            # Token absent from the model.
            add_log = False
        if add_log is not False:
            raise Error("AddLogStddev True in Normalize component is not supported")
        # scale = target_rms * sqrt(dim); embedded as the 'weights' input.
        scale = target_rms * np.sqrt(in_dim)
        attrs = {
               'eps': 0.00000001,
               'across_spatial': 0,
               'channel_shared': 1,
               'in_dim': in_dim,
        }
        embed_input(attrs, 1, 'weights', [scale])
        NormalizeOp.update_node_stat(node, attrs)
        return cls.enabled
| true | true |
f7250ae4fa8e806718a9b01881b2da9dab3d34a5 | 6,708 | py | Python | aclhound/targets/arista.py | gdelaney/aclhound | 417a4ad788e886ce78a9527222e2ab4609c20d23 | [
"BSD-2-Clause"
] | 13 | 2015-01-10T16:42:07.000Z | 2018-07-12T01:53:21.000Z | aclhound/targets/arista.py | gdelaney/aclhound | 417a4ad788e886ce78a9527222e2ab4609c20d23 | [
"BSD-2-Clause"
] | 31 | 2015-01-02T21:42:00.000Z | 2016-04-13T21:31:52.000Z | aclhound/targets/arista.py | gdelaney/aclhound | 417a4ad788e886ce78a9527222e2ab4609c20d23 | [
"BSD-2-Clause"
] | 15 | 2015-01-17T20:09:01.000Z | 2020-09-23T09:06:07.000Z | #!/usr/bin/env python2.7
# Copyright (C) 2014-2015 Job Snijders <job@instituut.net>
#
# This file is part of ACLHound
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from ipaddr import IPNetwork
from grako.contexts import Closure
def render(self, **kwargs):
    """Render an ACLHound policy AST into Arista EOS ACL lines.

    kwargs['afi'] selects the address family (4 or 6); hosts belonging to
    the other family are skipped.  Duplicate lines are suppressed and an
    explicit deny-all terminator is appended.
    """
    policy = self.data
    afi = kwargs['afi']
    config_blob = []

    def afi_match(host):
        # "any" applies to both address families.
        if host == "any":
            return True
        elif IPNetwork(host).version == afi:
            return True
        else:
            return False

    for rule in policy:
        rule = rule[0]
        s_hosts = rule['source']['l3']['ip']
        d_hosts = rule['destination']['l3']['ip']
        logging = rule['keywords']['log']
        stateful = rule['keywords']['state']

        # deal with ICMP
        if "icmp" in rule['protocol']:
            # Renamed from `policy`: the old name shadowed the policy list
            # iterated by the enclosing loop, a maintenance hazard.
            icmp_entries = rule['protocol']['icmp']
            # FIXME this should happen in render or aclsemantics
            if not isinstance(icmp_entries, Closure):
                icmp_entries = [icmp_entries]
            # cycle through all ICMP related elements in the AST
            for entry in icmp_entries:
                for s_host in s_hosts:
                    if not afi_match(s_host):
                        continue
                    for d_host in d_hosts:
                        if not afi_match(d_host):
                            continue
                        if rule['action'] == "allow":
                            action = "permit"
                        else:
                            action = "deny"
                        line = "%s icmp" % action
                        for host in [s_host, d_host]:
                            if host == "any":
                                line += " any"
                            elif IPNetwork(host).prefixlen in [32, 128]:
                                line += " host %s" % host.split('/')[0]
                            elif afi == 4:
                                # IPv4 uses network + wildcard hostmask form.
                                line += " %s %s" % (IPNetwork(host).network,
                                                    IPNetwork(host).hostmask)
                            else:
                                line += " " + host
                        if not entry == "any":
                            for el in ['icmp_type', 'icmp_code']:
                                if not str(entry[el]) == "any":
                                    line += " " + str(entry[el])
                        if logging:
                            line += " log"
                        if line not in config_blob:
                            config_blob.append(line)
            # jump out of the loop because we have nothing to do with
            # L4 when doing ICMP
            continue

        # layer 3 and 4
        s_ports = rule['source']['l4']['ports']
        d_ports = rule['destination']['l4']['ports']
        for s_port in s_ports:
            for d_port in d_ports:
                for s_host in s_hosts:
                    if not afi_match(s_host):
                        continue
                    for d_host in d_hosts:
                        if not afi_match(d_host):
                            continue
                        if rule['action'] == "allow":
                            action = "permit"
                        else:
                            action = "deny"
                        line = action
                        if rule['protocol'] == "any":
                            line += " ip" if afi == 4 else " ipv6"
                        else:
                            line += " " + rule['protocol']
                        if s_host == "any":
                            line += " any"
                        elif IPNetwork(s_host).prefixlen in [32, 128]:
                            line += " host %s" % s_host.split('/')[0]
                        elif afi == 4:
                            line += " %s %s" % (IPNetwork(s_host).network,
                                                IPNetwork(s_host).hostmask)
                        else:
                            line += " " + s_host
                        # Ports are either a (low, high) range tuple, "any",
                        # or a single value rendered as "eq <port>".
                        if type(s_port) == tuple:
                            line += " range %s %s" % (s_port[0], s_port[1])
                        elif not s_port == "any":
                            line += " eq %s" % str(s_port)
                        if d_host == "any":
                            line += " any"
                        elif IPNetwork(d_host).prefixlen in [32, 128]:
                            line += " host %s" % d_host.split('/')[0]
                        elif afi == 4:
                            line += " %s %s" % (IPNetwork(d_host).network,
                                                IPNetwork(d_host).hostmask)
                        else:
                            line += " " + d_host
                        if type(d_port) == tuple:
                            line += " range %s %s" % (d_port[0], d_port[1])
                        elif not d_port == "any":
                            line += " eq %s" % str(d_port)
                        if stateful and rule['protocol'] == "tcp":
                            line += " established"
                        if logging:
                            line += " log"
                        if line not in config_blob:
                            config_blob.append(line)
    # Explicit terminator per address family.
    if afi == 4:
        config_blob.append('deny ip any any')
    if afi == 6:
        config_blob.append('deny any any')
    return config_blob
| 41.153374 | 78 | 0.45319 |
from ipaddr import IPNetwork
from grako.contexts import Closure
def render(self, **kwargs):
    """Render an ACLHound policy AST into Arista EOS ACL lines.

    kwargs['afi'] selects the address family (4 or 6); hosts of the other
    family are skipped, duplicate lines are suppressed, and a deny-all
    terminator is appended.
    """
    policy = self.data
    afi = kwargs['afi']
    config_blob = []
    def afi_match(host):
        # "any" applies to both address families.
        if host == "any":
            return True
        elif IPNetwork(host).version == afi:
            return True
        else:
            return False
    for rule in policy:
        rule = rule[0]
        s_hosts = rule['source']['l3']['ip']
        d_hosts = rule['destination']['l3']['ip']
        logging = rule['keywords']['log']
        stateful = rule['keywords']['state']
        if "icmp" in rule['protocol']:
            # NOTE(review): rebinding `policy` here shadows the policy list
            # iterated by the enclosing loop; harmless at runtime (the
            # iterator is already created) but worth renaming.
            policy = rule['protocol']['icmp']
            if not isinstance(policy, Closure):
                policy = [policy]
            for entry in policy:
                for s_host in s_hosts:
                    if not afi_match(s_host):
                        continue
                    for d_host in d_hosts:
                        if not afi_match(d_host):
                            continue
                        if rule['action'] == "allow":
                            action = "permit"
                        else:
                            action = "deny"
                        line = "%s icmp" % action
                        for host in [s_host, d_host]:
                            if host == "any":
                                line += " any"
                            elif IPNetwork(host).prefixlen in [32, 128]:
                                line += " host %s" % host.split('/')[0]
                            elif afi == 4:
                                line += " %s %s" % (IPNetwork(host).network,
                                                    IPNetwork(host).hostmask)
                            else:
                                line += " " + host
                        if not entry == "any":
                            for el in ['icmp_type', 'icmp_code']:
                                if not str(entry[el]) == "any":
                                    line += " " + str(entry[el])
                        if logging:
                            line += " log"
                        if line not in config_blob:
                            config_blob.append(line)
            # ICMP rules carry no L4 ports; skip the L4 section below.
            continue
        s_ports = rule['source']['l4']['ports']
        d_ports = rule['destination']['l4']['ports']
        for s_port in s_ports:
            for d_port in d_ports:
                for s_host in s_hosts:
                    if not afi_match(s_host):
                        continue
                    for d_host in d_hosts:
                        if not afi_match(d_host):
                            continue
                        if rule['action'] == "allow":
                            action = "permit"
                        else:
                            action = "deny"
                        line = action
                        if rule['protocol'] == "any":
                            line += " ip" if afi == 4 else " ipv6"
                        else:
                            line += " " + rule['protocol']
                        if s_host == "any":
                            line += " any"
                        elif IPNetwork(s_host).prefixlen in [32, 128]:
                            line += " host %s" % s_host.split('/')[0]
                        elif afi == 4:
                            line += " %s %s" % (IPNetwork(s_host).network,
                                                IPNetwork(s_host).hostmask)
                        else:
                            line += " " + s_host
                        # Ports: (low, high) tuple -> range, else "eq <port>".
                        if type(s_port) == tuple:
                            line += " range %s %s" % (s_port[0], s_port[1])
                        elif not s_port == "any":
                            line += " eq %s" % str(s_port)
                        if d_host == "any":
                            line += " any"
                        elif IPNetwork(d_host).prefixlen in [32, 128]:
                            line += " host %s" % d_host.split('/')[0]
                        elif afi == 4:
                            line += " %s %s" % (IPNetwork(d_host).network,
                                                IPNetwork(d_host).hostmask)
                        else:
                            line += " " + d_host
                        if type(d_port) == tuple:
                            line += " range %s %s" % (d_port[0], d_port[1])
                        elif not d_port == "any":
                            line += " eq %s" % str(d_port)
                        if stateful and rule['protocol'] == "tcp":
                            line += " established"
                        if logging:
                            line += " log"
                        if line not in config_blob:
                            config_blob.append(line)
    if afi == 4:
        config_blob.append('deny ip any any')
    if afi == 6:
        config_blob.append('deny any any')
    return config_blob
| true | true |
f7250b0e5c4a53b9bd86487a7b9aa16553365458 | 3,463 | py | Python | deepgram/_utils.py | jeremycline/python-sdk | 5847241be8585982673b6f21080c3f5b921123e4 | [
"MIT"
] | null | null | null | deepgram/_utils.py | jeremycline/python-sdk | 5847241be8585982673b6f21080c3f5b921123e4 | [
"MIT"
] | null | null | null | deepgram/_utils.py | jeremycline/python-sdk | 5847241be8585982673b6f21080c3f5b921123e4 | [
"MIT"
] | null | null | null | from ._constants import DEFAULT_ENDPOINT
from ._types import Options
from ._version import __version__
from typing import Any, Union, Optional, IO, Mapping, Tuple, List
import aiohttp, urllib.parse, json, re, platform
import websockets, websockets.client
# Request-body shapes accepted by _request: JSON-able dict, text, raw bytes,
# or an open binary stream.
Payload = Optional[Union[dict, str, bytes, IO]]
def _prepare_headers(options: Options, headers: Mapping[str, str] = {}) -> dict:
    """Merge caller headers with the auth and User-Agent headers.

    Bug fix: when no api_key was configured this previously emitted
    ``'Authorization': None`` (not a valid header value) and clobbered any
    caller-supplied Authorization header; the key is now set only when an
    api_key is present.
    """
    prepared = {
        **headers,
        'User-Agent': f'deepgram/{__version__} python/{platform.python_version()}'
    }
    if 'api_key' in options:
        # auth_method defaults to the "Token" scheme.
        prepared['Authorization'] = options.get('auth_method', 'Token') + ' ' + options['api_key']
    return prepared
def _normalize_payload(payload: Payload) -> Optional[Union[bytes, IO]]:
    """Coerce a payload to bytes (or a stream) for transmission.

    dicts are JSON-encoded and strings UTF-8 encoded; None, bytes, and
    file-like objects pass through untouched.
    """
    if isinstance(payload, dict):
        payload = json.dumps(payload)
    if isinstance(payload, str):
        return payload.encode('utf-8')
    return payload
def _make_query_string(params: Mapping[str, Any] = {}) -> str:
def elem_decomposer(key: str, value: Any) -> List[Tuple[str, str]]:
if value in [None, ""]:
return []
if isinstance(value, list):
return [elem_decomposer(key, item)[0] for item in value] # break into multiple parameters
# just take the first element in the sublist, rather than trying to flatten recursively
# passing nested lists as query parameters isn't really well-defined,
# nor does anything in our API currently take things like that as of 2021-08-10
# so everything coming through this second pass should be a 1-item list
if isinstance(value, bool):
return [(key, str(value).lower())] # make sure False and True stay lowercased in accordance with DG convention
return [(key, str(value))]
unflattened = [elem_decomposer(k, v) for k, v in params.items()] # sublist for each original parameter
flattened = sum(unflattened, []) # flatten
return ('?' if flattened else '') + urllib.parse.urlencode(flattened)
async def _request(path: str, options: Options, method: str = 'GET', payload: Payload = None, headers: Optional[Mapping[str, str]] = {}) -> Optional[dict]:
    """Perform one HTTP request against the API and decode the JSON reply.

    Returns None for an empty response body.  HTTP errors and bodies that
    carry an 'error' field are re-raised as generic Exceptions prefixed
    with 'DG:'; other client errors propagate unchanged.
    """
    destination = options.get('api_url', DEFAULT_ENDPOINT) + path
    updated_headers = _prepare_headers(options, headers)
    try:
        async with aiohttp.request(method, destination, data=_normalize_payload(payload), headers=updated_headers, raise_for_status=True) as resp:
            content = (await resp.text()).strip()
            if not content:
                return None
            body = json.loads(content)
            if body.get('error'):
                # API-level failure delivered with a 2xx status.
                raise Exception(f'DG: {content}')
            return body
    except aiohttp.ClientResponseError as e:
        raise Exception(f'DG: {e}')
    except aiohttp.ClientError as e:
        raise e
async def _socket_connect(path: str, options: Options, headers: Optional[Mapping[str, str]] = {}) -> websockets.client.WebSocketClientProtocol:
    """Open a websocket to the API (http(s) endpoint rewritten to ws(s)).

    Handshake failures are re-raised as generic Exceptions prefixed 'DG:'.
    """
    destination = re.sub(r'^http', 'ws', options.get('api_url', DEFAULT_ENDPOINT)) + path
    updated_headers = _prepare_headers(options, headers)
    try:
        return await websockets.connect(destination, extra_headers=updated_headers, ping_interval=5)
        # If we're streaming too much faster than realtime, connection might close without an aggressive ping interval
    except websockets.exceptions.InvalidHandshake as e:
        raise Exception(f'DG: {e}')
from ._types import Options
from ._version import __version__
from typing import Any, Union, Optional, IO, Mapping, Tuple, List
import aiohttp, urllib.parse, json, re, platform
import websockets, websockets.client
Payload = Optional[Union[dict, str, bytes, IO]]
def _prepare_headers(options: Options, headers: Mapping[str, str] = {}) -> dict:
    """Return the headers to send: the caller's, plus Authorization and User-Agent.

    Authorization is built from options['api_key'] using options['auth_method']
    (default 'Token'); it is set to None when no api_key is configured. The
    User-Agent always advertises the SDK and Python versions.
    """
    prepared = dict(headers)
    if 'api_key' in options:
        scheme = options.get('auth_method', 'Token')
        auth_value = scheme + ' ' + options['api_key']
    else:
        auth_value = None
    prepared['Authorization'] = auth_value
    prepared['User-Agent'] = f'deepgram/{__version__} python/{platform.python_version()}'
    return prepared
def _normalize_payload(payload: Payload) -> Optional[Union[bytes, IO]]:
    """Coerce a request payload into something aiohttp can send.

    dicts are JSON-encoded and strs UTF-8 encoded; bytes and file-like
    objects pass through untouched; None stays None.
    """
    if payload is None:
        return None
    if isinstance(payload, (dict, str)):
        text = json.dumps(payload) if isinstance(payload, dict) else payload
        return text.encode('utf-8')
    # already bytes or a readable stream
    return payload
def _make_query_string(params: Mapping[str, Any] = {}) -> str:
def elem_decomposer(key: str, value: Any) -> List[Tuple[str, str]]:
if value in [None, ""]:
return []
if isinstance(value, list):
return [elem_decomposer(key, item)[0] for item in value]
# nor does anything in our API currently take things like that as of 2021-08-10
# so everything coming through this second pass should be a 1-item list
if isinstance(value, bool):
return [(key, str(value).lower())] # make sure False and True stay lowercased in accordance with DG convention
return [(key, str(value))]
unflattened = [elem_decomposer(k, v) for k, v in params.items()] # sublist for each original parameter
flattened = sum(unflattened, []) # flatten
return ('?' if flattened else '') + urllib.parse.urlencode(flattened)
async def _request(path: str, options: Options, method: str = 'GET', payload: Payload = None, headers: Optional[Mapping[str, str]] = {}) -> Optional[dict]:
    """Issue an HTTP request to the API and return its parsed JSON body.

    Returns None for an empty response body. HTTP error statuses and JSON
    bodies containing an 'error' field are raised as 'DG: ...' exceptions;
    other aiohttp client errors propagate unchanged.
    """
    url = options.get('api_url', DEFAULT_ENDPOINT) + path
    request_headers = _prepare_headers(options, headers)
    try:
        # raise_for_status=True maps non-2xx statuses onto ClientResponseError
        async with aiohttp.request(method, url, data=_normalize_payload(payload), headers=request_headers, raise_for_status=True) as response:
            text = (await response.text()).strip()
            if not text:
                return None
            parsed = json.loads(text)
            # an application-level failure can arrive with a 2xx status
            if parsed.get('error'):
                raise Exception(f'DG: {text}')
            return parsed
    except aiohttp.ClientResponseError as e:
        raise Exception(f'DG: {e}')
    except aiohttp.ClientError as e:
        raise e
async def _socket_connect(path: str, options: Options, headers: Optional[Mapping[str, str]] = {}) -> websockets.client.WebSocketClientProtocol:
destination = re.sub(r'^http', 'ws', options.get('api_url', DEFAULT_ENDPOINT)) + path
updated_headers = _prepare_headers(options, headers)
try:
return await websockets.connect(destination, extra_headers=updated_headers, ping_interval=5)
# If we're streaming too much faster than realtime, connection might close without an aggressive ping interval
except websockets.exceptions.InvalidHandshake as e:
raise Exception(f'DG: {e}') | true | true |
f7250ba98072e446898a6e4e1a69f331c437a919 | 215,396 | py | Python | hydrus/client/gui/ClientGUITags.py | bbappserver/hydrus-build-test | de7868c2f549faaf4a189b120cddcb39d16a64ba | [
"WTFPL"
] | null | null | null | hydrus/client/gui/ClientGUITags.py | bbappserver/hydrus-build-test | de7868c2f549faaf4a189b120cddcb39d16a64ba | [
"WTFPL"
] | null | null | null | hydrus/client/gui/ClientGUITags.py | bbappserver/hydrus-build-test | de7868c2f549faaf4a189b120cddcb39d16a64ba | [
"WTFPL"
] | null | null | null | import collections
import itertools
import os
import random
import time
import typing
from qtpy import QtCore as QC
from qtpy import QtWidgets as QW
from qtpy import QtGui as QG
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusData
from hydrus.core import HydrusExceptions
from hydrus.core import HydrusGlobals as HG
from hydrus.core import HydrusSerialisable
from hydrus.core import HydrusTags
from hydrus.core import HydrusText
from hydrus.core.networking import HydrusNetwork
from hydrus.client import ClientApplicationCommand as CAC
from hydrus.client import ClientConstants as CC
from hydrus.client import ClientManagers
from hydrus.client.gui import ClientGUIAsync
from hydrus.client.gui import ClientGUICore as CGC
from hydrus.client.gui import ClientGUIDialogs
from hydrus.client.gui import ClientGUIDialogsQuick
from hydrus.client.gui import ClientGUIFunctions
from hydrus.client.gui import ClientGUIMenus
from hydrus.client.gui import ClientGUIScrolledPanels
from hydrus.client.gui import ClientGUIScrolledPanelsReview
from hydrus.client.gui import ClientGUIShortcuts
from hydrus.client.gui import ClientGUITagSuggestions
from hydrus.client.gui import ClientGUITopLevelWindowsPanels
from hydrus.client.gui import QtPorting as QP
from hydrus.client.gui.lists import ClientGUIListBoxes
from hydrus.client.gui.lists import ClientGUIListConstants as CGLC
from hydrus.client.gui.lists import ClientGUIListCtrl
from hydrus.client.gui.networking import ClientGUIHydrusNetwork
from hydrus.client.gui.search import ClientGUIACDropdown
from hydrus.client.gui.widgets import ClientGUICommon
from hydrus.client.gui.widgets import ClientGUIControls
from hydrus.client.gui.widgets import ClientGUIMenuButton
from hydrus.client.media import ClientMedia
from hydrus.client.metadata import ClientTags
from hydrus.client.metadata import ClientTagsHandling
class EditTagAutocompleteOptionsPanel( ClientGUIScrolledPanels.EditPanel ):
    """Edit panel for one tag service's TagAutocompleteOptions.

    Presents the autocomplete search permissions (namespace searching, '*'
    queries, automatic fetching) and, for non-combined services, the default
    write-autocomplete tag/file domains used by the manage tags dialog.
    """
    
    def __init__( self, parent: QW.QWidget, tag_autocomplete_options: ClientTagsHandling.TagAutocompleteOptions ):
        
        ClientGUIScrolledPanels.EditPanel.__init__( self, parent )
        
        # kept so GetValue can rebuild options against the same service key
        self._original_tag_autocomplete_options = tag_autocomplete_options
        
        services_manager = HG.client_controller.services_manager
        
        all_real_tag_service_keys = services_manager.GetServiceKeys( HC.REAL_TAG_SERVICES )
        all_real_file_service_keys = services_manager.GetServiceKeys( ( HC.LOCAL_FILE_DOMAIN, HC.FILE_REPOSITORY ) )
        
        #
        
        self._write_autocomplete_tag_domain = ClientGUICommon.BetterChoice( self )
        self._write_autocomplete_tag_domain.setToolTip( 'A manage tags autocomplete will start with this domain. Typically only useful with this service or "all known tags".' )
        
        self._write_autocomplete_tag_domain.addItem( services_manager.GetName( CC.COMBINED_TAG_SERVICE_KEY ), CC.COMBINED_TAG_SERVICE_KEY )
        
        for service_key in all_real_tag_service_keys:
            
            self._write_autocomplete_tag_domain.addItem( services_manager.GetName( service_key ), service_key )
            
        
        self._override_write_autocomplete_file_domain = QW.QCheckBox( self )
        self._override_write_autocomplete_file_domain.setToolTip( 'If set, a manage tags dialog autocomplete will start with a different file domain than the one that launched the dialog.' )
        
        self._write_autocomplete_file_domain = ClientGUICommon.BetterChoice( self )
        self._write_autocomplete_file_domain.setToolTip( 'A manage tags autocomplete will start with this domain. Normally only useful for "all known files" or "my files".' )
        
        self._write_autocomplete_file_domain.addItem( services_manager.GetName( CC.COMBINED_FILE_SERVICE_KEY ), CC.COMBINED_FILE_SERVICE_KEY )
        
        for service_key in all_real_file_service_keys:
            
            self._write_autocomplete_file_domain.addItem( services_manager.GetName( service_key ), service_key )
            
        
        self._search_namespaces_into_full_tags = QW.QCheckBox( self )
        self._search_namespaces_into_full_tags.setToolTip( 'If on, a search for "ser" will return all "series:" results such as "series:metrod". On large tag services, these searches are extremely slow.' )
        
        self._namespace_bare_fetch_all_allowed = QW.QCheckBox( self )
        self._namespace_bare_fetch_all_allowed.setToolTip( 'If on, a search for "series:" will return all "series:" results. On large tag services, these searches are extremely slow.' )
        
        self._namespace_fetch_all_allowed = QW.QCheckBox( self )
        self._namespace_fetch_all_allowed.setToolTip( 'If on, a search for "series:*" will return all "series:" results. On large tag services, these searches are extremely slow.' )
        
        self._fetch_all_allowed = QW.QCheckBox( self )
        self._fetch_all_allowed.setToolTip( 'If on, a search for "*" will return all tags. On large tag services, these searches are extremely slow.' )
        
        self._fetch_results_automatically = QW.QCheckBox( self )
        self._fetch_results_automatically.setToolTip( 'If on, results will load as you type. If off, you will have to hit a shortcut (default Ctrl+Space) to load results.' )
        
        self._exact_match_character_threshold = ClientGUICommon.NoneableSpinCtrl( self, none_phrase = 'always autocomplete (only appropriate for small tag services)', min = 1, max = 256, unit = 'characters' )
        self._exact_match_character_threshold.setToolTip( 'When the search text has <= this many characters, autocomplete will not occur and you will only get results that exactly match the input. Increasing this value makes autocomplete snappier but reduces the number of results.' )
        
        #
        
        # populate the controls from the incoming options object
        
        self._write_autocomplete_tag_domain.SetValue( tag_autocomplete_options.GetWriteAutocompleteTagDomain() )
        self._override_write_autocomplete_file_domain.setChecked( tag_autocomplete_options.OverridesWriteAutocompleteFileDomain() )
        self._write_autocomplete_file_domain.SetValue( tag_autocomplete_options.GetWriteAutocompleteFileDomain() )
        self._search_namespaces_into_full_tags.setChecked( tag_autocomplete_options.SearchNamespacesIntoFullTags() )
        self._namespace_bare_fetch_all_allowed.setChecked( tag_autocomplete_options.NamespaceBareFetchAllAllowed() )
        self._namespace_fetch_all_allowed.setChecked( tag_autocomplete_options.NamespaceFetchAllAllowed() )
        self._fetch_all_allowed.setChecked( tag_autocomplete_options.FetchAllAllowed() )
        self._fetch_results_automatically.setChecked( tag_autocomplete_options.FetchResultsAutomatically() )
        self._exact_match_character_threshold.SetValue( tag_autocomplete_options.GetExactMatchCharacterThreshold() )
        
        #
        
        rows = []
        
        rows.append( ( 'Fetch results as you type: ', self._fetch_results_automatically ) )
        rows.append( ( 'Do-not-autocomplete character threshold: ', self._exact_match_character_threshold ) )
        
        # the write-autocomplete domain controls only make sense for a real
        # service; for the combined service they are hidden entirely
        if tag_autocomplete_options.GetServiceKey() == CC.COMBINED_TAG_SERVICE_KEY:
            
            self._write_autocomplete_tag_domain.setVisible( False )
            self._override_write_autocomplete_file_domain.setVisible( False )
            self._write_autocomplete_file_domain.setVisible( False )
            
        else:
            
            rows.append( ( 'Override default autocomplete file domain in _manage tags_: ', self._override_write_autocomplete_file_domain ) )
            rows.append( ( 'Default autocomplete file domain in _manage tags_: ', self._write_autocomplete_file_domain ) )
            rows.append( ( 'Default autocomplete tag domain in _manage tags_: ', self._write_autocomplete_tag_domain ) )
            
        
        rows.append( ( 'Search namespaces with normal input: ', self._search_namespaces_into_full_tags ) )
        rows.append( ( 'Allow "namespace:": ', self._namespace_bare_fetch_all_allowed ) )
        rows.append( ( 'Allow "namespace:*": ', self._namespace_fetch_all_allowed ) )
        rows.append( ( 'Allow "*": ', self._fetch_all_allowed ) )
        
        gridbox = ClientGUICommon.WrapInGrid( self, rows )
        
        vbox = QP.VBoxLayout()
        
        label = 'The settings that permit searching namespaces and expansive "*" queries can be very expensive on a large client and may cause problems!'
        
        st = ClientGUICommon.BetterStaticText( self, label = label )
        
        QP.AddToLayout( vbox, st, CC.FLAGS_EXPAND_PERPENDICULAR )
        QP.AddToLayout( vbox, gridbox, CC.FLAGS_EXPAND_PERPENDICULAR )
        vbox.addStretch( 1 )
        
        self.widget().setLayout( vbox )
        
        # sync the enabled/checked state of the interdependent checkboxes now
        # and whenever their governing checkboxes change
        self._UpdateControls()
        
        self._override_write_autocomplete_file_domain.stateChanged.connect( self._UpdateControls )
        self._search_namespaces_into_full_tags.stateChanged.connect( self._UpdateControls )
        self._namespace_bare_fetch_all_allowed.stateChanged.connect( self._UpdateControls )
        
    
    def _UpdateControls( self ):
        """Enable/disable the dependent checkboxes to reflect implication rules.

        'search namespaces' implies both namespace fetch-all permissions, and
        bare 'namespace:' implies 'namespace:*'; implied boxes are disabled and
        forced checked (with signals blocked to avoid recursive updates).
        """
        
        self._write_autocomplete_file_domain.setEnabled( self._override_write_autocomplete_file_domain.isChecked() )
        
        if self._search_namespaces_into_full_tags.isChecked():
            
            self._namespace_bare_fetch_all_allowed.setEnabled( False )
            self._namespace_fetch_all_allowed.setEnabled( False )
            
        else:
            
            self._namespace_bare_fetch_all_allowed.setEnabled( True )
            
            if self._namespace_bare_fetch_all_allowed.isChecked():
                
                self._namespace_fetch_all_allowed.setEnabled( False )
                
            else:
                
                self._namespace_fetch_all_allowed.setEnabled( True )
                
            
        
        for c in ( self._namespace_bare_fetch_all_allowed, self._namespace_fetch_all_allowed ):
            
            if not c.isEnabled():
                
                # a disabled box is implied-on by a broader setting
                c.blockSignals( True )
                
                c.setChecked( True )
                
                c.blockSignals( False )
                
            
        
    
    def GetValue( self ):
        """Return a fresh TagAutocompleteOptions reflecting the current UI state."""
        
        tag_autocomplete_options = ClientTagsHandling.TagAutocompleteOptions( self._original_tag_autocomplete_options.GetServiceKey() )
        
        write_autocomplete_tag_domain = self._write_autocomplete_tag_domain.GetValue()
        override_write_autocomplete_file_domain = self._override_write_autocomplete_file_domain.isChecked()
        write_autocomplete_file_domain = self._write_autocomplete_file_domain.GetValue()
        search_namespaces_into_full_tags = self._search_namespaces_into_full_tags.isChecked()
        namespace_bare_fetch_all_allowed = self._namespace_bare_fetch_all_allowed.isChecked()
        namespace_fetch_all_allowed = self._namespace_fetch_all_allowed.isChecked()
        fetch_all_allowed = self._fetch_all_allowed.isChecked()
        
        tag_autocomplete_options.SetTuple(
            write_autocomplete_tag_domain,
            override_write_autocomplete_file_domain,
            write_autocomplete_file_domain,
            search_namespaces_into_full_tags,
            namespace_bare_fetch_all_allowed,
            namespace_fetch_all_allowed,
            fetch_all_allowed
        )
        
        tag_autocomplete_options.SetFetchResultsAutomatically( self._fetch_results_automatically.isChecked() )
        tag_autocomplete_options.SetExactMatchCharacterThreshold( self._exact_match_character_threshold.GetValue() )
        
        return tag_autocomplete_options
        
    
class EditTagDisplayApplication( ClientGUIScrolledPanels.EditPanel ):
    """Edit panel mapping each tag service to the services whose siblings and
    parents should be applied to it.

    One notebook page per real tag service; each page holds two ordered lists
    (siblings, parents) of applicable service keys.
    """
    
    def __init__( self, parent, master_service_keys_to_sibling_applicable_service_keys, master_service_keys_to_parent_applicable_service_keys ):
        
        # defaultdicts so services missing from the incoming mappings show empty lists
        master_service_keys_to_sibling_applicable_service_keys = collections.defaultdict( list, master_service_keys_to_sibling_applicable_service_keys )
        master_service_keys_to_parent_applicable_service_keys = collections.defaultdict( list, master_service_keys_to_parent_applicable_service_keys )
        
        ClientGUIScrolledPanels.EditPanel.__init__( self, parent )
        
        self._tag_services_notebook = ClientGUICommon.BetterNotebook( self )
        
        min_width = ClientGUIFunctions.ConvertTextToPixelWidth( self._tag_services_notebook, 100 )
        
        self._tag_services_notebook.setMinimumWidth( min_width )
        
        #
        
        services = list( HG.client_controller.services_manager.GetServices( HC.REAL_TAG_SERVICES ) )
        
        select_service_key = services[0].GetServiceKey()
        
        for service in services:
            
            master_service_key = service.GetServiceKey()
            name = service.GetName()
            
            sibling_applicable_service_keys = master_service_keys_to_sibling_applicable_service_keys[ master_service_key ]
            parent_applicable_service_keys = master_service_keys_to_parent_applicable_service_keys[ master_service_key ]
            
            page = self._Panel( self._tag_services_notebook, master_service_key, sibling_applicable_service_keys, parent_applicable_service_keys )
            
            select = master_service_key == select_service_key
            
            self._tag_services_notebook.addTab( page, name )
            
            if select:
                
                self._tag_services_notebook.setCurrentWidget( page )
                
            
        
        #
        
        vbox = QP.VBoxLayout()
        
        message = 'While a tag service normally applies its own siblings and parents to itself, it does not have to. If you want a different service\'s siblings (e.g. putting the PTR\'s siblings on your "my tags"), or multiple services\', then set it here. You can also apply no siblings or parents at all.'
        message += os.linesep * 2
        message += 'If there are conflicts, the services at the top of the list have precedence. Parents are collapsed by sibling rules before they are applied.'
        
        self._message = ClientGUICommon.BetterStaticText( self, label = message )
        self._message.setWordWrap( True )
        
        self._sync_status = ClientGUICommon.BetterStaticText( self )
        
        self._sync_status.setWordWrap( True )
        
        # tell the user when (or whether) their changes will actually be synced
        if HG.client_controller.new_options.GetBoolean( 'tag_display_maintenance_during_active' ):
            
            self._sync_status.setText( 'Siblings and parents are set to sync all the time. Changes will start applying as soon as you ok this dialog.' )
            
            self._sync_status.setObjectName( 'HydrusValid' )
            
        else:
            
            if HG.client_controller.new_options.GetBoolean( 'tag_display_maintenance_during_idle' ):
                
                self._sync_status.setText( 'Siblings and parents are only set to sync during idle time. Changes here will only start to apply when you are not using the client.' )
                
            else:
                
                self._sync_status.setText( 'Siblings and parents are not set to sync in the background at any time. If there is sync work to do, you will have to force it to run using the \'review\' window under _tags->siblings and parents sync_.' )
                
            
            self._sync_status.setObjectName( 'HydrusWarning' )
            
        
        self._sync_status.style().polish( self._sync_status )
        
        QP.AddToLayout( vbox, self._message, CC.FLAGS_EXPAND_PERPENDICULAR )
        QP.AddToLayout( vbox, self._sync_status, CC.FLAGS_EXPAND_PERPENDICULAR )
        QP.AddToLayout( vbox, self._tag_services_notebook, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        self.widget().setLayout( vbox )
        
    
    def GetValue( self ):
        """Return ( siblings mapping, parents mapping ) of master service key ->
        ordered list of applicable service keys, gathered from all pages."""
        
        master_service_keys_to_sibling_applicable_service_keys = collections.defaultdict( list )
        master_service_keys_to_parent_applicable_service_keys = collections.defaultdict( list )
        
        for page in self._tag_services_notebook.GetPages():
            
            ( master_service_key, sibling_applicable_service_keys, parent_applicable_service_keys ) = page.GetValue()
            
            master_service_keys_to_sibling_applicable_service_keys[ master_service_key ] = sibling_applicable_service_keys
            master_service_keys_to_parent_applicable_service_keys[ master_service_key ] = parent_applicable_service_keys
            
        
        return ( master_service_keys_to_sibling_applicable_service_keys, master_service_keys_to_parent_applicable_service_keys )
        
    
    class _Panel( QW.QWidget ):
        """One notebook page: the sibling and parent application lists for one service."""
        
        def __init__( self, parent: QW.QWidget, master_service_key: bytes, sibling_applicable_service_keys: typing.Sequence[ bytes ], parent_applicable_service_keys: typing.Sequence[ bytes ] ):
            
            QW.QWidget.__init__( self, parent )
            
            self._master_service_key = master_service_key
            
            #
            
            self._sibling_box = ClientGUICommon.StaticBox( self, 'sibling application' )
            
            self._sibling_service_keys_listbox = ClientGUIListBoxes.QueueListBox( self._sibling_box, 4, HG.client_controller.services_manager.GetName, add_callable = self._AddSibling )
            
            self._sibling_service_keys_listbox.AddDatas( sibling_applicable_service_keys )
            
            #
            
            self._parent_box = ClientGUICommon.StaticBox( self, 'parent application' )
            
            # FIX: this listbox was previously constructed with self._sibling_box
            # as its parent widget, a copy-paste inconsistency; it lives in the
            # parent box, where it is Add()ed below
            self._parent_service_keys_listbox = ClientGUIListBoxes.QueueListBox( self._parent_box, 4, HG.client_controller.services_manager.GetName, add_callable = self._AddParent )
            
            self._parent_service_keys_listbox.AddDatas( parent_applicable_service_keys )
            
            #
            
            self._sibling_box.Add( self._sibling_service_keys_listbox, CC.FLAGS_EXPAND_BOTH_WAYS )
            self._parent_box.Add( self._parent_service_keys_listbox, CC.FLAGS_EXPAND_BOTH_WAYS )
            
            #
            
            vbox = QP.VBoxLayout()
            
            QP.AddToLayout( vbox, self._sibling_box, CC.FLAGS_EXPAND_BOTH_WAYS )
            QP.AddToLayout( vbox, self._parent_box, CC.FLAGS_EXPAND_BOTH_WAYS )
            
            self.setLayout( vbox )
            
        
        def _AddParent( self ):
            """Prompt for a service to append to the parent application list."""
            
            current_service_keys = self._parent_service_keys_listbox.GetData()
            
            return self._AddService( current_service_keys )
            
        
        def _AddService( self, current_service_keys ):
            """Let the user pick a real tag service not already in current_service_keys.

            :raises HydrusExceptions.VetoException: if no service is available or
                the user cancels, so the QueueListBox add is aborted.
            """
            
            allowed_services = HG.client_controller.services_manager.GetServices( HC.REAL_TAG_SERVICES )
            
            allowed_services = [ service for service in allowed_services if service.GetServiceKey() not in current_service_keys ]
            
            if len( allowed_services ) == 0:
                
                QW.QMessageBox.information( self, 'Information', 'You have all the current tag services applied to this service.' )
                
                raise HydrusExceptions.VetoException()
                
            
            choice_tuples = [ ( service.GetName(), service.GetServiceKey(), service.GetName() ) for service in allowed_services ]
            
            try:
                
                service_key = ClientGUIDialogsQuick.SelectFromListButtons( self, 'Which service?', choice_tuples )
                
                return service_key
                
            except HydrusExceptions.CancelledException:
                
                raise HydrusExceptions.VetoException()
                
            
        
        def _AddSibling( self ):
            """Prompt for a service to append to the sibling application list."""
            
            current_service_keys = self._sibling_service_keys_listbox.GetData()
            
            return self._AddService( current_service_keys )
            
        
        def GetValue( self ):
            """Return ( master service key, sibling service keys, parent service keys )."""
            
            return ( self._master_service_key, self._sibling_service_keys_listbox.GetData(), self._parent_service_keys_listbox.GetData() )
            
        
    
class EditTagDisplayManagerPanel( ClientGUIScrolledPanels.EditPanel ):
    """Edit panel for the TagDisplayManager: per-service tag display filters
    and autocomplete options, one notebook page per tag service (including
    the combined 'all known tags' service)."""
    
    def __init__( self, parent, tag_display_manager: ClientTagsHandling.TagDisplayManager ):
        
        ClientGUIScrolledPanels.EditPanel.__init__( self, parent )
        
        # kept so GetValue can Duplicate() and re-apply the edited settings
        self._original_tag_display_manager = tag_display_manager
        
        self._tag_services = ClientGUICommon.BetterNotebook( self )
        
        min_width = ClientGUIFunctions.ConvertTextToPixelWidth( self._tag_services, 100 )
        
        self._tag_services.setMinimumWidth( min_width )
        
        #
        
        services = list( HG.client_controller.services_manager.GetServices( ( HC.COMBINED_TAG, HC.LOCAL_TAG, HC.TAG_REPOSITORY ) ) )
        
        for service in services:
            
            service_key = service.GetServiceKey()
            name = service.GetName()
            
            page = self._Panel( self._tag_services, self._original_tag_display_manager, service_key )
            
            # the combined service is the most commonly edited, so open on it
            select = service_key == CC.COMBINED_TAG_SERVICE_KEY
            
            self._tag_services.addTab( page, name )
            
            if select: self._tag_services.setCurrentWidget( page )
            
        
        #
        
        vbox = QP.VBoxLayout()
        
        QP.AddToLayout( vbox, self._tag_services, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        self.widget().setLayout( vbox )
        
    
    def GetValue( self ):
        """Return a duplicate of the original TagDisplayManager with the
        display filters and autocomplete options replaced by the UI state."""
        
        tag_display_manager = self._original_tag_display_manager.Duplicate()
        
        tag_display_manager.ClearTagDisplayOptions()
        
        for page in self._tag_services.GetPages():
            
            ( service_key, tag_display_types_to_tag_filters, tag_autocomplete_options ) = page.GetValue()
            
            for ( tag_display_type, tag_filter ) in tag_display_types_to_tag_filters.items():
                
                tag_display_manager.SetTagFilter( tag_display_type, service_key, tag_filter )
                
            
            tag_display_manager.SetTagAutocompleteOptions( tag_autocomplete_options )
            
        
        return tag_display_manager
        
    
    class _Panel( QW.QWidget ):
        """One notebook page: display filters and autocomplete options for one service."""
        
        def __init__( self, parent: QW.QWidget, tag_display_manager: ClientTagsHandling.TagDisplayManager, service_key: bytes ):
            
            QW.QWidget.__init__( self, parent )
            
            single_tag_filter = tag_display_manager.GetTagFilter( ClientTags.TAG_DISPLAY_SINGLE_MEDIA, service_key )
            selection_tag_filter = tag_display_manager.GetTagFilter( ClientTags.TAG_DISPLAY_SELECTION_LIST, service_key )
            
            tag_autocomplete_options = tag_display_manager.GetTagAutocompleteOptions( service_key )
            
            self._service_key = service_key
            
            #
            
            self._display_box = ClientGUICommon.StaticBox( self, 'display' )
            
            message = 'This filters which tags will show on \'single\' file views such as the media viewer and thumbnail banners.'
            
            self._single_tag_filter_button = TagFilterButton( self._display_box, message, single_tag_filter, label_prefix = 'tags shown: ' )
            
            message = 'This filters which tags will show on \'selection\' file views such as the \'selection tags\' list on regular search pages.'
            
            self._selection_tag_filter_button = TagFilterButton( self._display_box, message, selection_tag_filter, label_prefix = 'tags shown: ' )
            
            #
            
            self._tao_box = ClientGUICommon.StaticBox( self, 'autocomplete' )
            
            self._tag_autocomplete_options_panel = EditTagAutocompleteOptionsPanel( self._tao_box, tag_autocomplete_options )
            
            #
            
            rows = []
            
            rows.append( ( 'Tag filter for single file views: ', self._single_tag_filter_button ) )
            rows.append( ( 'Tag filter for multiple file views: ', self._selection_tag_filter_button ) )
            
            gridbox = ClientGUICommon.WrapInGrid( self._display_box, rows )
            
            self._display_box.Add( gridbox, CC.FLAGS_EXPAND_PERPENDICULAR )
            
            #
            
            self._tao_box.Add( self._tag_autocomplete_options_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
            
            #
            
            vbox = QP.VBoxLayout()
            
            if self._service_key == CC.COMBINED_TAG_SERVICE_KEY:
                
                message = 'These options apply to all tag services, or to where the tag domain is "all known tags".'
                message += os.linesep * 2
                message += 'This tag domain is the union of all other services, so it can be more computationally expensive. You most often see it on new search pages.'
                
            else:
                
                message = 'This is just one tag service. You most often search a specific tag service in the manage tags dialog.'
                
            
            st = ClientGUICommon.BetterStaticText( self, message )
            
            st.setWordWrap( True )
            
            QP.AddToLayout( vbox, st, CC.FLAGS_EXPAND_PERPENDICULAR )
            QP.AddToLayout( vbox, self._display_box, CC.FLAGS_EXPAND_PERPENDICULAR )
            QP.AddToLayout( vbox, self._tao_box, CC.FLAGS_EXPAND_PERPENDICULAR )
            vbox.addStretch( 1 )
            
            self.setLayout( vbox )
            
        
        def GetValue( self ):
            """Return ( service key, { display type: tag filter }, autocomplete options )."""
            
            tag_display_types_to_tag_filters = {}
            
            tag_display_types_to_tag_filters[ ClientTags.TAG_DISPLAY_SINGLE_MEDIA ] = self._single_tag_filter_button.GetValue()
            tag_display_types_to_tag_filters[ ClientTags.TAG_DISPLAY_SELECTION_LIST ] = self._selection_tag_filter_button.GetValue()
            
            tag_autocomplete_options = self._tag_autocomplete_options_panel.GetValue()
            
            return ( self._service_key, tag_display_types_to_tag_filters, tag_autocomplete_options )
            
        
    
class EditTagFilterPanel( ClientGUIScrolledPanels.EditPanel ):
TEST_RESULT_DEFAULT = 'Enter a tag here to test if it passes the current filter:'
TEST_RESULT_BLACKLIST_DEFAULT = 'Enter a tag here to test if it passes the blacklist (siblings tested, unnamespaced rules match namespaced tags):'
    def __init__( self, parent, tag_filter, only_show_blacklist = False, namespaces = None, message = None ):
        """Build the tag filter editor.

        :param parent: parent widget
        :param tag_filter: the TagFilter to load into the panel (via SetValue)
        :param only_show_blacklist: if True, hide the whitelist/advanced tabs
            (unless the user reveals them with the 'show other panels' button)
        :param namespaces: optional namespace list used by the simple panels
        :param message: optional explanatory text shown at the top
        """
        
        ClientGUIScrolledPanels.EditPanel.__init__( self, parent )
        
        self._only_show_blacklist = only_show_blacklist
        self._namespaces = namespaces
        
        # common wildcard typos/synonyms, normalised in _CleanTagSliceInput:
        # '*' means 'all unnamespaced', '*:'/'*:*' mean 'all namespaced'
        self._wildcard_replacements = {}
        
        self._wildcard_replacements[ '*' ] = ''
        self._wildcard_replacements[ '*:' ] = ':'
        self._wildcard_replacements[ '*:*' ] = ':'
        
        #
        
        help_button = ClientGUICommon.BetterBitmapButton( self, CC.global_pixmaps().help, self._ShowHelp )
        
        help_hbox = ClientGUICommon.WrapInText( help_button, self, 'help for this panel -->', QG.QColor( 0, 0, 255 ) )
        
        #
        
        self._import_favourite = ClientGUICommon.BetterButton( self, 'import', self._ImportFavourite )
        self._export_favourite = ClientGUICommon.BetterButton( self, 'export', self._ExportFavourite )
        self._load_favourite = ClientGUICommon.BetterButton( self, 'load', self._LoadFavourite )
        self._save_favourite = ClientGUICommon.BetterButton( self, 'save', self._SaveFavourite )
        self._delete_favourite = ClientGUICommon.BetterButton( self, 'delete', self._DeleteFavourite )
        
        #
        
        self._show_all_panels_button = ClientGUICommon.BetterButton( self, 'show other panels', self._ShowAllPanels )
        self._show_all_panels_button.setToolTip( 'This shows the whitelist and advanced panels, in case you want to craft a clever blacklist with \'except\' rules.' )
        
        # only advanced-mode users in blacklist-only dialogs get the reveal button
        show_the_button = self._only_show_blacklist and HG.client_controller.new_options.GetBoolean( 'advanced_mode' )
        
        self._show_all_panels_button.setVisible( show_the_button )
        
        #
        
        self._notebook = ClientGUICommon.BetterNotebook( self )
        
        #
        
        self._advanced_panel = self._InitAdvancedPanel()
        
        self._whitelist_panel = self._InitWhitelistPanel()
        self._blacklist_panel = self._InitBlacklistPanel()
        
        #
        
        if self._only_show_blacklist:
            
            self._whitelist_panel.setVisible( False )
            
            self._notebook.addTab( self._blacklist_panel, 'blacklist' )
            
            self._advanced_panel.setVisible( False )
            
        else:
            
            self._notebook.addTab( self._whitelist_panel, 'whitelist' )
            self._notebook.addTab( self._blacklist_panel, 'blacklist' )
            self._notebook.addTab( self._advanced_panel, 'advanced' )
            
        
        #
        
        self._redundant_st = ClientGUICommon.BetterStaticText( self, '', ellipsize_end = True )
        
        self._current_filter_st = ClientGUICommon.BetterStaticText( self, 'currently keeping: ', ellipsize_end = True )
        
        self._test_result_st = ClientGUICommon.BetterStaticText( self, self.TEST_RESULT_DEFAULT )
        self._test_result_st.setAlignment( QC.Qt.AlignVCenter | QC.Qt.AlignRight )
        
        self._test_result_st.setWordWrap( True )
        
        self._test_input = QW.QPlainTextEdit( self )
        
        #
        
        vbox = QP.VBoxLayout()
        
        QP.AddToLayout( vbox, help_hbox, CC.FLAGS_ON_RIGHT )
        
        if message is not None:
            
            st = ClientGUICommon.BetterStaticText( self, message )
            
            st.setWordWrap( True )
            
            QP.AddToLayout( vbox, st, CC.FLAGS_EXPAND_PERPENDICULAR )
            
        
        hbox = QP.HBoxLayout()
        
        QP.AddToLayout( hbox, self._import_favourite, CC.FLAGS_CENTER_PERPENDICULAR )
        QP.AddToLayout( hbox, self._export_favourite, CC.FLAGS_CENTER_PERPENDICULAR )
        QP.AddToLayout( hbox, self._load_favourite, CC.FLAGS_CENTER_PERPENDICULAR )
        QP.AddToLayout( hbox, self._save_favourite, CC.FLAGS_CENTER_PERPENDICULAR )
        QP.AddToLayout( hbox, self._delete_favourite, CC.FLAGS_CENTER_PERPENDICULAR )
        
        QP.AddToLayout( vbox, hbox, CC.FLAGS_ON_RIGHT )
        
        QP.AddToLayout( vbox, self._show_all_panels_button, CC.FLAGS_ON_RIGHT )
        QP.AddToLayout( vbox, self._notebook, CC.FLAGS_EXPAND_BOTH_WAYS )
        QP.AddToLayout( vbox, self._redundant_st, CC.FLAGS_EXPAND_PERPENDICULAR )
        QP.AddToLayout( vbox, self._current_filter_st, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        test_text_vbox = QP.VBoxLayout()
        
        QP.AddToLayout( test_text_vbox, self._test_result_st, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        hbox = QP.HBoxLayout()
        
        QP.AddToLayout( hbox, test_text_vbox, CC.FLAGS_CENTER_PERPENDICULAR_EXPAND_DEPTH )
        QP.AddToLayout( hbox, self._test_input, CC.FLAGS_CENTER_PERPENDICULAR_EXPAND_DEPTH )
        
        QP.AddToLayout( vbox, hbox, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        self.widget().setLayout( vbox )
        
        #
        
        # keep the status/test readouts live as the rule lists and test input change
        self._advanced_blacklist.listBoxChanged.connect( self._UpdateStatus )
        self._advanced_whitelist.listBoxChanged.connect( self._UpdateStatus )
        
        self._simple_whitelist_global_checkboxes.clicked.connect( self.EventSimpleWhitelistGlobalCheck )
        self._simple_whitelist_namespace_checkboxes.clicked.connect( self.EventSimpleWhitelistNamespaceCheck )
        
        self._simple_blacklist_global_checkboxes.clicked.connect( self.EventSimpleBlacklistGlobalCheck )
        self._simple_blacklist_namespace_checkboxes.clicked.connect( self.EventSimpleBlacklistNamespaceCheck )
        
        self._test_input.textChanged.connect( self._UpdateTest )
        
        self.SetValue( tag_filter )
        
    def _AdvancedAddBlacklist( self, tag_slice ):
        """Toggle a tag slice in the advanced blacklist.

        If the cleaned slice is already a blacklist rule it is removed;
        otherwise it is taken off the whitelist and added to the blacklist,
        with a transient warning if a broader rule already blocks it.
        """
        
        tag_slice = self._CleanTagSliceInput( tag_slice )
        
        if tag_slice in self._advanced_blacklist.GetTagSlices():
            
            self._advanced_blacklist.RemoveTagSlices( ( tag_slice, ) )
            
        else:
            
            self._advanced_whitelist.RemoveTagSlices( ( tag_slice, ) )
            
            # the add still goes ahead; the warning is informational only
            if self._CurrentlyBlocked( tag_slice ):
                
                self._ShowRedundantError( HydrusTags.ConvertTagSliceToString( tag_slice ) + ' is already blocked by a broader rule!' )
                
            
            self._advanced_blacklist.AddTagSlices( ( tag_slice, ) )
            
        
        self._UpdateStatus()
        
def _AdvancedAddBlacklistButton( self ):
tag_slice = self._advanced_blacklist_input.GetValue()
self._AdvancedAddBlacklist( tag_slice )
def _AdvancedAddBlacklistMultiple( self, tag_slices ):
for tag_slice in tag_slices:
self._AdvancedAddBlacklist( tag_slice )
    def _AdvancedAddWhitelist( self, tag_slice ):
        """Toggle a tag slice in the advanced whitelist.

        If the cleaned slice is already a whitelist rule it is removed;
        otherwise it is taken off the blacklist and added to the whitelist,
        with a transient warning if a broader rule already permits it.
        """
        
        tag_slice = self._CleanTagSliceInput( tag_slice )
        
        if tag_slice in self._advanced_whitelist.GetTagSlices():
            
            self._advanced_whitelist.RemoveTagSlices( ( tag_slice, ) )
            
        else:
            
            self._advanced_blacklist.RemoveTagSlices( ( tag_slice, ) )
            
            # if it is still blocked after that, it needs whitelisting explicitly
            if not self._CurrentlyBlocked( tag_slice ) and tag_slice not in ( '', ':' ):
                
                self._ShowRedundantError( HydrusTags.ConvertTagSliceToString( tag_slice ) + ' is already permitted by a broader rule!' )
                
            
            self._advanced_whitelist.AddTagSlices( ( tag_slice, ) )
            
        
        self._UpdateStatus()
        
def _AdvancedAddWhitelistButton( self ):
tag_slice = self._advanced_whitelist_input.GetValue()
self._AdvancedAddWhitelist( tag_slice )
def _AdvancedAddWhitelistMultiple( self, tag_slices ):
for tag_slice in tag_slices:
self._AdvancedAddWhitelist( tag_slice )
def _AdvancedBlacklistEverything( self ):
self._advanced_blacklist.SetTagSlices( [] )
self._advanced_whitelist.RemoveTagSlices( ( '', ':' ) )
self._advanced_blacklist.AddTagSlices( ( '', ':' ) )
self._UpdateStatus()
def _AdvancedDeleteBlacklist( self ):
selected_tag_slices = self._advanced_blacklist.GetSelectedTagSlices()
if len( selected_tag_slices ) > 0:
result = ClientGUIDialogsQuick.GetYesNo( self, 'Remove all selected?' )
if result == QW.QDialog.Accepted:
self._advanced_blacklist.RemoveTagSlices( selected_tag_slices )
self._UpdateStatus()
def _AdvancedDeleteWhitelist( self ):
selected_tag_slices = self._advanced_whitelist.GetSelectedTagSlices()
if len( selected_tag_slices ) > 0:
result = ClientGUIDialogsQuick.GetYesNo( self, 'Remove all selected?' )
if result == QW.QDialog.Accepted:
self._advanced_whitelist.RemoveTagSlices( selected_tag_slices )
self._UpdateStatus()
def _CleanTagSliceInput( self, tag_slice ):
tag_slice = tag_slice.lower().strip()
while '**' in tag_slice:
tag_slice = tag_slice.replace( '**', '*' )
if tag_slice in self._wildcard_replacements:
tag_slice = self._wildcard_replacements[ tag_slice ]
if ':' in tag_slice:
( namespace, subtag ) = HydrusTags.SplitTag( tag_slice )
if subtag == '*':
tag_slice = '{}:'.format( namespace )
return tag_slice
def _CurrentlyBlocked( self, tag_slice ):
if tag_slice in ( '', ':' ):
test_slices = { tag_slice }
elif tag_slice.count( ':' ) == 1 and tag_slice.endswith( ':' ):
test_slices = { ':', tag_slice }
elif ':' in tag_slice:
( ns, st ) = HydrusTags.SplitTag( tag_slice )
test_slices = { ':', ns + ':', tag_slice }
else:
test_slices = { '', tag_slice }
blacklist = set( self._advanced_blacklist.GetTagSlices() )
return not blacklist.isdisjoint( test_slices )
    def _DeleteFavourite( self ):
        """Pop up a menu of saved favourite tag filters; choosing one deletes it after a yes/no confirmation."""
        
        def do_it( name ):
            
            # re-fetch inside the callback--the favourites may have changed since the menu was built
            names_to_tag_filters = HG.client_controller.new_options.GetFavouriteTagFilters()
            
            if name in names_to_tag_filters:
                
                message = 'Delete "{}"?'.format( name )
                
                result = ClientGUIDialogsQuick.GetYesNo( self, message )
                
                if result != QW.QDialog.Accepted:
                    
                    return
                    
                
                del names_to_tag_filters[ name ]
                
                HG.client_controller.new_options.SetFavouriteTagFilters( names_to_tag_filters )
                
            
        
        names_to_tag_filters = HG.client_controller.new_options.GetFavouriteTagFilters()
        
        menu = QW.QMenu()
        
        if len( names_to_tag_filters ) == 0:
            
            ClientGUIMenus.AppendMenuLabel( menu, 'no favourites set!' )
            
        else:
            
            for ( name, tag_filter ) in names_to_tag_filters.items():
                
                ClientGUIMenus.AppendMenuItem( menu, name, 'delete {}'.format( name ), do_it, name )
                
            
        
        CGC.core().PopupMenu( self, menu )
        
    
def _ExportFavourite( self ):
names_to_tag_filters = HG.client_controller.new_options.GetFavouriteTagFilters()
menu = QW.QMenu()
if len( names_to_tag_filters ) == 0:
ClientGUIMenus.AppendMenuLabel( menu, 'no favourites set!' )
else:
for ( name, tag_filter ) in names_to_tag_filters.items():
ClientGUIMenus.AppendMenuItem( menu, name, 'load {}'.format( name ), HG.client_controller.pub, 'clipboard', 'text', tag_filter.DumpToString() )
CGC.core().PopupMenu( self, menu )
def _GetWhiteBlacklistsPossible( self ):
blacklist_tag_slices = self._advanced_blacklist.GetTagSlices()
whitelist_tag_slices = self._advanced_whitelist.GetTagSlices()
blacklist_is_only_simples = set( blacklist_tag_slices ).issubset( { '', ':' } )
nothing_is_whitelisted = len( whitelist_tag_slices ) == 0
whitelist_possible = blacklist_is_only_simples
blacklist_possible = nothing_is_whitelisted
return ( whitelist_possible, blacklist_possible )
    def _ImportFavourite( self ):
        """Import a serialised TagFilter from the clipboard and save it as a named favourite.
        
        On success the imported filter is also loaded into the panel.
        Shows an error dialog and bails on missing clipboard data, an
        unparseable string, or a non-TagFilter object.
        """
        
        try:
            
            raw_text = HG.client_controller.GetClipboardText()
            
        except HydrusExceptions.DataMissing as e:
            
            QW.QMessageBox.critical( self, 'Error', str(e) )
            
            return
            
        
        try:
            
            obj = HydrusSerialisable.CreateFromString( raw_text )
            
        except Exception as e:
            
            QW.QMessageBox.critical( self, 'Error', 'I could not understand what was in the clipboard' )
            
            return
            
        
        if not isinstance( obj, HydrusTags.TagFilter ):
            
            QW.QMessageBox.critical( self, 'Error', 'That object was not a Tag Filter! It seemed to be a "{}".'.format(type(obj)) )
            
            return
            
        
        tag_filter = obj
        
        with ClientGUIDialogs.DialogTextEntry( self, 'Enter a name for the favourite.' ) as dlg:
            
            if dlg.exec() == QW.QDialog.Accepted:
                
                names_to_tag_filters = HG.client_controller.new_options.GetFavouriteTagFilters()
                
                name = dlg.GetValue()
                
                # confirm before clobbering an existing favourite
                if name in names_to_tag_filters:
                    
                    message = '"{}" already exists! Overwrite?'.format( name )
                    
                    result = ClientGUIDialogsQuick.GetYesNo( self, message )
                    
                    if result != QW.QDialog.Accepted:
                        
                        return
                        
                    
                
                names_to_tag_filters[ name ] = tag_filter
                
                HG.client_controller.new_options.SetFavouriteTagFilters( names_to_tag_filters )
                
                self.SetValue( tag_filter )
                
            
        
    
    def _InitAdvancedPanel( self ):
        """Build and return the 'advanced' tab: an explicit blacklist box beside an explicit whitelist box."""
        
        advanced_panel = QW.QWidget( self._notebook )
        
        #
        
        blacklist_panel = ClientGUICommon.StaticBox( advanced_panel, 'exclude these' )
        
        self._advanced_blacklist = ClientGUIListBoxes.ListBoxTagsFilter( blacklist_panel )
        
        self._advanced_blacklist_input = ClientGUIControls.TextAndPasteCtrl( blacklist_panel, self._AdvancedAddBlacklistMultiple, allow_empty_input = True )
        
        add_blacklist_button = ClientGUICommon.BetterButton( blacklist_panel, 'add', self._AdvancedAddBlacklistButton )
        delete_blacklist_button = ClientGUICommon.BetterButton( blacklist_panel, 'delete', self._AdvancedDeleteBlacklist )
        blacklist_everything_button = ClientGUICommon.BetterButton( blacklist_panel, 'block everything', self._AdvancedBlacklistEverything )
        
        #
        
        whitelist_panel = ClientGUICommon.StaticBox( advanced_panel, 'except for these' )
        
        self._advanced_whitelist = ClientGUIListBoxes.ListBoxTagsFilter( whitelist_panel )
        
        self._advanced_whitelist_input = ClientGUIControls.TextAndPasteCtrl( whitelist_panel, self._AdvancedAddWhitelistMultiple, allow_empty_input = True )
        
        # kept as an attribute (unlike the other buttons) so _UpdateStatus can enable/disable it
        self._advanced_add_whitelist_button = ClientGUICommon.BetterButton( whitelist_panel, 'add', self._AdvancedAddWhitelistButton )
        delete_whitelist_button = ClientGUICommon.BetterButton( whitelist_panel, 'delete', self._AdvancedDeleteWhitelist )
        
        #
        
        button_hbox = QP.HBoxLayout()
        
        QP.AddToLayout( button_hbox, self._advanced_blacklist_input, CC.FLAGS_EXPAND_BOTH_WAYS )
        QP.AddToLayout( button_hbox, add_blacklist_button, CC.FLAGS_CENTER_PERPENDICULAR )
        QP.AddToLayout( button_hbox, delete_blacklist_button, CC.FLAGS_CENTER_PERPENDICULAR )
        QP.AddToLayout( button_hbox, blacklist_everything_button, CC.FLAGS_CENTER_PERPENDICULAR )
        
        blacklist_panel.Add( self._advanced_blacklist, CC.FLAGS_EXPAND_BOTH_WAYS )
        blacklist_panel.Add( button_hbox, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        #
        
        button_hbox = QP.HBoxLayout()
        
        QP.AddToLayout( button_hbox, self._advanced_whitelist_input, CC.FLAGS_EXPAND_BOTH_WAYS )
        QP.AddToLayout( button_hbox, self._advanced_add_whitelist_button, CC.FLAGS_CENTER_PERPENDICULAR )
        QP.AddToLayout( button_hbox, delete_whitelist_button, CC.FLAGS_CENTER_PERPENDICULAR )
        
        whitelist_panel.Add( self._advanced_whitelist, CC.FLAGS_EXPAND_BOTH_WAYS )
        whitelist_panel.Add( button_hbox, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        #
        
        hbox = QP.HBoxLayout()
        
        QP.AddToLayout( hbox, blacklist_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
        QP.AddToLayout( hbox, whitelist_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        advanced_panel.setLayout( hbox )
        
        return advanced_panel
        
    
    def _InitBlacklistPanel( self ):
        """Build and return the simple 'blacklist' tab: global/namespace checkboxes beside a tag list and input."""
        
        blacklist_panel = QW.QWidget( self._notebook )
        
        #
        
        self._simple_blacklist_error_st = ClientGUICommon.BetterStaticText( blacklist_panel )
        
        self._simple_blacklist_global_checkboxes = QP.CheckListBox( blacklist_panel )
        
        self._simple_blacklist_global_checkboxes.Append( 'unnamespaced tags', '' )
        self._simple_blacklist_global_checkboxes.Append( 'namespaced tags', ':' )
        
        self._simple_blacklist_namespace_checkboxes = QP.CheckListBox( blacklist_panel )
        
        for namespace in self._namespaces:
            
            # '' is already covered by the 'unnamespaced tags' global checkbox above
            if namespace == '':
                
                continue
                
            
            self._simple_blacklist_namespace_checkboxes.Append( namespace, namespace + ':' )
            
        
        self._simple_blacklist = ClientGUIListBoxes.ListBoxTagsFilter( blacklist_panel )
        
        self._simple_blacklist_input = ClientGUIControls.TextAndPasteCtrl( blacklist_panel, self._SimpleAddBlacklistMultiple, allow_empty_input = True )
        
        #
        
        left_vbox = QP.VBoxLayout()
        
        QP.AddToLayout( left_vbox, self._simple_blacklist_global_checkboxes, CC.FLAGS_EXPAND_PERPENDICULAR )
        left_vbox.addStretch( 1 )
        QP.AddToLayout( left_vbox, self._simple_blacklist_namespace_checkboxes, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        right_vbox = QP.VBoxLayout()
        
        QP.AddToLayout( right_vbox, self._simple_blacklist, CC.FLAGS_EXPAND_BOTH_WAYS )
        QP.AddToLayout( right_vbox, self._simple_blacklist_input, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        main_hbox = QP.HBoxLayout()
        
        QP.AddToLayout( main_hbox, left_vbox, CC.FLAGS_EXPAND_BOTH_WAYS )
        QP.AddToLayout( main_hbox, right_vbox, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        vbox = QP.VBoxLayout()
        
        QP.AddToLayout( vbox, self._simple_blacklist_error_st, CC.FLAGS_EXPAND_PERPENDICULAR )
        QP.AddToLayout( vbox, main_hbox, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        blacklist_panel.setLayout( vbox )
        
        # double-click removals in the list are echoed back into the advanced rules
        self._simple_blacklist.tagsRemoved.connect( self._SimpleBlacklistRemoved )
        
        return blacklist_panel
        
    
    def _InitWhitelistPanel( self ):
        """Build and return the simple 'whitelist' tab: global/namespace checkboxes beside a tag list and input."""
        
        whitelist_panel = QW.QWidget( self._notebook )
        
        #
        
        self._simple_whitelist_error_st = ClientGUICommon.BetterStaticText( whitelist_panel )
        
        self._simple_whitelist_global_checkboxes = QP.CheckListBox( whitelist_panel )
        
        self._simple_whitelist_global_checkboxes.Append( 'unnamespaced tags', '' )
        self._simple_whitelist_global_checkboxes.Append( 'namespaced tags', ':' )
        
        self._simple_whitelist_namespace_checkboxes = QP.CheckListBox( whitelist_panel )
        
        for namespace in self._namespaces:
            
            # '' is already covered by the 'unnamespaced tags' global checkbox above
            if namespace == '':
                
                continue
                
            
            self._simple_whitelist_namespace_checkboxes.Append( namespace, namespace + ':' )
            
        
        self._simple_whitelist = ClientGUIListBoxes.ListBoxTagsFilter( whitelist_panel )
        
        self._simple_whitelist_input = ClientGUIControls.TextAndPasteCtrl( whitelist_panel, self._SimpleAddWhitelistMultiple, allow_empty_input = True )
        
        #
        
        left_vbox = QP.VBoxLayout()
        
        QP.AddToLayout( left_vbox, self._simple_whitelist_global_checkboxes, CC.FLAGS_EXPAND_PERPENDICULAR )
        left_vbox.addStretch( 1 )
        QP.AddToLayout( left_vbox, self._simple_whitelist_namespace_checkboxes, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        right_vbox = QP.VBoxLayout()
        
        QP.AddToLayout( right_vbox, self._simple_whitelist, CC.FLAGS_EXPAND_BOTH_WAYS )
        QP.AddToLayout( right_vbox, self._simple_whitelist_input, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        main_hbox = QP.HBoxLayout()
        
        QP.AddToLayout( main_hbox, left_vbox, CC.FLAGS_EXPAND_BOTH_WAYS )
        QP.AddToLayout( main_hbox, right_vbox, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        vbox = QP.VBoxLayout()
        
        QP.AddToLayout( vbox, self._simple_whitelist_error_st, CC.FLAGS_EXPAND_PERPENDICULAR )
        QP.AddToLayout( vbox, main_hbox, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        whitelist_panel.setLayout( vbox )
        
        # double-click removals in the list are echoed back into the advanced rules
        self._simple_whitelist.tagsRemoved.connect( self._SimpleWhitelistRemoved )
        
        return whitelist_panel
        
    
def _LoadFavourite( self ):
names_to_tag_filters = HG.client_controller.new_options.GetFavouriteTagFilters()
menu = QW.QMenu()
if len( names_to_tag_filters ) == 0:
ClientGUIMenus.AppendMenuLabel( menu, 'no favourites set!' )
else:
for ( name, tag_filter ) in names_to_tag_filters.items():
ClientGUIMenus.AppendMenuItem( menu, name, 'load {}'.format( name ), self.SetValue, tag_filter )
CGC.core().PopupMenu( self, menu )
    def _SaveFavourite( self ):
        """Save the panel's current filter as a named favourite, prompting for a name and confirming overwrites."""
        
        with ClientGUIDialogs.DialogTextEntry( self, 'Enter a name for the favourite.' ) as dlg:
            
            if dlg.exec() == QW.QDialog.Accepted:
                
                names_to_tag_filters = HG.client_controller.new_options.GetFavouriteTagFilters()
                
                name = dlg.GetValue()
                tag_filter = self.GetValue()
                
                # confirm before clobbering an existing favourite
                if name in names_to_tag_filters:
                    
                    message = '"{}" already exists! Overwrite?'.format( name )
                    
                    result = ClientGUIDialogsQuick.GetYesNo( self, message )
                    
                    if result != QW.QDialog.Accepted:
                        
                        return
                        
                    
                
                names_to_tag_filters[ name ] = tag_filter
                
                HG.client_controller.new_options.SetFavouriteTagFilters( names_to_tag_filters )
                
            
        
    
def _ShowAllPanels( self ):
self._whitelist_panel.setVisible( True )
self._advanced_panel.setVisible( True )
self._notebook.addTab( self._whitelist_panel, 'whitelist' )
self._notebook.addTab( self._advanced_panel, 'advanced' )
self._show_all_panels_button.setVisible( False )
def _ShowHelp( self ):
help = 'Here you can set rules to filter tags for one purpose or another. The default is typically to permit all tags. Check the current filter summary text at the bottom-left of the panel to ensure you have your logic correct.'
help += os.linesep * 2
help += 'The different tabs are multiple ways of looking at the filter--sometimes it is more useful to think about a filter as a whitelist (where only the listed contents are kept) or a blacklist (where everything _except_ the listed contents are kept), and there is also an advanced tab that lets you do a more complicated combination of the two.'
help += os.linesep * 2
help += 'As well as selecting broader categories of tags with the checkboxes, you can type or paste the individual tags directly--just hit enter to add each one--and double-click an existing entry in a list to remove it.'
help += os.linesep * 2
help += 'If you wish to manually type a special tag, use these shorthands:'
help += os.linesep * 2
help += '"namespace:" - all instances of that namespace'
help += os.linesep
help += '":" - all namespaced tags'
help += os.linesep
help += '"" (i.e. an empty string) - all unnamespaced tags'
QW.QMessageBox.information( self, 'Information', help )
def _ShowRedundantError( self, text ):
self._redundant_st.setText( text )
HG.client_controller.CallLaterQtSafe( self._redundant_st, 2, self._redundant_st.setText, '' )
def _SimpleAddBlacklistMultiple( self, tag_slices ):
for tag_slice in tag_slices:
self._AdvancedAddBlacklist( tag_slice )
    def _SimpleAddWhitelistMultiple( self, tag_slices ):
        """Simple-page whitelist input.
        
        Entering a universal slice ( '' or ':' ) that is already on the simple
        whitelist routes through the blacklist toggle instead; everything else
        is a plain whitelist toggle.
        """
        
        for tag_slice in tag_slices:
            
            if tag_slice in ( '', ':' ) and tag_slice in self._simple_whitelist.GetTagSlices():
                
                self._AdvancedAddBlacklist( tag_slice )
                
            else:
                
                self._AdvancedAddWhitelist( tag_slice )
                
            
        
    
def _SimpleBlacklistRemoved( self, tag_slices ):
for tag_slice in tag_slices:
self._AdvancedAddBlacklist( tag_slice )
    def _SimpleBlacklistReset( self ):
        # intentionally a no-op: the simple blacklist view is rebuilt from the advanced lists in _UpdateStatus
        
        pass
        
    
    def _SimpleWhitelistRemoved( self, tag_slices ):
        """Rows removed from the simple whitelist list.
        
        The universal slices ( '' and ':' ) are toggled through the blacklist;
        all other slices are toggled through the whitelist.
        """
        
        tag_slices = set( tag_slices )
        
        for simple in ( '', ':' ):
            
            if simple in tag_slices:
                
                tag_slices.discard( simple )
                
                self._AdvancedAddBlacklist( simple )
                
            
        
        for tag_slice in tag_slices:
            
            self._AdvancedAddWhitelist( tag_slice )
            
        
    
    def _SimpleWhitelistReset( self ):
        # intentionally a no-op: the simple whitelist view is rebuilt from the advanced lists in _UpdateStatus
        
        pass
        
    
    def _UpdateStatus( self ):
        """Rebuild the simple whitelist/blacklist pages from the advanced lists and refresh the summary and test UI.
        
        The advanced blacklist/whitelist are the source of truth; each simple
        page is only enabled when the current rules can be represented in that
        simple form.
        """
        
        ( whitelist_possible, blacklist_possible ) = self._GetWhiteBlacklistsPossible()
        
        whitelist_tag_slices = self._advanced_whitelist.GetTagSlices()
        blacklist_tag_slices = self._advanced_blacklist.GetTagSlices()
        
        if whitelist_possible:
            
            self._simple_whitelist_error_st.clear()
            
            self._simple_whitelist.setEnabled( True )
            self._simple_whitelist_global_checkboxes.setEnabled( True )
            self._simple_whitelist_input.setEnabled( True )
            
            whitelist_tag_slices = set( whitelist_tag_slices )
            
            # a universal slice that is not blocked shows as implicitly whitelisted
            if not self._CurrentlyBlocked( '' ):
                
                whitelist_tag_slices.add( '' )
                
            
            if not self._CurrentlyBlocked( ':' ):
                
                whitelist_tag_slices.add( ':' )
                
                # all namespaced tags pass anyway, so individual namespace checkboxes are moot
                self._simple_whitelist_namespace_checkboxes.setEnabled( False )
                
            else:
                
                self._simple_whitelist_namespace_checkboxes.setEnabled( True )
                
            
            self._simple_whitelist.SetTagSlices( whitelist_tag_slices )
            
            for index in range( self._simple_whitelist_global_checkboxes.count() ):
                
                check = QP.GetClientData( self._simple_whitelist_global_checkboxes, index ) in whitelist_tag_slices
                
                self._simple_whitelist_global_checkboxes.Check( index, check )
                
            
            for index in range( self._simple_whitelist_namespace_checkboxes.count() ):
                
                check = QP.GetClientData( self._simple_whitelist_namespace_checkboxes, index ) in whitelist_tag_slices
                
                self._simple_whitelist_namespace_checkboxes.Check( index, check )
                
            
        else:
            
            self._simple_whitelist_error_st.setText( 'The filter is currently more complicated than a simple whitelist, so cannot be shown here.' )
            
            self._simple_whitelist.setEnabled( False )
            self._simple_whitelist_global_checkboxes.setEnabled( False )
            self._simple_whitelist_namespace_checkboxes.setEnabled( False )
            self._simple_whitelist_input.setEnabled( False )
            
            # NOTE(review): '' iterates to no slices here, i.e. a clear; [] would be clearer -- confirm SetTagSlices just iterates
            self._simple_whitelist.SetTagSlices( '' )
            
            for index in range( self._simple_whitelist_global_checkboxes.count() ):
                
                self._simple_whitelist_global_checkboxes.Check( index, False )
                
            
            for index in range( self._simple_whitelist_namespace_checkboxes.count() ):
                
                self._simple_whitelist_namespace_checkboxes.Check( index, False )
                
            
        
        #
        
        whitelist_tag_slices = self._advanced_whitelist.GetTagSlices()
        blacklist_tag_slices = self._advanced_blacklist.GetTagSlices()
        
        if blacklist_possible:
            
            self._simple_blacklist_error_st.clear()
            
            self._simple_blacklist.setEnabled( True )
            self._simple_blacklist_global_checkboxes.setEnabled( True )
            self._simple_blacklist_input.setEnabled( True )
            
            if self._CurrentlyBlocked( ':' ):
                
                # all namespaced tags are already blocked, so individual namespace checkboxes are moot
                self._simple_blacklist_namespace_checkboxes.setEnabled( False )
                
            else:
                
                self._simple_blacklist_namespace_checkboxes.setEnabled( True )
                
            
            self._simple_blacklist.SetTagSlices( blacklist_tag_slices )
            
            for index in range( self._simple_blacklist_global_checkboxes.count() ):
                
                check = QP.GetClientData( self._simple_blacklist_global_checkboxes, index ) in blacklist_tag_slices
                
                self._simple_blacklist_global_checkboxes.Check( index, check )
                
            
            for index in range( self._simple_blacklist_namespace_checkboxes.count() ):
                
                check = QP.GetClientData( self._simple_blacklist_namespace_checkboxes, index ) in blacklist_tag_slices
                
                self._simple_blacklist_namespace_checkboxes.Check( index, check )
                
            
        else:
            
            self._simple_blacklist_error_st.setText( 'The filter is currently more complicated than a simple blacklist, so cannot be shown here.' )
            
            self._simple_blacklist.setEnabled( False )
            self._simple_blacklist_global_checkboxes.setEnabled( False )
            self._simple_blacklist_namespace_checkboxes.setEnabled( False )
            self._simple_blacklist_input.setEnabled( False )
            
            self._simple_blacklist.SetTagSlices( '' )
            
            for index in range( self._simple_blacklist_global_checkboxes.count() ):
                
                self._simple_blacklist_global_checkboxes.Check( index, False )
                
            
            for index in range( self._simple_blacklist_namespace_checkboxes.count() ):
                
                self._simple_blacklist_namespace_checkboxes.Check( index, False )
                
            
        
        #
        
        whitelist_tag_slices = self._advanced_whitelist.GetTagSlices()
        blacklist_tag_slices = self._advanced_blacklist.GetTagSlices()
        
        # whitelist entries only mean anything when there is a blacklist to carve exceptions out of
        if len( blacklist_tag_slices ) == 0:
            
            self._advanced_whitelist_input.setEnabled( False )
            self._advanced_add_whitelist_button.setEnabled( False )
            
        else:
            
            self._advanced_whitelist_input.setEnabled( True )
            self._advanced_add_whitelist_button.setEnabled( True )
            
        
        #
        
        tag_filter = self.GetValue()
        
        if self._only_show_blacklist:
            
            pretty_tag_filter = tag_filter.ToBlacklistString()
            
        else:
            
            pretty_tag_filter = 'currently keeping: {}'.format( tag_filter.ToPermittedString() )
            
        
        self._current_filter_st.setText( pretty_tag_filter )
        
        self._UpdateTest()
        
    
    def _UpdateTest( self ):
        """Run the tags typed in the test box through the current filter and display pass/blocked results.
        
        The filtering runs in an async Qt job; in blacklist-only mode it first
        does a sibling lookup so a tag only passes if all its siblings pass.
        """
        
        test_input = self._test_input.toPlainText()
        
        if test_input == '':
            
            # no input--show the default explanatory text for the current mode
            if self._only_show_blacklist:
                
                test_result_text = self.TEST_RESULT_BLACKLIST_DEFAULT
                
            else:
                
                test_result_text = self.TEST_RESULT_DEFAULT
                
            
            self._test_result_st.setObjectName( '' )
            
            self._test_result_st.setText( test_result_text )
            self._test_result_st.style().polish( self._test_result_st )
            
        else:
            
            test_tags = HydrusText.DeserialiseNewlinedTexts( test_input )
            
            test_tags = HydrusTags.CleanTags( test_tags )
            
            tag_filter = self.GetValue()
            
            # clear styling while the async work runs
            self._test_result_st.setObjectName( '' )
            
            self._test_result_st.clear()
            self._test_result_st.style().polish( self._test_result_st )
            
            if self._only_show_blacklist:
                
                def work_callable():
                    
                    results = []
                    
                    tags_to_siblings = HG.client_controller.Read( 'tag_siblings_lookup', CC.COMBINED_TAG_SERVICE_KEY, test_tags )
                    
                    # a tag passes only if every sibling passes the filter
                    for ( test_tag, siblings ) in tags_to_siblings.items():
                        
                        results.append( False not in ( tag_filter.TagOK( sibling_tag, apply_unnamespaced_rules_to_namespaced_tags = True ) for sibling_tag in siblings ) )
                        
                    
                    return results
                    
                
            else:
                
                def work_callable():
                    
                    results = [ tag_filter.TagOK( test_tag ) for test_tag in test_tags ]
                    
                    return results
                    
                
            
            def publish_callable( results ):
                
                all_good = False not in results
                all_bad = True not in results
                
                if len( results ) == 1:
                    
                    if all_good:
                        
                        test_result_text = 'tag passes!'
                        
                        self._test_result_st.setObjectName( 'HydrusValid' )
                        
                    else:
                        
                        test_result_text = 'tag blocked!'
                        
                        self._test_result_st.setObjectName( 'HydrusInvalid' )
                        
                    
                else:
                    
                    if all_good:
                        
                        test_result_text = 'all pass!'
                        
                        self._test_result_st.setObjectName( 'HydrusValid' )
                        
                    elif all_bad:
                        
                        test_result_text = 'all blocked!'
                        
                        self._test_result_st.setObjectName( 'HydrusInvalid' )
                        
                    else:
                        
                        # mixed results--count passes vs blocks
                        c = collections.Counter()
                        
                        c.update( results )
                        
                        test_result_text = '{} pass, {} blocked!'.format( HydrusData.ToHumanInt( c[ True ] ), HydrusData.ToHumanInt( c[ False ] ) )
                        
                        self._test_result_st.setObjectName( 'HydrusInvalid' )
                        
                    
                
                self._test_result_st.setText( test_result_text )
                self._test_result_st.style().polish( self._test_result_st )
                
            
            async_job = ClientGUIAsync.AsyncQtJob( self, work_callable, publish_callable )
            
            async_job.start()
            
        
    
def EventSimpleBlacklistNamespaceCheck( self, index ):
index = index.row()
if index != -1:
tag_slice = QP.GetClientData( self._simple_blacklist_namespace_checkboxes, index )
self._AdvancedAddBlacklist( tag_slice )
def EventSimpleBlacklistGlobalCheck( self, index ):
index = index.row()
if index != -1:
tag_slice = QP.GetClientData( self._simple_blacklist_global_checkboxes, index )
self._AdvancedAddBlacklist( tag_slice )
def EventSimpleWhitelistNamespaceCheck( self, index ):
index = index.row()
if index != -1:
tag_slice = QP.GetClientData( self._simple_whitelist_namespace_checkboxes, index )
self._AdvancedAddWhitelist( tag_slice )
    def EventSimpleWhitelistGlobalCheck( self, index ):
        """A global checkbox ( unnamespaced/namespaced ) was clicked on the simple whitelist page.
        
        A universal slice that is already on the simple whitelist routes
        through the blacklist toggle; anything else is a whitelist toggle.
        """
        
        index = index.row()
        
        if index != -1:
            
            tag_slice = QP.GetClientData( self._simple_whitelist_global_checkboxes, index )
            
            if tag_slice in ( '', ':' ) and tag_slice in self._simple_whitelist.GetTagSlices():
                
                self._AdvancedAddBlacklist( tag_slice )
                
            else:
                
                self._AdvancedAddWhitelist( tag_slice )
                
            
        
    
def GetValue( self ):
tag_filter = HydrusTags.TagFilter()
for tag_slice in self._advanced_blacklist.GetTagSlices():
tag_filter.SetRule( tag_slice, HC.FILTER_BLACKLIST )
for tag_slice in self._advanced_whitelist.GetTagSlices():
tag_filter.SetRule( tag_slice, HC.FILTER_WHITELIST )
return tag_filter
    def SetValue( self, tag_filter: HydrusTags.TagFilter ):
        """Load the given tag filter into the panel and select the simplest tab that can represent it."""
        
        blacklist_tag_slices = [ tag_slice for ( tag_slice, rule ) in tag_filter.GetTagSlicesToRules().items() if rule == HC.FILTER_BLACKLIST ]
        whitelist_tag_slices = [ tag_slice for ( tag_slice, rule ) in tag_filter.GetTagSlicesToRules().items() if rule == HC.FILTER_WHITELIST ]
        
        self._advanced_blacklist.SetTagSlices( blacklist_tag_slices )
        self._advanced_whitelist.SetTagSlices( whitelist_tag_slices )
        
        ( whitelist_possible, blacklist_possible ) = self._GetWhiteBlacklistsPossible()
        
        # select the first page that can show this filter; the advanced page always can
        selection_tests = []
        
        if self._only_show_blacklist:
            
            selection_tests.append( ( blacklist_possible, self._blacklist_panel ) )
            
        else:
            
            selection_tests.append( ( whitelist_possible, self._whitelist_panel ) )
            selection_tests.append( ( blacklist_possible, self._blacklist_panel ) )
            selection_tests.append( ( True, self._advanced_panel ) )
            
        
        for ( test, page ) in selection_tests:
            
            if test:
                
                self._notebook.SelectPage( page )
                
                break
                
            
        
        self._UpdateStatus()
        
    
class ManageTagsPanel( ClientGUIScrolledPanels.ManagePanel ):
    def __init__( self, parent, file_service_key, media, immediate_commit = False, canvas_key = None ):
        """Manage-tags panel: one editing page per real tag service, in a notebook.
        
        The media are flattened and duplicated locally so edits do not touch
        the caller's objects until committed. If canvas_key is set, the panel
        subscribes to the media viewer and follows its current media.
        
        :param file_service_key: file domain used by the pages' tag inputs
        :param media: media whose tags are being managed
        :param immediate_commit: if True, pages write changes as they happen
        :param canvas_key: optional key of a media-viewer canvas to follow
        """
        
        ClientGUIScrolledPanels.ManagePanel.__init__( self, parent )
        
        self._file_service_key = file_service_key
        
        self._immediate_commit = immediate_commit
        self._canvas_key = canvas_key
        
        media = ClientMedia.FlattenMedia( media )
        
        self._current_media = [ m.Duplicate() for m in media ]
        
        self._hashes = set()
        
        for m in self._current_media:
            
            self._hashes.update( m.GetHashes() )
            
        
        self._tag_repositories = ClientGUICommon.BetterNotebook( self )
        
        #
        
        services = HG.client_controller.services_manager.GetServices( HC.REAL_TAG_SERVICES )
        
        default_tag_repository_key = HC.options[ 'default_tag_repository' ]
        
        for service in services:
            
            service_key = service.GetServiceKey()
            name = service.GetName()
            
            page = self._Panel( self._tag_repositories, self._file_service_key, service.GetServiceKey(), self._current_media, self._immediate_commit, canvas_key = self._canvas_key )
            
            # page navigation and canvas browsing requests are forwarded up to this panel
            page._add_tag_box.selectUp.connect( self.EventSelectUp )
            page._add_tag_box.selectDown.connect( self.EventSelectDown )
            page._add_tag_box.showPrevious.connect( self.EventShowPrevious )
            page._add_tag_box.showNext.connect( self.EventShowNext )
            
            page.okSignal.connect( self.okSignal )
            
            select = service_key == default_tag_repository_key
            
            self._tag_repositories.addTab( page, name )
            if select: self._tag_repositories.setCurrentIndex( self._tag_repositories.count() - 1 )
            
        
        #
        
        vbox = QP.VBoxLayout()
        
        QP.AddToLayout( vbox, self._tag_repositories, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        self.widget().setLayout( vbox )
        
        if self._canvas_key is not None:
            
            HG.client_controller.sub( self, 'CanvasHasNewMedia', 'canvas_new_display_media' )
            
        
        self._my_shortcut_handler = ClientGUIShortcuts.ShortcutsHandler( self, [ 'global', 'media', 'main_gui' ] )
        
        self._tag_repositories.currentChanged.connect( self.EventServiceChanged )
        
        self._SetSearchFocus()
        
    
def _GetGroupsOfServiceKeysToContentUpdates( self ):
groups_of_service_keys_to_content_updates = []
for page in self._tag_repositories.GetPages():
( service_key, groups_of_content_updates ) = page.GetGroupsOfContentUpdates()
for content_updates in groups_of_content_updates:
if len( content_updates ) > 0:
service_keys_to_content_updates = { service_key : content_updates }
groups_of_service_keys_to_content_updates.append( service_keys_to_content_updates )
return groups_of_service_keys_to_content_updates
def _SetSearchFocus( self ):
page = self._tag_repositories.currentWidget()
if page is not None:
page.SetTagBoxFocus()
def CanvasHasNewMedia( self, canvas_key, new_media_singleton ):
if canvas_key == self._canvas_key:
if new_media_singleton is not None:
self._current_media = ( new_media_singleton.Duplicate(), )
for page in self._tag_repositories.GetPages():
page.SetMedia( self._current_media )
def CleanBeforeDestroy( self ):
ClientGUIScrolledPanels.ManagePanel.CleanBeforeDestroy( self )
for page in self._tag_repositories.GetPages():
page.CleanBeforeDestroy()
def CommitChanges( self ):
groups_of_service_keys_to_content_updates = self._GetGroupsOfServiceKeysToContentUpdates()
for service_keys_to_content_updates in groups_of_service_keys_to_content_updates:
HG.client_controller.WriteSynchronous( 'content_updates', service_keys_to_content_updates )
    def EventSelectDown( self ):
        """Move to the service page to the right and refocus its tag input."""
        
        self._tag_repositories.SelectRight()
        
        self._SetSearchFocus()
        
    
    def EventSelectUp( self ):
        """Move to the service page to the left and refocus its tag input."""
        
        self._tag_repositories.SelectLeft()
        
        self._SetSearchFocus()
        
    
    def EventShowNext( self ):
        """Ask the attached canvas (if any) to show the next media."""
        
        if self._canvas_key is not None:
            
            HG.client_controller.pub( 'canvas_show_next', self._canvas_key )
            
        
    
    def EventShowPrevious( self ):
        """Ask the attached canvas (if any) to show the previous media."""
        
        if self._canvas_key is not None:
            
            HG.client_controller.pub( 'canvas_show_previous', self._canvas_key )
            
        
    
    def EventServiceChanged( self, index ):
        """The selected service tab changed: move keyboard focus to the new page's tag input."""
        
        if not self or not QP.isValid( self ): # actually did get a runtime error here, on some Linux WM dialog shutdown
            
            return
            
        
        # ignore currentChanged signals from anything but our own notebook
        if self.sender() != self._tag_repositories:
            
            return
            
        
        page = self._tag_repositories.currentWidget()
        
        if page is not None:
            
            # deferred so the focus call lands after Qt finishes the tab switch -- TODO confirm
            HG.client_controller.CallAfterQtSafe( page, page.SetTagBoxFocus )
            
        
    
    def ProcessApplicationCommand( self, command: CAC.ApplicationCommand ):
        """Handle a shortcut command aimed at this panel; return True if it was processed."""
        
        command_processed = True
        
        data = command.GetData()
        
        if command.IsSimpleCommand():
            
            action = data
            
            if action == CAC.SIMPLE_MANAGE_FILE_TAGS:
                
                # the manage-tags shortcut OKs the parent dialog
                self._OKParent()
                
            elif action == CAC.SIMPLE_FOCUS_MEDIA_VIEWER:
                
                tlws = ClientGUIFunctions.GetTLWParents( self )
                
                # local import -- presumably avoids a circular module dependency; TODO confirm
                from hydrus.client.gui import ClientGUICanvasFrame
                
                # only processed if we actually find a canvas frame among our parents
                command_processed = False
                
                for tlw in tlws:
                    
                    if isinstance( tlw, ClientGUICanvasFrame.CanvasFrame ):
                        
                        tlw.TakeFocusForUser()
                        
                        command_processed = True
                        
                        break
                        
                    
                
            elif action == CAC.SIMPLE_SET_SEARCH_FOCUS:
                
                self._SetSearchFocus()
                
            else:
                
                command_processed = False
                
            
        else:
            
            command_processed = False
            
        
        return command_processed
        
    
def UserIsOKToCancel( self ):
groups_of_service_keys_to_content_updates = self._GetGroupsOfServiceKeysToContentUpdates()
if len( groups_of_service_keys_to_content_updates ) > 0:
message = 'Are you sure you want to cancel? You have uncommitted changes that will be lost.'
result = ClientGUIDialogsQuick.GetYesNo( self, message )
if result != QW.QDialog.Accepted:
return False
return True
class _Panel( QW.QWidget ):
okSignal = QC.Signal()
        def __init__( self, parent, file_service_key, tag_service_key, media, immediate_commit, canvas_key = None ):
            """One tag service's editing page: tag list with sorter, cog menu, suggestion panel, and autocomplete input."""
            
            QW.QWidget.__init__( self, parent )
            
            self._file_service_key = file_service_key
            self._tag_service_key = tag_service_key
            
            self._immediate_commit = immediate_commit
            self._canvas_key = canvas_key
            
            self._groups_of_content_updates = []
            
            self._service = HG.client_controller.services_manager.GetService( self._tag_service_key )
            
            self._i_am_local_tag_service = self._service.GetServiceType() == HC.LOCAL_TAG
            
            self._tags_box_sorter = ClientGUIListBoxes.StaticBoxSorterForListBoxTags( self, 'tags', show_siblings_sort = True )
            
            self._tags_box = ClientGUIListBoxes.ListBoxTagsMediaTagsDialog( self._tags_box_sorter, self.EnterTags, self.RemoveTags )
            
            self._tags_box_sorter.SetTagsBox( self._tags_box )
            
            #
            
            self._new_options = HG.client_controller.new_options
            
            # remote services petition for removal rather than removing outright
            if self._i_am_local_tag_service:
                
                text = 'remove all/selected tags'
                
            else:
                
                text = 'petition to remove all/selected tags'
                
            
            self._remove_tags = ClientGUICommon.BetterButton( self._tags_box_sorter, text, self._RemoveTagsButton )
            
            self._copy_button = ClientGUICommon.BetterBitmapButton( self._tags_box_sorter, CC.global_pixmaps().copy, self._Copy )
            self._copy_button.setToolTip( 'Copy selected tags to the clipboard. If none are selected, copies all.' )
            
            self._paste_button = ClientGUICommon.BetterBitmapButton( self._tags_box_sorter, CC.global_pixmaps().paste, self._Paste )
            self._paste_button.setToolTip( 'Paste newline-separated tags from the clipboard into here.' )
            
            self._show_deleted = False
            
            # build the cog-button menu: checkable options followed by tools
            menu_items = []
            
            check_manager = ClientGUICommon.CheckboxManagerOptions( 'allow_remove_on_manage_tags_input' )
            
            menu_items.append( ( 'check', 'allow remove/petition result on tag input for already existing tag', 'If checked, inputting a tag that already exists will try to remove it.', check_manager ) )
            
            check_manager = ClientGUICommon.CheckboxManagerOptions( 'yes_no_on_remove_on_manage_tags' )
            
            menu_items.append( ( 'check', 'confirm remove/petition tags on explicit delete actions', 'If checked, clicking the remove/petition tags button (or hitting the deleted key on the list) will first confirm the action with a yes/no dialog.', check_manager ) )
            
            check_manager = ClientGUICommon.CheckboxManagerCalls( self._FlipShowDeleted, lambda: self._show_deleted )
            
            menu_items.append( ( 'check', 'show deleted', 'Show deleted tags, if any.', check_manager ) )
            
            menu_items.append( ( 'separator', 0, 0, 0 ) )
            
            menu_items.append( ( 'normal', 'migrate tags for these files', 'Migrate the tags for the files used to launch this manage tags panel.', self._MigrateTags ) )
            
            # moderators on remote services get an extra account-modification tool
            if not self._i_am_local_tag_service and self._service.HasPermission( HC.CONTENT_TYPE_ACCOUNTS, HC.PERMISSION_ACTION_MODERATE ):
                
                menu_items.append( ( 'separator', 0, 0, 0 ) )
                
                menu_items.append( ( 'normal', 'modify users who added the selected tags', 'Modify the users who added the selected tags.', self._ModifyMappers ) )
                
            
            self._cog_button = ClientGUIMenuButton.MenuBitmapButton( self._tags_box_sorter, CC.global_pixmaps().cog, menu_items )
            
            #
            
            self._add_tag_box = ClientGUIACDropdown.AutoCompleteDropdownTagsWrite( self, self.AddTags, self._file_service_key, self._tag_service_key, null_entry_callable = self.OK )
            
            self._tags_box.SetTagServiceKey( self._tag_service_key )
            
            self._suggested_tags = ClientGUITagSuggestions.SuggestedTagsPanel( self, self._tag_service_key, media, self.AddTags )
            
            self.SetMedia( media )
            
            button_hbox = QP.HBoxLayout()
            
            QP.AddToLayout( button_hbox, self._remove_tags, CC.FLAGS_CENTER_PERPENDICULAR )
            QP.AddToLayout( button_hbox, self._copy_button, CC.FLAGS_CENTER_PERPENDICULAR )
            QP.AddToLayout( button_hbox, self._paste_button, CC.FLAGS_CENTER_PERPENDICULAR )
            QP.AddToLayout( button_hbox, self._cog_button, CC.FLAGS_CENTER )
            
            self._tags_box_sorter.Add( button_hbox, CC.FLAGS_ON_RIGHT )
            
            vbox = QP.VBoxLayout()
            
            QP.AddToLayout( vbox, self._tags_box_sorter, CC.FLAGS_EXPAND_BOTH_WAYS )
            QP.AddToLayout( vbox, self._add_tag_box )
            
            #
            
            hbox = QP.HBoxLayout()
            
            QP.AddToLayout( hbox, self._suggested_tags, CC.FLAGS_EXPAND_BOTH_WAYS_POLITE )
            QP.AddToLayout( hbox, vbox, CC.FLAGS_EXPAND_SIZER_BOTH_WAYS )
            
            #
            
            self._my_shortcut_handler = ClientGUIShortcuts.ShortcutsHandler( self, [ 'global', 'main_gui' ] )
            
            self.setLayout( hbox )
            
            if self._immediate_commit:
                
                HG.client_controller.sub( self, 'ProcessContentUpdates', 'content_updates_gui' )
                
            
            self._suggested_tags.mouseActivationOccurred.connect( self.SetTagBoxFocus )
            
        
    def _EnterTags( self, tags, only_add = False, only_remove = False, forced_reason = None ):
        """Work out what entering the given tags means for the current media and apply it.
        
        For each tag, determines which actions are possible (add/delete on a
        local tag service; pend/petition/rescind on a repository), asks the
        user to pick one when several apply, optionally collects a petition
        reason, and then builds the content updates -- committing immediately
        or queueing them depending on self._immediate_commit.
        
        tags: iterable of tag strings; cleaned here.
        only_add: suppress delete/petition/rescind-pend choices.
        only_remove: suppress add/pend/rescind-petition choices.
        forced_reason: petition reason to use instead of prompting the user.
        """
        
        tags = HydrusTags.CleanTags( tags )
        
        # moderators get an automatic 'admin' petition reason
        if not self._i_am_local_tag_service and self._service.HasPermission( HC.CONTENT_TYPE_ACCOUNTS, HC.PERMISSION_ACTION_MODERATE ):
            
            forced_reason = 'admin'
            
        
        tags_managers = [ m.GetTagsManager() for m in self._media ]
        
        currents = [ tags_manager.GetCurrent( self._tag_service_key, ClientTags.TAG_DISPLAY_STORAGE ) for tags_manager in tags_managers ]
        pendings = [ tags_manager.GetPending( self._tag_service_key, ClientTags.TAG_DISPLAY_STORAGE ) for tags_manager in tags_managers ]
        petitioneds = [ tags_manager.GetPetitioned( self._tag_service_key, ClientTags.TAG_DISPLAY_STORAGE ) for tags_manager in tags_managers ]
        
        num_files = len( self._media )
        
        # let's figure out what these tags can mean for the media--add, remove, or what?
        
        choices = collections.defaultdict( list )
        
        for tag in tags:
            
            num_current = sum( ( 1 for current in currents if tag in current ) )
            
            if self._i_am_local_tag_service:
                
                if not only_remove:
                    
                    if num_current < num_files:
                        
                        num_non_current = num_files - num_current
                        
                        choices[ HC.CONTENT_UPDATE_ADD ].append( ( tag, num_non_current ) )
                        
                    
                
                if not only_add:
                    
                    if num_current > 0:
                        
                        choices[ HC.CONTENT_UPDATE_DELETE ].append( ( tag, num_current ) )
                        
                    
                
            else:
                
                num_pending = sum( ( 1 for pending in pendings if tag in pending ) )
                num_petitioned = sum( ( 1 for petitioned in petitioneds if tag in petitioned ) )
                
                if not only_remove:
                    
                    if num_current + num_pending < num_files:
                        
                        num_pendable = num_files - ( num_current + num_pending )
                        
                        choices[ HC.CONTENT_UPDATE_PEND ].append( ( tag, num_pendable ) )
                        
                    
                
                if not only_add:
                    
                    if num_current > num_petitioned and not only_add:
                        
                        num_petitionable = num_current - num_petitioned
                        
                        choices[ HC.CONTENT_UPDATE_PETITION ].append( ( tag, num_petitionable ) )
                        
                    
                    if num_pending > 0 and not only_add:
                        
                        choices[ HC.CONTENT_UPDATE_RESCIND_PEND ].append( ( tag, num_pending ) )
                        
                    
                
                if not only_remove:
                    
                    if num_petitioned > 0:
                        
                        choices[ HC.CONTENT_UPDATE_RESCIND_PETITION ].append( ( tag, num_petitioned ) )
                        
                    
                
            
        
        if len( choices ) == 0:
            
            return
            
        
        # now we have options, let's ask the user what they want to do
        
        if len( choices ) == 1:
            
            # only one possible action, no need to ask
            [ ( choice_action, tag_counts ) ] = list( choices.items() )
            
            tags = { tag for ( tag, count ) in tag_counts }
            
        else:
            
            bdc_choices = []
            
            preferred_order = [ HC.CONTENT_UPDATE_ADD, HC.CONTENT_UPDATE_DELETE, HC.CONTENT_UPDATE_PEND, HC.CONTENT_UPDATE_RESCIND_PEND, HC.CONTENT_UPDATE_PETITION, HC.CONTENT_UPDATE_RESCIND_PETITION ]
            
            choice_text_lookup = {}
            
            choice_text_lookup[ HC.CONTENT_UPDATE_ADD ] = 'add'
            choice_text_lookup[ HC.CONTENT_UPDATE_DELETE ] = 'delete'
            choice_text_lookup[ HC.CONTENT_UPDATE_PEND ] = 'pend (add)'
            choice_text_lookup[ HC.CONTENT_UPDATE_PETITION ] = 'petition to remove'
            choice_text_lookup[ HC.CONTENT_UPDATE_RESCIND_PEND ] = 'undo pend'
            choice_text_lookup[ HC.CONTENT_UPDATE_RESCIND_PETITION ] = 'undo petition to remove'
            
            choice_tooltip_lookup = {}
            
            choice_tooltip_lookup[ HC.CONTENT_UPDATE_ADD ] = 'this adds the tags to this local tag service'
            choice_tooltip_lookup[ HC.CONTENT_UPDATE_DELETE ] = 'this deletes the tags from this local tag service'
            choice_tooltip_lookup[ HC.CONTENT_UPDATE_PEND ] = 'this pends the tags to be added to this tag repository when you upload'
            choice_tooltip_lookup[ HC.CONTENT_UPDATE_PETITION ] = 'this petitions the tags for deletion from this tag repository when you upload'
            choice_tooltip_lookup[ HC.CONTENT_UPDATE_RESCIND_PEND ] = 'this rescinds the currently pending tags, so they will not be added'
            choice_tooltip_lookup[ HC.CONTENT_UPDATE_RESCIND_PETITION ] = 'this rescinds the current tag petitions, so they will not be deleted'
            
            for choice_action in preferred_order:
                
                if choice_action not in choices:
                    
                    continue
                    
                
                choice_text_prefix = choice_text_lookup[ choice_action ]
                
                tag_counts = choices[ choice_action ]
                
                choice_tags = { tag for ( tag, count ) in tag_counts }
                
                if len( choice_tags ) == 1:
                    
                    [ ( tag, count ) ] = tag_counts
                    
                    text = '{} "{}" for {} files'.format( choice_text_prefix, HydrusText.ElideText( tag, 64 ), HydrusData.ToHumanInt( count ) )
                    
                else:
                    
                    text = '{} {} tags'.format( choice_text_prefix, HydrusData.ToHumanInt( len( choice_tags ) ) )
                    
                
                data = ( choice_action, choice_tags )
                
                t_c_lines = [ choice_tooltip_lookup[ choice_action ] ]
                
                # cap the tooltip at 25 example rows
                if len( tag_counts ) > 25:
                    
                    t_c = tag_counts[:25]
                    
                else:
                    
                    t_c = tag_counts
                    
                
                t_c_lines.extend( ( '{} - {} files'.format( tag, HydrusData.ToHumanInt( count ) ) for ( tag, count ) in t_c ) )
                
                if len( tag_counts ) > 25:
                    
                    t_c_lines.append( 'and {} others'.format( HydrusData.ToHumanInt( len( tag_counts ) - 25 ) ) )
                    
                
                tooltip = os.linesep.join( t_c_lines )
                
                bdc_choices.append( ( text, data, tooltip ) )
                
            
            try:
                
                if len( tags ) > 1:
                    
                    message = 'The file{} some of those tags, but not all, so there are different things you can do.'.format( 's have' if len( self._media ) > 1 else ' has' )
                    
                else:
                    
                    message = 'Of the {} files being managed, some have that tag, but not all of them do, so there are different things you can do.'.format( HydrusData.ToHumanInt( len( self._media ) ) )
                    
                
                ( choice_action, tags ) = ClientGUIDialogsQuick.SelectFromListButtons( self, 'What would you like to do?', bdc_choices, message = message )
                
            except HydrusExceptions.CancelledException:
                
                return
                
            
        
        reason = None
        
        if choice_action == HC.CONTENT_UPDATE_PETITION:
            
            if forced_reason is None:
                
                # add the easy reason buttons here
                
                if len( tags ) == 1:
                    
                    ( tag, ) = tags
                    
                    tag_text = '"' + tag + '"'
                    
                else:
                    
                    tag_text = 'the ' + HydrusData.ToHumanInt( len( tags ) ) + ' tags'
                    
                
                message = 'Enter a reason for ' + tag_text + ' to be removed. A janitor will review your petition.'
                
                suggestions = []
                
                suggestions.append( 'mangled parse/typo' )
                suggestions.append( 'not applicable' )
                suggestions.append( 'should be namespaced' )
                suggestions.append( 'splitting filename/title/etc... into individual tags' )
                
                with ClientGUIDialogs.DialogTextEntry( self, message, suggestions = suggestions ) as dlg:
                    
                    if dlg.exec() == QW.QDialog.Accepted:
                        
                        reason = dlg.GetValue()
                        
                    else:
                        
                        return
                        
                    
                
            else:
                
                reason = forced_reason
                
            
        
        # we have an action and tags, so let's effect the content updates
        
        content_updates_group = []
        
        recent_tags = set()
        
        medias_and_tags_managers = [ ( m, m.GetTagsManager() ) for m in self._media ]
        medias_and_sets_of_tags = [ ( m, tm.GetCurrent( self._tag_service_key, ClientTags.TAG_DISPLAY_STORAGE ), tm.GetPending( self._tag_service_key, ClientTags.TAG_DISPLAY_STORAGE ), tm.GetPetitioned( self._tag_service_key, ClientTags.TAG_DISPLAY_STORAGE ) ) for ( m, tm ) in medias_and_tags_managers ]
        
        # there is a big CPU hit here as every time you processcontentupdates, the tagsmanagers need to regen caches lmao
        # so if I refetch current tags etc... for every tag loop, we end up getting 16 million tagok calls etc...
        # however, as tags is a set, thus with unique members, let's say for now this is ok, don't need to regen just to consult current
        
        for tag in tags:
            
            # pick only the media the chosen action actually changes
            if choice_action == HC.CONTENT_UPDATE_ADD: media_to_affect = [ m for ( m, mc, mp, mpt ) in medias_and_sets_of_tags if tag not in mc ]
            elif choice_action == HC.CONTENT_UPDATE_DELETE: media_to_affect = [ m for ( m, mc, mp, mpt ) in medias_and_sets_of_tags if tag in mc ]
            elif choice_action == HC.CONTENT_UPDATE_PEND: media_to_affect = [ m for ( m, mc, mp, mpt ) in medias_and_sets_of_tags if tag not in mc and tag not in mp ]
            elif choice_action == HC.CONTENT_UPDATE_PETITION: media_to_affect = [ m for ( m, mc, mp, mpt ) in medias_and_sets_of_tags if tag in mc and tag not in mpt ]
            elif choice_action == HC.CONTENT_UPDATE_RESCIND_PEND: media_to_affect = [ m for ( m, mc, mp, mpt ) in medias_and_sets_of_tags if tag in mp ]
            elif choice_action == HC.CONTENT_UPDATE_RESCIND_PETITION: media_to_affect = [ m for ( m, mc, mp, mpt ) in medias_and_sets_of_tags if tag in mpt ]
            
            hashes = set( itertools.chain.from_iterable( ( m.GetHashes() for m in media_to_affect ) ) )
            
            if len( hashes ) > 0:
                
                content_updates = []
                
                if choice_action in ( HC.CONTENT_UPDATE_ADD, HC.CONTENT_UPDATE_PEND ):
                    
                    recent_tags.add( tag )
                    
                
                content_updates.append( HydrusData.ContentUpdate( HC.CONTENT_TYPE_MAPPINGS, choice_action, ( tag, hashes ), reason = reason ) )
                
                if len( content_updates ) > 0:
                    
                    if not self._immediate_commit:
                        
                        # apply to the in-memory tags managers now so the UI reflects the queued change
                        for m in media_to_affect:
                            
                            mt = m.GetTagsManager()
                            
                            for content_update in content_updates:
                                
                                mt.ProcessContentUpdate( self._tag_service_key, content_update )
                                
                            
                        
                    
                    content_updates_group.extend( content_updates )
                    
                
            
        
        num_recent_tags = HG.client_controller.new_options.GetNoneableInteger( 'num_recent_tags' )
        
        if len( recent_tags ) > 0 and num_recent_tags is not None:
            
            if len( recent_tags ) > num_recent_tags:
                
                recent_tags = random.sample( recent_tags, num_recent_tags )
                
            
            HG.client_controller.Write( 'push_recent_tags', self._tag_service_key, recent_tags )
            
        
        if len( content_updates_group ) > 0:
            
            if self._immediate_commit:
                
                service_keys_to_content_updates = { self._tag_service_key : content_updates_group }
                
                HG.client_controller.WriteSynchronous( 'content_updates', service_keys_to_content_updates )
                
            else:
                
                self._groups_of_content_updates.append( content_updates_group )
                
            
        
        self._suggested_tags.MediaUpdated()
        
        self._tags_box.SetTagsByMedia( self._media )
def _MigrateTags( self ):
hashes = set()
for m in self._media:
hashes.update( m.GetHashes() )
def do_it( tag_service_key, hashes ):
tlw = HG.client_controller.GetMainTLW()
frame = ClientGUITopLevelWindowsPanels.FrameThatTakesScrollablePanel( tlw, 'migrate tags' )
panel = ClientGUIScrolledPanelsReview.MigrateTagsPanel( frame, self._tag_service_key, hashes )
frame.SetPanel( panel )
QP.CallAfter( do_it, self._tag_service_key, hashes )
self.OK()
def _Copy( self ):
tags = list( self._tags_box.GetSelectedTags() )
if len( tags ) == 0:
( current_tags_to_count, deleted_tags_to_count, pending_tags_to_count, petitioned_tags_to_count ) = ClientMedia.GetMediasTagCount( self._media, self._tag_service_key, ClientTags.TAG_DISPLAY_STORAGE )
tags = set( current_tags_to_count.keys() ).union( pending_tags_to_count.keys() )
if len( tags ) > 0:
tags = HydrusTags.SortNumericTags( tags )
text = os.linesep.join( tags )
HG.client_controller.pub( 'clipboard', 'text', text )
def _FlipShowDeleted( self ):
self._show_deleted = not self._show_deleted
self._tags_box.SetShow( 'deleted', self._show_deleted )
def _ModifyMappers( self ):
contents = []
tags = self._tags_box.GetSelectedTags()
if len( tags ) == 0:
QW.QMessageBox.information( self, 'No tags selected!', 'Please select some tags first!' )
return
hashes_and_current_tags = [ ( m.GetHashes(), m.GetTagsManager().GetCurrent( self._tag_service_key, ClientTags.TAG_DISPLAY_STORAGE ) ) for m in self._media ]
for tag in tags:
hashes_iter = itertools.chain.from_iterable( ( hashes for ( hashes, current_tags ) in hashes_and_current_tags if tag in current_tags ) )
contents.extend( [ HydrusNetwork.Content( HC.CONTENT_TYPE_MAPPING, ( tag, hash ) ) for hash in hashes_iter ] )
if len( contents ) > 0:
subject_account_identifiers = [ HydrusNetwork.AccountIdentifier( content = content ) for content in contents ]
frame = ClientGUITopLevelWindowsPanels.FrameThatTakesScrollablePanel( self.window().parentWidget(), 'manage accounts' )
panel = ClientGUIHydrusNetwork.ModifyAccountsPanel( frame, self._tag_service_key, subject_account_identifiers )
frame.SetPanel( panel )
def _Paste( self ):
try:
text = HG.client_controller.GetClipboardText()
except HydrusExceptions.DataMissing as e:
QW.QMessageBox.warning( self, 'Warning', str(e) )
return
try:
tags = HydrusText.DeserialiseNewlinedTexts( text )
tags = HydrusTags.CleanTags( tags )
self.AddTags( tags, only_add = True )
except Exception as e:
QW.QMessageBox.warning( self, 'Warning', 'I could not understand what was in the clipboard' )
def _RemoveTagsButton( self ):
tags_managers = [ m.GetTagsManager() for m in self._media ]
removable_tags = set()
for tags_manager in tags_managers:
removable_tags.update( tags_manager.GetCurrent( self._tag_service_key, ClientTags.TAG_DISPLAY_STORAGE ) )
removable_tags.update( tags_manager.GetPending( self._tag_service_key, ClientTags.TAG_DISPLAY_STORAGE ) )
selected_tags = list( self._tags_box.GetSelectedTags() )
if len( selected_tags ) == 0:
tags_to_remove = list( removable_tags )
else:
tags_to_remove = [ tag for tag in selected_tags if tag in removable_tags ]
tags_to_remove = HydrusTags.SortNumericTags( tags_to_remove )
self.RemoveTags( tags_to_remove )
def AddTags( self, tags, only_add = False ):
if not self._new_options.GetBoolean( 'allow_remove_on_manage_tags_input' ):
only_add = True
if len( tags ) > 0:
self.EnterTags( tags, only_add = only_add )
    def CleanBeforeDestroy( self ):
        """Cancel any in-flight autocomplete results fetch before this panel is destroyed."""
        
        self._add_tag_box.CancelCurrentResultsFetchJob()
    def ClearMedia( self ):
        """Clear the panel's media, leaving it managing an empty set."""
        
        self.SetMedia( set() )
def EnterTags( self, tags, only_add = False ):
if len( tags ) > 0:
self._EnterTags( tags, only_add = only_add )
    def GetGroupsOfContentUpdates( self ):
        """Return ( tag_service_key, groups_of_content_updates ) accumulated while not in immediate-commit mode."""
        
        return ( self._tag_service_key, self._groups_of_content_updates )
def HasChanges( self ):
return len( self._groups_of_content_updates ) > 0
    def OK( self ):
        """Emit the ok signal (presumably hooked up by the parent dialog to commit/close -- confirm with caller)."""
        
        self.okSignal.emit()
def ProcessApplicationCommand( self, command: CAC.ApplicationCommand ):
command_processed = True
data = command.GetData()
if command.IsSimpleCommand():
action = data
if action == CAC.SIMPLE_SET_SEARCH_FOCUS:
self.SetTagBoxFocus()
elif action in ( CAC.SIMPLE_SHOW_AND_FOCUS_MANAGE_TAGS_FAVOURITE_TAGS, CAC.SIMPLE_SHOW_AND_FOCUS_MANAGE_TAGS_RELATED_TAGS, CAC.SIMPLE_SHOW_AND_FOCUS_MANAGE_TAGS_FILE_LOOKUP_SCRIPT_TAGS, CAC.SIMPLE_SHOW_AND_FOCUS_MANAGE_TAGS_RECENT_TAGS ):
self._suggested_tags.TakeFocusForUser( action )
elif action == CAC.SIMPLE_REFRESH_RELATED_TAGS:
self._suggested_tags.RefreshRelatedThorough()
else:
command_processed = False
else:
command_processed = False
return command_processed
def ProcessContentUpdates( self, service_keys_to_content_updates ):
for ( service_key, content_updates ) in list(service_keys_to_content_updates.items()):
for content_update in content_updates:
for m in self._media:
if HydrusData.SetsIntersect( m.GetHashes(), content_update.GetHashes() ):
m.GetMediaResult().ProcessContentUpdate( service_key, content_update )
self._tags_box.SetTagsByMedia( self._media )
self._suggested_tags.MediaUpdated()
def RemoveTags( self, tags ):
if len( tags ) > 0:
if self._new_options.GetBoolean( 'yes_no_on_remove_on_manage_tags' ):
if len( tags ) < 10:
message = 'Are you sure you want to remove these tags:'
message += os.linesep * 2
message += os.linesep.join( ( HydrusText.ElideText( tag, 64 ) for tag in tags ) )
else:
message = 'Are you sure you want to remove these ' + HydrusData.ToHumanInt( len( tags ) ) + ' tags?'
result = ClientGUIDialogsQuick.GetYesNo( self, message )
if result != QW.QDialog.Accepted:
return
self._EnterTags( tags, only_remove = True )
def SetMedia( self, media ):
if media is None:
media = set()
self._media = media
self._tags_box.SetTagsByMedia( self._media )
self._suggested_tags.SetMedia( media )
    def SetTagBoxFocus( self ):
        """Move keyboard focus to the tag autocomplete input."""
        
        self._add_tag_box.setFocus( QC.Qt.OtherFocusReason )
class ManageTagParents( ClientGUIScrolledPanels.ManagePanel ):
    def __init__( self, parent, tags = None ):
        """Build the manage-tag-parents panel: one notebook page per writable tag service.
        
        tags: optional starting tags handed to each service page.
        """
        
        ClientGUIScrolledPanels.ManagePanel.__init__( self, parent )
        
        self._tag_repositories = ClientGUICommon.BetterNotebook( self )
        
        #
        
        default_tag_repository_key = HC.options[ 'default_tag_repository' ]
        
        # local tag services always get a page; repositories only if we may petition parents
        services = list( HG.client_controller.services_manager.GetServices( ( HC.LOCAL_TAG, ) ) )
        
        services.extend( [ service for service in HG.client_controller.services_manager.GetServices( ( HC.TAG_REPOSITORY, ) ) if service.HasPermission( HC.CONTENT_TYPE_TAG_PARENTS, HC.PERMISSION_ACTION_PETITION ) ] )
        
        for service in services:
            
            name = service.GetName()
            service_key = service.GetServiceKey()
            
            page = self._Panel( self._tag_repositories, service_key, tags )
            
            select = service_key == default_tag_repository_key
            
            self._tag_repositories.addTab( page, name )
            
            # start on the user's default tag repository's page
            if select: self._tag_repositories.setCurrentWidget( page )
            
        
        #
        
        vbox = QP.VBoxLayout()
        
        QP.AddToLayout( vbox, self._tag_repositories, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        self.widget().setLayout( vbox )
def _SetSearchFocus( self ):
page = self._tag_repositories.currentWidget()
if page is not None:
page.SetTagBoxFocus()
def CommitChanges( self ):
service_keys_to_content_updates = {}
for page in self._tag_repositories.GetPages():
( service_key, content_updates ) = page.GetContentUpdates()
if len( content_updates ) > 0:
service_keys_to_content_updates[ service_key ] = content_updates
if len( service_keys_to_content_updates ) > 0:
HG.client_controller.Write( 'content_updates', service_keys_to_content_updates )
def UserIsOKToOK( self ):
if self._tag_repositories.currentWidget().HasUncommittedPair():
message = 'Are you sure you want to OK? You have an uncommitted pair.'
result = ClientGUIDialogsQuick.GetYesNo( self, message )
if result != QW.QDialog.Accepted:
return False
return True
class _Panel( QW.QWidget ):
        def __init__( self, parent, service_key, tags = None ):
            """Build one service's tag-parents page: pairs list, import/export menus, child/parent inputs.
            
            tags: optional starting tags; handed to THREADInitialise to seed the page.
            """
            
            QW.QWidget.__init__( self, parent )
            
            self._service_key = service_key
            
            self._service = HG.client_controller.services_manager.GetService( self._service_key )
            
            self._i_am_local_tag_service = self._service.GetServiceType() == HC.LOCAL_TAG
            
            # pair -> reason string, for repository pends/petitions
            self._pairs_to_reasons = {}
            
            # status -> set of ( child, parent ) pairs; 'original' is as-loaded, 'current' reflects edits
            self._original_statuses_to_pairs = collections.defaultdict( set )
            self._current_statuses_to_pairs = collections.defaultdict( set )
            
            self._show_all = QW.QCheckBox( self )
            
            listctrl_panel = ClientGUIListCtrl.BetterListCtrlPanel( self )
            
            self._tag_parents = ClientGUIListCtrl.BetterListCtrl( listctrl_panel, CGLC.COLUMN_LIST_TAG_PARENTS.ID, 8, self._ConvertPairToListCtrlTuples, delete_key_callback = self._ListCtrlActivated, activation_callback = self._ListCtrlActivated )
            
            listctrl_panel.SetListCtrl( self._tag_parents )
            
            self._tag_parents.Sort()
            
            # import menu
            menu_items = []
            
            menu_items.append( ( 'normal', 'from clipboard', 'Load parents from text in your clipboard.', HydrusData.Call( self._ImportFromClipboard, False ) ) )
            menu_items.append( ( 'normal', 'from clipboard (only add pairs--no deletions)', 'Load parents from text in your clipboard.', HydrusData.Call( self._ImportFromClipboard, True ) ) )
            menu_items.append( ( 'normal', 'from .txt file', 'Load parents from a .txt file.', HydrusData.Call( self._ImportFromTXT, False ) ) )
            menu_items.append( ( 'normal', 'from .txt file (only add pairs--no deletions)', 'Load parents from a .txt file.', HydrusData.Call( self._ImportFromTXT, True ) ) )
            
            listctrl_panel.AddMenuButton( 'import', menu_items )
            
            # export menu
            menu_items = []
            
            menu_items.append( ( 'normal', 'to clipboard', 'Save selected parents to your clipboard.', self._ExportToClipboard ) )
            menu_items.append( ( 'normal', 'to .txt file', 'Save selected parents to a .txt file.', self._ExportToTXT ) )
            
            listctrl_panel.AddMenuButton( 'export', menu_items, enabled_only_on_selection = True )
            
            self._children = ClientGUIListBoxes.ListBoxTagsStringsAddRemove( self, self._service_key, ClientTags.TAG_DISPLAY_ACTUAL )
            self._parents = ClientGUIListBoxes.ListBoxTagsStringsAddRemove( self, self._service_key, ClientTags.TAG_DISPLAY_ACTUAL )
            
            ( gumpf, preview_height ) = ClientGUIFunctions.ConvertTextToPixels( self._children, ( 12, 6 ) )
            
            self._children.setMinimumHeight( preview_height )
            self._parents.setMinimumHeight( preview_height )
            
            # inputs stay disabled until THREADInitialise has fetched the existing pairs
            self._child_input = ClientGUIACDropdown.AutoCompleteDropdownTagsWrite( self, self.EnterChildren, CC.LOCAL_FILE_SERVICE_KEY, service_key, show_paste_button = True )
            self._child_input.setEnabled( False )
            
            self._parent_input = ClientGUIACDropdown.AutoCompleteDropdownTagsWrite( self, self.EnterParents, CC.LOCAL_FILE_SERVICE_KEY, service_key, show_paste_button = True )
            self._parent_input.setEnabled( False )
            
            self._add = QW.QPushButton( 'add', self )
            self._add.clicked.connect( self.EventAddButton )
            self._add.setEnabled( False )
            
            #
            
            self._status_st = ClientGUICommon.BetterStaticText( self, 'initialising\u2026' + os.linesep + '.' )
            self._sync_status_st = ClientGUICommon.BetterStaticText( self, '' )
            self._sync_status_st.setWordWrap( True )
            self._count_st = ClientGUICommon.BetterStaticText( self, '' )
            
            #
            
            children_vbox = QP.VBoxLayout()
            
            QP.AddToLayout( children_vbox, ClientGUICommon.BetterStaticText( self, label = 'set children' ), CC.FLAGS_CENTER )
            QP.AddToLayout( children_vbox, self._children, CC.FLAGS_EXPAND_BOTH_WAYS )
            
            parents_vbox = QP.VBoxLayout()
            
            QP.AddToLayout( parents_vbox, ClientGUICommon.BetterStaticText( self, label = 'set parents' ), CC.FLAGS_CENTER )
            QP.AddToLayout( parents_vbox, self._parents, CC.FLAGS_EXPAND_BOTH_WAYS )
            
            tags_box = QP.HBoxLayout()
            
            QP.AddToLayout( tags_box, children_vbox, CC.FLAGS_EXPAND_BOTH_WAYS )
            QP.AddToLayout( tags_box, parents_vbox, CC.FLAGS_EXPAND_BOTH_WAYS )
            
            input_box = QP.HBoxLayout()
            
            QP.AddToLayout( input_box, self._child_input, CC.FLAGS_EXPAND_BOTH_WAYS )
            QP.AddToLayout( input_box, self._parent_input, CC.FLAGS_EXPAND_BOTH_WAYS )
            
            vbox = QP.VBoxLayout()
            
            QP.AddToLayout( vbox, self._status_st, CC.FLAGS_EXPAND_PERPENDICULAR )
            QP.AddToLayout( vbox, self._sync_status_st, CC.FLAGS_EXPAND_PERPENDICULAR )
            QP.AddToLayout( vbox, self._count_st, CC.FLAGS_EXPAND_PERPENDICULAR )
            QP.AddToLayout( vbox, ClientGUICommon.WrapInText(self._show_all,self,'show all pairs'), CC.FLAGS_EXPAND_PERPENDICULAR )
            QP.AddToLayout( vbox, listctrl_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
            QP.AddToLayout( vbox, self._add, CC.FLAGS_ON_RIGHT )
            QP.AddToLayout( vbox, tags_box, CC.FLAGS_EXPAND_SIZER_BOTH_WAYS )
            QP.AddToLayout( vbox, input_box, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
            
            self.setLayout( vbox )
            
            #
            
            self._tag_parents.itemSelectionChanged.connect( self._SetButtonStatus )
            
            self._children.listBoxChanged.connect( self._UpdateListCtrlData )
            self._parents.listBoxChanged.connect( self._UpdateListCtrlData )
            self._show_all.clicked.connect( self._UpdateListCtrlData )
            
            # fetch existing pairs off the Qt thread
            HG.client_controller.CallToThread( self.THREADInitialise, tags, self._service_key )
        def _AddPairs( self, pairs, add_only = False ):
            """Enter the given ( child, parent ) pairs, flipping each one's status appropriately.
            
            Depending on a pair's current status this pends it as new, offers to
            petition/delete it if it already exists, or offers to rescind an
            existing pend/petition. Collects a petition reason where the service
            requires one, then refreshes the affected rows in the list ctrl.
            
            add_only: if True, never remove/rescind -- existing and pending pairs are left alone.
            """
            
            pairs = list( pairs )
            
            # sort by parent for stable dialog text
            pairs.sort( key = lambda c_p: HydrusTags.ConvertTagToSortable( c_p[1] ) )
            
            new_pairs = []
            current_pairs = []
            petitioned_pairs = []
            pending_pairs = []
            
            # bucket each pair by what entering it again would mean
            for pair in pairs:
                
                if pair in self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ]:
                    
                    if not add_only:
                        
                        pending_pairs.append( pair )
                        
                    
                elif pair in self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ]:
                    
                    petitioned_pairs.append( pair )
                    
                elif pair in self._original_statuses_to_pairs[ HC.CONTENT_STATUS_CURRENT ]:
                    
                    if not add_only:
                        
                        current_pairs.append( pair )
                        
                    
                elif self._CanAdd( pair ):
                    
                    new_pairs.append( pair )
                    
                
            
            affected_pairs = []
            
            if len( new_pairs ) > 0:
                
                do_it = True
                
                # repositories need a reason for the pend; moderators get 'admin' for free
                if not self._i_am_local_tag_service:
                    
                    if self._service.HasPermission( HC.CONTENT_TYPE_TAG_PARENTS, HC.PERMISSION_ACTION_MODERATE ):
                        
                        reason = 'admin'
                        
                    else:
                        
                        if len( new_pairs ) > 10:
                            
                            pair_strings = 'The many pairs you entered.'
                            
                        else:
                            
                            pair_strings = os.linesep.join( ( child + '->' + parent for ( child, parent ) in new_pairs ) )
                            
                        
                        message = 'Enter a reason for:' + os.linesep * 2 + pair_strings + os.linesep * 2 + 'To be added. A janitor will review your request.'
                        
                        suggestions = []
                        
                        suggestions.append( 'obvious by definition (a sword is a weapon)' )
                        suggestions.append( 'character/series/studio/etc... belonging (character x belongs to series y)' )
                        
                        with ClientGUIDialogs.DialogTextEntry( self, message, suggestions = suggestions ) as dlg:
                            
                            if dlg.exec() == QW.QDialog.Accepted:
                                
                                reason = dlg.GetValue()
                                
                            else:
                                
                                do_it = False
                                
                            
                        
                    
                    if do_it:
                        
                        for pair in new_pairs: self._pairs_to_reasons[ pair ] = reason
                        
                    
                
                if do_it:
                    
                    self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ].update( new_pairs )
                    
                    affected_pairs.extend( new_pairs )
                    
                
            else:
                
                # no new pairs: existing pairs may be petitioned for removal instead
                if len( current_pairs ) > 0:
                    
                    do_it = True
                    
                    if not self._i_am_local_tag_service:
                        
                        if len( current_pairs ) > 10:
                            
                            pair_strings = 'The many pairs you entered.'
                            
                        else:
                            
                            pair_strings = os.linesep.join( ( child + '->' + parent for ( child, parent ) in current_pairs ) )
                            
                        
                        if len( current_pairs ) > 1:
                            
                            message = 'The pairs:' + os.linesep * 2 + pair_strings + os.linesep * 2 + 'Already exist.'
                            
                        else:
                            
                            message = 'The pair ' + pair_strings + ' already exists.'
                            
                        
                        result = ClientGUIDialogsQuick.GetYesNo( self, message, title = 'Choose what to do.', yes_label = 'petition to remove', no_label = 'do nothing' )
                        
                        if result == QW.QDialog.Accepted:
                            
                            if self._service.HasPermission( HC.CONTENT_TYPE_TAG_PARENTS, HC.PERMISSION_ACTION_MODERATE ):
                                
                                reason = 'admin'
                                
                            else:
                                
                                message = 'Enter a reason for:'
                                message += os.linesep * 2
                                message += pair_strings
                                message += os.linesep * 2
                                message += 'to be removed. A janitor will review your petition.'
                                
                                suggestions = []
                                
                                suggestions.append( 'obvious typo/mistake' )
                                
                                with ClientGUIDialogs.DialogTextEntry( self, message, suggestions = suggestions ) as dlg:
                                    
                                    if dlg.exec() == QW.QDialog.Accepted:
                                        
                                        reason = dlg.GetValue()
                                        
                                    else:
                                        
                                        do_it = False
                                        
                                    
                                
                            
                            if do_it:
                                
                                for pair in current_pairs: self._pairs_to_reasons[ pair ] = reason
                                
                            
                        else:
                            
                            do_it = False
                            
                        
                    
                    if do_it:
                        
                        self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ].update( current_pairs )
                        
                        affected_pairs.extend( current_pairs )
                        
                    
                
            
            # entering an already-pending pair offers to rescind the pend
            if len( pending_pairs ) > 0:
                
                if len( pending_pairs ) > 10:
                    
                    pair_strings = 'The many pairs you entered.'
                    
                else:
                    
                    pair_strings = os.linesep.join( ( child + '->' + parent for ( child, parent ) in pending_pairs ) )
                    
                
                if len( pending_pairs ) > 1:
                    
                    message = 'The pairs:' + os.linesep * 2 + pair_strings + os.linesep * 2 + 'Are pending.'
                    
                else:
                    
                    message = 'The pair ' + pair_strings + ' is pending.'
                    
                
                result = ClientGUIDialogsQuick.GetYesNo( self, message, title = 'Choose what to do.', yes_label = 'rescind the pend', no_label = 'do nothing' )
                
                if result == QW.QDialog.Accepted:
                    
                    self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ].difference_update( pending_pairs )
                    
                    affected_pairs.extend( pending_pairs )
                    
                
            
            # entering an already-petitioned pair offers to rescind the petition
            if len( petitioned_pairs ) > 0:
                
                if len( petitioned_pairs ) > 10:
                    
                    pair_strings = 'The many pairs you entered.'
                    
                else:
                    
                    pair_strings = os.linesep.join( ( child + '->' + parent for ( child, parent ) in petitioned_pairs ) )
                    
                
                if len( petitioned_pairs ) > 1:
                    
                    message = 'The pairs:' + os.linesep * 2 + pair_strings + os.linesep * 2 + 'Are petitioned.'
                    
                else:
                    
                    message = 'The pair ' + pair_strings + ' is petitioned.'
                    
                
                result = ClientGUIDialogsQuick.GetYesNo( self, message, title = 'Choose what to do.', yes_label = 'rescind the petition', no_label = 'do nothing' )
                
                if result == QW.QDialog.Accepted:
                    
                    self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ].difference_update( petitioned_pairs )
                    
                    affected_pairs.extend( petitioned_pairs )
                    
                
            
            if len( affected_pairs ) > 0:
                
                def in_current( pair ):
                    
                    for status in ( HC.CONTENT_STATUS_CURRENT, HC.CONTENT_STATUS_PENDING, HC.CONTENT_STATUS_PETITIONED ):
                        
                        if pair in self._current_statuses_to_pairs[ status ]:
                            
                            return True
                            
                        
                    
                    return False
                    
                
                # decide, per pair, whether the row is new, changed, or gone
                affected_pairs = [ ( self._tag_parents.HasData( pair ), in_current( pair ), pair ) for pair in affected_pairs ]
                
                to_add = [ pair for ( exists, current, pair ) in affected_pairs if not exists ]
                to_update = [ pair for ( exists, current, pair ) in affected_pairs if exists and current ]
                to_delete = [ pair for ( exists, current, pair ) in affected_pairs if exists and not current ]
                
                self._tag_parents.AddDatas( to_add )
                self._tag_parents.UpdateDatas( to_update )
                self._tag_parents.DeleteDatas( to_delete )
                
                self._tag_parents.Sort()
def _CanAdd( self, potential_pair ):
( potential_child, potential_parent ) = potential_pair
if potential_child == potential_parent: return False
current_pairs = self._current_statuses_to_pairs[ HC.CONTENT_STATUS_CURRENT ].union( self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ] ).difference( self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ] )
current_children = { child for ( child, parent ) in current_pairs }
# test for loops
if potential_parent in current_children:
simple_children_to_parents = ClientManagers.BuildSimpleChildrenToParents( current_pairs )
if ClientManagers.LoopInSimpleChildrenToParents( simple_children_to_parents, potential_child, potential_parent ):
QW.QMessageBox.critical( self, 'Error', 'Adding '+potential_child+'->'+potential_parent+' would create a loop!' )
return False
return True
        def _ConvertPairToListCtrlTuples( self, pair ):
            """Convert a ( child, parent ) pair into ( display_tuple, sort_tuple ) for the list ctrl."""
            
            ( child, parent ) = pair
            
            if pair in self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ]:
                
                status = HC.CONTENT_STATUS_PENDING
                
            elif pair in self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ]:
                
                status = HC.CONTENT_STATUS_PETITIONED
                
            elif pair in self._original_statuses_to_pairs[ HC.CONTENT_STATUS_CURRENT ]:
                
                status = HC.CONTENT_STATUS_CURRENT
                
            
            # NOTE(review): if the pair is in none of the three sets above, 'status' is unbound and
            # the next line raises NameError -- presumably every listed pair is always in one of
            # them; confirm before relying on this elsewhere
            sign = HydrusData.ConvertStatusToPrefix( status )
            
            pretty_status = sign
            
            display_tuple = ( pretty_status, child, parent )
            sort_tuple = ( status, child, parent )
            
            return ( display_tuple, sort_tuple )
def _DeserialiseImportString( self, import_string ):
tags = HydrusText.DeserialiseNewlinedTexts( import_string )
if len( tags ) % 2 == 1:
raise Exception( 'Uneven number of tags found!' )
pairs = []
for i in range( len( tags ) // 2 ):
pair = ( tags[ 2 * i ], tags[ ( 2 * i ) + 1 ] )
pairs.append( pair )
return pairs
def _ExportToClipboard( self ):
export_string = self._GetExportString()
HG.client_controller.pub( 'clipboard', 'text', export_string )
def _ExportToTXT( self ):
export_string = self._GetExportString()
with QP.FileDialog( self, 'Set the export path.', default_filename = 'parents.txt', acceptMode = QW.QFileDialog.AcceptSave, fileMode = QW.QFileDialog.AnyFile ) as dlg:
if dlg.exec() == QW.QDialog.Accepted:
path = dlg.GetPath()
with open( path, 'w', encoding = 'utf-8' ) as f:
f.write( export_string )
def _GetExportString( self ):
tags = []
for ( a, b ) in self._tag_parents.GetData( only_selected = True ):
tags.append( a )
tags.append( b )
export_string = os.linesep.join( tags )
return export_string
def _ImportFromClipboard( self, add_only = False ):
try:
import_string = HG.client_controller.GetClipboardText()
except HydrusExceptions.DataMissing as e:
QW.QMessageBox.critical( self, 'Error', str(e) )
return
pairs = self._DeserialiseImportString( import_string )
self._AddPairs( pairs, add_only = add_only )
self._UpdateListCtrlData()
def _ImportFromTXT( self, add_only = False ):
with QP.FileDialog( self, 'Select the file to import.', acceptMode = QW.QFileDialog.AcceptOpen ) as dlg:
if dlg.exec() != QW.QDialog.Accepted:
return
else:
path = dlg.GetPath()
with open( path, 'r', encoding = 'utf-8' ) as f:
import_string = f.read()
pairs = self._DeserialiseImportString( import_string )
self._AddPairs( pairs, add_only = add_only )
self._UpdateListCtrlData()
def _ListCtrlActivated( self ):
parents_to_children = collections.defaultdict( set )
pairs = self._tag_parents.GetData( only_selected = True )
if len( pairs ) > 0:
self._AddPairs( pairs )
def _SetButtonStatus( self ):
if len( self._children.GetTags() ) == 0 or len( self._parents.GetTags() ) == 0:
self._add.setEnabled( False )
else:
self._add.setEnabled( True )
        def _UpdateListCtrlData( self ):
            """Rebuild the pairs list ctrl from current state, filtered to the entered child/parent tags.
            
            With no entered tags, shows pending/petitioned pairs (and current too when
            'show all' is checked); with entered tags, shows pairs that touch them.
            """
            
            children = self._children.GetTags()
            parents = self._parents.GetTags()
            
            pertinent_tags = children.union( parents )
            
            self._tag_parents.DeleteDatas( self._tag_parents.GetData() )
            
            all_pairs = set()
            
            show_all = self._show_all.isChecked()
            
            for ( status, pairs ) in self._current_statuses_to_pairs.items():
                
                if status == HC.CONTENT_STATUS_DELETED:
                    
                    continue
                    
                
                if len( pertinent_tags ) == 0:
                    
                    if status == HC.CONTENT_STATUS_CURRENT and not show_all:
                        
                        continue
                        
                    
                    # show all pending/petitioned
                    
                    all_pairs.update( pairs )
                    
                else:
                    
                    # show all appropriate
                    
                    for pair in pairs:
                        
                        ( a, b ) = pair
                        
                        if a in pertinent_tags or b in pertinent_tags or show_all:
                            
                            all_pairs.add( pair )
                            
                        
                    
                
            
            self._tag_parents.AddDatas( all_pairs )
            
            self._tag_parents.Sort()
def EnterChildren( self, tags ):
if len( tags ) > 0:
self._parents.RemoveTags( tags )
self._children.EnterTags( tags )
self._UpdateListCtrlData()
self._SetButtonStatus()
def EnterParents( self, tags ):
if len( tags ) > 0:
self._children.RemoveTags( tags )
self._parents.EnterTags( tags )
self._UpdateListCtrlData()
self._SetButtonStatus()
def EventAddButton( self ):
children = self._children.GetTags()
parents = self._parents.GetTags()
pairs = list( itertools.product( children, parents ) )
self._AddPairs( pairs )
self._children.SetTags( [] )
self._parents.SetTags( [] )
self._UpdateListCtrlData()
self._SetButtonStatus()
def GetContentUpdates( self ):
    """Build the ( service_key, content_updates ) tuple this panel wants committed.

    Local tag services get plain ADD/DELETE updates for the pending/petitioned
    sets. Repositories get PEND/PETITION updates (with stored reasons) plus
    rescinds, computed by diffing the current state against the original state.
    """
    # we make it manually here because of the mass pending tags done (but not undone on a rescind) on a pending pair!
    # we don't want to send a pend and then rescind it, cause that will spam a thousand bad tags and not undo it
    content_updates = []
    if self._i_am_local_tag_service:
        for pair in self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ]: content_updates.append( HydrusData.ContentUpdate( HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_UPDATE_ADD, pair ) )
        for pair in self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ]: content_updates.append( HydrusData.ContentUpdate( HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_UPDATE_DELETE, pair ) )
    else:
        current_pending = self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ]
        original_pending = self._original_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ]
        current_petitioned = self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ]
        original_petitioned = self._original_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ]
        # only send what actually changed in this dialog session
        new_pends = current_pending.difference( original_pending )
        rescinded_pends = original_pending.difference( current_pending )
        new_petitions = current_petitioned.difference( original_petitioned )
        rescinded_petitions = original_petitioned.difference( current_petitioned )
        content_updates.extend( ( HydrusData.ContentUpdate( HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_UPDATE_PEND, pair, reason = self._pairs_to_reasons[ pair ] ) for pair in new_pends ) )
        content_updates.extend( ( HydrusData.ContentUpdate( HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_UPDATE_RESCIND_PEND, pair ) for pair in rescinded_pends ) )
        content_updates.extend( ( HydrusData.ContentUpdate( HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_UPDATE_PETITION, pair, reason = self._pairs_to_reasons[ pair ] ) for pair in new_petitions ) )
        content_updates.extend( ( HydrusData.ContentUpdate( HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_UPDATE_RESCIND_PETITION, pair ) for pair in rescinded_petitions ) )
    return ( self._service_key, content_updates )
def HasUncommittedPair( self ):
    """True when tags sit in both the child and parent boxes without 'add' having been hit."""
    if len( self._children.GetTags() ) == 0:
        return False
    return len( self._parents.GetTags() ) > 0
def SetTagBoxFocus( self ):
    """Focus the child input until a child is entered, then the parent input."""
    if len( self._children.GetTags() ) == 0:
        target = self._child_input
    else:
        target = self._parent_input
    target.setFocus( QC.Qt.OtherFocusReason )
def THREADInitialise( self, tags, service_key ):
    """Off the Qt thread: read the current parent pairs and per-service sync status,
    then hand everything to qt_code to populate the panel.
    """
    def qt_code( original_statuses_to_pairs, current_statuses_to_pairs, service_keys_to_work_to_do ):
        # back on the qt thread; bail if the widget died while we were reading
        if not self or not QP.isValid( self ):
            return
        self._original_statuses_to_pairs = original_statuses_to_pairs
        self._current_statuses_to_pairs = current_statuses_to_pairs
        self._status_st.setText( 'Files with a tag on the left will also be given the tag on the right.' + os.linesep + 'As an experiment, this panel will only display the \'current\' pairs for those tags entered below.' )
        looking_good = True
        if len( service_keys_to_work_to_do ) == 0:
            looking_good = False
            status_text = 'No services currently apply these parents. Changes here will have no effect unless parent application is changed later.'
        else:
            # split the applying services into synced and still-working groups for the summary
            synced_names = sorted( ( HG.client_controller.services_manager.GetName( s_k ) for ( s_k, work_to_do ) in service_keys_to_work_to_do.items() if not work_to_do ) )
            unsynced_names = sorted( ( HG.client_controller.services_manager.GetName( s_k ) for ( s_k, work_to_do ) in service_keys_to_work_to_do.items() if work_to_do ) )
            synced_string = ', '.join( ( '"{}"'.format( name ) for name in synced_names ) )
            unsynced_string = ', '.join( ( '"{}"'.format( name ) for name in unsynced_names ) )
            if len( unsynced_names ) == 0:
                service_part = '{} apply these parents and are fully synced.'.format( synced_string )
            else:
                looking_good = False
                if len( synced_names ) > 0:
                    service_part = '{} apply these parents and are fully synced, but {} still have work to do.'.format( synced_string, unsynced_string )
                else:
                    service_part = '{} apply these parents and still have sync work to do.'.format( unsynced_string )
            if HG.client_controller.new_options.GetBoolean( 'tag_display_maintenance_during_active' ):
                maintenance_part = 'Parents are set to sync all the time in the background.'
                if looking_good:
                    changes_part = 'Changes from this dialog should be reflected soon after closing the dialog.'
                else:
                    changes_part = 'It may take some time for changes here to apply everywhere, though.'
            else:
                looking_good = False
                if HG.client_controller.new_options.GetBoolean( 'tag_display_maintenance_during_idle' ):
                    maintenance_part = 'Parents are set to sync only when you are not using the client.'
                    changes_part = 'It may take some time for changes here to apply.'
                else:
                    maintenance_part = 'Parents are not set to sync.'
                    changes_part = 'Changes here will not apply unless sync is manually forced to run.'
            s = os.linesep * 2
            status_text = s.join( ( service_part, maintenance_part, changes_part ) )
        self._sync_status_st.setText( status_text )
        # green when everything applies and is synced, amber otherwise
        if looking_good:
            self._sync_status_st.setObjectName( 'HydrusValid' )
        else:
            self._sync_status_st.setObjectName( 'HydrusWarning' )
        self._sync_status_st.style().polish( self._sync_status_st )
        self._count_st.setText( 'Starting with '+HydrusData.ToHumanInt(len(original_statuses_to_pairs[HC.CONTENT_STATUS_CURRENT]))+' pairs.' )
        self._child_input.setEnabled( True )
        self._parent_input.setEnabled( True )
        if tags is None:
            self._UpdateListCtrlData()
        else:
            self.EnterChildren( tags )
    original_statuses_to_pairs = HG.client_controller.Read( 'tag_parents', service_key )
    ( master_service_keys_to_sibling_applicable_service_keys, master_service_keys_to_parent_applicable_service_keys ) = HG.client_controller.Read( 'tag_display_application' )
    # every service that applies this service's parents to its display tags
    service_keys_we_care_about = { s_k for ( s_k, s_ks ) in master_service_keys_to_parent_applicable_service_keys.items() if service_key in s_ks }
    service_keys_to_work_to_do = {}
    for s_k in service_keys_we_care_about:
        status = HG.client_controller.Read( 'tag_display_maintenance_status', s_k )
        work_to_do = status[ 'num_parents_to_sync' ] > 0
        service_keys_to_work_to_do[ s_k ] = work_to_do
    # deep-copy into a mutable working state
    current_statuses_to_pairs = collections.defaultdict( set )
    current_statuses_to_pairs.update( { key : set( value ) for ( key, value ) in list(original_statuses_to_pairs.items()) } )
    QP.CallAfter( qt_code, original_statuses_to_pairs, current_statuses_to_pairs, service_keys_to_work_to_do )
class ManageTagSiblings( ClientGUIScrolledPanels.ManagePanel ):
def __init__( self, parent, tags = None ):
    """Build a notebook with one sibling-editing page per writable tag service.

    tags, if given, pre-populates each page's 'old' side.
    """
    ClientGUIScrolledPanels.ManagePanel.__init__( self, parent )
    self._tag_repositories = ClientGUICommon.BetterNotebook( self )
    #
    default_tag_repository_key = HC.options[ 'default_tag_repository' ]
    # local tag services, plus any repository we have petition permission on
    services = list( HG.client_controller.services_manager.GetServices( ( HC.LOCAL_TAG, ) ) )
    services.extend( [ service for service in HG.client_controller.services_manager.GetServices( ( HC.TAG_REPOSITORY, ) ) if service.HasPermission( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.PERMISSION_ACTION_PETITION ) ] )
    for service in services:
        name = service.GetName()
        service_key = service.GetServiceKey()
        page = self._Panel( self._tag_repositories, service_key, tags )
        select = service_key == default_tag_repository_key
        self._tag_repositories.addTab( page, name )
        if select: self._tag_repositories.setCurrentIndex( self._tag_repositories.indexOf( page ) )
    #
    vbox = QP.VBoxLayout()
    QP.AddToLayout( vbox, self._tag_repositories, CC.FLAGS_EXPAND_BOTH_WAYS )
    self.widget().setLayout( vbox )
def _SetSearchFocus( self ):
    """Send keyboard focus to the current page's tag input, if a page exists."""
    current_page = self._tag_repositories.currentWidget()
    if current_page is None:
        return
    current_page.SetTagBoxFocus()
def CommitChanges( self ):
    """Gather every page's content updates and write the non-empty ones to the database."""
    updates_by_service = {}
    for page in self._tag_repositories.GetPages():
        ( service_key, content_updates ) = page.GetContentUpdates()
        if len( content_updates ) == 0:
            continue
        updates_by_service[ service_key ] = content_updates
    if len( updates_by_service ) > 0:
        HG.client_controller.Write( 'content_updates', updates_by_service )
def UserIsOKToOK( self ):
    """Before the dialog OKs, warn if the visible page has an uncommitted pair."""
    if not self._tag_repositories.currentWidget().HasUncommittedPair():
        return True
    message = 'Are you sure you want to OK? You have an uncommitted pair.'
    result = ClientGUIDialogsQuick.GetYesNo( self, message )
    return result == QW.QDialog.Accepted
def EventServiceChanged( self, event ):
    """On tab change, queue a focus move to the newly shown page's tag input."""
    current_page = self._tag_repositories.currentWidget()
    if current_page is None:
        return
    HG.client_controller.CallAfterQtSafe( current_page, current_page.SetTagBoxFocus )
class _Panel( QW.QWidget ):
def __init__( self, parent, service_key, tags = None ):
    """One service's sibling-editing page: pair list, old/new tag inputs, import/export menus."""
    QW.QWidget.__init__( self, parent )
    self._service_key = service_key
    self._service = HG.client_controller.services_manager.GetService( self._service_key )
    # local services ADD/DELETE directly; repositories go through PEND/PETITION with reasons
    self._i_am_local_tag_service = self._service.GetServiceType() == HC.LOCAL_TAG
    # status -> set of ( old, new ) pairs; _original is db state, _current reflects dialog edits
    self._original_statuses_to_pairs = collections.defaultdict( set )
    self._current_statuses_to_pairs = collections.defaultdict( set )
    self._pairs_to_reasons = {}
    self._current_new = None
    self._show_all = QW.QCheckBox( self )
    listctrl_panel = ClientGUIListCtrl.BetterListCtrlPanel( self )
    self._tag_siblings = ClientGUIListCtrl.BetterListCtrl( listctrl_panel, CGLC.COLUMN_LIST_TAG_SIBLINGS.ID, 8, self._ConvertPairToListCtrlTuples, delete_key_callback = self._ListCtrlActivated, activation_callback = self._ListCtrlActivated )
    listctrl_panel.SetListCtrl( self._tag_siblings )
    self._tag_siblings.Sort()
    # import menu
    menu_items = []
    menu_items.append( ( 'normal', 'from clipboard', 'Load siblings from text in your clipboard.', HydrusData.Call( self._ImportFromClipboard, False ) ) )
    menu_items.append( ( 'normal', 'from clipboard (only add pairs--no deletions)', 'Load siblings from text in your clipboard.', HydrusData.Call( self._ImportFromClipboard, True ) ) )
    menu_items.append( ( 'normal', 'from .txt file', 'Load siblings from a .txt file.', HydrusData.Call( self._ImportFromTXT, False ) ) )
    menu_items.append( ( 'normal', 'from .txt file (only add pairs--no deletions)', 'Load siblings from a .txt file.', HydrusData.Call( self._ImportFromTXT, True ) ) )
    listctrl_panel.AddMenuButton( 'import', menu_items )
    # export menu
    menu_items = []
    menu_items.append( ( 'normal', 'to clipboard', 'Save selected siblings to your clipboard.', self._ExportToClipboard ) )
    menu_items.append( ( 'normal', 'to .txt file', 'Save selected siblings to a .txt file.', self._ExportToTXT ) )
    listctrl_panel.AddMenuButton( 'export', menu_items, enabled_only_on_selection = True )
    self._old_siblings = ClientGUIListBoxes.ListBoxTagsStringsAddRemove( self, self._service_key, ClientTags.TAG_DISPLAY_ACTUAL )
    self._new_sibling = ClientGUICommon.BetterStaticText( self )
    ( gumpf, preview_height ) = ClientGUIFunctions.ConvertTextToPixels( self._old_siblings, ( 12, 6 ) )
    self._old_siblings.setMinimumHeight( preview_height )
    # inputs stay disabled until THREADInitialise has loaded the pairs
    self._old_input = ClientGUIACDropdown.AutoCompleteDropdownTagsWrite( self, self.EnterOlds, CC.LOCAL_FILE_SERVICE_KEY, service_key, show_paste_button = True )
    self._old_input.setEnabled( False )
    self._new_input = ClientGUIACDropdown.AutoCompleteDropdownTagsWrite( self, self.SetNew, CC.LOCAL_FILE_SERVICE_KEY, service_key )
    self._new_input.setEnabled( False )
    self._add = QW.QPushButton( 'add', self )
    self._add.clicked.connect( self.EventAddButton )
    self._add.setEnabled( False )
    #
    self._status_st = ClientGUICommon.BetterStaticText( self, 'initialising\u2026' )
    self._sync_status_st = ClientGUICommon.BetterStaticText( self, '' )
    self._sync_status_st.setWordWrap( True )
    self._count_st = ClientGUICommon.BetterStaticText( self, '' )
    old_sibling_box = QP.VBoxLayout()
    QP.AddToLayout( old_sibling_box, ClientGUICommon.BetterStaticText( self, label = 'set tags to be replaced' ), CC.FLAGS_CENTER )
    QP.AddToLayout( old_sibling_box, self._old_siblings, CC.FLAGS_EXPAND_BOTH_WAYS )
    new_sibling_box = QP.VBoxLayout()
    QP.AddToLayout( new_sibling_box, ClientGUICommon.BetterStaticText( self, label = 'set new ideal tag' ), CC.FLAGS_CENTER )
    new_sibling_box.addStretch( 1 )
    QP.AddToLayout( new_sibling_box, self._new_sibling, CC.FLAGS_EXPAND_PERPENDICULAR )
    new_sibling_box.addStretch( 1 )
    text_box = QP.HBoxLayout()
    QP.AddToLayout( text_box, old_sibling_box, CC.FLAGS_EXPAND_SIZER_BOTH_WAYS )
    QP.AddToLayout( text_box, new_sibling_box, CC.FLAGS_EXPAND_SIZER_BOTH_WAYS )
    input_box = QP.HBoxLayout()
    QP.AddToLayout( input_box, self._old_input )
    QP.AddToLayout( input_box, self._new_input )
    vbox = QP.VBoxLayout()
    QP.AddToLayout( vbox, self._status_st, CC.FLAGS_EXPAND_PERPENDICULAR )
    QP.AddToLayout( vbox, self._sync_status_st, CC.FLAGS_EXPAND_PERPENDICULAR )
    QP.AddToLayout( vbox, self._count_st, CC.FLAGS_EXPAND_PERPENDICULAR )
    QP.AddToLayout( vbox, ClientGUICommon.WrapInText(self._show_all,self,'show all pairs'), CC.FLAGS_EXPAND_PERPENDICULAR )
    QP.AddToLayout( vbox, listctrl_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
    QP.AddToLayout( vbox, self._add, CC.FLAGS_ON_RIGHT )
    QP.AddToLayout( vbox, text_box, CC.FLAGS_EXPAND_SIZER_BOTH_WAYS )
    QP.AddToLayout( vbox, input_box, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
    self.setLayout( vbox )
    #
    self._tag_siblings.itemSelectionChanged.connect( self._SetButtonStatus )
    self._show_all.clicked.connect( self._UpdateListCtrlData )
    self._old_siblings.listBoxChanged.connect( self._UpdateListCtrlData )
    # pair data is loaded off the qt thread
    HG.client_controller.CallToThread( self.THREADInitialise, tags, self._service_key )
def _AddPairs( self, pairs, add_only = False, remove_only = False, default_reason = None ):
    """Route each ( old, new ) pair to the action appropriate for its current status.

    new pairs -> pend them (repositories require a petition reason)
    existing current pairs -> petition them for deletion
    already-pending pairs -> offer to rescind the pend
    already-petitioned pairs -> offer to rescind the petition
    add_only/remove_only restrict which of those flows may run.
    """
    pairs = list( pairs )
    pairs.sort( key = lambda c_p1: HydrusTags.ConvertTagToSortable( c_p1[1] ) )
    new_pairs = []
    current_pairs = []
    petitioned_pairs = []
    pending_pairs = []
    # bucket every incoming pair by its current status
    for pair in pairs:
        if pair in self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ]:
            if not add_only:
                pending_pairs.append( pair )
        elif pair in self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ]:
            if not remove_only:
                petitioned_pairs.append( pair )
        elif pair in self._original_statuses_to_pairs[ HC.CONTENT_STATUS_CURRENT ]:
            if not add_only:
                current_pairs.append( pair )
        elif not remove_only and self._CanAdd( pair ):
            new_pairs.append( pair )
    if len( new_pairs ) > 0:
        do_it = True
        if not self._i_am_local_tag_service:
            # repositories need a reason attached to the pend
            if default_reason is not None:
                reason = default_reason
            elif self._service.HasPermission( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.PERMISSION_ACTION_MODERATE ):
                reason = 'admin'
            else:
                if len( new_pairs ) > 10:
                    pair_strings = 'The many pairs you entered.'
                else:
                    pair_strings = os.linesep.join( ( old + '->' + new for ( old, new ) in new_pairs ) )
                suggestions = []
                suggestions.append( 'merging underscores/typos/phrasing/unnamespaced to a single uncontroversial good tag' )
                suggestions.append( 'rewording/namespacing based on preference' )
                message = 'Enter a reason for:' + os.linesep * 2 + pair_strings + os.linesep * 2 + 'To be added. A janitor will review your petition.'
                with ClientGUIDialogs.DialogTextEntry( self, message, suggestions = suggestions ) as dlg:
                    if dlg.exec() == QW.QDialog.Accepted:
                        reason = dlg.GetValue()
                    else:
                        do_it = False
            if do_it:
                for pair in new_pairs: self._pairs_to_reasons[ pair ] = reason
        if do_it:
            self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ].update( new_pairs )
    else:
        # nothing new to add, so treat this action as a removal/rescind pass
        if len( current_pairs ) > 0:
            do_it = True
            if not self._i_am_local_tag_service:
                if default_reason is not None:
                    reason = default_reason
                elif self._service.HasPermission( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.PERMISSION_ACTION_MODERATE ):
                    reason = 'admin'
                else:
                    if len( current_pairs ) > 10:
                        pair_strings = 'The many pairs you entered.'
                    else:
                        pair_strings = os.linesep.join( ( old + '->' + new for ( old, new ) in current_pairs ) )
                    message = 'Enter a reason for:'
                    message += os.linesep * 2
                    message += pair_strings
                    message += os.linesep * 2
                    message += 'to be removed. You will see the delete as soon as you upload, but a janitor will review your petition to decide if all users should receive it as well.'
                    suggestions = []
                    suggestions.append( 'obvious typo/mistake' )
                    suggestions.append( 'disambiguation' )
                    suggestions.append( 'correcting to repository standard' )
                    with ClientGUIDialogs.DialogTextEntry( self, message, suggestions = suggestions ) as dlg:
                        if dlg.exec() == QW.QDialog.Accepted:
                            reason = dlg.GetValue()
                        else:
                            do_it = False
                if do_it:
                    for pair in current_pairs:
                        self._pairs_to_reasons[ pair ] = reason
            if do_it:
                self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ].update( current_pairs )
        if len( pending_pairs ) > 0:
            if len( pending_pairs ) > 10:
                pair_strings = 'The many pairs you entered.'
            else:
                pair_strings = os.linesep.join( ( old + '->' + new for ( old, new ) in pending_pairs ) )
            if len( pending_pairs ) > 1:
                message = 'The pairs:' + os.linesep * 2 + pair_strings + os.linesep * 2 + 'Are pending.'
            else:
                message = 'The pair ' + pair_strings + ' is pending.'
            result = ClientGUIDialogsQuick.GetYesNo( self, message, title = 'Choose what to do.', yes_label = 'rescind the pend', no_label = 'do nothing' )
            if result == QW.QDialog.Accepted:
                self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ].difference_update( pending_pairs )
        if len( petitioned_pairs ) > 0:
            if len( petitioned_pairs ) > 10:
                pair_strings = 'The many pairs you entered.'
            else:
                pair_strings = ', '.join( ( old + '->' + new for ( old, new ) in petitioned_pairs ) )
            if len( petitioned_pairs ) > 1:
                message = 'The pairs:' + os.linesep * 2 + pair_strings + os.linesep * 2 + 'Are petitioned.'
            else:
                message = 'The pair ' + pair_strings + ' is petitioned.'
            result = ClientGUIDialogsQuick.GetYesNo( self, message, title = 'Choose what to do.', yes_label = 'rescind the petition', no_label = 'do nothing' )
            if result == QW.QDialog.Accepted:
                self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ].difference_update( petitioned_pairs )
def _AutoPetitionConflicts( self, pairs ):
    """For each incoming ( old, new ), auto-petition any existing pair mapping the same old to a different new.

    An old tag may only have one sibling target, so conflicting pairs must be
    removed before the incoming ones can be added.
    """
    # effective mapping: current plus pending, minus anything already petitioned away
    current_pairs = self._current_statuses_to_pairs[ HC.CONTENT_STATUS_CURRENT ].union( self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ] ).difference( self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ] )
    current_olds_to_news = dict( current_pairs )
    current_olds = { current_old for ( current_old, current_new ) in current_pairs }
    pairs_to_auto_petition = set()
    for ( old, new ) in pairs:
        if old in current_olds:
            conflicting_new = current_olds_to_news[ old ]
            if conflicting_new != new:
                conflicting_pair = ( old, conflicting_new )
                pairs_to_auto_petition.add( conflicting_pair )
    if len( pairs_to_auto_petition ) > 0:
        pairs_to_auto_petition = list( pairs_to_auto_petition )
        # NOTE(review): 'new' here is whatever the loop variable last held, so if the
        # incoming pairs target several different news, the reason only names the last
        # one -- presumably callers pass pairs sharing one new; confirm intended
        self._AddPairs( pairs_to_auto_petition, remove_only = True, default_reason = 'AUTO-PETITION TO REASSIGN TO: ' + new )
def _CanAdd( self, potential_pair ):
    """Return True if ( old, new ) can be added without ambiguity or a sibling loop.

    Shows a critical message box and returns False on any problem.
    """
    ( potential_old, potential_new ) = potential_pair
    # effective mapping: current plus pending, minus anything petitioned away
    current_pairs = self._current_statuses_to_pairs[ HC.CONTENT_STATUS_CURRENT ].union( self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ] ).difference( self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ] )
    current_olds = { old for ( old, new ) in current_pairs }
    # test for ambiguity: each old may map to only one new
    if potential_old in current_olds:
        QW.QMessageBox.critical( self, 'Error', 'There already is a relationship set for the tag '+potential_old+'.' )
        return False
    # test for loops: walk the chain from potential_new and see if it returns to potential_old
    if potential_new in current_olds:
        seen_tags = set()
        d = dict( current_pairs )
        next_new = potential_new
        while next_new in d:
            next_new = d[ next_new ]
            if next_new == potential_old:
                QW.QMessageBox.critical( self, 'Error', 'Adding '+potential_old+'->'+potential_new+' would create a loop!' )
                return False
            if next_new in seen_tags:
                # a pre-existing loop that never reaches potential_old -- bail rather than spin forever
                message = 'The pair you mean to add seems to connect to a sibling loop already in your database! Please undo this loop first. The tags involved in the loop are:'
                message += os.linesep * 2
                message += ', '.join( seen_tags )
                QW.QMessageBox.critical( self, 'Error', message )
                return False
            seen_tags.add( next_new )
    return True
def _ConvertPairToListCtrlTuples( self, pair ):
    """Convert an ( old, new ) pair into the ( display, sort ) tuples the list control wants."""
    ( old, new ) = pair
    # NOTE(review): if a pair is in none of pending/petitioned/current, 'status' is never
    # assigned and the ConvertStatusToPrefix call below would raise UnboundLocalError --
    # presumably only listed pairs are ever passed here; confirm
    if pair in self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ]:
        status = HC.CONTENT_STATUS_PENDING
    elif pair in self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ]:
        status = HC.CONTENT_STATUS_PETITIONED
    elif pair in self._original_statuses_to_pairs[ HC.CONTENT_STATUS_CURRENT ]:
        status = HC.CONTENT_STATUS_CURRENT
    sign = HydrusData.ConvertStatusToPrefix( status )
    pretty_status = sign
    existing_olds = self._old_siblings.GetTags()
    note = ''
    if old in existing_olds:
        # this old is also entered below, so hitting 'add' will displace this pair
        if status == HC.CONTENT_STATUS_PENDING:
            note = 'CONFLICT: Will be rescinded on add.'
        elif status == HC.CONTENT_STATUS_CURRENT:
            note = 'CONFLICT: Will be petitioned/deleted on add.'
    display_tuple = ( pretty_status, old, new, note )
    sort_tuple = ( status, old, new, note )
    return ( display_tuple, sort_tuple )
def _DeserialiseImportString( self, import_string ):
    """Parse newline-separated tags into a list of ( old, new ) pairs; raises on an odd count."""
    tags = HydrusText.DeserialiseNewlinedTexts( import_string )
    if len( tags ) % 2 == 1:
        raise Exception( 'Uneven number of tags found!' )
    # consecutive lines pair up: ( tags[0], tags[1] ), ( tags[2], tags[3] ), ...
    return list( zip( tags[ : : 2 ], tags[ 1 : : 2 ] ) )
def _ExportToClipboard( self ):
    """Publish the selected pairs to the system clipboard as newline-separated tags."""
    HG.client_controller.pub( 'clipboard', 'text', self._GetExportString() )
def _ExportToTXT( self ):
    """Save the selected pairs to a user-chosen .txt file, one tag per line."""
    export_string = self._GetExportString()
    with QP.FileDialog( self, 'Set the export path.', default_filename = 'siblings.txt', acceptMode = QW.QFileDialog.AcceptSave, fileMode = QW.QFileDialog.AnyFile ) as dlg:
        if dlg.exec() == QW.QDialog.Accepted:
            path = dlg.GetPath()
            with open( path, 'w', encoding = 'utf-8' ) as f:
                f.write( export_string )
def _GetExportString( self ):
    """Flatten the selected ( old, new ) pairs into a newline-separated tag string."""
    flat_tags = []
    for pair in self._tag_siblings.GetData( only_selected = True ):
        flat_tags.extend( pair )
    return os.linesep.join( flat_tags )
def _ImportFromClipboard( self, add_only = False ):
    """Load newline-separated pairs from the clipboard and apply them, auto-petitioning conflicts first."""
    try:
        import_string = HG.client_controller.GetClipboardText()
    except HydrusExceptions.DataMissing as e:
        QW.QMessageBox.critical( self, 'Error', str(e) )
        return
    pairs = self._DeserialiseImportString( import_string )
    self._AutoPetitionConflicts( pairs )
    self._AddPairs( pairs, add_only = add_only )
    self._UpdateListCtrlData()
def _ImportFromTXT( self, add_only = False ):
    """Load newline-separated pairs from a user-chosen .txt file and apply them, auto-petitioning conflicts first."""
    with QP.FileDialog( self, 'Select the file to import.', acceptMode = QW.QFileDialog.AcceptOpen ) as dlg:
        if dlg.exec() != QW.QDialog.Accepted:
            return
        else:
            path = dlg.GetPath()
            with open( path, 'r', encoding = 'utf-8' ) as f:
                import_string = f.read()
            pairs = self._DeserialiseImportString( import_string )
            self._AutoPetitionConflicts( pairs )
            self._AddPairs( pairs, add_only = add_only )
            self._UpdateListCtrlData()
def _ListCtrlActivated( self ):
    """Toggle (rescind/petition) the selected pairs when the list is activated, then refresh."""
    selected_pairs = self._tag_siblings.GetData( only_selected = True )
    if len( selected_pairs ) > 0:
        self._AddPairs( selected_pairs )
    self._UpdateListCtrlData()
def _SetButtonStatus( self ):
    """Enable 'add' only when a new sibling is chosen and at least one old tag is entered."""
    can_add = self._current_new is not None and len( self._old_siblings.GetTags() ) > 0
    self._add.setEnabled( can_add )
def _UpdateListCtrlData( self ):
    """Rebuild the sibling list control from self._current_statuses_to_pairs.

    With no olds/new entered, only pending/petitioned pairs are shown unless
    'show all' is checked; otherwise any pair touching an entered tag is shown.
    Deleted pairs are never shown.
    """
    olds = self._old_siblings.GetTags()
    pertinent_tags = set( olds )
    if self._current_new is not None:
        pertinent_tags.add( self._current_new )
    # clear and refill rather than diffing the existing rows
    self._tag_siblings.DeleteDatas( self._tag_siblings.GetData() )
    all_pairs = set()
    show_all = self._show_all.isChecked()
    for ( status, pairs ) in self._current_statuses_to_pairs.items():
        if status == HC.CONTENT_STATUS_DELETED:
            continue
        if len( pertinent_tags ) == 0:
            if status == HC.CONTENT_STATUS_CURRENT and not show_all:
                continue
            # show all pending/petitioned
            all_pairs.update( pairs )
        else:
            # show all appropriate
            for pair in pairs:
                ( a, b ) = pair
                if a in pertinent_tags or b in pertinent_tags or show_all:
                    all_pairs.add( pair )
    self._tag_siblings.AddDatas( all_pairs )
    self._tag_siblings.Sort()
def EnterOlds( self, olds ):
    """Add tags to the 'old' box; the chosen ideal is cleared if it is among them."""
    new_tag = self._current_new
    if new_tag in olds:
        # a tag cannot be both the ideal and one of the olds
        self.SetNew( set() )
    self._old_siblings.EnterTags( olds )
    self._UpdateListCtrlData()
    self._SetButtonStatus()
def EventAddButton( self ):
    """Pair every entered old with the chosen new, auto-petition conflicts, submit, then reset the inputs."""
    if self._current_new is not None and len( self._old_siblings.GetTags() ) > 0:
        olds = self._old_siblings.GetTags()
        pairs = [ ( old, self._current_new ) for old in olds ]
        self._AutoPetitionConflicts( pairs )
        self._AddPairs( pairs )
        self._old_siblings.SetTags( set() )
        self.SetNew( set() )
        self._UpdateListCtrlData()
        self._SetButtonStatus()
def GetContentUpdates( self ):
    """Build the ( service_key, content_updates ) tuple this panel wants committed.

    Local tag services get plain ADD/DELETE updates; repositories get
    PEND/PETITION updates (with stored reasons) plus rescinds, computed by
    diffing the current state against the original state.
    """
    # we make it manually here because of the mass pending tags done (but not undone on a rescind) on a pending pair!
    # we don't want to send a pend and then rescind it, cause that will spam a thousand bad tags and not undo it
    # actually, we don't do this for siblings, but we do for parents, and let's have them be the same
    content_updates = []
    if self._i_am_local_tag_service:
        for pair in self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ]:
            content_updates.append( HydrusData.ContentUpdate( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.CONTENT_UPDATE_ADD, pair ) )
        for pair in self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ]:
            content_updates.append( HydrusData.ContentUpdate( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.CONTENT_UPDATE_DELETE, pair ) )
    else:
        current_pending = self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ]
        original_pending = self._original_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ]
        current_petitioned = self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ]
        original_petitioned = self._original_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ]
        # only send what actually changed in this dialog session
        new_pends = current_pending.difference( original_pending )
        rescinded_pends = original_pending.difference( current_pending )
        new_petitions = current_petitioned.difference( original_petitioned )
        rescinded_petitions = original_petitioned.difference( current_petitioned )
        content_updates.extend( ( HydrusData.ContentUpdate( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.CONTENT_UPDATE_PEND, pair, reason = self._pairs_to_reasons[ pair ] ) for pair in new_pends ) )
        content_updates.extend( ( HydrusData.ContentUpdate( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.CONTENT_UPDATE_RESCIND_PEND, pair ) for pair in rescinded_pends ) )
        content_updates.extend( ( HydrusData.ContentUpdate( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.CONTENT_UPDATE_PETITION, pair, reason = self._pairs_to_reasons[ pair ] ) for pair in new_petitions ) )
        content_updates.extend( ( HydrusData.ContentUpdate( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.CONTENT_UPDATE_RESCIND_PETITION, pair ) for pair in rescinded_petitions ) )
    return ( self._service_key, content_updates )
def HasUncommittedPair( self ):
    """True when olds are entered and a new sibling is chosen but 'add' has not been hit."""
    if len( self._old_siblings.GetTags() ) == 0:
        return False
    return self._current_new is not None
def SetNew( self, new_tags ):
    """Set the ideal sibling tag from new_tags, or clear it when new_tags is empty."""
    if len( new_tags ) > 0:
        new = next( iter( new_tags ) )
        # the ideal cannot simultaneously be one of the olds
        self._old_siblings.RemoveTags( { new } )
        self._new_sibling.setText( new )
        self._current_new = new
    else:
        self._new_sibling.clear()
        self._current_new = None
    self._UpdateListCtrlData()
    self._SetButtonStatus()
def SetTagBoxFocus( self ):
    """Focus the 'old' input until an old is entered, then the 'new' input."""
    if len( self._old_siblings.GetTags() ) == 0:
        target = self._old_input
    else:
        target = self._new_input
    target.setFocus( QC.Qt.OtherFocusReason )
def THREADInitialise( self, tags, service_key ):
    """Off the Qt thread: read the current sibling pairs and per-service sync status,
    then hand everything to qt_code to populate the panel.
    """
    def qt_code( original_statuses_to_pairs, current_statuses_to_pairs, service_keys_to_work_to_do ):
        # back on the qt thread; bail if the widget died while we were reading
        if not self or not QP.isValid( self ):
            return
        self._original_statuses_to_pairs = original_statuses_to_pairs
        self._current_statuses_to_pairs = current_statuses_to_pairs
        self._status_st.setText( 'Tags on the left will be appear as those on the right.' )
        looking_good = True
        if len( service_keys_to_work_to_do ) == 0:
            looking_good = False
            status_text = 'No services currently apply these siblings. Changes here will have no effect unless sibling application is changed later.'
        else:
            # split the applying services into synced and still-working groups for the summary
            synced_names = sorted( ( HG.client_controller.services_manager.GetName( s_k ) for ( s_k, work_to_do ) in service_keys_to_work_to_do.items() if not work_to_do ) )
            unsynced_names = sorted( ( HG.client_controller.services_manager.GetName( s_k ) for ( s_k, work_to_do ) in service_keys_to_work_to_do.items() if work_to_do ) )
            synced_string = ', '.join( ( '"{}"'.format( name ) for name in synced_names ) )
            unsynced_string = ', '.join( ( '"{}"'.format( name ) for name in unsynced_names ) )
            if len( unsynced_names ) == 0:
                service_part = '{} apply these siblings and are fully synced.'.format( synced_string )
            else:
                looking_good = False
                if len( synced_names ) > 0:
                    service_part = '{} apply these siblings and are fully synced, but {} still have work to do.'.format( synced_string, unsynced_string )
                else:
                    service_part = '{} apply these siblings but still have sync work to do.'.format( unsynced_string )
            if HG.client_controller.new_options.GetBoolean( 'tag_display_maintenance_during_active' ):
                maintenance_part = 'Siblings are set to sync all the time in the background.'
                if looking_good:
                    changes_part = 'Changes from this dialog should be reflected soon after closing the dialog.'
                else:
                    changes_part = 'It may take some time for changes here to apply everywhere, though.'
            else:
                looking_good = False
                if HG.client_controller.new_options.GetBoolean( 'tag_display_maintenance_during_idle' ):
                    maintenance_part = 'Siblings are set to sync only when you are not using the client.'
                    changes_part = 'It may take some time for changes here to apply.'
                else:
                    maintenance_part = 'Siblings are not set to sync.'
                    changes_part = 'Changes here will not apply unless sync is manually forced to run.'
            s = os.linesep * 2
            status_text = s.join( ( service_part, maintenance_part, changes_part ) )
        self._sync_status_st.setText( status_text )
        # green when everything applies and is synced, amber otherwise
        if looking_good:
            self._sync_status_st.setObjectName( 'HydrusValid' )
        else:
            self._sync_status_st.setObjectName( 'HydrusWarning' )
        self._sync_status_st.style().polish( self._sync_status_st )
        self._count_st.setText( 'Starting with '+HydrusData.ToHumanInt(len(original_statuses_to_pairs[HC.CONTENT_STATUS_CURRENT]))+' pairs.' )
        self._old_input.setEnabled( True )
        self._new_input.setEnabled( True )
        if tags is None:
            self._UpdateListCtrlData()
        else:
            self.EnterOlds( tags )
    original_statuses_to_pairs = HG.client_controller.Read( 'tag_siblings', service_key )
    ( master_service_keys_to_sibling_applicable_service_keys, master_service_keys_to_parent_applicable_service_keys ) = HG.client_controller.Read( 'tag_display_application' )
    # every service that applies this service's siblings to its display tags
    service_keys_we_care_about = { s_k for ( s_k, s_ks ) in master_service_keys_to_sibling_applicable_service_keys.items() if service_key in s_ks }
    service_keys_to_work_to_do = {}
    for s_k in service_keys_we_care_about:
        status = HG.client_controller.Read( 'tag_display_maintenance_status', s_k )
        work_to_do = status[ 'num_siblings_to_sync' ] > 0
        service_keys_to_work_to_do[ s_k ] = work_to_do
    # deep-copy into a mutable working state
    current_statuses_to_pairs = collections.defaultdict( set )
    current_statuses_to_pairs.update( { key : set( value ) for ( key, value ) in original_statuses_to_pairs.items() } )
    QP.CallAfter( qt_code, original_statuses_to_pairs, current_statuses_to_pairs, service_keys_to_work_to_do )
class ReviewTagDisplayMaintenancePanel( ClientGUIScrolledPanels.ReviewPanel ):
def __init__( self, parent ):
    """Build a notebook with one sync-progress page per real tag service, plus explanatory text."""
    ClientGUIScrolledPanels.ReviewPanel.__init__( self, parent )
    self._tag_services_notebook = ClientGUICommon.BetterNotebook( self )
    min_width = ClientGUIFunctions.ConvertTextToPixelWidth( self._tag_services_notebook, 100 )
    self._tag_services_notebook.setMinimumWidth( min_width )
    #
    services = list( HG.client_controller.services_manager.GetServices( HC.REAL_TAG_SERVICES ) )
    # NOTE(review): assumes at least one real tag service exists; services[0] raises IndexError otherwise
    select_service_key = services[0].GetServiceKey()
    for service in services:
        service_key = service.GetServiceKey()
        name = service.GetName()
        page = self._Panel( self._tag_services_notebook, service_key )
        self._tag_services_notebook.addTab( page, name )
        if service_key == select_service_key:
            self._tag_services_notebook.setCurrentWidget( page )
    #
    vbox = QP.VBoxLayout()
    message = 'Figuring out how tags should appear according to sibling and parent application rules takes time. When you set new rules, the changes do not happen immediately--the client catches up in the background. You can review current progress and force faster sync here.'
    self._message = ClientGUICommon.BetterStaticText( self, label = message )
    self._message.setWordWrap( True )
    self._sync_status = ClientGUICommon.BetterStaticText( self )
    self._sync_status.setWordWrap( True )
    self._UpdateStatusText()
    QP.AddToLayout( vbox, self._message, CC.FLAGS_EXPAND_PERPENDICULAR )
    QP.AddToLayout( vbox, self._sync_status, CC.FLAGS_EXPAND_PERPENDICULAR )
    QP.AddToLayout( vbox, self._tag_services_notebook, CC.FLAGS_EXPAND_BOTH_WAYS )
    self.widget().setLayout( vbox )
    # keep the banner in sync when maintenance options change
    HG.client_controller.sub( self, '_UpdateStatusText', 'notify_new_menu_option' )
def _UpdateStatusText( self ):
if HG.client_controller.new_options.GetBoolean( 'tag_display_maintenance_during_active' ):
self._sync_status.setText( 'Siblings and parents are set to sync all the time. If there is work to do here, it should be cleared out in real time as you watch.' )
self._sync_status.setObjectName( 'HydrusValid' )
else:
if HG.client_controller.new_options.GetBoolean( 'tag_display_maintenance_during_idle' ):
self._sync_status.setText( 'Siblings and parents are only set to sync during idle time. If there is work to do here, it should be cleared out when you are not using the client.' )
else:
self._sync_status.setText( 'Siblings and parents are not set to sync in the background at any time. If there is work to do here, you can force it now by clicking \'work now!\' button.' )
self._sync_status.setObjectName( 'HydrusWarning' )
self._sync_status.style().polish( self._sync_status )
class _Panel( QW.QWidget ):
def __init__( self, parent, service_key ):
QW.QWidget.__init__( self, parent )
self._service_key = service_key
self._siblings_and_parents_st = ClientGUICommon.BetterStaticText( self )
self._progress = ClientGUICommon.TextAndGauge( self )
self._refresh_button = ClientGUICommon.BetterBitmapButton( self, CC.global_pixmaps().refresh, self._StartRefresh )
self._go_faster_button = ClientGUICommon.BetterButton( self, 'work hard now!', self._SyncFaster )
button_hbox = QP.HBoxLayout()
QP.AddToLayout( button_hbox, self._refresh_button, CC.FLAGS_CENTER )
QP.AddToLayout( button_hbox, self._go_faster_button, CC.FLAGS_CENTER )
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, self._siblings_and_parents_st, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, self._progress, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, button_hbox, CC.FLAGS_ON_RIGHT )
vbox.addStretch( 1 )
self.setLayout( vbox )
self._refresh_values_updater = self._InitialiseRefreshValuesUpdater()
HG.client_controller.sub( self, 'NotifyRefresh', 'notify_new_tag_display_sync_status' )
HG.client_controller.sub( self, '_StartRefresh', 'notify_new_tag_display_application' )
self._StartRefresh()
def _InitialiseRefreshValuesUpdater( self ):
service_key = self._service_key
def loading_callable():
self._progress.SetText( 'refreshing\u2026' )
self._refresh_button.setEnabled( False )
# keep button available to slow down
running_fast_and_button_is_slow = HG.client_controller.tag_display_maintenance_manager.CurrentlyGoingFaster( self._service_key ) and 'slow' in self._go_faster_button.text()
if not running_fast_and_button_is_slow:
self._go_faster_button.setEnabled( False )
def work_callable():
status = HG.client_controller.Read( 'tag_display_maintenance_status', service_key )
time.sleep( 0.1 ) # for user feedback more than anything
return status
def publish_callable( result ):
status = result
num_siblings_to_sync = status[ 'num_siblings_to_sync' ]
num_parents_to_sync = status[ 'num_parents_to_sync' ]
num_items_to_regen = num_siblings_to_sync + num_parents_to_sync
if num_items_to_regen == 0:
message = 'All synced!'
elif num_parents_to_sync == 0:
message = '{} siblings to sync.'.format( HydrusData.ToHumanInt( num_siblings_to_sync ) )
elif num_siblings_to_sync == 0:
message = '{} parents to sync.'.format( HydrusData.ToHumanInt( num_parents_to_sync ) )
else:
message = '{} siblings and {} parents to sync.'.format( HydrusData.ToHumanInt( num_siblings_to_sync ), HydrusData.ToHumanInt( num_parents_to_sync ) )
self._siblings_and_parents_st.setText( message )
#
num_actual_rows = status[ 'num_actual_rows' ]
num_ideal_rows = status[ 'num_ideal_rows' ]
if num_items_to_regen == 0:
if num_ideal_rows == 0:
message = 'No siblings/parents applying to this service.'
else:
message = '{} rules, all synced!'.format( HydrusData.ToHumanInt( num_ideal_rows ) )
value = 1
range = 1
sync_possible = False
else:
value = None
range = None
if num_ideal_rows == 0:
message = 'Removing all siblings/parents, {} rules remaining.'.format( HydrusData.ToHumanInt( num_actual_rows ) )
else:
message = '{} rules applied now, moving to {}.'.format( HydrusData.ToHumanInt( num_actual_rows ), HydrusData.ToHumanInt( num_ideal_rows ) )
if num_actual_rows <= num_ideal_rows:
value = num_actual_rows
range = num_ideal_rows
sync_possible = True
self._progress.SetValue( message, value, range )
self._refresh_button.setEnabled( True )
self._go_faster_button.setVisible( sync_possible )
self._go_faster_button.setEnabled( sync_possible )
if HG.client_controller.tag_display_maintenance_manager.CurrentlyGoingFaster( self._service_key ):
self._go_faster_button.setText( 'slow down!' )
else:
if not HG.client_controller.new_options.GetBoolean( 'tag_display_maintenance_during_active' ):
self._go_faster_button.setText( 'work now!' )
else:
self._go_faster_button.setText( 'work hard now!' )
return ClientGUIAsync.AsyncQtUpdater( self, loading_callable, work_callable, publish_callable )
def _StartRefresh( self ):
self._refresh_values_updater.update()
def _SyncFaster( self ):
HG.client_controller.tag_display_maintenance_manager.FlipSyncFaster( self._service_key )
self._StartRefresh()
def NotifyRefresh( self, service_key ):
if service_key == self._service_key:
self._StartRefresh()
class TagFilterButton( ClientGUICommon.BetterButton ):
    """A button that summarises a tag filter on its face and opens an edit dialog when clicked."""
    
    def __init__( self, parent, message, tag_filter, only_show_blacklist = False, label_prefix = None ):
        
        ClientGUICommon.BetterButton.__init__( self, parent, 'tag filter', self._EditTagFilter )
        
        self._message = message
        self._tag_filter = tag_filter
        self._only_show_blacklist = only_show_blacklist
        self._label_prefix = label_prefix
        
        self._UpdateLabel()
        
    
    def _EditTagFilter( self ):
        
        # blacklist-only callers get a more specific dialog title
        title = 'edit blacklist' if self._only_show_blacklist else 'edit tag filter'
        
        with ClientGUITopLevelWindowsPanels.DialogEdit( self, title ) as dlg:
            
            parser_namespaces = HG.client_controller.network_engine.domain_manager.GetParserNamespaces()
            
            edit_panel = EditTagFilterPanel( dlg, self._tag_filter, only_show_blacklist = self._only_show_blacklist, namespaces = parser_namespaces, message = self._message )
            
            dlg.SetPanel( edit_panel )
            
            if dlg.exec() == QW.QDialog.Accepted:
                
                self._tag_filter = edit_panel.GetValue()
                
                self._UpdateLabel()
                
            
        
    
    def _UpdateLabel( self ):
        
        if self._only_show_blacklist:
            
            summary = self._tag_filter.ToBlacklistString()
            
        else:
            
            summary = self._tag_filter.ToPermittedString()
            
        
        if self._label_prefix is not None:
            
            summary = self._label_prefix + summary
            
        
        # the face shows an elided version; the full text lives in the tooltip
        self.setText( HydrusText.ElideText( summary, 45 ) )
        self.setToolTip( summary )
        
    
    def GetValue( self ):
        
        return self._tag_filter
        
    
    def SetValue( self, tag_filter ):
        
        self._tag_filter = tag_filter
        
        self._UpdateLabel()
        
    
class TagSummaryGenerator( HydrusSerialisable.SerialisableBase ):
    """Serialisable object that renders a compact one-line summary of a tag list.
    
    Only tags whose namespace appears in the configured namespace rows
    contribute. Each row is ( namespace, prefix, separator ); the per-row texts
    are joined with the main separator, in row order.
    """
    
    SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_TAG_SUMMARY_GENERATOR
    SERIALISABLE_NAME = 'Tag Summary Generator'
    SERIALISABLE_VERSION = 2
    
    def __init__( self, background_colour = None, text_colour = None, namespace_info = None, separator = None, example_tags = None, show = True ):
        
        if background_colour is None:
            
            background_colour = QG.QColor( 223, 227, 230, 255 )
            
        
        if text_colour is None:
            
            text_colour = QG.QColor( 1, 17, 26, 255 )
            
        
        if namespace_info is None:
            
            # default rows: creator, series and title, comma-separated, no prefix
            namespace_info = [ ( 'creator', '', ', ' ), ( 'series', '', ', ' ), ( 'title', '', ', ' ) ]
            
        
        if separator is None:
            
            separator = ' - '
            
        
        if example_tags is None:
            
            example_tags = []
            
        
        self._background_colour = background_colour
        self._text_colour = text_colour
        self._namespace_info = namespace_info
        self._separator = separator
        self._example_tags = list( example_tags )
        self._show = show
        
        self._UpdateNamespaceLookup()
        
    
    def _GetSerialisableInfo( self ):
        
        # QColor is not serialisable, so store both colours as [ r, g, b, a ] lists
        background_colour_rgba = [ self._background_colour.red(), self._background_colour.green(), self._background_colour.blue(), self._background_colour.alpha() ]
        text_colour_rgba = [ self._text_colour.red(), self._text_colour.green(), self._text_colour.blue(), self._text_colour.alpha() ]
        
        return ( background_colour_rgba, text_colour_rgba, self._namespace_info, self._separator, self._example_tags, self._show )
        
    
    def _InitialiseFromSerialisableInfo( self, serialisable_info ):
        
        ( background_rgba, text_rgba, self._namespace_info, self._separator, self._example_tags, self._show ) = serialisable_info
        
        self._background_colour = QG.QColor( *background_rgba )
        self._text_colour = QG.QColor( *text_rgba )
        
        # json round-trips tuples as lists, so convert the rows back
        self._namespace_info = [ tuple( row ) for row in self._namespace_info ]
        
        self._UpdateNamespaceLookup()
        
    
    def _UpdateNamespaceLookup( self ):
        
        # fast membership test used by GenerateSummary
        self._interesting_namespaces = { row[0] for row in self._namespace_info }
        
    
    def _UpdateSerialisableInfo( self, version, old_serialisable_info ):
        
        if version == 1:
            
            ( namespace_info, separator, example_tags ) = old_serialisable_info
            
            # v1 predates colours and the show flag; insert the historical defaults
            background_rgba = ( 223, 227, 230, 255 )
            text_rgba = ( 1, 17, 26, 255 )
            show = True
            
            new_serialisable_info = ( background_rgba, text_rgba, namespace_info, separator, example_tags, show )
            
            return ( 2, new_serialisable_info )
            
        
    
    def GenerateExampleSummary( self ):
        
        return self.GenerateSummary( self._example_tags ) if self._show else 'not showing'
        
    
    def GenerateSummary( self, tags, max_length = None ):
        """Return the summary line for the given tags, optionally truncated to max_length characters."""
        
        if not self._show:
            
            return ''
            
        
        namespaces_to_subtags = collections.defaultdict( list )
        
        for tag in tags:
            
            ( namespace, subtag ) = HydrusTags.SplitTag( tag )
            
            if namespace in self._interesting_namespaces:
                
                namespaces_to_subtags[ namespace ].append( subtag )
                
            
        
        # sort and collapse numeric runs (e.g. page numbers) per namespace
        for ( namespace, unsorted_subtags ) in list( namespaces_to_subtags.items() ):
            
            sorted_subtags = HydrusTags.SortNumericTags( unsorted_subtags )
            sorted_subtags = HydrusTags.CollapseMultipleSortedNumericTagsToMinMax( sorted_subtags )
            
            namespaces_to_subtags[ namespace ] = sorted_subtags
            
        
        namespace_texts = []
        
        for ( namespace, prefix, separator ) in self._namespace_info:
            
            subtags = namespaces_to_subtags[ namespace ]
            
            if len( subtags ) > 0:
                
                namespace_texts.append( prefix + separator.join( subtags ) )
                
            
        
        summary = self._separator.join( namespace_texts )
        
        return summary if max_length is None else summary[:max_length]
        
    
    def GetBackgroundColour( self ):
        
        return self._background_colour
        
    
    def GetTextColour( self ):
        
        return self._text_colour
        
    
    def ToTuple( self ):
        
        return ( self._background_colour, self._text_colour, self._namespace_info, self._separator, self._example_tags, self._show )
        
    
# register the class with the serialisation system so dumps can be reconstructed by type id
HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_TAG_SUMMARY_GENERATOR ] = TagSummaryGenerator
class EditTagSummaryGeneratorPanel( ClientGUIScrolledPanels.EditPanel ):
    """Edit panel for a TagSummaryGenerator: show toggle, colours, namespace rows, separator, and a live example preview."""
    
    def __init__( self, parent: QW.QWidget, tag_summary_generator: TagSummaryGenerator ):
        
        ClientGUIScrolledPanels.EditPanel.__init__( self, parent )
        
        show_panel = ClientGUICommon.StaticBox( self, 'shows' )
        
        self._show = QW.QCheckBox( show_panel )
        
        edit_panel = ClientGUICommon.StaticBox( self, 'edit' )
        
        self._background_colour = ClientGUICommon.AlphaColourControl( edit_panel )
        self._text_colour = ClientGUICommon.AlphaColourControl( edit_panel )
        
        # ( namespace, prefix, separator ) rows, reorderable, with add/edit callbacks
        self._namespaces_listbox = ClientGUIListBoxes.QueueListBox( edit_panel, 8, self._ConvertNamespaceToListBoxString, self._AddNamespaceInfo, self._EditNamespaceInfo )
        
        self._separator = QW.QLineEdit( edit_panel )
        
        example_panel = ClientGUICommon.StaticBox( self, 'example' )
        
        self._example_tags = QW.QPlainTextEdit( example_panel )
        
        self._test_result = QW.QLineEdit( example_panel )
        self._test_result.setReadOnly( True )
        
        #
        
        # populate the controls from the incoming generator
        ( background_colour, text_colour, namespace_info, separator, example_tags, show ) = tag_summary_generator.ToTuple()
        
        self._show.setChecked( show )
        
        self._background_colour.SetValue( background_colour )
        self._text_colour.SetValue( text_colour )
        self._namespaces_listbox.AddDatas( namespace_info )
        self._separator.setText( separator )
        self._example_tags.setPlainText( os.linesep.join( example_tags ) )
        
        self._UpdateTest()
        
        #
        
        rows = []
        
        rows.append( ( 'currently shows (turn off to hide): ', self._show ) )
        
        gridbox = ClientGUICommon.WrapInGrid( show_panel, rows )
        
        show_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
        
        rows = []
        
        rows.append( ( 'background colour: ', self._background_colour ) )
        rows.append( ( 'text colour: ', self._text_colour ) )
        
        gridbox = ClientGUICommon.WrapInGrid( edit_panel, rows )
        
        edit_panel.Add( ClientGUICommon.BetterStaticText( edit_panel, 'The colours only work for the thumbnails right now!' ), CC.FLAGS_EXPAND_PERPENDICULAR )
        edit_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
        edit_panel.Add( self._namespaces_listbox, CC.FLAGS_EXPAND_BOTH_WAYS )
        edit_panel.Add( ClientGUICommon.WrapInText( self._separator, edit_panel, 'separator' ), CC.FLAGS_EXPAND_PERPENDICULAR )
        
        example_panel.Add( ClientGUICommon.BetterStaticText( example_panel, 'Enter some newline-separated tags here to see what your current object would generate.' ), CC.FLAGS_EXPAND_PERPENDICULAR )
        example_panel.Add( self._example_tags, CC.FLAGS_EXPAND_BOTH_WAYS )
        example_panel.Add( self._test_result, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        vbox = QP.VBoxLayout()
        
        QP.AddToLayout( vbox, show_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
        QP.AddToLayout( vbox, edit_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
        QP.AddToLayout( vbox, example_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        self.widget().setLayout( vbox )
        
        #
        
        # keep the example output live as any input changes
        self._show.clicked.connect( self._UpdateTest )
        self._separator.textChanged.connect( self._UpdateTest )
        self._example_tags.textChanged.connect( self._UpdateTest )
        self._namespaces_listbox.listBoxChanged.connect( self._UpdateTest )
        
    
    def _AddNamespaceInfo( self ):
        """Add-callback for the listbox: seed an empty row and hand off to the edit flow."""
        
        namespace = ''
        prefix = ''
        separator = ', '
        
        namespace_info = ( namespace, prefix, separator )
        
        return self._EditNamespaceInfo( namespace_info )
        
    
    def _ConvertNamespaceToListBoxString( self, namespace_info ):
        """Render a ( namespace, prefix, separator ) row as a display string for the listbox."""
        
        ( namespace, prefix, separator ) = namespace_info
        
        if namespace == '':
            
            pretty_namespace = 'unnamespaced'
            
        else:
            
            pretty_namespace = namespace
            
        
        pretty_prefix = prefix
        pretty_separator = separator
        
        return pretty_namespace + ' | prefix: "' + pretty_prefix + '" | separator: "' + pretty_separator + '"'
        
    
    def _EditNamespaceInfo( self, namespace_info ):
        """Walk the user through three text-entry dialogs; raises VetoException if any is cancelled."""
        
        ( namespace, prefix, separator ) = namespace_info
        
        message = 'Edit namespace.'
        
        with ClientGUIDialogs.DialogTextEntry( self, message, namespace, allow_blank = True ) as dlg:
            
            if dlg.exec() == QW.QDialog.Accepted:
                
                namespace = dlg.GetValue()
                
            else:
                
                raise HydrusExceptions.VetoException()
                
            
        
        message = 'Edit prefix.'
        
        with ClientGUIDialogs.DialogTextEntry( self, message, prefix, allow_blank = True ) as dlg:
            
            if dlg.exec() == QW.QDialog.Accepted:
                
                prefix = dlg.GetValue()
                
            else:
                
                raise HydrusExceptions.VetoException()
                
            
        
        message = 'Edit separator.'
        
        with ClientGUIDialogs.DialogTextEntry( self, message, separator, allow_blank = True ) as dlg:
            
            if dlg.exec() == QW.QDialog.Accepted:
                
                separator = dlg.GetValue()
                
                namespace_info = ( namespace, prefix, separator )
                
                return namespace_info
                
            else:
                
                raise HydrusExceptions.VetoException()
                
            
        
    
    def _UpdateTest( self ):
        
        # rebuild a generator from the current control values and preview its output
        tag_summary_generator = self.GetValue()
        
        self._test_result.setText( tag_summary_generator.GenerateExampleSummary() )
        
    
    def GetValue( self ) -> TagSummaryGenerator:
        """Assemble and return a new TagSummaryGenerator from the current control values."""
        
        show = self._show.isChecked()
        
        background_colour = self._background_colour.GetValue()
        text_colour = self._text_colour.GetValue()
        namespace_info = self._namespaces_listbox.GetData()
        separator = self._separator.text()
        example_tags = HydrusTags.CleanTags( HydrusText.DeserialiseNewlinedTexts( self._example_tags.toPlainText() ) )
        
        return TagSummaryGenerator( background_colour, text_colour, namespace_info, separator, example_tags, show )
        
    
class TagSummaryGeneratorButton( ClientGUICommon.BetterButton ):
    """Button whose label previews its TagSummaryGenerator; clicking opens the edit panel."""
    
    def __init__( self, parent: QW.QWidget, tag_summary_generator: TagSummaryGenerator ):
        
        # the example summary doubles as the button's initial label
        ClientGUICommon.BetterButton.__init__( self, parent, tag_summary_generator.GenerateExampleSummary(), self._Edit )
        
        self._tag_summary_generator = tag_summary_generator
        
    
    def _Edit( self ):
        
        with ClientGUITopLevelWindowsPanels.DialogEdit( self, 'edit tag summary' ) as dlg:
            
            edit_panel = EditTagSummaryGeneratorPanel( dlg, self._tag_summary_generator )
            
            dlg.SetPanel( edit_panel )
            
            if dlg.exec() == QW.QDialog.Accepted:
                
                self._tag_summary_generator = edit_panel.GetValue()
                
                # refresh the label so it previews the new summary
                self.setText( self._tag_summary_generator.GenerateExampleSummary() )
                
            
        
    
    def GetValue( self ) -> TagSummaryGenerator:
        
        return self._tag_summary_generator
        
    
| 39.888148 | 356 | 0.521286 | import collections
import itertools
import os
import random
import time
import typing
from qtpy import QtCore as QC
from qtpy import QtWidgets as QW
from qtpy import QtGui as QG
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusData
from hydrus.core import HydrusExceptions
from hydrus.core import HydrusGlobals as HG
from hydrus.core import HydrusSerialisable
from hydrus.core import HydrusTags
from hydrus.core import HydrusText
from hydrus.core.networking import HydrusNetwork
from hydrus.client import ClientApplicationCommand as CAC
from hydrus.client import ClientConstants as CC
from hydrus.client import ClientManagers
from hydrus.client.gui import ClientGUIAsync
from hydrus.client.gui import ClientGUICore as CGC
from hydrus.client.gui import ClientGUIDialogs
from hydrus.client.gui import ClientGUIDialogsQuick
from hydrus.client.gui import ClientGUIFunctions
from hydrus.client.gui import ClientGUIMenus
from hydrus.client.gui import ClientGUIScrolledPanels
from hydrus.client.gui import ClientGUIScrolledPanelsReview
from hydrus.client.gui import ClientGUIShortcuts
from hydrus.client.gui import ClientGUITagSuggestions
from hydrus.client.gui import ClientGUITopLevelWindowsPanels
from hydrus.client.gui import QtPorting as QP
from hydrus.client.gui.lists import ClientGUIListBoxes
from hydrus.client.gui.lists import ClientGUIListConstants as CGLC
from hydrus.client.gui.lists import ClientGUIListCtrl
from hydrus.client.gui.networking import ClientGUIHydrusNetwork
from hydrus.client.gui.search import ClientGUIACDropdown
from hydrus.client.gui.widgets import ClientGUICommon
from hydrus.client.gui.widgets import ClientGUIControls
from hydrus.client.gui.widgets import ClientGUIMenuButton
from hydrus.client.media import ClientMedia
from hydrus.client.metadata import ClientTags
from hydrus.client.metadata import ClientTagsHandling
class EditTagAutocompleteOptionsPanel( ClientGUIScrolledPanels.EditPanel ):
    """Edit panel for one service's TagAutocompleteOptions: search domains, wildcard permissions, and fetch behaviour."""
    
    def __init__( self, parent: QW.QWidget, tag_autocomplete_options: ClientTagsHandling.TagAutocompleteOptions ):
        
        ClientGUIScrolledPanels.EditPanel.__init__( self, parent )
        
        # kept so GetValue can reconstruct an options object for the same service key
        self._original_tag_autocomplete_options = tag_autocomplete_options
        
        services_manager = HG.client_controller.services_manager
        
        all_real_tag_service_keys = services_manager.GetServiceKeys( HC.REAL_TAG_SERVICES )
        all_real_file_service_keys = services_manager.GetServiceKeys( ( HC.LOCAL_FILE_DOMAIN, HC.FILE_REPOSITORY ) )
        
        self._write_autocomplete_tag_domain = ClientGUICommon.BetterChoice( self )
        self._write_autocomplete_tag_domain.setToolTip( 'A manage tags autocomplete will start with this domain. Typically only useful with this service or "all known tags".' )
        
        # 'all known tags' first, then every real tag service
        self._write_autocomplete_tag_domain.addItem( services_manager.GetName( CC.COMBINED_TAG_SERVICE_KEY ), CC.COMBINED_TAG_SERVICE_KEY )
        
        for service_key in all_real_tag_service_keys:
            
            self._write_autocomplete_tag_domain.addItem( services_manager.GetName( service_key ), service_key )
            
        
        self._override_write_autocomplete_file_domain = QW.QCheckBox( self )
        self._override_write_autocomplete_file_domain.setToolTip( 'If set, a manage tags dialog autocomplete will start with a different file domain than the one that launched the dialog.' )
        
        self._write_autocomplete_file_domain = ClientGUICommon.BetterChoice( self )
        self._write_autocomplete_file_domain.setToolTip( 'A manage tags autocomplete will start with this domain. Normally only useful for "all known files" or "my files".' )
        
        # 'all known files' first, then every real file service
        self._write_autocomplete_file_domain.addItem( services_manager.GetName( CC.COMBINED_FILE_SERVICE_KEY ), CC.COMBINED_FILE_SERVICE_KEY )
        
        for service_key in all_real_file_service_keys:
            
            self._write_autocomplete_file_domain.addItem( services_manager.GetName( service_key ), service_key )
            
        
        self._search_namespaces_into_full_tags = QW.QCheckBox( self )
        self._search_namespaces_into_full_tags.setToolTip( 'If on, a search for "ser" will return all "series:" results such as "series:metrod". On large tag services, these searches are extremely slow.' )
        
        self._namespace_bare_fetch_all_allowed = QW.QCheckBox( self )
        self._namespace_bare_fetch_all_allowed.setToolTip( 'If on, a search for "series:" will return all "series:" results. On large tag services, these searches are extremely slow.' )
        
        self._namespace_fetch_all_allowed = QW.QCheckBox( self )
        self._namespace_fetch_all_allowed.setToolTip( 'If on, a search for "series:*" will return all "series:" results. On large tag services, these searches are extremely slow.' )
        
        self._fetch_all_allowed = QW.QCheckBox( self )
        self._fetch_all_allowed.setToolTip( 'If on, a search for "*" will return all tags. On large tag services, these searches are extremely slow.' )
        
        self._fetch_results_automatically = QW.QCheckBox( self )
        self._fetch_results_automatically.setToolTip( 'If on, results will load as you type. If off, you will have to hit a shortcut (default Ctrl+Space) to load results.' )
        
        self._exact_match_character_threshold = ClientGUICommon.NoneableSpinCtrl( self, none_phrase = 'always autocomplete (only appropriate for small tag services)', min = 1, max = 256, unit = 'characters' )
        self._exact_match_character_threshold.setToolTip( 'When the search text has <= this many characters, autocomplete will not occur and you will only get results that exactly match the input. Increasing this value makes autocomplete snappier but reduces the number of results.' )
        
        # populate from the incoming options object
        self._write_autocomplete_tag_domain.SetValue( tag_autocomplete_options.GetWriteAutocompleteTagDomain() )
        self._override_write_autocomplete_file_domain.setChecked( tag_autocomplete_options.OverridesWriteAutocompleteFileDomain() )
        self._write_autocomplete_file_domain.SetValue( tag_autocomplete_options.GetWriteAutocompleteFileDomain() )
        self._search_namespaces_into_full_tags.setChecked( tag_autocomplete_options.SearchNamespacesIntoFullTags() )
        self._namespace_bare_fetch_all_allowed.setChecked( tag_autocomplete_options.NamespaceBareFetchAllAllowed() )
        self._namespace_fetch_all_allowed.setChecked( tag_autocomplete_options.NamespaceFetchAllAllowed() )
        self._fetch_all_allowed.setChecked( tag_autocomplete_options.FetchAllAllowed() )
        self._fetch_results_automatically.setChecked( tag_autocomplete_options.FetchResultsAutomatically() )
        self._exact_match_character_threshold.SetValue( tag_autocomplete_options.GetExactMatchCharacterThreshold() )
        
        rows = []
        
        rows.append( ( 'Fetch results as you type: ', self._fetch_results_automatically ) )
        rows.append( ( 'Do-not-autocomplete character threshold: ', self._exact_match_character_threshold ) )
        
        # domain overrides do not apply to the 'all known tags' pseudo-service, so hide them there
        if tag_autocomplete_options.GetServiceKey() == CC.COMBINED_TAG_SERVICE_KEY:
            
            self._write_autocomplete_tag_domain.setVisible( False )
            self._override_write_autocomplete_file_domain.setVisible( False )
            self._write_autocomplete_file_domain.setVisible( False )
            
        else:
            
            rows.append( ( 'Override default autocomplete file domain in _manage tags_: ', self._override_write_autocomplete_file_domain ) )
            rows.append( ( 'Default autocomplete file domain in _manage tags_: ', self._write_autocomplete_file_domain ) )
            rows.append( ( 'Default autocomplete tag domain in _manage tags_: ', self._write_autocomplete_tag_domain ) )
            
        
        rows.append( ( 'Search namespaces with normal input: ', self._search_namespaces_into_full_tags ) )
        rows.append( ( 'Allow "namespace:": ', self._namespace_bare_fetch_all_allowed ) )
        rows.append( ( 'Allow "namespace:*": ', self._namespace_fetch_all_allowed ) )
        rows.append( ( 'Allow "*": ', self._fetch_all_allowed ) )
        
        gridbox = ClientGUICommon.WrapInGrid( self, rows )
        
        vbox = QP.VBoxLayout()
        
        label = 'The settings that permit searching namespaces and expansive "*" queries can be very expensive on a large client and may cause problems!'
        
        st = ClientGUICommon.BetterStaticText( self, label = label )
        
        QP.AddToLayout( vbox, st, CC.FLAGS_EXPAND_PERPENDICULAR )
        QP.AddToLayout( vbox, gridbox, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        vbox.addStretch( 1 )
        
        self.widget().setLayout( vbox )
        
        self._UpdateControls()
        
        # keep the dependent checkboxes' enabled state consistent as the user toggles
        self._override_write_autocomplete_file_domain.stateChanged.connect( self._UpdateControls )
        self._search_namespaces_into_full_tags.stateChanged.connect( self._UpdateControls )
        self._namespace_bare_fetch_all_allowed.stateChanged.connect( self._UpdateControls )
        
    
    def _UpdateControls( self ):
        """Enable/disable dependent controls; a broader permission implies (and force-checks) the narrower ones."""
        
        self._write_autocomplete_file_domain.setEnabled( self._override_write_autocomplete_file_domain.isChecked() )
        
        if self._search_namespaces_into_full_tags.isChecked():
            
            self._namespace_bare_fetch_all_allowed.setEnabled( False )
            self._namespace_fetch_all_allowed.setEnabled( False )
            
        else:
            
            self._namespace_bare_fetch_all_allowed.setEnabled( True )
            
            if self._namespace_bare_fetch_all_allowed.isChecked():
                
                self._namespace_fetch_all_allowed.setEnabled( False )
                
            else:
                
                self._namespace_fetch_all_allowed.setEnabled( True )
                
            
        
        for c in ( self._namespace_bare_fetch_all_allowed, self._namespace_fetch_all_allowed ):
            
            # a disabled checkbox here means it is implied by a broader one, so show it as checked;
            # signals are blocked so the forced check does not recurse into this handler
            if not c.isEnabled():
                
                c.blockSignals( True )
                
                c.setChecked( True )
                
                c.blockSignals( False )
                
            
        
    
    def GetValue( self ):
        """Build and return a fresh TagAutocompleteOptions for the original service from the current controls."""
        
        tag_autocomplete_options = ClientTagsHandling.TagAutocompleteOptions( self._original_tag_autocomplete_options.GetServiceKey() )
        
        write_autocomplete_tag_domain = self._write_autocomplete_tag_domain.GetValue()
        override_write_autocomplete_file_domain = self._override_write_autocomplete_file_domain.isChecked()
        write_autocomplete_file_domain = self._write_autocomplete_file_domain.GetValue()
        search_namespaces_into_full_tags = self._search_namespaces_into_full_tags.isChecked()
        namespace_bare_fetch_all_allowed = self._namespace_bare_fetch_all_allowed.isChecked()
        namespace_fetch_all_allowed = self._namespace_fetch_all_allowed.isChecked()
        fetch_all_allowed = self._fetch_all_allowed.isChecked()
        
        tag_autocomplete_options.SetTuple(
            write_autocomplete_tag_domain,
            override_write_autocomplete_file_domain,
            write_autocomplete_file_domain,
            search_namespaces_into_full_tags,
            namespace_bare_fetch_all_allowed,
            namespace_fetch_all_allowed,
            fetch_all_allowed
        )
        
        tag_autocomplete_options.SetFetchResultsAutomatically( self._fetch_results_automatically.isChecked() )
        tag_autocomplete_options.SetExactMatchCharacterThreshold( self._exact_match_character_threshold.GetValue() )
        
        return tag_autocomplete_options
        
    
class EditTagDisplayApplication( ClientGUIScrolledPanels.EditPanel ):
    """Edit which services' siblings and parents are applied to each tag service.
    
    One notebook tab per real tag service; each tab's _Panel holds two ordered
    lists of applicable service keys (siblings, parents).
    """
    
    def __init__( self, parent, master_service_keys_to_sibling_applicable_service_keys, master_service_keys_to_parent_applicable_service_keys ):
        
        # wrap in defaultdicts so services with no explicit mapping yield an empty list
        master_service_keys_to_sibling_applicable_service_keys = collections.defaultdict( list, master_service_keys_to_sibling_applicable_service_keys )
        master_service_keys_to_parent_applicable_service_keys = collections.defaultdict( list, master_service_keys_to_parent_applicable_service_keys )
        
        ClientGUIScrolledPanels.EditPanel.__init__( self, parent )
        
        self._tag_services_notebook = ClientGUICommon.BetterNotebook( self )
        
        # size from a character count so the minimum width scales with the user's font
        min_width = ClientGUIFunctions.ConvertTextToPixelWidth( self._tag_services_notebook, 100 )
        
        self._tag_services_notebook.setMinimumWidth( min_width )
        
        services = list( HG.client_controller.services_manager.GetServices( HC.REAL_TAG_SERVICES ) )
        
        # open on the first service's page
        select_service_key = services[0].GetServiceKey()
        
        for service in services:
            
            master_service_key = service.GetServiceKey()
            name = service.GetName()
            
            sibling_applicable_service_keys = master_service_keys_to_sibling_applicable_service_keys[ master_service_key ]
            parent_applicable_service_keys = master_service_keys_to_parent_applicable_service_keys[ master_service_key ]
            
            page = self._Panel( self._tag_services_notebook, master_service_key, sibling_applicable_service_keys, parent_applicable_service_keys )
            
            select = master_service_key == select_service_key
            
            self._tag_services_notebook.addTab( page, name )
            
            if select:
                
                self._tag_services_notebook.setCurrentWidget( page )
                
            
        
        vbox = QP.VBoxLayout()
        
        message = 'While a tag service normally applies its own siblings and parents to itself, it does not have to. If you want a different service\'s siblings (e.g. putting the PTR\'s siblings on your "my tags"), or multiple services\', then set it here. You can also apply no siblings or parents at all.'
        message += os.linesep * 2
        message += 'If there are conflicts, the services at the top of the list have precedence. Parents are collapsed by sibling rules before they are applied.'
        
        self._message = ClientGUICommon.BetterStaticText( self, label = message )
        self._message.setWordWrap( True )
        
        self._sync_status = ClientGUICommon.BetterStaticText( self )
        self._sync_status.setWordWrap( True )
        
        # tell the user when any changes made here will actually take effect
        if HG.client_controller.new_options.GetBoolean( 'tag_display_maintenance_during_active' ):
            
            self._sync_status.setText( 'Siblings and parents are set to sync all the time. Changes will start applying as soon as you ok this dialog.' )
            
            self._sync_status.setObjectName( 'HydrusValid' )
            
        else:
            
            if HG.client_controller.new_options.GetBoolean( 'tag_display_maintenance_during_idle' ):
                
                self._sync_status.setText( 'Siblings and parents are only set to sync during idle time. Changes here will only start to apply when you are not using the client.' )
                
            else:
                
                self._sync_status.setText( 'Siblings and parents are not set to sync in the background at any time. If there is sync work to do, you will have to force it to run using the \'review\' window under _tags->siblings and parents sync_.' )
                
            
            self._sync_status.setObjectName( 'HydrusWarning' )
            
        
        # re-polish so the stylesheet rule for the new object name takes effect
        self._sync_status.style().polish( self._sync_status )
        
        QP.AddToLayout( vbox, self._message, CC.FLAGS_EXPAND_PERPENDICULAR )
        QP.AddToLayout( vbox, self._sync_status, CC.FLAGS_EXPAND_PERPENDICULAR )
        QP.AddToLayout( vbox, self._tag_services_notebook, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        self.widget().setLayout( vbox )
        
    
    def GetValue( self ):
        """Collect every tab's ( master key, sibling keys, parent keys ) into two mappings."""
        
        master_service_keys_to_sibling_applicable_service_keys = collections.defaultdict( list )
        master_service_keys_to_parent_applicable_service_keys = collections.defaultdict( list )
        
        for page in self._tag_services_notebook.GetPages():
            
            ( master_service_key, sibling_applicable_service_keys, parent_applicable_service_keys ) = page.GetValue()
            
            master_service_keys_to_sibling_applicable_service_keys[ master_service_key ] = sibling_applicable_service_keys
            master_service_keys_to_parent_applicable_service_keys[ master_service_key ] = parent_applicable_service_keys
            
        
        return ( master_service_keys_to_sibling_applicable_service_keys, master_service_keys_to_parent_applicable_service_keys )
        
    
    class _Panel( QW.QWidget ):
        """Per-service tab: ordered lists of services whose siblings/parents apply to this master service."""
        
        def __init__( self, parent: QW.QWidget, master_service_key: bytes, sibling_applicable_service_keys: typing.Sequence[ bytes ], parent_applicable_service_keys: typing.Sequence[ bytes ] ):
            
            QW.QWidget.__init__( self, parent )
            
            self._master_service_key = master_service_key
            
            #
            
            self._sibling_box = ClientGUICommon.StaticBox( self, 'sibling application' )
            
            #
            
            self._sibling_service_keys_listbox = ClientGUIListBoxes.QueueListBox( self._sibling_box, 4, HG.client_controller.services_manager.GetName, add_callable = self._AddSibling )
            
            #
            
            self._sibling_service_keys_listbox.AddDatas( sibling_applicable_service_keys )
            
            #
            
            self._parent_box = ClientGUICommon.StaticBox( self, 'parent application' )
            
            #
            
            # NOTE(review): constructed with the sibling box as Qt parent, but Add() below puts it
            # in the parent box's layout, which reparents it -- presumably a copy-paste slip, no visible effect
            self._parent_service_keys_listbox = ClientGUIListBoxes.QueueListBox( self._sibling_box, 4, HG.client_controller.services_manager.GetName, add_callable = self._AddParent )
            
            #
            
            self._parent_service_keys_listbox.AddDatas( parent_applicable_service_keys )
            
            #
            
            self._sibling_box.Add( self._sibling_service_keys_listbox, CC.FLAGS_EXPAND_BOTH_WAYS )
            self._parent_box.Add( self._parent_service_keys_listbox, CC.FLAGS_EXPAND_BOTH_WAYS )
            
            #
            
            vbox = QP.VBoxLayout()
            
            QP.AddToLayout( vbox, self._sibling_box, CC.FLAGS_EXPAND_BOTH_WAYS )
            QP.AddToLayout( vbox, self._parent_box, CC.FLAGS_EXPAND_BOTH_WAYS )
            
            self.setLayout( vbox )
            
        
        def _AddParent( self ):
            
            current_service_keys = self._parent_service_keys_listbox.GetData()
            
            return self._AddService( current_service_keys )
            
        
        def _AddService( self, current_service_keys ):
            """Prompt for a tag service not already in the list; raises VetoException on cancel or none available."""
            
            allowed_services = HG.client_controller.services_manager.GetServices( HC.REAL_TAG_SERVICES )
            
            allowed_services = [ service for service in allowed_services if service.GetServiceKey() not in current_service_keys ]
            
            if len( allowed_services ) == 0:
                
                QW.QMessageBox.information( self, 'Information', 'You have all the current tag services applied to this service.' )
                
                raise HydrusExceptions.VetoException()
                
            
            choice_tuples = [ ( service.GetName(), service.GetServiceKey(), service.GetName() ) for service in allowed_services ]
            
            try:
                
                service_key = ClientGUIDialogsQuick.SelectFromListButtons( self, 'Which service?', choice_tuples )
                
                return service_key
                
            except HydrusExceptions.CancelledException:
                
                raise HydrusExceptions.VetoException()
                
            
        
        def _AddSibling( self ):
            
            current_service_keys = self._sibling_service_keys_listbox.GetData()
            
            return self._AddService( current_service_keys )
            
        
        def GetValue( self ):
            
            return ( self._master_service_key, self._sibling_service_keys_listbox.GetData(), self._parent_service_keys_listbox.GetData() )
            
        
    
class EditTagDisplayManagerPanel( ClientGUIScrolledPanels.EditPanel ):
    """Edit panel for the client's TagDisplayManager, one notebook page per tag service."""
    
    def __init__( self, parent, tag_display_manager: ClientTagsHandling.TagDisplayManager ):
        
        ClientGUIScrolledPanels.EditPanel.__init__( self, parent )
        
        self._original_tag_display_manager = tag_display_manager
        
        self._tag_services = ClientGUICommon.BetterNotebook( self )
        
        self._tag_services.setMinimumWidth( ClientGUIFunctions.ConvertTextToPixelWidth( self._tag_services, 100 ) )
        
        # one page per service; 'all known tags' becomes the current page
        
        for service in HG.client_controller.services_manager.GetServices( ( HC.COMBINED_TAG, HC.LOCAL_TAG, HC.TAG_REPOSITORY ) ):
            
            service_key = service.GetServiceKey()
            
            page = self._Panel( self._tag_services, self._original_tag_display_manager, service_key )
            
            self._tag_services.addTab( page, service.GetName() )
            
            if service_key == CC.COMBINED_TAG_SERVICE_KEY:
                
                self._tag_services.setCurrentWidget( page )
                
            
        
        panel_vbox = QP.VBoxLayout()
        
        QP.AddToLayout( panel_vbox, self._tag_services, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        self.widget().setLayout( panel_vbox )
        
    
    def GetValue( self ):
        """Build a fresh TagDisplayManager from the per-service pages."""
        
        tag_display_manager = self._original_tag_display_manager.Duplicate()
        
        tag_display_manager.ClearTagDisplayOptions()
        
        for page in self._tag_services.GetPages():
            
            ( service_key, tag_display_types_to_tag_filters, tag_autocomplete_options ) = page.GetValue()
            
            for ( tag_display_type, tag_filter ) in tag_display_types_to_tag_filters.items():
                
                tag_display_manager.SetTagFilter( tag_display_type, service_key, tag_filter )
                
            
            tag_display_manager.SetTagAutocompleteOptions( tag_autocomplete_options )
            
        
        return tag_display_manager
class _Panel( QW.QWidget ):
    """One tag service's display filters and autocomplete options."""
    
    def __init__( self, parent: QW.QWidget, tag_display_manager: ClientTagsHandling.TagDisplayManager, service_key: bytes ):
        
        QW.QWidget.__init__( self, parent )
        
        self._service_key = service_key
        
        single_tag_filter = tag_display_manager.GetTagFilter( ClientTags.TAG_DISPLAY_SINGLE_MEDIA, service_key )
        selection_tag_filter = tag_display_manager.GetTagFilter( ClientTags.TAG_DISPLAY_SELECTION_LIST, service_key )
        tag_autocomplete_options = tag_display_manager.GetTagAutocompleteOptions( service_key )
        
        # display filters
        
        self._display_box = ClientGUICommon.StaticBox( self, 'display' )
        
        single_message = 'This filters which tags will show on \'single\' file views such as the media viewer and thumbnail banners.'
        
        self._single_tag_filter_button = TagFilterButton( self._display_box, single_message, single_tag_filter, label_prefix = 'tags shown: ' )
        
        selection_message = 'This filters which tags will show on \'selection\' file views such as the \'selection tags\' list on regular search pages.'
        
        self._selection_tag_filter_button = TagFilterButton( self._display_box, selection_message, selection_tag_filter, label_prefix = 'tags shown: ' )
        
        # autocomplete options
        
        self._tao_box = ClientGUICommon.StaticBox( self, 'autocomplete' )
        
        self._tag_autocomplete_options_panel = EditTagAutocompleteOptionsPanel( self._tao_box, tag_autocomplete_options )
        
        #
        
        rows = [
            ( 'Tag filter for single file views: ', self._single_tag_filter_button ),
            ( 'Tag filter for multiple file views: ', self._selection_tag_filter_button )
        ]
        
        self._display_box.Add( ClientGUICommon.WrapInGrid( self._display_box, rows ), CC.FLAGS_EXPAND_PERPENDICULAR )
        
        self._tao_box.Add( self._tag_autocomplete_options_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        #
        
        if self._service_key == CC.COMBINED_TAG_SERVICE_KEY:
            
            explanation = 'These options apply to all tag services, or to where the tag domain is "all known tags".'
            explanation += os.linesep * 2
            explanation += 'This tag domain is the union of all other services, so it can be more computationally expensive. You most often see it on new search pages.'
            
        else:
            
            explanation = 'This is just one tag service. You most often search a specific tag service in the manage tags dialog.'
            
        
        explanation_st = ClientGUICommon.BetterStaticText( self, explanation )
        explanation_st.setWordWrap( True )
        
        vbox = QP.VBoxLayout()
        
        QP.AddToLayout( vbox, explanation_st, CC.FLAGS_EXPAND_PERPENDICULAR )
        QP.AddToLayout( vbox, self._display_box, CC.FLAGS_EXPAND_PERPENDICULAR )
        QP.AddToLayout( vbox, self._tao_box, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        vbox.addStretch( 1 )
        
        self.setLayout( vbox )
        
    
    def GetValue( self ):
        """Return ( service_key, { display type : tag filter }, tag autocomplete options )."""
        
        tag_display_types_to_tag_filters = {
            ClientTags.TAG_DISPLAY_SINGLE_MEDIA : self._single_tag_filter_button.GetValue(),
            ClientTags.TAG_DISPLAY_SELECTION_LIST : self._selection_tag_filter_button.GetValue()
        }
        
        return ( self._service_key, tag_display_types_to_tag_filters, self._tag_autocomplete_options_panel.GetValue() )
class EditTagFilterPanel( ClientGUIScrolledPanels.EditPanel ):
TEST_RESULT_DEFAULT = 'Enter a tag here to test if it passes the current filter:'
TEST_RESULT_BLACKLIST_DEFAULT = 'Enter a tag here to test if it passes the blacklist (siblings tested, unnamespaced rules match namespaced tags):'
def __init__( self, parent, tag_filter, only_show_blacklist = False, namespaces = None, message = None ):
    """Build the tag filter editor.
    
    :param tag_filter: the tag filter object the panel starts from (passed to SetValue at the end)
    :param only_show_blacklist: if True, only the blacklist tab is added to the notebook
    :param namespaces: namespaces offered as checkboxes on the simple tabs
    :param message: optional explanatory text shown at the top of the panel
    """
    
    ClientGUIScrolledPanels.EditPanel.__init__( self, parent )
    
    self._only_show_blacklist = only_show_blacklist
    self._namespaces = namespaces
    
    # canonical replacements for pure-wildcard input, used by _CleanTagSliceInput
    self._wildcard_replacements = {}
    
    self._wildcard_replacements[ '*' ] = ''
    self._wildcard_replacements[ '*:' ] = ':'
    self._wildcard_replacements[ '*:*' ] = ':'
    
    #
    
    help_button = ClientGUICommon.BetterBitmapButton( self, CC.global_pixmaps().help, self._ShowHelp )
    
    help_hbox = ClientGUICommon.WrapInText( help_button, self, 'help for this panel -->', QG.QColor( 0, 0, 255 ) )
    
    # favourite filter management buttons
    
    self._import_favourite = ClientGUICommon.BetterButton( self, 'import', self._ImportFavourite )
    self._export_favourite = ClientGUICommon.BetterButton( self, 'export', self._ExportFavourite )
    self._load_favourite = ClientGUICommon.BetterButton( self, 'load', self._LoadFavourite )
    self._save_favourite = ClientGUICommon.BetterButton( self, 'save', self._SaveFavourite )
    self._delete_favourite = ClientGUICommon.BetterButton( self, 'delete', self._DeleteFavourite )
    
    # in blacklist-only mode, advanced users get a button to reveal the hidden tabs
    
    self._show_all_panels_button = ClientGUICommon.BetterButton( self, 'show other panels', self._ShowAllPanels )
    self._show_all_panels_button.setToolTip( 'This shows the whitelist and advanced panels, in case you want to craft a clever blacklist with \'except\' rules.' )
    
    show_the_button = self._only_show_blacklist and HG.client_controller.new_options.GetBoolean( 'advanced_mode' )
    
    self._show_all_panels_button.setVisible( show_the_button )
    
    #
    
    self._notebook = ClientGUICommon.BetterNotebook( self )
    
    # _InitAdvancedPanel creates self._advanced_blacklist/_advanced_whitelist,
    # which the simple tabs and the signal hookups below rely on
    
    self._advanced_panel = self._InitAdvancedPanel()
    
    self._whitelist_panel = self._InitWhitelistPanel()
    self._blacklist_panel = self._InitBlacklistPanel()
    
    #
    
    if self._only_show_blacklist:
        
        self._whitelist_panel.setVisible( False )
        self._notebook.addTab( self._blacklist_panel, 'blacklist' )
        self._advanced_panel.setVisible( False )
        
    else:
        
        self._notebook.addTab( self._whitelist_panel, 'whitelist' )
        self._notebook.addTab( self._blacklist_panel, 'blacklist' )
        self._notebook.addTab( self._advanced_panel, 'advanced' )
        
    
    # status lines and the live tag-test box
    
    self._redundant_st = ClientGUICommon.BetterStaticText( self, '', ellipsize_end = True )
    
    self._current_filter_st = ClientGUICommon.BetterStaticText( self, 'currently keeping: ', ellipsize_end = True )
    
    self._test_result_st = ClientGUICommon.BetterStaticText( self, self.TEST_RESULT_DEFAULT )
    self._test_result_st.setAlignment( QC.Qt.AlignVCenter | QC.Qt.AlignRight )
    self._test_result_st.setWordWrap( True )
    
    self._test_input = QW.QPlainTextEdit( self )
    
    # layout
    
    vbox = QP.VBoxLayout()
    
    QP.AddToLayout( vbox, help_hbox, CC.FLAGS_ON_RIGHT )
    
    if message is not None:
        
        st = ClientGUICommon.BetterStaticText( self, message )
        st.setWordWrap( True )
        
        QP.AddToLayout( vbox, st, CC.FLAGS_EXPAND_PERPENDICULAR )
        
    
    hbox = QP.HBoxLayout()
    
    QP.AddToLayout( hbox, self._import_favourite, CC.FLAGS_CENTER_PERPENDICULAR )
    QP.AddToLayout( hbox, self._export_favourite, CC.FLAGS_CENTER_PERPENDICULAR )
    QP.AddToLayout( hbox, self._load_favourite, CC.FLAGS_CENTER_PERPENDICULAR )
    QP.AddToLayout( hbox, self._save_favourite, CC.FLAGS_CENTER_PERPENDICULAR )
    QP.AddToLayout( hbox, self._delete_favourite, CC.FLAGS_CENTER_PERPENDICULAR )
    
    QP.AddToLayout( vbox, hbox, CC.FLAGS_ON_RIGHT )
    QP.AddToLayout( vbox, self._show_all_panels_button, CC.FLAGS_ON_RIGHT )
    QP.AddToLayout( vbox, self._notebook, CC.FLAGS_EXPAND_BOTH_WAYS )
    QP.AddToLayout( vbox, self._redundant_st, CC.FLAGS_EXPAND_PERPENDICULAR )
    QP.AddToLayout( vbox, self._current_filter_st, CC.FLAGS_EXPAND_PERPENDICULAR )
    
    test_text_vbox = QP.VBoxLayout()
    
    QP.AddToLayout( test_text_vbox, self._test_result_st, CC.FLAGS_EXPAND_PERPENDICULAR )
    
    hbox = QP.HBoxLayout()
    
    QP.AddToLayout( hbox, test_text_vbox, CC.FLAGS_CENTER_PERPENDICULAR_EXPAND_DEPTH )
    QP.AddToLayout( hbox, self._test_input, CC.FLAGS_CENTER_PERPENDICULAR_EXPAND_DEPTH )
    
    QP.AddToLayout( vbox, hbox, CC.FLAGS_EXPAND_PERPENDICULAR )
    
    self.widget().setLayout( vbox )
    
    # wire signals, then initialise all tabs from the given filter
    
    self._advanced_blacklist.listBoxChanged.connect( self._UpdateStatus )
    self._advanced_whitelist.listBoxChanged.connect( self._UpdateStatus )
    
    self._simple_whitelist_global_checkboxes.clicked.connect( self.EventSimpleWhitelistGlobalCheck )
    self._simple_whitelist_namespace_checkboxes.clicked.connect( self.EventSimpleWhitelistNamespaceCheck )
    
    self._simple_blacklist_global_checkboxes.clicked.connect( self.EventSimpleBlacklistGlobalCheck )
    self._simple_blacklist_namespace_checkboxes.clicked.connect( self.EventSimpleBlacklistNamespaceCheck )
    
    self._test_input.textChanged.connect( self._UpdateTest )
    
    self.SetValue( tag_filter )
def _AdvancedAddBlacklist( self, tag_slice ):
    """Toggle a tag slice on the advanced blacklist.
    
    Adding a slice already present removes it instead. Otherwise it is
    removed from the whitelist and, with a redundancy warning if a broader
    rule already blocks it, added to the blacklist.
    """
    
    tag_slice = self._CleanTagSliceInput( tag_slice )
    
    if tag_slice in self._advanced_blacklist.GetTagSlices():
        
        # toggle off
        self._advanced_blacklist.RemoveTagSlices( ( tag_slice, ) )
        
    else:
        
        self._advanced_whitelist.RemoveTagSlices( ( tag_slice, ) )
        
        if self._CurrentlyBlocked( tag_slice ):
            
            self._ShowRedundantError( HydrusTags.ConvertTagSliceToString( tag_slice ) + ' is already blocked by a broader rule!' )
            
        
        self._advanced_blacklist.AddTagSlices( ( tag_slice, ) )
        
    
    self._UpdateStatus()
def _AdvancedAddBlacklistButton( self ):
    """'add' button beside the advanced blacklist input box."""
    
    self._AdvancedAddBlacklist( self._advanced_blacklist_input.GetValue() )
def _AdvancedAddBlacklistMultiple( self, tag_slices ):
    """Add (toggle) several tag slices on the blacklist, one at a time."""
    
    for one_slice in tag_slices:
        
        self._AdvancedAddBlacklist( one_slice )
def _AdvancedAddWhitelist( self, tag_slice ):
    """Toggle a tag slice on the advanced whitelist.
    
    Adding a slice already present removes it instead. Otherwise it leaves
    the blacklist and, with a redundancy warning if nothing broader blocks
    it any more, joins the whitelist.
    """
    
    tag_slice = self._CleanTagSliceInput( tag_slice )
    
    if tag_slice in self._advanced_whitelist.GetTagSlices():
        
        # toggle off
        self._advanced_whitelist.RemoveTagSlices( ( tag_slice, ) )
        
    else:
        
        self._advanced_blacklist.RemoveTagSlices( ( tag_slice, ) )
        
        # if removing it from the blacklist already freed it, an explicit
        # exception is redundant--warn, but add it anyway
        if not self._CurrentlyBlocked( tag_slice ) and tag_slice not in ( '', ':' ):
            
            self._ShowRedundantError( HydrusTags.ConvertTagSliceToString( tag_slice ) + ' is already permitted by a broader rule!' )
            
        
        self._advanced_whitelist.AddTagSlices( ( tag_slice, ) )
        
    
    self._UpdateStatus()
def _AdvancedAddWhitelistButton( self ):
    """'add' button beside the advanced whitelist input box."""
    
    self._AdvancedAddWhitelist( self._advanced_whitelist_input.GetValue() )
def _AdvancedAddWhitelistMultiple( self, tag_slices ):
    """Add (toggle) several tag slices on the whitelist, one at a time."""
    
    for one_slice in tag_slices:
        
        self._AdvancedAddWhitelist( one_slice )
def _AdvancedBlacklistEverything( self ):
    """Reset the filter to block everything via the two global rules '' and ':'."""
    
    global_rules = ( '', ':' )
    
    self._advanced_blacklist.SetTagSlices( [] )
    
    self._advanced_whitelist.RemoveTagSlices( global_rules )
    
    self._advanced_blacklist.AddTagSlices( global_rules )
    
    self._UpdateStatus()
def _AdvancedDeleteBlacklist( self ):
    """Remove the selected blacklist tag slices after a yes/no confirmation."""
    
    selected = self._advanced_blacklist.GetSelectedTagSlices()
    
    if len( selected ) == 0:
        
        return
        
    
    if ClientGUIDialogsQuick.GetYesNo( self, 'Remove all selected?' ) == QW.QDialog.Accepted:
        
        self._advanced_blacklist.RemoveTagSlices( selected )
        
        self._UpdateStatus()
def _AdvancedDeleteWhitelist( self ):
    """Remove the selected whitelist tag slices after a yes/no confirmation."""
    
    selected = self._advanced_whitelist.GetSelectedTagSlices()
    
    if len( selected ) == 0:
        
        return
        
    
    if ClientGUIDialogsQuick.GetYesNo( self, 'Remove all selected?' ) == QW.QDialog.Accepted:
        
        self._advanced_whitelist.RemoveTagSlices( selected )
        
        self._UpdateStatus()
def _CleanTagSliceInput( self, tag_slice ):
    """Normalise user input into a canonical tag slice.
    
    Lowercases and strips, collapses runs of '*', maps the pure-wildcard
    forms ('*', '*:', '*:*') to their canonical slices, and rewrites
    'namespace:*' to 'namespace:'.
    """
    
    cleaned = tag_slice.lower().strip()
    
    while '**' in cleaned:
        
        cleaned = cleaned.replace( '**', '*' )
        
    
    cleaned = self._wildcard_replacements.get( cleaned, cleaned )
    
    if ':' in cleaned:
        
        ( namespace, subtag ) = HydrusTags.SplitTag( cleaned )
        
        if subtag == '*':
            
            cleaned = namespace + ':'
            
        
    
    return cleaned
def _CurrentlyBlocked( self, tag_slice ):
    """Return True if the advanced blacklist already covers this tag slice."""
    
    if tag_slice in ( '', ':' ):
        
        # the two global rules are only covered by themselves
        test_slices = { tag_slice }
        
    elif ':' in tag_slice:
        
        if tag_slice.count( ':' ) == 1 and tag_slice.endswith( ':' ):
            
            # a whole namespace: covered by the all-namespaced rule or itself
            test_slices = { ':', tag_slice }
            
        else:
            
            # a namespaced tag: covered by ':', its namespace rule, or itself
            ( ns, st ) = HydrusTags.SplitTag( tag_slice )
            
            test_slices = { ':', ns + ':', tag_slice }
            
        
    else:
        
        # an unnamespaced tag: covered by '' or itself
        test_slices = { '', tag_slice }
        
    
    return len( test_slices.intersection( self._advanced_blacklist.GetTagSlices() ) ) > 0
def _DeleteFavourite( self ):
    """Show a menu of favourite tag filters; picking one deletes it after confirmation."""
    
    def do_it( name ):
        
        names_to_tag_filters = HG.client_controller.new_options.GetFavouriteTagFilters()
        
        if name not in names_to_tag_filters:
            
            return
            
        
        message = 'Delete "{}"?'.format( name )
        
        if ClientGUIDialogsQuick.GetYesNo( self, message ) != QW.QDialog.Accepted:
            
            return
            
        
        del names_to_tag_filters[ name ]
        
        HG.client_controller.new_options.SetFavouriteTagFilters( names_to_tag_filters )
        
    
    menu = QW.QMenu()
    
    names_to_tag_filters = HG.client_controller.new_options.GetFavouriteTagFilters()
    
    if len( names_to_tag_filters ) == 0:
        
        ClientGUIMenus.AppendMenuLabel( menu, 'no favourites set!' )
        
    else:
        
        # only the names matter here; the filters themselves are not needed
        for name in names_to_tag_filters:
            
            ClientGUIMenus.AppendMenuItem( menu, name, 'delete {}'.format( name ), do_it, name )
            
        
    
    CGC.core().PopupMenu( self, menu )
def _ExportFavourite( self ):
    """Show a menu of favourite tag filters; picking one copies its serialised form to the clipboard."""
    
    names_to_tag_filters = HG.client_controller.new_options.GetFavouriteTagFilters()
    
    menu = QW.QMenu()
    
    if len( names_to_tag_filters ) == 0:
        
        ClientGUIMenus.AppendMenuLabel( menu, 'no favourites set!' )
        
    else:
        
        for ( name, tag_filter ) in names_to_tag_filters.items():
            
            # fix: description previously said 'load {}', copy-pasted from
            # _LoadFavourite--this menu exports to the clipboard
            ClientGUIMenus.AppendMenuItem( menu, name, 'export {}'.format( name ), HG.client_controller.pub, 'clipboard', 'text', tag_filter.DumpToString() )
            
        
    
    CGC.core().PopupMenu( self, menu )
def _GetWhiteBlacklistsPossible( self ):
    """Return ( whitelist_possible, blacklist_possible ) for the simple tabs.
    
    A simple whitelist can only represent filters whose blacklist is a
    subset of the global rules '' and ':'; a simple blacklist can only
    represent filters with no whitelist exceptions at all.
    """
    
    whitelist_possible = set( self._advanced_blacklist.GetTagSlices() ) <= { '', ':' }
    
    blacklist_possible = len( self._advanced_whitelist.GetTagSlices() ) == 0
    
    return ( whitelist_possible, blacklist_possible )
def _ImportFavourite( self ):
    """Import a tag filter from the clipboard, name it, save it as a favourite, and load it."""
    
    try:
        
        raw_text = HG.client_controller.GetClipboardText()
        
    except HydrusExceptions.DataMissing as e:
        
        QW.QMessageBox.critical( self, 'Error', str(e) )
        
        return
        
    
    try:
        
        obj = HydrusSerialisable.CreateFromString( raw_text )
        
    except Exception as e:
        
        QW.QMessageBox.critical( self, 'Error', 'I could not understand what was in the clipboard' )
        
        return
        
    
    if not isinstance( obj, HydrusTags.TagFilter ):
        
        QW.QMessageBox.critical( self, 'Error', 'That object was not a Tag Filter! It seemed to be a "{}".'.format(type(obj)) )
        
        return
        
    
    tag_filter = obj
    
    with ClientGUIDialogs.DialogTextEntry( self, 'Enter a name for the favourite.' ) as dlg:
        
        if dlg.exec() != QW.QDialog.Accepted:
            
            return
            
        
        names_to_tag_filters = HG.client_controller.new_options.GetFavouriteTagFilters()
        
        name = dlg.GetValue()
        
        if name in names_to_tag_filters:
            
            message = '"{}" already exists! Overwrite?'.format( name )
            
            if ClientGUIDialogsQuick.GetYesNo( self, message ) != QW.QDialog.Accepted:
                
                return
                
            
        
        names_to_tag_filters[ name ] = tag_filter
        
        HG.client_controller.new_options.SetFavouriteTagFilters( names_to_tag_filters )
        
        self.SetValue( tag_filter )
def _InitAdvancedPanel( self ):
    """Build the advanced tab: explicit blacklist and whitelist list boxes side by side."""
    
    advanced_panel = QW.QWidget( self._notebook )
    
    # blacklist side
    
    blacklist_panel = ClientGUICommon.StaticBox( advanced_panel, 'exclude these' )
    
    self._advanced_blacklist = ClientGUIListBoxes.ListBoxTagsFilter( blacklist_panel )
    
    self._advanced_blacklist_input = ClientGUIControls.TextAndPasteCtrl( blacklist_panel, self._AdvancedAddBlacklistMultiple, allow_empty_input = True )
    
    add_blacklist_button = ClientGUICommon.BetterButton( blacklist_panel, 'add', self._AdvancedAddBlacklistButton )
    delete_blacklist_button = ClientGUICommon.BetterButton( blacklist_panel, 'delete', self._AdvancedDeleteBlacklist )
    blacklist_everything_button = ClientGUICommon.BetterButton( blacklist_panel, 'block everything', self._AdvancedBlacklistEverything )
    
    # whitelist side
    
    whitelist_panel = ClientGUICommon.StaticBox( advanced_panel, 'except for these' )
    
    self._advanced_whitelist = ClientGUIListBoxes.ListBoxTagsFilter( whitelist_panel )
    
    self._advanced_whitelist_input = ClientGUIControls.TextAndPasteCtrl( whitelist_panel, self._AdvancedAddWhitelistMultiple, allow_empty_input = True )
    
    # kept as an attribute so _UpdateStatus can enable/disable it
    self._advanced_add_whitelist_button = ClientGUICommon.BetterButton( whitelist_panel, 'add', self._AdvancedAddWhitelistButton )
    delete_whitelist_button = ClientGUICommon.BetterButton( whitelist_panel, 'delete', self._AdvancedDeleteWhitelist )
    
    # blacklist layout
    
    button_hbox = QP.HBoxLayout()
    
    QP.AddToLayout( button_hbox, self._advanced_blacklist_input, CC.FLAGS_EXPAND_BOTH_WAYS )
    QP.AddToLayout( button_hbox, add_blacklist_button, CC.FLAGS_CENTER_PERPENDICULAR )
    QP.AddToLayout( button_hbox, delete_blacklist_button, CC.FLAGS_CENTER_PERPENDICULAR )
    QP.AddToLayout( button_hbox, blacklist_everything_button, CC.FLAGS_CENTER_PERPENDICULAR )
    
    blacklist_panel.Add( self._advanced_blacklist, CC.FLAGS_EXPAND_BOTH_WAYS )
    blacklist_panel.Add( button_hbox, CC.FLAGS_EXPAND_PERPENDICULAR )
    
    # whitelist layout
    
    button_hbox = QP.HBoxLayout()
    
    QP.AddToLayout( button_hbox, self._advanced_whitelist_input, CC.FLAGS_EXPAND_BOTH_WAYS )
    QP.AddToLayout( button_hbox, self._advanced_add_whitelist_button, CC.FLAGS_CENTER_PERPENDICULAR )
    QP.AddToLayout( button_hbox, delete_whitelist_button, CC.FLAGS_CENTER_PERPENDICULAR )
    
    whitelist_panel.Add( self._advanced_whitelist, CC.FLAGS_EXPAND_BOTH_WAYS )
    whitelist_panel.Add( button_hbox, CC.FLAGS_EXPAND_PERPENDICULAR )
    
    # both boxes side by side
    
    hbox = QP.HBoxLayout()
    
    QP.AddToLayout( hbox, blacklist_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
    QP.AddToLayout( hbox, whitelist_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
    
    advanced_panel.setLayout( hbox )
    
    return advanced_panel
def _InitBlacklistPanel( self ):
    """Build the simple blacklist tab: global/namespace checkboxes plus a tag list and input."""
    
    blacklist_panel = QW.QWidget( self._notebook )
    
    # set by _UpdateStatus when the filter is too complex for this simple view
    self._simple_blacklist_error_st = ClientGUICommon.BetterStaticText( blacklist_panel )
    
    self._simple_blacklist_global_checkboxes = QP.CheckListBox( blacklist_panel )
    
    # client data is the tag slice each checkbox stands for
    self._simple_blacklist_global_checkboxes.Append( 'unnamespaced tags', '' )
    self._simple_blacklist_global_checkboxes.Append( 'namespaced tags', ':' )
    
    self._simple_blacklist_namespace_checkboxes = QP.CheckListBox( blacklist_panel )
    
    for namespace in self._namespaces:
        
        if namespace == '':
            
            # unnamespaced is already covered by the global checkbox above
            continue
            
        
        self._simple_blacklist_namespace_checkboxes.Append( namespace, namespace + ':' )
        
    
    self._simple_blacklist = ClientGUIListBoxes.ListBoxTagsFilter( blacklist_panel )
    
    self._simple_blacklist_input = ClientGUIControls.TextAndPasteCtrl( blacklist_panel, self._SimpleAddBlacklistMultiple, allow_empty_input = True )
    
    #
    
    left_vbox = QP.VBoxLayout()
    
    QP.AddToLayout( left_vbox, self._simple_blacklist_global_checkboxes, CC.FLAGS_EXPAND_PERPENDICULAR )
    left_vbox.addStretch( 1 )
    QP.AddToLayout( left_vbox, self._simple_blacklist_namespace_checkboxes, CC.FLAGS_EXPAND_PERPENDICULAR )
    
    right_vbox = QP.VBoxLayout()
    
    QP.AddToLayout( right_vbox, self._simple_blacklist, CC.FLAGS_EXPAND_BOTH_WAYS )
    QP.AddToLayout( right_vbox, self._simple_blacklist_input, CC.FLAGS_EXPAND_PERPENDICULAR )
    
    main_hbox = QP.HBoxLayout()
    
    QP.AddToLayout( main_hbox, left_vbox, CC.FLAGS_EXPAND_BOTH_WAYS )
    QP.AddToLayout( main_hbox, right_vbox, CC.FLAGS_EXPAND_BOTH_WAYS )
    
    vbox = QP.VBoxLayout()
    
    QP.AddToLayout( vbox, self._simple_blacklist_error_st, CC.FLAGS_EXPAND_PERPENDICULAR )
    QP.AddToLayout( vbox, main_hbox, CC.FLAGS_EXPAND_BOTH_WAYS )
    
    blacklist_panel.setLayout( vbox )
    
    # removals from this list are routed back through the advanced lists
    self._simple_blacklist.tagsRemoved.connect( self._SimpleBlacklistRemoved )
    
    return blacklist_panel
def _InitWhitelistPanel( self ):
    """Build the simple whitelist tab: global/namespace checkboxes plus a tag list and input."""
    
    whitelist_panel = QW.QWidget( self._notebook )
    
    # set by _UpdateStatus when the filter is too complex for this simple view
    self._simple_whitelist_error_st = ClientGUICommon.BetterStaticText( whitelist_panel )
    
    self._simple_whitelist_global_checkboxes = QP.CheckListBox( whitelist_panel )
    
    # client data is the tag slice each checkbox stands for
    self._simple_whitelist_global_checkboxes.Append( 'unnamespaced tags', '' )
    self._simple_whitelist_global_checkboxes.Append( 'namespaced tags', ':' )
    
    self._simple_whitelist_namespace_checkboxes = QP.CheckListBox( whitelist_panel )
    
    for namespace in self._namespaces:
        
        if namespace == '':
            
            # unnamespaced is already covered by the global checkbox above
            continue
            
        
        self._simple_whitelist_namespace_checkboxes.Append( namespace, namespace + ':' )
        
    
    self._simple_whitelist = ClientGUIListBoxes.ListBoxTagsFilter( whitelist_panel )
    
    self._simple_whitelist_input = ClientGUIControls.TextAndPasteCtrl( whitelist_panel, self._SimpleAddWhitelistMultiple, allow_empty_input = True )
    
    #
    
    left_vbox = QP.VBoxLayout()
    
    QP.AddToLayout( left_vbox, self._simple_whitelist_global_checkboxes, CC.FLAGS_EXPAND_PERPENDICULAR )
    left_vbox.addStretch( 1 )
    QP.AddToLayout( left_vbox, self._simple_whitelist_namespace_checkboxes, CC.FLAGS_EXPAND_PERPENDICULAR )
    
    right_vbox = QP.VBoxLayout()
    
    QP.AddToLayout( right_vbox, self._simple_whitelist, CC.FLAGS_EXPAND_BOTH_WAYS )
    QP.AddToLayout( right_vbox, self._simple_whitelist_input, CC.FLAGS_EXPAND_PERPENDICULAR )
    
    main_hbox = QP.HBoxLayout()
    
    QP.AddToLayout( main_hbox, left_vbox, CC.FLAGS_EXPAND_BOTH_WAYS )
    QP.AddToLayout( main_hbox, right_vbox, CC.FLAGS_EXPAND_BOTH_WAYS )
    
    vbox = QP.VBoxLayout()
    
    QP.AddToLayout( vbox, self._simple_whitelist_error_st, CC.FLAGS_EXPAND_PERPENDICULAR )
    QP.AddToLayout( vbox, main_hbox, CC.FLAGS_EXPAND_BOTH_WAYS )
    
    whitelist_panel.setLayout( vbox )
    
    # removals from this list are routed back through the advanced lists
    self._simple_whitelist.tagsRemoved.connect( self._SimpleWhitelistRemoved )
    
    return whitelist_panel
def _LoadFavourite( self ):
    """Show a menu of favourite tag filters; picking one loads it into the panel."""
    
    menu = QW.QMenu()
    
    names_to_tag_filters = HG.client_controller.new_options.GetFavouriteTagFilters()
    
    if len( names_to_tag_filters ) == 0:
        
        ClientGUIMenus.AppendMenuLabel( menu, 'no favourites set!' )
        
    else:
        
        for ( name, tag_filter ) in names_to_tag_filters.items():
            
            ClientGUIMenus.AppendMenuItem( menu, name, 'load {}'.format( name ), self.SetValue, tag_filter )
            
        
    
    CGC.core().PopupMenu( self, menu )
def _SaveFavourite( self ):
    """Save the current filter as a named favourite, confirming overwrites."""
    
    with ClientGUIDialogs.DialogTextEntry( self, 'Enter a name for the favourite.' ) as dlg:
        
        if dlg.exec() != QW.QDialog.Accepted:
            
            return
            
        
        name = dlg.GetValue()
        
        tag_filter = self.GetValue()
        
        names_to_tag_filters = HG.client_controller.new_options.GetFavouriteTagFilters()
        
        if name in names_to_tag_filters:
            
            message = '"{}" already exists! Overwrite?'.format( name )
            
            if ClientGUIDialogsQuick.GetYesNo( self, message ) != QW.QDialog.Accepted:
                
                return
                
            
        
        names_to_tag_filters[ name ] = tag_filter
        
        HG.client_controller.new_options.SetFavouriteTagFilters( names_to_tag_filters )
def _ShowAllPanels( self ):
    """Reveal the whitelist and advanced tabs (used in blacklist-only mode), then hide this button."""
    
    for ( panel, label ) in ( ( self._whitelist_panel, 'whitelist' ), ( self._advanced_panel, 'advanced' ) ):
        
        panel.setVisible( True )
        
        self._notebook.addTab( panel, label )
        
    
    self._show_all_panels_button.setVisible( False )
def _ShowHelp( self ):
    """Show a messagebox explaining the filter tabs and the special tag-slice shorthands."""
    
    # renamed from 'help', which shadowed the builtin
    help_text = 'Here you can set rules to filter tags for one purpose or another. The default is typically to permit all tags. Check the current filter summary text at the bottom-left of the panel to ensure you have your logic correct.'
    help_text += os.linesep * 2
    help_text += 'The different tabs are multiple ways of looking at the filter--sometimes it is more useful to think about a filter as a whitelist (where only the listed contents are kept) or a blacklist (where everything _except_ the listed contents are kept), and there is also an advanced tab that lets you do a more complicated combination of the two.'
    help_text += os.linesep * 2
    help_text += 'As well as selecting broader categories of tags with the checkboxes, you can type or paste the individual tags directly--just hit enter to add each one--and double-click an existing entry in a list to remove it.'
    help_text += os.linesep * 2
    help_text += 'If you wish to manually type a special tag, use these shorthands:'
    help_text += os.linesep * 2
    help_text += '"namespace:" - all instances of that namespace'
    help_text += os.linesep
    help_text += '":" - all namespaced tags'
    help_text += os.linesep
    help_text += '"" (i.e. an empty string) - all unnamespaced tags'
    
    QW.QMessageBox.information( self, 'Information', help_text )
def _ShowRedundantError( self, text ):
    """Flash a transient redundant-rule warning in the status line, cleared after 2s."""
    
    status_line = self._redundant_st
    
    status_line.setText( text )
    
    HG.client_controller.CallLaterQtSafe( status_line, 2, status_line.setText, '' )
def _SimpleAddBlacklistMultiple( self, tag_slices ):
    """Simple-tab input handler: add each tag slice via the advanced blacklist toggle."""
    
    for one_slice in tag_slices:
        
        self._AdvancedAddBlacklist( one_slice )
def _SimpleAddWhitelistMultiple( self, tag_slices ):
    """Simple-tab input handler for the whitelist.
    
    Re-adding a global rule ('' or ':') that is already shown as whitelisted
    toggles it off by blocking it; everything else goes to the whitelist.
    """
    
    for one_slice in tag_slices:
        
        # the simple list can change between iterations, so re-query each time
        if one_slice in ( '', ':' ) and one_slice in self._simple_whitelist.GetTagSlices():
            
            self._AdvancedAddBlacklist( one_slice )
            
        else:
            
            self._AdvancedAddWhitelist( one_slice )
def _SimpleBlacklistRemoved( self, tag_slices ):
    """tagsRemoved handler: removing from the simple blacklist toggles each slice off."""
    
    for one_slice in tag_slices:
        
        self._AdvancedAddBlacklist( one_slice )
def _SimpleBlacklistReset( self ):
    """No-op hook; the simple blacklist tab needs no reset action."""
    
    pass
def _SimpleWhitelistRemoved( self, tag_slices ):
    """tagsRemoved handler for the simple whitelist.
    
    Removing a global rule ('' or ':') means blocking that class of tags, so
    it goes to the blacklist; everything else toggles off the whitelist.
    """
    
    remaining = set( tag_slices )
    
    for global_slice in ( '', ':' ):
        
        if global_slice in remaining:
            
            remaining.discard( global_slice )
            
            self._AdvancedAddBlacklist( global_slice )
            
        
    
    for one_slice in remaining:
        
        self._AdvancedAddWhitelist( one_slice )
def _SimpleWhitelistReset( self ):
    """No-op hook; the simple whitelist tab needs no reset action."""
    
    pass
def _UpdateStatus( self ):
    """Refresh the simple tabs, button enable states, summary text, and live test from the advanced lists."""
    
    ( whitelist_possible, blacklist_possible ) = self._GetWhiteBlacklistsPossible()
    
    whitelist_tag_slices = self._advanced_whitelist.GetTagSlices()
    blacklist_tag_slices = self._advanced_blacklist.GetTagSlices()
    
    if whitelist_possible:
        
        self._simple_whitelist_error_st.clear()
        
        self._simple_whitelist.setEnabled( True )
        self._simple_whitelist_global_checkboxes.setEnabled( True )
        self._simple_whitelist_input.setEnabled( True )
        
        whitelist_tag_slices = set( whitelist_tag_slices )
        
        # a global class of tags counts as whitelisted when it is not blocked
        
        if not self._CurrentlyBlocked( '' ):
            
            whitelist_tag_slices.add( '' )
            
        
        if not self._CurrentlyBlocked( ':' ):
            
            whitelist_tag_slices.add( ':' )
            
            # all namespaced tags pass wholesale, so per-namespace boxes add nothing
            self._simple_whitelist_namespace_checkboxes.setEnabled( False )
            
        else:
            
            self._simple_whitelist_namespace_checkboxes.setEnabled( True )
            
        
        self._simple_whitelist.SetTagSlices( whitelist_tag_slices )
        
        # sync the checkboxes to the effective whitelist
        
        for index in range( self._simple_whitelist_global_checkboxes.count() ):
            
            check = QP.GetClientData( self._simple_whitelist_global_checkboxes, index ) in whitelist_tag_slices
            
            self._simple_whitelist_global_checkboxes.Check( index, check )
            
        
        for index in range( self._simple_whitelist_namespace_checkboxes.count() ):
            
            check = QP.GetClientData( self._simple_whitelist_namespace_checkboxes, index ) in whitelist_tag_slices
            
            self._simple_whitelist_namespace_checkboxes.Check( index, check )
            
        
    else:
        
        self._simple_whitelist_error_st.setText( 'The filter is currently more complicated than a simple whitelist, so cannot be shown here.' )
        
        self._simple_whitelist.setEnabled( False )
        self._simple_whitelist_global_checkboxes.setEnabled( False )
        self._simple_whitelist_namespace_checkboxes.setEnabled( False )
        self._simple_whitelist_input.setEnabled( False )
        
        # '' iterates to nothing, so this clears the list; NOTE(review): an
        # empty list would be clearer--confirm SetTagSlices only iterates
        self._simple_whitelist.SetTagSlices( '' )
        
        for index in range( self._simple_whitelist_global_checkboxes.count() ):
            
            self._simple_whitelist_global_checkboxes.Check( index, False )
            
        
        for index in range( self._simple_whitelist_namespace_checkboxes.count() ):
            
            self._simple_whitelist_namespace_checkboxes.Check( index, False )
            
        
    
    # now the simple blacklist tab
    
    whitelist_tag_slices = self._advanced_whitelist.GetTagSlices()
    blacklist_tag_slices = self._advanced_blacklist.GetTagSlices()
    
    if blacklist_possible:
        
        self._simple_blacklist_error_st.clear()
        
        self._simple_blacklist.setEnabled( True )
        self._simple_blacklist_global_checkboxes.setEnabled( True )
        self._simple_blacklist_input.setEnabled( True )
        
        if self._CurrentlyBlocked( ':' ):
            
            # all namespaced tags are blocked wholesale, per-namespace boxes add nothing
            self._simple_blacklist_namespace_checkboxes.setEnabled( False )
            
        else:
            
            self._simple_blacklist_namespace_checkboxes.setEnabled( True )
            
        
        self._simple_blacklist.SetTagSlices( blacklist_tag_slices )
        
        # sync the checkboxes to the blacklist
        
        for index in range( self._simple_blacklist_global_checkboxes.count() ):
            
            check = QP.GetClientData( self._simple_blacklist_global_checkboxes, index ) in blacklist_tag_slices
            
            self._simple_blacklist_global_checkboxes.Check( index, check )
            
        
        for index in range( self._simple_blacklist_namespace_checkboxes.count() ):
            
            check = QP.GetClientData( self._simple_blacklist_namespace_checkboxes, index ) in blacklist_tag_slices
            
            self._simple_blacklist_namespace_checkboxes.Check( index, check )
            
        
    else:
        
        self._simple_blacklist_error_st.setText( 'The filter is currently more complicated than a simple blacklist, so cannot be shown here.' )
        
        self._simple_blacklist.setEnabled( False )
        self._simple_blacklist_global_checkboxes.setEnabled( False )
        self._simple_blacklist_namespace_checkboxes.setEnabled( False )
        self._simple_blacklist_input.setEnabled( False )
        
        # '' iterates to nothing, so this clears the list (see note above in this method)
        self._simple_blacklist.SetTagSlices( '' )
        
        for index in range( self._simple_blacklist_global_checkboxes.count() ):
            
            self._simple_blacklist_global_checkboxes.Check( index, False )
            
        
        for index in range( self._simple_blacklist_namespace_checkboxes.count() ):
            
            self._simple_blacklist_namespace_checkboxes.Check( index, False )
            
        
    
    # 'except' entries only make sense once something is blocked
    
    whitelist_tag_slices = self._advanced_whitelist.GetTagSlices()
    blacklist_tag_slices = self._advanced_blacklist.GetTagSlices()
    
    if len( blacklist_tag_slices ) == 0:
        
        self._advanced_whitelist_input.setEnabled( False )
        self._advanced_add_whitelist_button.setEnabled( False )
        
    else:
        
        self._advanced_whitelist_input.setEnabled( True )
        self._advanced_add_whitelist_button.setEnabled( True )
        
    
    # refresh the summary line and the live test
    
    tag_filter = self.GetValue()
    
    if self._only_show_blacklist:
        
        pretty_tag_filter = tag_filter.ToBlacklistString()
        
    else:
        
        pretty_tag_filter = 'currently keeping: {}'.format( tag_filter.ToPermittedString() )
        
    
    self._current_filter_st.setText( pretty_tag_filter )
    
    self._UpdateTest()
def _UpdateTest( self ):
    """Re-run the user's test tags against the current filter and show pass/block results."""
    
    test_input = self._test_input.toPlainText()
    
    if test_input == '':
        
        if self._only_show_blacklist:
            
            test_result_text = self.TEST_RESULT_BLACKLIST_DEFAULT
            
        else:
            
            test_result_text = self.TEST_RESULT_DEFAULT
            
        
        # clear any pass/fail object-name styling
        self._test_result_st.setObjectName( '' )
        
        self._test_result_st.setText( test_result_text )
        self._test_result_st.style().polish( self._test_result_st )
        
    else:
        
        # one test tag per line
        test_tags = HydrusText.DeserialiseNewlinedTexts( test_input )
        
        test_tags = HydrusTags.CleanTags( test_tags )
        
        tag_filter = self.GetValue()
        
        self._test_result_st.setObjectName( '' )
        
        self._test_result_st.clear()
        self._test_result_st.style().polish( self._test_result_st )
        
        if self._only_show_blacklist:
            
            # blacklist mode: fetch each tag's sibling group and only count the
            # tag as passing when every sibling passes (unnamespaced rules are
            # applied to namespaced tags too)
            
            def work_callable():
                
                results = []
                
                tags_to_siblings = HG.client_controller.Read( 'tag_siblings_lookup', CC.COMBINED_TAG_SERVICE_KEY, test_tags )
                
                for ( test_tag, siblings ) in tags_to_siblings.items():
                    
                    results.append( False not in ( tag_filter.TagOK( sibling_tag, apply_unnamespaced_rules_to_namespaced_tags = True ) for sibling_tag in siblings ) )
                    
                
                return results
                
            
        else:
            
            # normal mode: test each tag directly
            
            def work_callable():
                
                results = [ tag_filter.TagOK( test_tag ) for test_tag in test_tags ]
                
                return results
                
            
        
        def publish_callable( results ):
            
            # results is a list of booleans, one per test tag
            
            all_good = False not in results
            all_bad = True not in results
            
            if len( results ) == 1:
                
                if all_good:
                    
                    test_result_text = 'tag passes!'
                    
                    self._test_result_st.setObjectName( 'HydrusValid' )
                    
                else:
                    
                    test_result_text = 'tag blocked!'
                    
                    self._test_result_st.setObjectName( 'HydrusInvalid' )
                    
                
            else:
                
                if all_good:
                    
                    test_result_text = 'all pass!'
                    
                    self._test_result_st.setObjectName( 'HydrusValid' )
                    
                elif all_bad:
                    
                    test_result_text = 'all blocked!'
                    
                    self._test_result_st.setObjectName( 'HydrusInvalid' )
                    
                else:
                    
                    # mixed results: report the pass/block counts
                    
                    c = collections.Counter()
                    
                    c.update( results )
                    
                    test_result_text = '{} pass, {} blocked!'.format( HydrusData.ToHumanInt( c[ True ] ), HydrusData.ToHumanInt( c[ False ] ) )
                    
                    self._test_result_st.setObjectName( 'HydrusInvalid' )
                    
                
            
            self._test_result_st.setText( test_result_text )
            self._test_result_st.style().polish( self._test_result_st )
            
        
        # run the (possibly db-bound) work off the Qt thread, then publish back
        async_job = ClientGUIAsync.AsyncQtJob( self, work_callable, publish_callable )
        
        async_job.start()
def EventSimpleBlacklistNamespaceCheck( self, index ):
    """A namespace checkbox on the simple blacklist page was toggled; mirror it into the advanced blacklist."""
    
    row = index.row()
    
    if row == -1:
        
        return
        
    
    tag_slice = QP.GetClientData( self._simple_blacklist_namespace_checkboxes, row )
    
    self._AdvancedAddBlacklist( tag_slice )
    

def EventSimpleBlacklistGlobalCheck( self, index ):
    """A global ('' or ':') checkbox on the simple blacklist page was toggled; mirror it into the advanced blacklist."""
    
    row = index.row()
    
    if row == -1:
        
        return
        
    
    tag_slice = QP.GetClientData( self._simple_blacklist_global_checkboxes, row )
    
    self._AdvancedAddBlacklist( tag_slice )
    

def EventSimpleWhitelistNamespaceCheck( self, index ):
    """A namespace checkbox on the simple whitelist page was toggled; mirror it into the advanced whitelist."""
    
    row = index.row()
    
    if row == -1:
        
        return
        
    
    tag_slice = QP.GetClientData( self._simple_whitelist_namespace_checkboxes, row )
    
    self._AdvancedAddWhitelist( tag_slice )
    

def EventSimpleWhitelistGlobalCheck( self, index ):
    """A global checkbox on the simple whitelist page was toggled; route it to white- or blacklist as appropriate."""
    
    row = index.row()
    
    if row == -1:
        
        return
        
    
    tag_slice = QP.GetClientData( self._simple_whitelist_global_checkboxes, row )
    
    # unchecking a currently-whitelisted global slice means 'block it', not 'whitelist it'
    if tag_slice in ( '', ':' ) and tag_slice in self._simple_whitelist.GetTagSlices():
        
        self._AdvancedAddBlacklist( tag_slice )
        
    else:
        
        self._AdvancedAddWhitelist( tag_slice )
def GetValue( self ):
    """Build and return a HydrusTags.TagFilter reflecting the advanced black/whitelist boxes."""
    
    tag_filter = HydrusTags.TagFilter()
    
    # blacklist rules first, then whitelist, matching the original assembly order
    rule_sources = (
        ( self._advanced_blacklist, HC.FILTER_BLACKLIST ),
        ( self._advanced_whitelist, HC.FILTER_WHITELIST )
    )
    
    for ( list_box, rule ) in rule_sources:
        
        for tag_slice in list_box.GetTagSlices():
            
            tag_filter.SetRule( tag_slice, rule )
            
        
    
    return tag_filter
def SetValue( self, tag_filter: HydrusTags.TagFilter ):
    """Populate the advanced lists from the given filter and select a sensible starting page."""
    
    tag_slices_to_rules = tag_filter.GetTagSlicesToRules()
    
    blacklist_tag_slices = [ tag_slice for ( tag_slice, rule ) in tag_slices_to_rules.items() if rule == HC.FILTER_BLACKLIST ]
    whitelist_tag_slices = [ tag_slice for ( tag_slice, rule ) in tag_slices_to_rules.items() if rule == HC.FILTER_WHITELIST ]
    
    self._advanced_blacklist.SetTagSlices( blacklist_tag_slices )
    self._advanced_whitelist.SetTagSlices( whitelist_tag_slices )
    
    ( whitelist_possible, blacklist_possible ) = self._GetWhiteBlacklistsPossible()
    
    # candidate pages in preference order; the advanced page is always a valid fallback
    if self._only_show_blacklist:
        
        candidates = [ ( blacklist_possible, self._blacklist_panel ) ]
        
    else:
        
        candidates = [ ( whitelist_possible, self._whitelist_panel ), ( blacklist_possible, self._blacklist_panel ) ]
        
    
    candidates.append( ( True, self._advanced_panel ) )
    
    for ( possible, page ) in candidates:
        
        if possible:
            
            self._notebook.SelectPage( page )
            
            break
            
        
    
    self._UpdateStatus()
class ManageTagsPanel( ClientGUIScrolledPanels.ManagePanel ):
    """Dialog panel for editing the tags of a set of media, with one tab per tag service."""
    
    def __init__( self, parent, file_service_key, media, immediate_commit = False, canvas_key = None ):
        """
        :param parent: parent widget
        :param file_service_key: file domain the media live in
        :param media: media whose tags are being managed (flattened to singletons below)
        :param immediate_commit: if True, changes are written to the db as they are made
        :param canvas_key: if launched from a media viewer canvas, its key, so we can follow it
        """
        
        ClientGUIScrolledPanels.ManagePanel.__init__( self, parent )
        
        self._file_service_key = file_service_key
        self._immediate_commit = immediate_commit
        self._canvas_key = canvas_key
        
        # work on duplicates so uncommitted edits do not touch the live media objects
        media = ClientMedia.FlattenMedia( media )
        
        self._current_media = [ m.Duplicate() for m in media ]
        
        self._hashes = set()
        
        for m in self._current_media:
            
            self._hashes.update( m.GetHashes() )
            
        
        self._tag_repositories = ClientGUICommon.BetterNotebook( self )
        
        #
        
        services = HG.client_controller.services_manager.GetServices( HC.REAL_TAG_SERVICES )
        
        default_tag_repository_key = HC.options[ 'default_tag_repository' ]
        
        # one page per tag service, all wired to the shared navigation/ok signals
        for service in services:
            
            service_key = service.GetServiceKey()
            name = service.GetName()
            
            page = self._Panel( self._tag_repositories, self._file_service_key, service.GetServiceKey(), self._current_media, self._immediate_commit, canvas_key = self._canvas_key )
            
            page._add_tag_box.selectUp.connect( self.EventSelectUp )
            page._add_tag_box.selectDown.connect( self.EventSelectDown )
            page._add_tag_box.showPrevious.connect( self.EventShowPrevious )
            page._add_tag_box.showNext.connect( self.EventShowNext )
            page.okSignal.connect( self.okSignal )
            
            select = service_key == default_tag_repository_key
            
            self._tag_repositories.addTab( page, name )
            
            if select: self._tag_repositories.setCurrentIndex( self._tag_repositories.count() - 1 )
            
        
        #
        
        vbox = QP.VBoxLayout()
        
        QP.AddToLayout( vbox, self._tag_repositories, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        self.widget().setLayout( vbox )
        
        if self._canvas_key is not None:
            
            # follow the canvas as it moves to new media
            HG.client_controller.sub( self, 'CanvasHasNewMedia', 'canvas_new_display_media' )
            
        
        self._my_shortcut_handler = ClientGUIShortcuts.ShortcutsHandler( self, [ 'global', 'media', 'main_gui' ] )
        
        self._tag_repositories.currentChanged.connect( self.EventServiceChanged )
        
        self._SetSearchFocus()
def _GetGroupsOfServiceKeysToContentUpdates( self ):
    """Collect pending updates from every service page, one { service_key : content_updates } dict per non-empty group."""
    
    groups = []
    
    for page in self._tag_repositories.GetPages():
        
        ( service_key, groups_of_content_updates ) = page.GetGroupsOfContentUpdates()
        
        groups.extend( { service_key : content_updates } for content_updates in groups_of_content_updates if len( content_updates ) > 0 )
        
    
    return groups
def _SetSearchFocus( self ):
    """Put keyboard focus on the current page's tag input box."""
    
    page = self._tag_repositories.currentWidget()
    
    if page is None:
        
        return
        
    
    page.SetTagBoxFocus()
    

def CanvasHasNewMedia( self, canvas_key, new_media_singleton ):
    """The attached canvas moved to new media; swap it in and refresh every page."""
    
    if canvas_key != self._canvas_key:
        
        return
        
    
    if new_media_singleton is None:
        
        return
        
    
    self._current_media = ( new_media_singleton.Duplicate(), )
    
    for page in self._tag_repositories.GetPages():
        
        page.SetMedia( self._current_media )
        
    

def CleanBeforeDestroy( self ):
    """Tidy up this panel and every service page before destruction."""
    
    ClientGUIScrolledPanels.ManagePanel.CleanBeforeDestroy( self )
    
    for page in self._tag_repositories.GetPages():
        
        page.CleanBeforeDestroy()
        
    

def CommitChanges( self ):
    """Write all pending content update groups to the db, one group at a time."""
    
    for service_keys_to_content_updates in self._GetGroupsOfServiceKeysToContentUpdates():
        
        HG.client_controller.WriteSynchronous( 'content_updates', service_keys_to_content_updates )
        
    

def EventSelectDown( self ):
    """Keyboard navigation: move to the next service page."""
    
    self._tag_repositories.SelectRight()
    
    self._SetSearchFocus()
    

def EventSelectUp( self ):
    """Keyboard navigation: move to the previous service page."""
    
    self._tag_repositories.SelectLeft()
    
    self._SetSearchFocus()
    

def EventShowNext( self ):
    """Ask the attached canvas, if any, to show the next media."""
    
    if self._canvas_key is not None:
        
        HG.client_controller.pub( 'canvas_show_next', self._canvas_key )
        
    

def EventShowPrevious( self ):
    """Ask the attached canvas, if any, to show the previous media."""
    
    if self._canvas_key is not None:
        
        HG.client_controller.pub( 'canvas_show_previous', self._canvas_key )
        
    

def EventServiceChanged( self, index ):
    """Refocus the tag input when the user switches service tabs."""
    
    # got a runtime error here once, on some Linux WM dialog shutdown, hence the validity guard
    if not self or not QP.isValid( self ):
        
        return
        
    
    if self.sender() != self._tag_repositories:
        
        return
        
    
    page = self._tag_repositories.currentWidget()
    
    if page is not None:
        
        HG.client_controller.CallAfterQtSafe( page, page.SetTagBoxFocus )
def ProcessApplicationCommand( self, command: CAC.ApplicationCommand ):
    """Handle a shortcut command; return True if it was consumed here."""
    
    if not command.IsSimpleCommand():
        
        return False
        
    
    action = command.GetData()
    
    if action == CAC.SIMPLE_MANAGE_FILE_TAGS:
        
        self._OKParent()
        
        return True
        
    
    if action == CAC.SIMPLE_FOCUS_MEDIA_VIEWER:
        
        from hydrus.client.gui import ClientGUICanvasFrame
        
        # hand focus back to the first media viewer frame above us, if any
        for tlw in ClientGUIFunctions.GetTLWParents( self ):
            
            if isinstance( tlw, ClientGUICanvasFrame.CanvasFrame ):
                
                tlw.TakeFocusForUser()
                
                return True
                
            
        
        return False
        
    
    if action == CAC.SIMPLE_SET_SEARCH_FOCUS:
        
        self._SetSearchFocus()
        
        return True
        
    
    return False
def UserIsOKToCancel( self ):
    """If there are uncommitted changes, confirm the cancel with the user first."""
    
    if len( self._GetGroupsOfServiceKeysToContentUpdates() ) == 0:
        
        return True
        
    
    message = 'Are you sure you want to cancel? You have uncommitted changes that will be lost.'
    
    result = ClientGUIDialogsQuick.GetYesNo( self, message )
    
    return result == QW.QDialog.Accepted
class _Panel( QW.QWidget ):
    """One tag service's page within ManageTagsPanel: tag list, suggestions panel, and autocomplete input."""
    
    okSignal = QC.Signal()  # emitted when the user 'OK's from the tag input
    
    def __init__( self, parent, file_service_key, tag_service_key, media, immediate_commit, canvas_key = None ):
        """
        :param parent: the notebook this page lives in
        :param file_service_key: file domain for autocomplete searches
        :param tag_service_key: the tag service this page edits
        :param media: the (duplicated) media being edited
        :param immediate_commit: if True, updates are written to the db as they are made
        :param canvas_key: media viewer canvas key, if launched from one
        """
        
        QW.QWidget.__init__( self, parent )
        
        self._file_service_key = file_service_key
        self._tag_service_key = tag_service_key
        self._immediate_commit = immediate_commit
        self._canvas_key = canvas_key
        
        # each entry is one list of ContentUpdates that were entered together
        self._groups_of_content_updates = []
        
        self._service = HG.client_controller.services_manager.GetService( self._tag_service_key )
        
        # local tag services add/delete directly; repositories pend/petition
        self._i_am_local_tag_service = self._service.GetServiceType() == HC.LOCAL_TAG
        
        self._tags_box_sorter = ClientGUIListBoxes.StaticBoxSorterForListBoxTags( self, 'tags', show_siblings_sort = True )
        
        self._tags_box = ClientGUIListBoxes.ListBoxTagsMediaTagsDialog( self._tags_box_sorter, self.EnterTags, self.RemoveTags )
        
        self._tags_box_sorter.SetTagsBox( self._tags_box )
        
        #
        
        self._new_options = HG.client_controller.new_options
        
        if self._i_am_local_tag_service:
            
            text = 'remove all/selected tags'
            
        else:
            
            text = 'petition to remove all/selected tags'
            
        
        self._remove_tags = ClientGUICommon.BetterButton( self._tags_box_sorter, text, self._RemoveTagsButton )
        
        self._copy_button = ClientGUICommon.BetterBitmapButton( self._tags_box_sorter, CC.global_pixmaps().copy, self._Copy )
        self._copy_button.setToolTip( 'Copy selected tags to the clipboard. If none are selected, copies all.' )
        
        self._paste_button = ClientGUICommon.BetterBitmapButton( self._tags_box_sorter, CC.global_pixmaps().paste, self._Paste )
        self._paste_button.setToolTip( 'Paste newline-separated tags from the clipboard into here.' )
        
        self._show_deleted = False
        
        # cog button menu entries
        menu_items = []
        
        check_manager = ClientGUICommon.CheckboxManagerOptions( 'allow_remove_on_manage_tags_input' )
        
        menu_items.append( ( 'check', 'allow remove/petition result on tag input for already existing tag', 'If checked, inputting a tag that already exists will try to remove it.', check_manager ) )
        
        check_manager = ClientGUICommon.CheckboxManagerOptions( 'yes_no_on_remove_on_manage_tags' )
        
        menu_items.append( ( 'check', 'confirm remove/petition tags on explicit delete actions', 'If checked, clicking the remove/petition tags button (or hitting the deleted key on the list) will first confirm the action with a yes/no dialog.', check_manager ) )
        
        check_manager = ClientGUICommon.CheckboxManagerCalls( self._FlipShowDeleted, lambda: self._show_deleted )
        
        menu_items.append( ( 'check', 'show deleted', 'Show deleted tags, if any.', check_manager ) )
        
        menu_items.append( ( 'separator', 0, 0, 0 ) )
        
        menu_items.append( ( 'normal', 'migrate tags for these files', 'Migrate the tags for the files used to launch this manage tags panel.', self._MigrateTags ) )
        
        if not self._i_am_local_tag_service and self._service.HasPermission( HC.CONTENT_TYPE_ACCOUNTS, HC.PERMISSION_ACTION_MODERATE ):
            
            # janitor-only tool
            menu_items.append( ( 'separator', 0, 0, 0 ) )
            
            menu_items.append( ( 'normal', 'modify users who added the selected tags', 'Modify the users who added the selected tags.', self._ModifyMappers ) )
            
        
        self._cog_button = ClientGUIMenuButton.MenuBitmapButton( self._tags_box_sorter, CC.global_pixmaps().cog, menu_items )
        
        #
        
        self._add_tag_box = ClientGUIACDropdown.AutoCompleteDropdownTagsWrite( self, self.AddTags, self._file_service_key, self._tag_service_key, null_entry_callable = self.OK )
        
        self._tags_box.SetTagServiceKey( self._tag_service_key )
        
        self._suggested_tags = ClientGUITagSuggestions.SuggestedTagsPanel( self, self._tag_service_key, media, self.AddTags )
        
        self.SetMedia( media )
        
        # layout
        button_hbox = QP.HBoxLayout()
        
        QP.AddToLayout( button_hbox, self._remove_tags, CC.FLAGS_CENTER_PERPENDICULAR )
        QP.AddToLayout( button_hbox, self._copy_button, CC.FLAGS_CENTER_PERPENDICULAR )
        QP.AddToLayout( button_hbox, self._paste_button, CC.FLAGS_CENTER_PERPENDICULAR )
        QP.AddToLayout( button_hbox, self._cog_button, CC.FLAGS_CENTER )
        
        self._tags_box_sorter.Add( button_hbox, CC.FLAGS_ON_RIGHT )
        
        vbox = QP.VBoxLayout()
        
        QP.AddToLayout( vbox, self._tags_box_sorter, CC.FLAGS_EXPAND_BOTH_WAYS )
        QP.AddToLayout( vbox, self._add_tag_box )
        
        #
        
        hbox = QP.HBoxLayout()
        
        QP.AddToLayout( hbox, self._suggested_tags, CC.FLAGS_EXPAND_BOTH_WAYS_POLITE )
        QP.AddToLayout( hbox, vbox, CC.FLAGS_EXPAND_SIZER_BOTH_WAYS )
        
        #
        
        self._my_shortcut_handler = ClientGUIShortcuts.ShortcutsHandler( self, [ 'global', 'main_gui' ] )
        
        self.setLayout( hbox )
        
        if self._immediate_commit:
            
            # in immediate mode, listen for db-side updates so the display stays current
            HG.client_controller.sub( self, 'ProcessContentUpdates', 'content_updates_gui' )
            
        
        self._suggested_tags.mouseActivationOccurred.connect( self.SetTagBoxFocus )
def _EnterTags( self, tags, only_add = False, only_remove = False, forced_reason = None ):
    """Figure out what entering these tags means for the current media (add, delete, pend, petition,
    or rescind), ask the user to disambiguate if several actions apply, and apply the result.
    
    :param tags: raw tags to enter; cleaned below
    :param only_add: restrict to add/pend actions
    :param only_remove: restrict to delete/petition actions
    :param forced_reason: petition reason to use without asking the user
    """
    
    tags = HydrusTags.CleanTags( tags )
    
    # moderators do not need to supply petition reasons
    if not self._i_am_local_tag_service and self._service.HasPermission( HC.CONTENT_TYPE_MAPPINGS, HC.PERMISSION_ACTION_MODERATE ):
        
        forced_reason = 'admin'
        
    
    tags_managers = [ m.GetTagsManager() for m in self._media ]
    
    currents = [ tags_manager.GetCurrent( self._tag_service_key, ClientTags.TAG_DISPLAY_STORAGE ) for tags_manager in tags_managers ]
    pendings = [ tags_manager.GetPending( self._tag_service_key, ClientTags.TAG_DISPLAY_STORAGE ) for tags_manager in tags_managers ]
    petitioneds = [ tags_manager.GetPetitioned( self._tag_service_key, ClientTags.TAG_DISPLAY_STORAGE ) for tags_manager in tags_managers ]
    
    num_files = len( self._media )
    
    # let's figure out what these tags can mean for the media--add, remove, or what?
    
    choices = collections.defaultdict( list )
    
    for tag in tags:
        
        num_current = sum( ( 1 for current in currents if tag in current ) )
        
        if self._i_am_local_tag_service:
            
            # local services: straightforward add/delete
            if not only_remove:
                
                if num_current < num_files:
                    
                    num_non_current = num_files - num_current
                    
                    choices[ HC.CONTENT_UPDATE_ADD ].append( ( tag, num_non_current ) )
                    
                
            
            if not only_add:
                
                if num_current > 0:
                    
                    choices[ HC.CONTENT_UPDATE_DELETE ].append( ( tag, num_current ) )
                    
                
            
        else:
            
            # repositories: pend/petition, plus rescinds of either
            num_pending = sum( ( 1 for pending in pendings if tag in pending ) )
            num_petitioned = sum( ( 1 for petitioned in petitioneds if tag in petitioned ) )
            
            if not only_remove:
                
                if num_current + num_pending < num_files:
                    
                    num_pendable = num_files - ( num_current + num_pending )
                    
                    choices[ HC.CONTENT_UPDATE_PEND ].append( ( tag, num_pendable ) )
                    
                
            
            if not only_add:
                
                if num_current > num_petitioned and not only_add:
                    
                    num_petitionable = num_current - num_petitioned
                    
                    choices[ HC.CONTENT_UPDATE_PETITION ].append( ( tag, num_petitionable ) )
                    
                
                if num_pending > 0 and not only_add:
                    
                    choices[ HC.CONTENT_UPDATE_RESCIND_PEND ].append( ( tag, num_pending ) )
                    
                
            
            if not only_remove:
                
                if num_petitioned > 0:
                    
                    choices[ HC.CONTENT_UPDATE_RESCIND_PETITION ].append( ( tag, num_petitioned ) )
                    
                
            
        
    
    if len( choices ) == 0:
        
        # nothing to do for any of these tags
        return
        
    
    if len( choices ) == 1:
        
        # unambiguous: take the single action for its tags
        [ ( choice_action, tag_counts ) ] = list( choices.items() )
        
        tags = { tag for ( tag, count ) in tag_counts }
        
    else:
        
        # several possible actions: build a button-choice dialog for the user
        bdc_choices = []
        
        preferred_order = [ HC.CONTENT_UPDATE_ADD, HC.CONTENT_UPDATE_DELETE, HC.CONTENT_UPDATE_PEND, HC.CONTENT_UPDATE_RESCIND_PEND, HC.CONTENT_UPDATE_PETITION, HC.CONTENT_UPDATE_RESCIND_PETITION ]
        
        choice_text_lookup = {}
        
        choice_text_lookup[ HC.CONTENT_UPDATE_ADD ] = 'add'
        choice_text_lookup[ HC.CONTENT_UPDATE_DELETE ] = 'delete'
        choice_text_lookup[ HC.CONTENT_UPDATE_PEND ] = 'pend (add)'
        choice_text_lookup[ HC.CONTENT_UPDATE_PETITION ] = 'petition to remove'
        choice_text_lookup[ HC.CONTENT_UPDATE_RESCIND_PEND ] = 'undo pend'
        choice_text_lookup[ HC.CONTENT_UPDATE_RESCIND_PETITION ] = 'undo petition to remove'
        
        choice_tooltip_lookup = {}
        
        choice_tooltip_lookup[ HC.CONTENT_UPDATE_ADD ] = 'this adds the tags to this local tag service'
        choice_tooltip_lookup[ HC.CONTENT_UPDATE_DELETE ] = 'this deletes the tags from this local tag service'
        choice_tooltip_lookup[ HC.CONTENT_UPDATE_PEND ] = 'this pends the tags to be added to this tag repository when you upload'
        choice_tooltip_lookup[ HC.CONTENT_UPDATE_PETITION ] = 'this petitions the tags for deletion from this tag repository when you upload'
        choice_tooltip_lookup[ HC.CONTENT_UPDATE_RESCIND_PEND ] = 'this rescinds the currently pending tags, so they will not be added'
        choice_tooltip_lookup[ HC.CONTENT_UPDATE_RESCIND_PETITION ] = 'this rescinds the current tag petitions, so they will not be deleted'
        
        for choice_action in preferred_order:
            
            if choice_action not in choices:
                
                continue
                
            
            choice_text_prefix = choice_text_lookup[ choice_action ]
            
            tag_counts = choices[ choice_action ]
            
            choice_tags = { tag for ( tag, count ) in tag_counts }
            
            if len( choice_tags ) == 1:
                
                [ ( tag, count ) ] = tag_counts
                
                text = '{} "{}" for {} files'.format( choice_text_prefix, HydrusText.ElideText( tag, 64 ), HydrusData.ToHumanInt( count ) )
                
            else:
                
                text = '{} {} tags'.format( choice_text_prefix, HydrusData.ToHumanInt( len( choice_tags ) ) )
                
            
            data = ( choice_action, choice_tags )
            
            # tooltip shows at most 25 per-tag counts
            t_c_lines = [ choice_tooltip_lookup[ choice_action ] ]
            
            if len( tag_counts ) > 25:
                
                t_c = tag_counts[:25]
                
            else:
                
                t_c = tag_counts
                
            
            t_c_lines.extend( ( '{} - {} files'.format( tag, HydrusData.ToHumanInt( count ) ) for ( tag, count ) in t_c ) )
            
            if len( tag_counts ) > 25:
                
                t_c_lines.append( 'and {} others'.format( HydrusData.ToHumanInt( len( tag_counts ) - 25 ) ) )
                
            
            tooltip = os.linesep.join( t_c_lines )
            
            bdc_choices.append( ( text, data, tooltip ) )
            
        
        try:
            
            if len( tags ) > 1:
                
                message = 'The file{} some of those tags, but not all, so there are different things you can do.'.format( 's have' if len( self._media ) > 1 else ' has' )
                
            else:
                
                message = 'Of the {} files being managed, some have that tag, but not all of them do, so there are different things you can do.'.format( HydrusData.ToHumanInt( len( self._media ) ) )
                
            
            ( choice_action, tags ) = ClientGUIDialogsQuick.SelectFromListButtons( self, 'What would you like to do?', bdc_choices, message = message )
            
        except HydrusExceptions.CancelledException:
            
            return
            
        
    
    reason = None
    
    if choice_action == HC.CONTENT_UPDATE_PETITION:
        
        if forced_reason is None:
            
            # add the easy reason buttons here
            
            if len( tags ) == 1:
                
                ( tag, ) = tags
                
                tag_text = '"' + tag + '"'
                
            else:
                
                tag_text = 'the ' + HydrusData.ToHumanInt( len( tags ) ) + ' tags'
                
            
            message = 'Enter a reason for ' + tag_text + ' to be removed. A janitor will review your petition.'
            
            suggestions = []
            
            suggestions.append( 'mangled parse/typo' )
            suggestions.append( 'not applicable' )
            suggestions.append( 'should be namespaced' )
            suggestions.append( 'splitting filename/title/etc... into individual tags' )
            
            with ClientGUIDialogs.DialogTextEntry( self, message, suggestions = suggestions ) as dlg:
                
                if dlg.exec() == QW.QDialog.Accepted:
                    
                    reason = dlg.GetValue()
                    
                else:
                    
                    return
                    
                
            
        else:
            
            reason = forced_reason
            
        
    
    # we have an action and tags, so let's effect the content updates
    
    content_updates_group = []
    
    recent_tags = set()
    
    medias_and_tags_managers = [ ( m, m.GetTagsManager() ) for m in self._media ]
    medias_and_sets_of_tags = [ ( m, tm.GetCurrent( self._tag_service_key, ClientTags.TAG_DISPLAY_STORAGE ), tm.GetPending( self._tag_service_key, ClientTags.TAG_DISPLAY_STORAGE ), tm.GetPetitioned( self._tag_service_key, ClientTags.TAG_DISPLAY_STORAGE ) ) for ( m, tm ) in medias_and_tags_managers ]
    
    for tag in tags:
        
        # per action, select only the media for which the update is meaningful
        if choice_action == HC.CONTENT_UPDATE_ADD: media_to_affect = [ m for ( m, mc, mp, mpt ) in medias_and_sets_of_tags if tag not in mc ]
        elif choice_action == HC.CONTENT_UPDATE_DELETE: media_to_affect = [ m for ( m, mc, mp, mpt ) in medias_and_sets_of_tags if tag in mc ]
        elif choice_action == HC.CONTENT_UPDATE_PEND: media_to_affect = [ m for ( m, mc, mp, mpt ) in medias_and_sets_of_tags if tag not in mc and tag not in mp ]
        elif choice_action == HC.CONTENT_UPDATE_PETITION: media_to_affect = [ m for ( m, mc, mp, mpt ) in medias_and_sets_of_tags if tag in mc and tag not in mpt ]
        elif choice_action == HC.CONTENT_UPDATE_RESCIND_PEND: media_to_affect = [ m for ( m, mc, mp, mpt ) in medias_and_sets_of_tags if tag in mp ]
        elif choice_action == HC.CONTENT_UPDATE_RESCIND_PETITION: media_to_affect = [ m for ( m, mc, mp, mpt ) in medias_and_sets_of_tags if tag in mpt ]
        
        hashes = set( itertools.chain.from_iterable( ( m.GetHashes() for m in media_to_affect ) ) )
        
        if len( hashes ) > 0:
            
            content_updates = []
            
            if choice_action in ( HC.CONTENT_UPDATE_ADD, HC.CONTENT_UPDATE_PEND ):
                
                recent_tags.add( tag )
                
            
            content_updates.append( HydrusData.ContentUpdate( HC.CONTENT_TYPE_MAPPINGS, choice_action, ( tag, hashes ), reason = reason ) )
            
            if len( content_updates ) > 0:
                
                if not self._immediate_commit:
                    
                    # not committing yet: apply to our duplicated media so the display updates
                    for m in media_to_affect:
                        
                        mt = m.GetTagsManager()
                        
                        for content_update in content_updates:
                            
                            mt.ProcessContentUpdate( self._tag_service_key, content_update )
                            
                        
                    
                
                content_updates_group.extend( content_updates )
                
            
        
    
    num_recent_tags = HG.client_controller.new_options.GetNoneableInteger( 'num_recent_tags' )
    
    if len( recent_tags ) > 0 and num_recent_tags is not None:
        
        # remember a sample of what was just added for the 'recent tags' suggestions
        if len( recent_tags ) > num_recent_tags:
            
            recent_tags = random.sample( recent_tags, num_recent_tags )
            
        
        HG.client_controller.Write( 'push_recent_tags', self._tag_service_key, recent_tags )
        
    
    if len( content_updates_group ) > 0:
        
        if self._immediate_commit:
            
            service_keys_to_content_updates = { self._tag_service_key : content_updates_group }
            
            HG.client_controller.WriteSynchronous( 'content_updates', service_keys_to_content_updates )
            
        else:
            
            self._groups_of_content_updates.append( content_updates_group )
            
        
    
    self._suggested_tags.MediaUpdated()
    
    self._tags_box.SetTagsByMedia( self._media )
def _MigrateTags( self ):
    """Open the 'migrate tags' frame for this panel's files, then OK this dialog.
    
    The frame is opened via a deferred call because self.OK() tears this panel
    down; by the time do_it runs, self may be destroyed. do_it therefore uses
    only its bound arguments and never touches self.
    """
    
    hashes = set()
    
    for m in self._media:
        
        hashes.update( m.GetHashes() )
        
    
    def do_it( tag_service_key, hashes ):
        
        tlw = HG.client_controller.GetMainTLW()
        
        frame = ClientGUITopLevelWindowsPanels.FrameThatTakesScrollablePanel( tlw, 'migrate tags' )
        
        # use the bound tag_service_key, not self._tag_service_key: the original
        # closed over self here, which self.OK() below may already have destroyed
        panel = ClientGUIScrolledPanelsReview.MigrateTagsPanel( frame, tag_service_key, hashes )
        
        frame.SetPanel( panel )
        
    
    QP.CallAfter( do_it, self._tag_service_key, hashes )
    
    self.OK()
def _Copy( self ):
    """Copy the selected tags (or, with no selection, all current+pending tags) to the clipboard."""
    
    tags = list( self._tags_box.GetSelectedTags() )
    
    if len( tags ) == 0:
        
        # no selection: fall back to every current or pending tag across the media
        ( current_tags_to_count, deleted_tags_to_count, pending_tags_to_count, petitioned_tags_to_count ) = ClientMedia.GetMediasTagCount( self._media, self._tag_service_key, ClientTags.TAG_DISPLAY_STORAGE )
        
        tags = set( current_tags_to_count.keys() ).union( pending_tags_to_count.keys() )
        
    
    if len( tags ) > 0:
        
        sorted_tags = HydrusTags.SortNumericTags( tags )
        
        HG.client_controller.pub( 'clipboard', 'text', os.linesep.join( sorted_tags ) )
        
    

def _FlipShowDeleted( self ):
    """Toggle display of deleted tags in the tag box."""
    
    self._show_deleted = not self._show_deleted
    
    self._tags_box.SetShow( 'deleted', self._show_deleted )
    

def _ModifyMappers( self ):
    """Open an account-modification panel for the users who mapped the selected tags (janitor tool)."""
    
    contents = []
    
    tags = self._tags_box.GetSelectedTags()
    
    if len( tags ) == 0:
        
        QW.QMessageBox.information( self, 'No tags selected!', 'Please select some tags first!' )
        
        return
        
    
    hashes_and_current_tags = [ ( m.GetHashes(), m.GetTagsManager().GetCurrent( self._tag_service_key, ClientTags.TAG_DISPLAY_STORAGE ) ) for m in self._media ]
    
    for tag in tags:
        
        # every ( tag, hash ) mapping that currently exists on these media
        hashes_iter = itertools.chain.from_iterable( ( hashes for ( hashes, current_tags ) in hashes_and_current_tags if tag in current_tags ) )
        
        contents.extend( HydrusNetwork.Content( HC.CONTENT_TYPE_MAPPING, ( tag, hash ) ) for hash in hashes_iter )
        
    
    if len( contents ) > 0:
        
        subject_account_identifiers = [ HydrusNetwork.AccountIdentifier( content = content ) for content in contents ]
        
        frame = ClientGUITopLevelWindowsPanels.FrameThatTakesScrollablePanel( self.window().parentWidget(), 'manage accounts' )
        
        panel = ClientGUIHydrusNetwork.ModifyAccountsPanel( frame, self._tag_service_key, subject_account_identifiers )
        
        frame.SetPanel( panel )
        
    

def _Paste( self ):
    """Read newline-separated tags from the clipboard and add them (add-only)."""
    
    try:
        
        text = HG.client_controller.GetClipboardText()
        
    except HydrusExceptions.DataMissing as e:
        
        QW.QMessageBox.warning( self, 'Warning', str(e) )
        
        return
        
    
    try:
        
        tags = HydrusTags.CleanTags( HydrusText.DeserialiseNewlinedTexts( text ) )
        
        self.AddTags( tags, only_add = True )
        
    except Exception as e:
        
        QW.QMessageBox.warning( self, 'Warning', 'I could not understand what was in the clipboard' )
        
    

def _RemoveTagsButton( self ):
    """Remove the selected tags, or all removable (current/pending) tags if nothing is selected."""
    
    removable_tags = set()
    
    for tags_manager in ( m.GetTagsManager() for m in self._media ):
        
        removable_tags.update( tags_manager.GetCurrent( self._tag_service_key, ClientTags.TAG_DISPLAY_STORAGE ) )
        removable_tags.update( tags_manager.GetPending( self._tag_service_key, ClientTags.TAG_DISPLAY_STORAGE ) )
        
    
    selected_tags = list( self._tags_box.GetSelectedTags() )
    
    if len( selected_tags ) == 0:
        
        tags_to_remove = list( removable_tags )
        
    else:
        
        tags_to_remove = [ tag for tag in selected_tags if tag in removable_tags ]
        
    
    self.RemoveTags( HydrusTags.SortNumericTags( tags_to_remove ) )
def AddTags( self, tags, only_add = False ):
    """Add tags from the input/suggestions, honouring the 'allow remove on input' option."""
    
    if not self._new_options.GetBoolean( 'allow_remove_on_manage_tags_input' ):
        
        only_add = True
        
    
    if len( tags ) > 0:
        
        self.EnterTags( tags, only_add = only_add )
        
    

def CleanBeforeDestroy( self ):
    """Cancel any outstanding autocomplete fetch before the widget dies."""
    
    self._add_tag_box.CancelCurrentResultsFetchJob()
    

def ClearMedia( self ):
    """Forget all media, leaving an empty panel."""
    
    self.SetMedia( set() )
    

def EnterTags( self, tags, only_add = False ):
    """Public entry point for adding/toggling tags."""
    
    if len( tags ) > 0:
        
        self._EnterTags( tags, only_add = only_add )
        
    

def GetGroupsOfContentUpdates( self ):
    """Return ( tag_service_key, list of uncommitted content update groups )."""
    
    return ( self._tag_service_key, self._groups_of_content_updates )
    

def HasChanges( self ):
    """True if there are uncommitted content updates on this page."""
    
    return len( self._groups_of_content_updates ) > 0
    

def OK( self ):
    """Ask the parent dialog to OK."""
    
    self.okSignal.emit()
    

def ProcessApplicationCommand( self, command: CAC.ApplicationCommand ):
    """Handle a shortcut command; return True if it was consumed here."""
    
    if not command.IsSimpleCommand():
        
        return False
        
    
    action = command.GetData()
    
    if action == CAC.SIMPLE_SET_SEARCH_FOCUS:
        
        self.SetTagBoxFocus()
        
        return True
        
    
    if action in ( CAC.SIMPLE_SHOW_AND_FOCUS_MANAGE_TAGS_FAVOURITE_TAGS, CAC.SIMPLE_SHOW_AND_FOCUS_MANAGE_TAGS_RELATED_TAGS, CAC.SIMPLE_SHOW_AND_FOCUS_MANAGE_TAGS_FILE_LOOKUP_SCRIPT_TAGS, CAC.SIMPLE_SHOW_AND_FOCUS_MANAGE_TAGS_RECENT_TAGS ):
        
        self._suggested_tags.TakeFocusForUser( action )
        
        return True
        
    
    if action == CAC.SIMPLE_REFRESH_RELATED_TAGS:
        
        self._suggested_tags.RefreshRelatedThorough()
        
        return True
        
    
    return False
    

def ProcessContentUpdates( self, service_keys_to_content_updates ):
    """Apply externally published content updates to our media copies and refresh the displays."""
    
    for ( service_key, content_updates ) in list( service_keys_to_content_updates.items() ):
        
        for content_update in content_updates:
            
            affected_hashes = content_update.GetHashes()
            
            for m in self._media:
                
                if HydrusData.SetsIntersect( m.GetHashes(), affected_hashes ):
                    
                    m.GetMediaResult().ProcessContentUpdate( service_key, content_update )
                    
                
            
        
    
    self._tags_box.SetTagsByMedia( self._media )
    
    self._suggested_tags.MediaUpdated()
    

def RemoveTags( self, tags ):
    """Remove/petition the given tags, optionally confirming with the user first."""
    
    if len( tags ) == 0:
        
        return
        
    
    if self._new_options.GetBoolean( 'yes_no_on_remove_on_manage_tags' ):
        
        if len( tags ) < 10:
            
            message = 'Are you sure you want to remove these tags:' + os.linesep * 2 + os.linesep.join( ( HydrusText.ElideText( tag, 64 ) for tag in tags ) )
            
        else:
            
            message = 'Are you sure you want to remove these ' + HydrusData.ToHumanInt( len( tags ) ) + ' tags?'
            
        
        if ClientGUIDialogsQuick.GetYesNo( self, message ) != QW.QDialog.Accepted:
            
            return
            
        
    
    self._EnterTags( tags, only_remove = True )
    

def SetMedia( self, media ):
    """Swap in a new media collection (None counts as empty) and refresh the tag displays."""
    
    self._media = set() if media is None else media
    
    self._tags_box.SetTagsByMedia( self._media )
    
    self._suggested_tags.SetMedia( self._media )
    

def SetTagBoxFocus( self ):
    """Put keyboard focus on the tag autocomplete input."""
    
    self._add_tag_box.setFocus( QC.Qt.OtherFocusReason )
class ManageTagParents( ClientGUIScrolledPanels.ManagePanel ):
    """Dialog panel for editing tag parent pairs, with one tab per writable tag service."""
    
    def __init__( self, parent, tags = None ):
        """
        :param parent: parent widget
        :param tags: optional starting tags to seed each page with
        """
        
        ClientGUIScrolledPanels.ManagePanel.__init__( self, parent )
        
        self._tag_repositories = ClientGUICommon.BetterNotebook( self )
        
        default_tag_repository_key = HC.options[ 'default_tag_repository' ]
        
        # local tag services always qualify; repositories only if we may petition parents
        services = list( HG.client_controller.services_manager.GetServices( ( HC.LOCAL_TAG, ) ) )
        
        services.extend( [ service for service in HG.client_controller.services_manager.GetServices( ( HC.TAG_REPOSITORY, ) ) if service.HasPermission( HC.CONTENT_TYPE_TAG_PARENTS, HC.PERMISSION_ACTION_PETITION ) ] )
        
        for service in services:
            
            name = service.GetName()
            service_key = service.GetServiceKey()
            
            page = self._Panel( self._tag_repositories, service_key, tags )
            
            select = service_key == default_tag_repository_key
            
            self._tag_repositories.addTab( page, name )
            
            if select: self._tag_repositories.setCurrentWidget( page )
            
        
        vbox = QP.VBoxLayout()
        
        QP.AddToLayout( vbox, self._tag_repositories, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        self.widget().setLayout( vbox )
def _SetSearchFocus( self ):
    """Put keyboard focus on the current page's tag input box."""
    
    page = self._tag_repositories.currentWidget()
    
    if page is None:
        
        return
        
    
    page.SetTagBoxFocus()
    

def CommitChanges( self ):
    """Gather content updates from every page and write any non-empty ones in one call."""
    
    pages_updates = ( page.GetContentUpdates() for page in self._tag_repositories.GetPages() )
    
    service_keys_to_content_updates = { service_key : content_updates for ( service_key, content_updates ) in pages_updates if len( content_updates ) > 0 }
    
    if len( service_keys_to_content_updates ) > 0:
        
        HG.client_controller.Write( 'content_updates', service_keys_to_content_updates )
        
    

def UserIsOKToOK( self ):
    """If the current page has a half-entered pair, confirm the OK with the user first."""
    
    if not self._tag_repositories.currentWidget().HasUncommittedPair():
        
        return True
        
    
    message = 'Are you sure you want to OK? You have an uncommitted pair.'
    
    return ClientGUIDialogsQuick.GetYesNo( self, message ) == QW.QDialog.Accepted
class _Panel( QW.QWidget ):
    """One tag service's page within ManageTagParents: pair list, child/parent entry boxes, and add button."""
    
    def __init__( self, parent, service_key, tags = None ):
        """
        :param parent: the notebook this page lives in
        :param service_key: the tag service whose parent pairs this page edits
        :param tags: optional starting tags, handed to THREADInitialise below
        """
        
        QW.QWidget.__init__( self, parent )
        
        self._service_key = service_key
        
        self._service = HG.client_controller.services_manager.GetService( self._service_key )
        
        # local services edit directly; repositories pend/petition with reasons
        self._i_am_local_tag_service = self._service.GetServiceType() == HC.LOCAL_TAG
        
        # petition reasons, keyed by ( child, parent ) pair
        self._pairs_to_reasons = {}
        
        # status -> set of pairs: as loaded from the db, and as currently edited
        self._original_statuses_to_pairs = collections.defaultdict( set )
        self._current_statuses_to_pairs = collections.defaultdict( set )
        
        self._show_all = QW.QCheckBox( self )
        
        listctrl_panel = ClientGUIListCtrl.BetterListCtrlPanel( self )
        
        self._tag_parents = ClientGUIListCtrl.BetterListCtrl( listctrl_panel, CGLC.COLUMN_LIST_TAG_PARENTS.ID, 8, self._ConvertPairToListCtrlTuples, delete_key_callback = self._ListCtrlActivated, activation_callback = self._ListCtrlActivated )
        
        listctrl_panel.SetListCtrl( self._tag_parents )
        
        self._tag_parents.Sort()
        
        # import menu
        menu_items = []
        
        menu_items.append( ( 'normal', 'from clipboard', 'Load parents from text in your clipboard.', HydrusData.Call( self._ImportFromClipboard, False ) ) )
        menu_items.append( ( 'normal', 'from clipboard (only add pairs--no deletions)', 'Load parents from text in your clipboard.', HydrusData.Call( self._ImportFromClipboard, True ) ) )
        menu_items.append( ( 'normal', 'from .txt file', 'Load parents from a .txt file.', HydrusData.Call( self._ImportFromTXT, False ) ) )
        menu_items.append( ( 'normal', 'from .txt file (only add pairs--no deletions)', 'Load parents from a .txt file.', HydrusData.Call( self._ImportFromTXT, True ) ) )
        
        listctrl_panel.AddMenuButton( 'import', menu_items )
        
        # export menu
        menu_items = []
        
        menu_items.append( ( 'normal', 'to clipboard', 'Save selected parents to your clipboard.', self._ExportToClipboard ) )
        menu_items.append( ( 'normal', 'to .txt file', 'Save selected parents to a .txt file.', self._ExportToTXT ) )
        
        listctrl_panel.AddMenuButton( 'export', menu_items, enabled_only_on_selection = True )
        
        self._children = ClientGUIListBoxes.ListBoxTagsStringsAddRemove( self, self._service_key, ClientTags.TAG_DISPLAY_ACTUAL )
        self._parents = ClientGUIListBoxes.ListBoxTagsStringsAddRemove( self, self._service_key, ClientTags.TAG_DISPLAY_ACTUAL )
        
        ( gumpf, preview_height ) = ClientGUIFunctions.ConvertTextToPixels( self._children, ( 12, 6 ) )
        
        self._children.setMinimumHeight( preview_height )
        self._parents.setMinimumHeight( preview_height )
        
        # inputs and the add button stay disabled until THREADInitialise has loaded the pairs
        self._child_input = ClientGUIACDropdown.AutoCompleteDropdownTagsWrite( self, self.EnterChildren, CC.LOCAL_FILE_SERVICE_KEY, service_key, show_paste_button = True )
        self._child_input.setEnabled( False )
        
        self._parent_input = ClientGUIACDropdown.AutoCompleteDropdownTagsWrite( self, self.EnterParents, CC.LOCAL_FILE_SERVICE_KEY, service_key, show_paste_button = True )
        self._parent_input.setEnabled( False )
        
        self._add = QW.QPushButton( 'add', self )
        self._add.clicked.connect( self.EventAddButton )
        self._add.setEnabled( False )
        
        self._status_st = ClientGUICommon.BetterStaticText( self, 'initialising\u2026' + os.linesep + '.' )
        self._sync_status_st = ClientGUICommon.BetterStaticText( self, '' )
        self._sync_status_st.setWordWrap( True )
        self._count_st = ClientGUICommon.BetterStaticText( self, '' )
        
        # layout
        children_vbox = QP.VBoxLayout()
        
        QP.AddToLayout( children_vbox, ClientGUICommon.BetterStaticText( self, label = 'set children' ), CC.FLAGS_CENTER )
        QP.AddToLayout( children_vbox, self._children, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        parents_vbox = QP.VBoxLayout()
        
        QP.AddToLayout( parents_vbox, ClientGUICommon.BetterStaticText( self, label = 'set parents' ), CC.FLAGS_CENTER )
        QP.AddToLayout( parents_vbox, self._parents, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        tags_box = QP.HBoxLayout()
        
        QP.AddToLayout( tags_box, children_vbox, CC.FLAGS_EXPAND_BOTH_WAYS )
        QP.AddToLayout( tags_box, parents_vbox, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        input_box = QP.HBoxLayout()
        
        QP.AddToLayout( input_box, self._child_input, CC.FLAGS_EXPAND_BOTH_WAYS )
        QP.AddToLayout( input_box, self._parent_input, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        vbox = QP.VBoxLayout()
        
        QP.AddToLayout( vbox, self._status_st, CC.FLAGS_EXPAND_PERPENDICULAR )
        QP.AddToLayout( vbox, self._sync_status_st, CC.FLAGS_EXPAND_PERPENDICULAR )
        QP.AddToLayout( vbox, self._count_st, CC.FLAGS_EXPAND_PERPENDICULAR )
        QP.AddToLayout( vbox, ClientGUICommon.WrapInText(self._show_all,self,'show all pairs'), CC.FLAGS_EXPAND_PERPENDICULAR )
        QP.AddToLayout( vbox, listctrl_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
        QP.AddToLayout( vbox, self._add, CC.FLAGS_ON_RIGHT )
        QP.AddToLayout( vbox, tags_box, CC.FLAGS_EXPAND_SIZER_BOTH_WAYS )
        QP.AddToLayout( vbox, input_box, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
        
        self.setLayout( vbox )
        
        # ui refresh wiring
        self._tag_parents.itemSelectionChanged.connect( self._SetButtonStatus )
        
        self._children.listBoxChanged.connect( self._UpdateListCtrlData )
        self._parents.listBoxChanged.connect( self._UpdateListCtrlData )
        self._show_all.clicked.connect( self._UpdateListCtrlData )
        
        # load the current pairs off the Qt thread; enables the inputs when done
        HG.client_controller.CallToThread( self.THREADInitialise, tags, self._service_key )
def _AddPairs( self, pairs, add_only = False ):
    """Stage child->parent pairs, routing each by its current status.

    Pairs already pending are offered for rescind, already petitioned for
    un-petition, already current for removal-petition, and genuinely new
    ones are pended -- asking the user for a reason where the service
    requires one. With add_only = True, only genuinely new pairs are acted on.
    """
    pairs = list( pairs )
    # sort by the parent tag so dialog listings read sensibly
    pairs.sort( key = lambda c_p: HydrusTags.ConvertTagToSortable( c_p[1] ) )
    new_pairs = []
    current_pairs = []
    petitioned_pairs = []
    pending_pairs = []
    # bucket each pair by what the user can do with it right now
    for pair in pairs:
        if pair in self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ]:
            if not add_only:
                pending_pairs.append( pair )
        elif pair in self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ]:
            petitioned_pairs.append( pair )
        elif pair in self._original_statuses_to_pairs[ HC.CONTENT_STATUS_CURRENT ]:
            if not add_only:
                current_pairs.append( pair )
        elif self._CanAdd( pair ):
            new_pairs.append( pair )
    affected_pairs = []
    if len( new_pairs ) > 0:
        do_it = True
        if not self._i_am_local_tag_service:
            # a repository needs a reason unless the user can moderate
            if self._service.HasPermission( HC.CONTENT_TYPE_TAG_PARENTS, HC.PERMISSION_ACTION_MODERATE ):
                reason = 'admin'
            else:
                if len( new_pairs ) > 10:
                    pair_strings = 'The many pairs you entered.'
                else:
                    pair_strings = os.linesep.join( ( child + '->' + parent for ( child, parent ) in new_pairs ) )
                message = 'Enter a reason for:' + os.linesep * 2 + pair_strings + os.linesep * 2 + 'To be added. A janitor will review your request.'
                suggestions = []
                suggestions.append( 'obvious by definition (a sword is a weapon)' )
                suggestions.append( 'character/series/studio/etc... belonging (character x belongs to series y)' )
                with ClientGUIDialogs.DialogTextEntry( self, message, suggestions = suggestions ) as dlg:
                    if dlg.exec() == QW.QDialog.Accepted:
                        reason = dlg.GetValue()
                    else:
                        do_it = False
            if do_it:
                for pair in new_pairs: self._pairs_to_reasons[ pair ] = reason
        if do_it:
            self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ].update( new_pairs )
            affected_pairs.extend( new_pairs )
    else:
        # no new pairs entered: offer to petition the existing ones for removal
        if len( current_pairs ) > 0:
            do_it = True
            if not self._i_am_local_tag_service:
                if len( current_pairs ) > 10:
                    pair_strings = 'The many pairs you entered.'
                else:
                    pair_strings = os.linesep.join( ( child + '->' + parent for ( child, parent ) in current_pairs ) )
                if len( current_pairs ) > 1:
                    message = 'The pairs:' + os.linesep * 2 + pair_strings + os.linesep * 2 + 'Already exist.'
                else:
                    message = 'The pair ' + pair_strings + ' already exists.'
                result = ClientGUIDialogsQuick.GetYesNo( self, message, title = 'Choose what to do.', yes_label = 'petition to remove', no_label = 'do nothing' )
                if result == QW.QDialog.Accepted:
                    if self._service.HasPermission( HC.CONTENT_TYPE_TAG_PARENTS, HC.PERMISSION_ACTION_MODERATE ):
                        reason = 'admin'
                    else:
                        message = 'Enter a reason for:'
                        message += os.linesep * 2
                        message += pair_strings
                        message += os.linesep * 2
                        message += 'to be removed. A janitor will review your petition.'
                        suggestions = []
                        suggestions.append( 'obvious typo/mistake' )
                        with ClientGUIDialogs.DialogTextEntry( self, message, suggestions = suggestions ) as dlg:
                            if dlg.exec() == QW.QDialog.Accepted:
                                reason = dlg.GetValue()
                            else:
                                do_it = False
                    if do_it:
                        for pair in current_pairs: self._pairs_to_reasons[ pair ] = reason
                else:
                    do_it = False
            if do_it:
                self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ].update( current_pairs )
                affected_pairs.extend( current_pairs )
    if len( pending_pairs ) > 0:
        # offer to rescind the pends
        if len( pending_pairs ) > 10:
            pair_strings = 'The many pairs you entered.'
        else:
            pair_strings = os.linesep.join( ( child + '->' + parent for ( child, parent ) in pending_pairs ) )
        if len( pending_pairs ) > 1:
            message = 'The pairs:' + os.linesep * 2 + pair_strings + os.linesep * 2 + 'Are pending.'
        else:
            message = 'The pair ' + pair_strings + ' is pending.'
        result = ClientGUIDialogsQuick.GetYesNo( self, message, title = 'Choose what to do.', yes_label = 'rescind the pend', no_label = 'do nothing' )
        if result == QW.QDialog.Accepted:
            self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ].difference_update( pending_pairs )
            affected_pairs.extend( pending_pairs )
    if len( petitioned_pairs ) > 0:
        # offer to rescind the petitions
        if len( petitioned_pairs ) > 10:
            pair_strings = 'The many pairs you entered.'
        else:
            pair_strings = os.linesep.join( ( child + '->' + parent for ( child, parent ) in petitioned_pairs ) )
        if len( petitioned_pairs ) > 1:
            message = 'The pairs:' + os.linesep * 2 + pair_strings + os.linesep * 2 + 'Are petitioned.'
        else:
            message = 'The pair ' + pair_strings + ' is petitioned.'
        result = ClientGUIDialogsQuick.GetYesNo( self, message, title = 'Choose what to do.', yes_label = 'rescind the petition', no_label = 'do nothing' )
        if result == QW.QDialog.Accepted:
            self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ].difference_update( petitioned_pairs )
            affected_pairs.extend( petitioned_pairs )
    if len( affected_pairs ) > 0:
        # refresh only the rows we touched: add, update, or remove each
        def in_current( pair ):
            for status in ( HC.CONTENT_STATUS_CURRENT, HC.CONTENT_STATUS_PENDING, HC.CONTENT_STATUS_PETITIONED ):
                if pair in self._current_statuses_to_pairs[ status ]:
                    return True
            return False
        affected_pairs = [ ( self._tag_parents.HasData( pair ), in_current( pair ), pair ) for pair in affected_pairs ]
        to_add = [ pair for ( exists, current, pair ) in affected_pairs if not exists ]
        to_update = [ pair for ( exists, current, pair ) in affected_pairs if exists and current ]
        to_delete = [ pair for ( exists, current, pair ) in affected_pairs if exists and not current ]
        self._tag_parents.AddDatas( to_add )
        self._tag_parents.UpdateDatas( to_update )
        self._tag_parents.DeleteDatas( to_delete )
        self._tag_parents.Sort()
def _CanAdd( self, potential_pair ):
    """Return True if the child->parent pair may be added without creating a loop."""
    ( potential_child, potential_parent ) = potential_pair
    # a tag cannot be its own parent
    if potential_child == potential_parent:
        return False
    pending = self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ]
    petitioned = self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ]
    live_pairs = self._current_statuses_to_pairs[ HC.CONTENT_STATUS_CURRENT ].union( pending ).difference( petitioned )
    children_in_use = { child for ( child, parent ) in live_pairs }
    # if the proposed parent is not a child anywhere, no cycle is possible
    if potential_parent not in children_in_use:
        return True
    simple_children_to_parents = ClientManagers.BuildSimpleChildrenToParents( live_pairs )
    if ClientManagers.LoopInSimpleChildrenToParents( simple_children_to_parents, potential_child, potential_parent ):
        QW.QMessageBox.critical( self, 'Error', 'Adding '+potential_child+'->'+potential_parent+' would create a loop!' )
        return False
    return True
def _ConvertPairToListCtrlTuples( self, pair ):
    """Build ( display, sort ) row tuples for one child->parent pair.

    NOTE(review): assumes every pair is pending, petitioned, or current;
    otherwise 'status' would be unbound here -- confirm callers guarantee it.
    """
    ( child, parent ) = pair
    if pair in self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ]:
        status = HC.CONTENT_STATUS_PENDING
    elif pair in self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ]:
        status = HC.CONTENT_STATUS_PETITIONED
    elif pair in self._original_statuses_to_pairs[ HC.CONTENT_STATUS_CURRENT ]:
        status = HC.CONTENT_STATUS_CURRENT
    # the status prefix (e.g. pend/petition marker) is the display column
    sign = HydrusData.ConvertStatusToPrefix( status )
    pretty_status = sign
    display_tuple = ( pretty_status, child, parent )
    sort_tuple = ( status, child, parent )
    return ( display_tuple, sort_tuple )
def _DeserialiseImportString( self, import_string ):
    """Parse newline-separated tag text into ( child, parent ) pairs."""
    tags = HydrusText.DeserialiseNewlinedTexts( import_string )
    if len( tags ) % 2 == 1:
        raise Exception( 'Uneven number of tags found!' )
    # consecutive tags pair up: ( tags[0], tags[1] ), ( tags[2], tags[3] ), ...
    return list( zip( tags[ : : 2 ], tags[ 1 : : 2 ] ) )
def _ExportToClipboard( self ):
    """Publish the selected pairs to the clipboard as newline-separated tags."""
    HG.client_controller.pub( 'clipboard', 'text', self._GetExportString() )
def _ExportToTXT( self ):
    """Write the selected pairs to a user-chosen .txt file."""
    export_string = self._GetExportString()
    with QP.FileDialog( self, 'Set the export path.', default_filename = 'parents.txt', acceptMode = QW.QFileDialog.AcceptSave, fileMode = QW.QFileDialog.AnyFile ) as dlg:
        if dlg.exec() != QW.QDialog.Accepted:
            return
        with open( dlg.GetPath(), 'w', encoding = 'utf-8' ) as f:
            f.write( export_string )
def _GetExportString( self ):
    """Return the selected pairs flattened to newline-separated tags."""
    selected_pairs = self._tag_parents.GetData( only_selected = True )
    flattened_tags = itertools.chain.from_iterable( selected_pairs )
    return os.linesep.join( flattened_tags )
def _ImportFromClipboard( self, add_only = False ):
    """Load pairs from clipboard text and stage them."""
    try:
        raw_text = HG.client_controller.GetClipboardText()
    except HydrusExceptions.DataMissing as e:
        QW.QMessageBox.critical( self, 'Error', str(e) )
        return
    self._AddPairs( self._DeserialiseImportString( raw_text ), add_only = add_only )
    self._UpdateListCtrlData()
def _ImportFromTXT( self, add_only = False ):
    """Load pairs from a user-chosen text file and stage them."""
    with QP.FileDialog( self, 'Select the file to import.', acceptMode = QW.QFileDialog.AcceptOpen ) as dlg:
        if dlg.exec() != QW.QDialog.Accepted:
            return
        with open( dlg.GetPath(), 'r', encoding = 'utf-8' ) as f:
            raw_text = f.read()
    self._AddPairs( self._DeserialiseImportString( raw_text ), add_only = add_only )
    self._UpdateListCtrlData()
def _ListCtrlActivated( self ):
    """Re-submit the selected rows through _AddPairs, toggling their pend/petition state.

    Fix: removed the unused local 'parents_to_children = collections.defaultdict( set )' --
    it was built and never read.
    """
    selected_pairs = self._tag_parents.GetData( only_selected = True )
    if len( selected_pairs ) > 0:
        self._AddPairs( selected_pairs )
def _SetButtonStatus( self ):
    """Enable 'add' only when both a child and a parent are staged."""
    can_add = len( self._children.GetTags() ) > 0 and len( self._parents.GetTags() ) > 0
    self._add.setEnabled( can_add )
def _UpdateListCtrlData( self ):
    """Repopulate the pairs list from the staged tags and the 'show all' flag."""
    pertinent_tags = self._children.GetTags().union( self._parents.GetTags() )
    show_all = self._show_all.isChecked()
    self._tag_parents.DeleteDatas( self._tag_parents.GetData() )
    pairs_to_show = set()
    for ( status, pairs ) in self._current_statuses_to_pairs.items():
        # deleted pairs are never displayed
        if status == HC.CONTENT_STATUS_DELETED:
            continue
        if len( pertinent_tags ) == 0:
            # nothing staged: always show pending/petitioned, current only on 'show all'
            if status != HC.CONTENT_STATUS_CURRENT or show_all:
                pairs_to_show.update( pairs )
        else:
            # show pairs touching a staged tag, or everything on 'show all'
            pairs_to_show.update( pair for pair in pairs if show_all or pair[0] in pertinent_tags or pair[1] in pertinent_tags )
    self._tag_parents.AddDatas( pairs_to_show )
    self._tag_parents.Sort()
def EnterChildren( self, tags ):
    """Stage tags as children, pulling them off the parents side first."""
    if len( tags ) > 0:
        # a tag cannot sit on both sides at once
        self._parents.RemoveTags( tags )
        self._children.EnterTags( tags )
    self._UpdateListCtrlData()
    self._SetButtonStatus()
def EnterParents( self, tags ):
    """Stage tags as parents, pulling them off the children side first."""
    if len( tags ) > 0:
        # a tag cannot sit on both sides at once
        self._children.RemoveTags( tags )
        self._parents.EnterTags( tags )
    self._UpdateListCtrlData()
    self._SetButtonStatus()
def EventAddButton( self ):
    """Cross every staged child with every staged parent and add the pairs."""
    staged_children = self._children.GetTags()
    staged_parents = self._parents.GetTags()
    self._AddPairs( list( itertools.product( staged_children, staged_parents ) ) )
    # clear both sides for the next entry
    self._children.SetTags( [] )
    self._parents.SetTags( [] )
    self._UpdateListCtrlData()
    self._SetButtonStatus()
def GetContentUpdates( self ):
    """Return ( service_key, content_updates ) describing the staged parent changes."""
    content_updates = []
    if self._i_am_local_tag_service:
        # local service: pends become straight adds, petitions become straight deletes
        for pair in self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ]:
            content_updates.append( HydrusData.ContentUpdate( HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_UPDATE_ADD, pair ) )
        for pair in self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ]:
            content_updates.append( HydrusData.ContentUpdate( HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_UPDATE_DELETE, pair ) )
    else:
        current_pending = self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ]
        original_pending = self._original_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ]
        current_petitioned = self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ]
        original_petitioned = self._original_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ]
        # new pends/petitions carry the user's reason; rescinds need none
        for pair in current_pending.difference( original_pending ):
            content_updates.append( HydrusData.ContentUpdate( HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_UPDATE_PEND, pair, reason = self._pairs_to_reasons[ pair ] ) )
        for pair in original_pending.difference( current_pending ):
            content_updates.append( HydrusData.ContentUpdate( HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_UPDATE_RESCIND_PEND, pair ) )
        for pair in current_petitioned.difference( original_petitioned ):
            content_updates.append( HydrusData.ContentUpdate( HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_UPDATE_PETITION, pair, reason = self._pairs_to_reasons[ pair ] ) )
        for pair in original_petitioned.difference( current_petitioned ):
            content_updates.append( HydrusData.ContentUpdate( HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_UPDATE_RESCIND_PETITION, pair ) )
    return ( self._service_key, content_updates )
def HasUncommittedPair( self ):
    """True when both sides hold staged tags that were never added."""
    have_children = len( self._children.GetTags() ) > 0
    have_parents = len( self._parents.GetTags() ) > 0
    return have_children and have_parents
def SetTagBoxFocus( self ):
    """Focus the child input until children exist, then the parent input."""
    if len( self._children.GetTags() ) == 0:
        target = self._child_input
    else:
        target = self._parent_input
    target.setFocus( QC.Qt.OtherFocusReason )
def THREADInitialise( self, tags, service_key ):
    """Background fetch of this service's parent pairs and sync status.

    Runs off the Qt thread; the nested qt_code closure is bounced back to
    the Qt thread via QP.CallAfter to populate the widgets once the db
    reads are done.
    """
    def qt_code( original_statuses_to_pairs, current_statuses_to_pairs, service_keys_to_work_to_do ):
        # the widget may have been destroyed while we were reading
        if not self or not QP.isValid( self ):
            return
        self._original_statuses_to_pairs = original_statuses_to_pairs
        self._current_statuses_to_pairs = current_statuses_to_pairs
        self._status_st.setText( 'Files with a tag on the left will also be given the tag on the right.' + os.linesep + 'As an experiment, this panel will only display the \'current\' pairs for those tags entered below.' )
        looking_good = True
        if len( service_keys_to_work_to_do ) == 0:
            looking_good = False
            status_text = 'No services currently apply these parents. Changes here will have no effect unless parent application is changed later.'
        else:
            # split the applying services into fully-synced and still-working
            synced_names = sorted( ( HG.client_controller.services_manager.GetName( s_k ) for ( s_k, work_to_do ) in service_keys_to_work_to_do.items() if not work_to_do ) )
            unsynced_names = sorted( ( HG.client_controller.services_manager.GetName( s_k ) for ( s_k, work_to_do ) in service_keys_to_work_to_do.items() if work_to_do ) )
            synced_string = ', '.join( ( '"{}"'.format( name ) for name in synced_names ) )
            unsynced_string = ', '.join( ( '"{}"'.format( name ) for name in unsynced_names ) )
            if len( unsynced_names ) == 0:
                service_part = '{} apply these parents and are fully synced.'.format( synced_string )
            else:
                looking_good = False
                if len( synced_names ) > 0:
                    service_part = '{} apply these parents and are fully synced, but {} still have work to do.'.format( synced_string, unsynced_string )
                else:
                    service_part = '{} apply these parents and still have sync work to do.'.format( unsynced_string )
            # describe when the client will actually do that sync work
            if HG.client_controller.new_options.GetBoolean( 'tag_display_maintenance_during_active' ):
                maintenance_part = 'Parents are set to sync all the time in the background.'
                if looking_good:
                    changes_part = 'Changes from this dialog should be reflected soon after closing the dialog.'
                else:
                    changes_part = 'It may take some time for changes here to apply everywhere, though.'
            else:
                looking_good = False
                if HG.client_controller.new_options.GetBoolean( 'tag_display_maintenance_during_idle' ):
                    maintenance_part = 'Parents are set to sync only when you are not using the client.'
                    changes_part = 'It may take some time for changes here to apply.'
                else:
                    maintenance_part = 'Parents are not set to sync.'
                    changes_part = 'Changes here will not apply unless sync is manually forced to run.'
            s = os.linesep * 2
            status_text = s.join( ( service_part, maintenance_part, changes_part ) )
        self._sync_status_st.setText( status_text )
        # green styling when everything is applied and synced, warning otherwise
        if looking_good:
            self._sync_status_st.setObjectName( 'HydrusValid' )
        else:
            self._sync_status_st.setObjectName( 'HydrusWarning' )
        self._sync_status_st.style().polish( self._sync_status_st )
        self._count_st.setText( 'Starting with '+HydrusData.ToHumanInt(len(original_statuses_to_pairs[HC.CONTENT_STATUS_CURRENT]))+' pairs.' )
        # inputs were disabled during construction; ready for user entry now
        self._child_input.setEnabled( True )
        self._parent_input.setEnabled( True )
        if tags is None:
            self._UpdateListCtrlData()
        else:
            self.EnterChildren( tags )
    original_statuses_to_pairs = HG.client_controller.Read( 'tag_parents', service_key )
    ( master_service_keys_to_sibling_applicable_service_keys, master_service_keys_to_parent_applicable_service_keys ) = HG.client_controller.Read( 'tag_display_application' )
    # services that apply this service's parents somewhere
    service_keys_we_care_about = { s_k for ( s_k, s_ks ) in master_service_keys_to_parent_applicable_service_keys.items() if service_key in s_ks }
    service_keys_to_work_to_do = {}
    for s_k in service_keys_we_care_about:
        status = HG.client_controller.Read( 'tag_display_maintenance_status', s_k )
        work_to_do = status[ 'num_parents_to_sync' ] > 0
        service_keys_to_work_to_do[ s_k ] = work_to_do
    # the editable copy starts as a per-status set copy of what the db holds
    current_statuses_to_pairs = collections.defaultdict( set )
    current_statuses_to_pairs.update( { key : set( value ) for ( key, value ) in list(original_statuses_to_pairs.items()) } )
    QP.CallAfter( qt_code, original_statuses_to_pairs, current_statuses_to_pairs, service_keys_to_work_to_do )
class ManageTagSiblings( ClientGUIScrolledPanels.ManagePanel ):
def __init__( self, parent, tags = None ):
    """Build a notebook with one sibling-editing page per writable tag service."""
    ClientGUIScrolledPanels.ManagePanel.__init__( self, parent )
    self._tag_repositories = ClientGUICommon.BetterNotebook( self )
    #
    default_tag_repository_key = HC.options[ 'default_tag_repository' ]
    # local tag services first, then repositories the user may petition
    services = list( HG.client_controller.services_manager.GetServices( ( HC.LOCAL_TAG, ) ) )
    services.extend( [ service for service in HG.client_controller.services_manager.GetServices( ( HC.TAG_REPOSITORY, ) ) if service.HasPermission( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.PERMISSION_ACTION_PETITION ) ] )
    for service in services:
        name = service.GetName()
        service_key = service.GetServiceKey()
        page = self._Panel( self._tag_repositories, service_key, tags )
        # start on the user's default tag repository tab
        select = service_key == default_tag_repository_key
        self._tag_repositories.addTab( page, name )
        if select: self._tag_repositories.setCurrentIndex( self._tag_repositories.indexOf( page ) )
    #
    vbox = QP.VBoxLayout()
    QP.AddToLayout( vbox, self._tag_repositories, CC.FLAGS_EXPAND_BOTH_WAYS )
    self.widget().setLayout( vbox )
def _SetSearchFocus( self ):
    """Forward focus to the current page's tag input, if a page exists."""
    current_page = self._tag_repositories.currentWidget()
    if current_page is None:
        return
    current_page.SetTagBoxFocus()
def CommitChanges( self ):
    """Gather content updates from every page and write them in one call."""
    updates_by_service = {}
    for page in self._tag_repositories.GetPages():
        ( service_key, content_updates ) = page.GetContentUpdates()
        if len( content_updates ) > 0:
            updates_by_service[ service_key ] = content_updates
    # only hit the db when something actually changed
    if len( updates_by_service ) > 0:
        HG.client_controller.Write( 'content_updates', updates_by_service )
def UserIsOKToOK( self ):
    """Confirm with the user before OK when an uncommitted pair is staged."""
    if not self._tag_repositories.currentWidget().HasUncommittedPair():
        return True
    message = 'Are you sure you want to OK? You have an uncommitted pair.'
    result = ClientGUIDialogsQuick.GetYesNo( self, message )
    return result == QW.QDialog.Accepted
def EventServiceChanged( self, event ):
    """On tab change, refocus the new page's tag input on the Qt thread."""
    current_page = self._tag_repositories.currentWidget()
    if current_page is None:
        return
    HG.client_controller.CallAfterQtSafe( current_page, current_page.SetTagBoxFocus )
class _Panel( QW.QWidget ):
def __init__( self, parent, service_key, tags = None ):
    """One notebook page: edits tag siblings for a single tag service."""
    QW.QWidget.__init__( self, parent )
    self._service_key = service_key
    self._service = HG.client_controller.services_manager.GetService( self._service_key )
    # local tag services skip the reason/petition workflow entirely
    self._i_am_local_tag_service = self._service.GetServiceType() == HC.LOCAL_TAG
    # status -> set of ( old, new ) pairs; 'original' mirrors the db, 'current' is the edited state
    self._original_statuses_to_pairs = collections.defaultdict( set )
    self._current_statuses_to_pairs = collections.defaultdict( set )
    self._pairs_to_reasons = {}
    self._current_new = None
    self._show_all = QW.QCheckBox( self )
    listctrl_panel = ClientGUIListCtrl.BetterListCtrlPanel( self )
    self._tag_siblings = ClientGUIListCtrl.BetterListCtrl( listctrl_panel, CGLC.COLUMN_LIST_TAG_SIBLINGS.ID, 8, self._ConvertPairToListCtrlTuples, delete_key_callback = self._ListCtrlActivated, activation_callback = self._ListCtrlActivated )
    listctrl_panel.SetListCtrl( self._tag_siblings )
    self._tag_siblings.Sort()
    # import menu: clipboard/.txt, each with an add-only variant
    menu_items = []
    menu_items.append( ( 'normal', 'from clipboard', 'Load siblings from text in your clipboard.', HydrusData.Call( self._ImportFromClipboard, False ) ) )
    menu_items.append( ( 'normal', 'from clipboard (only add pairs--no deletions)', 'Load siblings from text in your clipboard.', HydrusData.Call( self._ImportFromClipboard, True ) ) )
    menu_items.append( ( 'normal', 'from .txt file', 'Load siblings from a .txt file.', HydrusData.Call( self._ImportFromTXT, False ) ) )
    menu_items.append( ( 'normal', 'from .txt file (only add pairs--no deletions)', 'Load siblings from a .txt file.', HydrusData.Call( self._ImportFromTXT, True ) ) )
    listctrl_panel.AddMenuButton( 'import', menu_items )
    menu_items = []
    menu_items.append( ( 'normal', 'to clipboard', 'Save selected siblings to your clipboard.', self._ExportToClipboard ) )
    menu_items.append( ( 'normal', 'to .txt file', 'Save selected siblings to a .txt file.', self._ExportToTXT ) )
    listctrl_panel.AddMenuButton( 'export', menu_items, enabled_only_on_selection = True )
    self._old_siblings = ClientGUIListBoxes.ListBoxTagsStringsAddRemove( self, self._service_key, ClientTags.TAG_DISPLAY_ACTUAL )
    self._new_sibling = ClientGUICommon.BetterStaticText( self )
    ( gumpf, preview_height ) = ClientGUIFunctions.ConvertTextToPixels( self._old_siblings, ( 12, 6 ) )
    self._old_siblings.setMinimumHeight( preview_height )
    # inputs stay disabled until THREADInitialise has fetched the service's pairs
    self._old_input = ClientGUIACDropdown.AutoCompleteDropdownTagsWrite( self, self.EnterOlds, CC.LOCAL_FILE_SERVICE_KEY, service_key, show_paste_button = True )
    self._old_input.setEnabled( False )
    self._new_input = ClientGUIACDropdown.AutoCompleteDropdownTagsWrite( self, self.SetNew, CC.LOCAL_FILE_SERVICE_KEY, service_key )
    self._new_input.setEnabled( False )
    self._add = QW.QPushButton( 'add', self )
    self._add.clicked.connect( self.EventAddButton )
    self._add.setEnabled( False )
    #
    self._status_st = ClientGUICommon.BetterStaticText( self, 'initialising\u2026' )
    self._sync_status_st = ClientGUICommon.BetterStaticText( self, '' )
    self._sync_status_st.setWordWrap( True )
    self._count_st = ClientGUICommon.BetterStaticText( self, '' )
    # layout: olds list beside the centred ideal-tag label
    old_sibling_box = QP.VBoxLayout()
    QP.AddToLayout( old_sibling_box, ClientGUICommon.BetterStaticText( self, label = 'set tags to be replaced' ), CC.FLAGS_CENTER )
    QP.AddToLayout( old_sibling_box, self._old_siblings, CC.FLAGS_EXPAND_BOTH_WAYS )
    new_sibling_box = QP.VBoxLayout()
    QP.AddToLayout( new_sibling_box, ClientGUICommon.BetterStaticText( self, label = 'set new ideal tag' ), CC.FLAGS_CENTER )
    new_sibling_box.addStretch( 1 )
    QP.AddToLayout( new_sibling_box, self._new_sibling, CC.FLAGS_EXPAND_PERPENDICULAR )
    new_sibling_box.addStretch( 1 )
    text_box = QP.HBoxLayout()
    QP.AddToLayout( text_box, old_sibling_box, CC.FLAGS_EXPAND_SIZER_BOTH_WAYS )
    QP.AddToLayout( text_box, new_sibling_box, CC.FLAGS_EXPAND_SIZER_BOTH_WAYS )
    input_box = QP.HBoxLayout()
    QP.AddToLayout( input_box, self._old_input )
    QP.AddToLayout( input_box, self._new_input )
    vbox = QP.VBoxLayout()
    QP.AddToLayout( vbox, self._status_st, CC.FLAGS_EXPAND_PERPENDICULAR )
    QP.AddToLayout( vbox, self._sync_status_st, CC.FLAGS_EXPAND_PERPENDICULAR )
    QP.AddToLayout( vbox, self._count_st, CC.FLAGS_EXPAND_PERPENDICULAR )
    QP.AddToLayout( vbox, ClientGUICommon.WrapInText(self._show_all,self,'show all pairs'), CC.FLAGS_EXPAND_PERPENDICULAR )
    QP.AddToLayout( vbox, listctrl_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
    QP.AddToLayout( vbox, self._add, CC.FLAGS_ON_RIGHT )
    QP.AddToLayout( vbox, text_box, CC.FLAGS_EXPAND_SIZER_BOTH_WAYS )
    QP.AddToLayout( vbox, input_box, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
    self.setLayout( vbox )
    #
    self._tag_siblings.itemSelectionChanged.connect( self._SetButtonStatus )
    self._show_all.clicked.connect( self._UpdateListCtrlData )
    self._old_siblings.listBoxChanged.connect( self._UpdateListCtrlData )
    # fetch pairs and sync status off the Qt thread
    HG.client_controller.CallToThread( self.THREADInitialise, tags, self._service_key )
def _AddPairs( self, pairs, add_only = False, remove_only = False, default_reason = None ):
    """Stage old->new sibling pairs, routing each by its current status.

    add_only skips rescind/petition actions; remove_only skips adds and
    rescind-petitions; default_reason bypasses the reason dialog (used by
    _AutoPetitionConflicts).
    """
    pairs = list( pairs )
    # sort by the new (ideal) tag so dialog listings read sensibly
    pairs.sort( key = lambda c_p1: HydrusTags.ConvertTagToSortable( c_p1[1] ) )
    new_pairs = []
    current_pairs = []
    petitioned_pairs = []
    pending_pairs = []
    # bucket each pair by what the user can do with it right now
    for pair in pairs:
        if pair in self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ]:
            if not add_only:
                pending_pairs.append( pair )
        elif pair in self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ]:
            if not remove_only:
                petitioned_pairs.append( pair )
        elif pair in self._original_statuses_to_pairs[ HC.CONTENT_STATUS_CURRENT ]:
            if not add_only:
                current_pairs.append( pair )
        elif not remove_only and self._CanAdd( pair ):
            new_pairs.append( pair )
    if len( new_pairs ) > 0:
        do_it = True
        if not self._i_am_local_tag_service:
            # a repository needs a reason unless one was supplied or the user moderates
            if default_reason is not None:
                reason = default_reason
            elif self._service.HasPermission( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.PERMISSION_ACTION_MODERATE ):
                reason = 'admin'
            else:
                if len( new_pairs ) > 10:
                    pair_strings = 'The many pairs you entered.'
                else:
                    pair_strings = os.linesep.join( ( old + '->' + new for ( old, new ) in new_pairs ) )
                suggestions = []
                suggestions.append( 'merging underscores/typos/phrasing/unnamespaced to a single uncontroversial good tag' )
                suggestions.append( 'rewording/namespacing based on preference' )
                message = 'Enter a reason for:' + os.linesep * 2 + pair_strings + os.linesep * 2 + 'To be added. A janitor will review your petition.'
                with ClientGUIDialogs.DialogTextEntry( self, message, suggestions = suggestions ) as dlg:
                    if dlg.exec() == QW.QDialog.Accepted:
                        reason = dlg.GetValue()
                    else:
                        do_it = False
            if do_it:
                for pair in new_pairs: self._pairs_to_reasons[ pair ] = reason
        if do_it:
            self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ].update( new_pairs )
    else:
        # no new pairs entered: petition the existing ones for removal
        if len( current_pairs ) > 0:
            do_it = True
            if not self._i_am_local_tag_service:
                if default_reason is not None:
                    reason = default_reason
                elif self._service.HasPermission( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.PERMISSION_ACTION_MODERATE ):
                    reason = 'admin'
                else:
                    if len( current_pairs ) > 10:
                        pair_strings = 'The many pairs you entered.'
                    else:
                        pair_strings = os.linesep.join( ( old + '->' + new for ( old, new ) in current_pairs ) )
                    message = 'Enter a reason for:'
                    message += os.linesep * 2
                    message += pair_strings
                    message += os.linesep * 2
                    message += 'to be removed. You will see the delete as soon as you upload, but a janitor will review your petition to decide if all users should receive it as well.'
                    suggestions = []
                    suggestions.append( 'obvious typo/mistake' )
                    suggestions.append( 'disambiguation' )
                    suggestions.append( 'correcting to repository standard' )
                    with ClientGUIDialogs.DialogTextEntry( self, message, suggestions = suggestions ) as dlg:
                        if dlg.exec() == QW.QDialog.Accepted:
                            reason = dlg.GetValue()
                        else:
                            do_it = False
                if do_it:
                    for pair in current_pairs:
                        self._pairs_to_reasons[ pair ] = reason
            if do_it:
                self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ].update( current_pairs )
    if len( pending_pairs ) > 0:
        # offer to rescind the pends
        if len( pending_pairs ) > 10:
            pair_strings = 'The many pairs you entered.'
        else:
            pair_strings = os.linesep.join( ( old + '->' + new for ( old, new ) in pending_pairs ) )
        if len( pending_pairs ) > 1:
            message = 'The pairs:' + os.linesep * 2 + pair_strings + os.linesep * 2 + 'Are pending.'
        else:
            message = 'The pair ' + pair_strings + ' is pending.'
        result = ClientGUIDialogsQuick.GetYesNo( self, message, title = 'Choose what to do.', yes_label = 'rescind the pend', no_label = 'do nothing' )
        if result == QW.QDialog.Accepted:
            self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ].difference_update( pending_pairs )
    if len( petitioned_pairs ) > 0:
        # offer to rescind the petitions
        if len( petitioned_pairs ) > 10:
            pair_strings = 'The many pairs you entered.'
        else:
            pair_strings = ', '.join( ( old + '->' + new for ( old, new ) in petitioned_pairs ) )
        if len( petitioned_pairs ) > 1:
            message = 'The pairs:' + os.linesep * 2 + pair_strings + os.linesep * 2 + 'Are petitioned.'
        else:
            message = 'The pair ' + pair_strings + ' is petitioned.'
        result = ClientGUIDialogsQuick.GetYesNo( self, message, title = 'Choose what to do.', yes_label = 'rescind the petition', no_label = 'do nothing' )
        if result == QW.QDialog.Accepted:
            self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ].difference_update( petitioned_pairs )
def _AutoPetitionConflicts( self, pairs ):
    """Auto-petition existing siblings whose 'old' tag would be reassigned by *pairs*.

    NOTE(review): the default_reason below uses 'new' as left over from the
    last loop iteration; if *pairs* maps olds to several different news, the
    reason text may name the wrong target -- confirm intended.
    """
    # the pairs that will actually be in force after this dialog commits
    current_pairs = self._current_statuses_to_pairs[ HC.CONTENT_STATUS_CURRENT ].union( self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ] ).difference( self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ] )
    current_olds_to_news = dict( current_pairs )
    current_olds = { current_old for ( current_old, current_new ) in current_pairs }
    pairs_to_auto_petition = set()
    for ( old, new ) in pairs:
        if old in current_olds:
            # the old tag already maps somewhere else -- that mapping must go
            conflicting_new = current_olds_to_news[ old ]
            if conflicting_new != new:
                conflicting_pair = ( old, conflicting_new )
                pairs_to_auto_petition.add( conflicting_pair )
    if len( pairs_to_auto_petition ) > 0:
        pairs_to_auto_petition = list( pairs_to_auto_petition )
        self._AddPairs( pairs_to_auto_petition, remove_only = True, default_reason = 'AUTO-PETITION TO REASSIGN TO: ' + new )
def _CanAdd( self, potential_pair ):
    """Return True if the old->new sibling pair may be added.

    Rejects pairs whose old tag already has a sibling (ambiguity) and
    pairs that would close a loop in the sibling chain, popping an error
    dialog in either case.
    """
    ( potential_old, potential_new ) = potential_pair
    # the pairs that will actually be in force after this dialog commits
    current_pairs = self._current_statuses_to_pairs[ HC.CONTENT_STATUS_CURRENT ].union( self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ] ).difference( self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ] )
    current_olds = { old for ( old, new ) in current_pairs }
    # test for ambiguity
    if potential_old in current_olds:
        QW.QMessageBox.critical( self, 'Error', 'There already is a relationship set for the tag '+potential_old+'.' )
        return False
    # test for loops
    if potential_new in current_olds:
        seen_tags = set()
        d = dict( current_pairs )
        next_new = potential_new
        # walk the old->new chain; each old maps to at most one new here
        while next_new in d:
            next_new = d[ next_new ]
            if next_new == potential_old:
                QW.QMessageBox.critical( self, 'Error', 'Adding '+potential_old+'->'+potential_new+' would create a loop!' )
                return False
            # a repeat means the chain already contains a pre-existing loop
            if next_new in seen_tags:
                message = 'The pair you mean to add seems to connect to a sibling loop already in your database! Please undo this loop first. The tags involved in the loop are:'
                message += os.linesep * 2
                message += ', '.join( seen_tags )
                QW.QMessageBox.critical( self, 'Error', message )
                return False
            seen_tags.add( next_new )
    return True
def _ConvertPairToListCtrlTuples( self, pair ):
    """Build ( display, sort ) row tuples for one old->new sibling pair.

    NOTE(review): assumes every pair is pending, petitioned, or current;
    otherwise 'status' would be unbound here -- confirm callers guarantee it.
    """
    ( old, new ) = pair
    if pair in self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ]:
        status = HC.CONTENT_STATUS_PENDING
    elif pair in self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ]:
        status = HC.CONTENT_STATUS_PETITIONED
    elif pair in self._original_statuses_to_pairs[ HC.CONTENT_STATUS_CURRENT ]:
        status = HC.CONTENT_STATUS_CURRENT
    sign = HydrusData.ConvertStatusToPrefix( status )
    pretty_status = sign
    existing_olds = self._old_siblings.GetTags()
    note = ''
    # flag rows the currently staged 'olds' will auto-conflict with on add
    if old in existing_olds:
        if status == HC.CONTENT_STATUS_PENDING:
            note = 'CONFLICT: Will be rescinded on add.'
        elif status == HC.CONTENT_STATUS_CURRENT:
            note = 'CONFLICT: Will be petitioned/deleted on add.'
    display_tuple = ( pretty_status, old, new, note )
    sort_tuple = ( status, old, new, note )
    return ( display_tuple, sort_tuple )
def _DeserialiseImportString( self, import_string ):
    """Parse newline-separated tag text into ( old, new ) pairs."""
    tags = HydrusText.DeserialiseNewlinedTexts( import_string )
    if len( tags ) % 2 == 1:
        raise Exception( 'Uneven number of tags found!' )
    # consecutive tags pair up: ( tags[0], tags[1] ), ( tags[2], tags[3] ), ...
    return list( zip( tags[ : : 2 ], tags[ 1 : : 2 ] ) )
def _ExportToClipboard( self ):
    """Publish the selected pairs to the clipboard as newline-separated tags."""
    HG.client_controller.pub( 'clipboard', 'text', self._GetExportString() )
def _ExportToTXT( self ):
    """Write the selected pairs to a user-chosen .txt file."""
    export_string = self._GetExportString()
    with QP.FileDialog( self, 'Set the export path.', default_filename = 'siblings.txt', acceptMode = QW.QFileDialog.AcceptSave, fileMode = QW.QFileDialog.AnyFile ) as dlg:
        if dlg.exec() != QW.QDialog.Accepted:
            return
        with open( dlg.GetPath(), 'w', encoding = 'utf-8' ) as f:
            f.write( export_string )
def _GetExportString( self ):
    """Return the selected pairs flattened to newline-separated tags."""
    selected_pairs = self._tag_siblings.GetData( only_selected = True )
    flattened_tags = itertools.chain.from_iterable( selected_pairs )
    return os.linesep.join( flattened_tags )
def _ImportFromClipboard( self, add_only = False ):
    """Load pairs from clipboard text, auto-petition conflicts, and stage them."""
    try:
        raw_text = HG.client_controller.GetClipboardText()
    except HydrusExceptions.DataMissing as e:
        QW.QMessageBox.critical( self, 'Error', str(e) )
        return
    pairs = self._DeserialiseImportString( raw_text )
    self._AutoPetitionConflicts( pairs )
    self._AddPairs( pairs, add_only = add_only )
    self._UpdateListCtrlData()
def _ImportFromTXT( self, add_only = False ):
    """Load pairs from a user-chosen text file, auto-petition conflicts, and stage them."""
    with QP.FileDialog( self, 'Select the file to import.', acceptMode = QW.QFileDialog.AcceptOpen ) as dlg:
        if dlg.exec() != QW.QDialog.Accepted:
            return
        with open( dlg.GetPath(), 'r', encoding = 'utf-8' ) as f:
            raw_text = f.read()
    pairs = self._DeserialiseImportString( raw_text )
    self._AutoPetitionConflicts( pairs )
    self._AddPairs( pairs, add_only = add_only )
    self._UpdateListCtrlData()
def _ListCtrlActivated( self ):
pairs = self._tag_siblings.GetData( only_selected = True )
if len( pairs ) > 0:
self._AddPairs( pairs )
self._UpdateListCtrlData()
def _SetButtonStatus( self ):
if self._current_new is None or len( self._old_siblings.GetTags() ) == 0:
self._add.setEnabled( False )
else:
self._add.setEnabled( True )
def _UpdateListCtrlData( self ):
olds = self._old_siblings.GetTags()
pertinent_tags = set( olds )
if self._current_new is not None:
pertinent_tags.add( self._current_new )
self._tag_siblings.DeleteDatas( self._tag_siblings.GetData() )
all_pairs = set()
show_all = self._show_all.isChecked()
for ( status, pairs ) in self._current_statuses_to_pairs.items():
if status == HC.CONTENT_STATUS_DELETED:
continue
if len( pertinent_tags ) == 0:
if status == HC.CONTENT_STATUS_CURRENT and not show_all:
continue
# show all pending/petitioned
all_pairs.update( pairs )
else:
# show all appropriate
for pair in pairs:
( a, b ) = pair
if a in pertinent_tags or b in pertinent_tags or show_all:
all_pairs.add( pair )
self._tag_siblings.AddDatas( all_pairs )
self._tag_siblings.Sort()
def EnterOlds( self, olds ):
if self._current_new in olds:
self.SetNew( set() )
self._old_siblings.EnterTags( olds )
self._UpdateListCtrlData()
self._SetButtonStatus()
def EventAddButton( self ):
if self._current_new is not None and len( self._old_siblings.GetTags() ) > 0:
olds = self._old_siblings.GetTags()
pairs = [ ( old, self._current_new ) for old in olds ]
self._AutoPetitionConflicts( pairs )
self._AddPairs( pairs )
self._old_siblings.SetTags( set() )
self.SetNew( set() )
self._UpdateListCtrlData()
self._SetButtonStatus()
def GetContentUpdates( self ):
# we make it manually here because of the mass pending tags done (but not undone on a rescind) on a pending pair!
# we don't want to send a pend and then rescind it, cause that will spam a thousand bad tags and not undo it
content_updates = []
if self._i_am_local_tag_service:
for pair in self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ]:
content_updates.append( HydrusData.ContentUpdate( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.CONTENT_UPDATE_ADD, pair ) )
for pair in self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ]:
content_updates.append( HydrusData.ContentUpdate( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.CONTENT_UPDATE_DELETE, pair ) )
else:
current_pending = self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ]
original_pending = self._original_statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ]
current_petitioned = self._current_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ]
original_petitioned = self._original_statuses_to_pairs[ HC.CONTENT_STATUS_PETITIONED ]
new_pends = current_pending.difference( original_pending )
rescinded_pends = original_pending.difference( current_pending )
new_petitions = current_petitioned.difference( original_petitioned )
rescinded_petitions = original_petitioned.difference( current_petitioned )
content_updates.extend( ( HydrusData.ContentUpdate( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.CONTENT_UPDATE_PEND, pair, reason = self._pairs_to_reasons[ pair ] ) for pair in new_pends ) )
content_updates.extend( ( HydrusData.ContentUpdate( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.CONTENT_UPDATE_RESCIND_PEND, pair ) for pair in rescinded_pends ) )
content_updates.extend( ( HydrusData.ContentUpdate( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.CONTENT_UPDATE_PETITION, pair, reason = self._pairs_to_reasons[ pair ] ) for pair in new_petitions ) )
content_updates.extend( ( HydrusData.ContentUpdate( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.CONTENT_UPDATE_RESCIND_PETITION, pair ) for pair in rescinded_petitions ) )
return ( self._service_key, content_updates )
def HasUncommittedPair( self ):
return len( self._old_siblings.GetTags() ) > 0 and self._current_new is not None
def SetNew( self, new_tags ):
if len( new_tags ) == 0:
self._new_sibling.clear()
self._current_new = None
else:
new = list( new_tags )[0]
self._old_siblings.RemoveTags( { new } )
self._new_sibling.setText( new )
self._current_new = new
self._UpdateListCtrlData()
self._SetButtonStatus()
def SetTagBoxFocus( self ):
if len( self._old_siblings.GetTags() ) == 0:
self._old_input.setFocus( QC.Qt.OtherFocusReason )
else:
self._new_input.setFocus( QC.Qt.OtherFocusReason )
def THREADInitialise( self, tags, service_key ):
def qt_code( original_statuses_to_pairs, current_statuses_to_pairs, service_keys_to_work_to_do ):
if not self or not QP.isValid( self ):
return
self._original_statuses_to_pairs = original_statuses_to_pairs
self._current_statuses_to_pairs = current_statuses_to_pairs
self._status_st.setText( 'Tags on the left will be appear as those on the right.' )
looking_good = True
if len( service_keys_to_work_to_do ) == 0:
looking_good = False
status_text = 'No services currently apply these siblings. Changes here will have no effect unless sibling application is changed later.'
else:
synced_names = sorted( ( HG.client_controller.services_manager.GetName( s_k ) for ( s_k, work_to_do ) in service_keys_to_work_to_do.items() if not work_to_do ) )
unsynced_names = sorted( ( HG.client_controller.services_manager.GetName( s_k ) for ( s_k, work_to_do ) in service_keys_to_work_to_do.items() if work_to_do ) )
synced_string = ', '.join( ( '"{}"'.format( name ) for name in synced_names ) )
unsynced_string = ', '.join( ( '"{}"'.format( name ) for name in unsynced_names ) )
if len( unsynced_names ) == 0:
service_part = '{} apply these siblings and are fully synced.'.format( synced_string )
else:
looking_good = False
if len( synced_names ) > 0:
service_part = '{} apply these siblings and are fully synced, but {} still have work to do.'.format( synced_string, unsynced_string )
else:
service_part = '{} apply these siblings but still have sync work to do.'.format( unsynced_string )
if HG.client_controller.new_options.GetBoolean( 'tag_display_maintenance_during_active' ):
maintenance_part = 'Siblings are set to sync all the time in the background.'
if looking_good:
changes_part = 'Changes from this dialog should be reflected soon after closing the dialog.'
else:
changes_part = 'It may take some time for changes here to apply everywhere, though.'
else:
looking_good = False
if HG.client_controller.new_options.GetBoolean( 'tag_display_maintenance_during_idle' ):
maintenance_part = 'Siblings are set to sync only when you are not using the client.'
changes_part = 'It may take some time for changes here to apply.'
else:
maintenance_part = 'Siblings are not set to sync.'
changes_part = 'Changes here will not apply unless sync is manually forced to run.'
s = os.linesep * 2
status_text = s.join( ( service_part, maintenance_part, changes_part ) )
self._sync_status_st.setText( status_text )
if looking_good:
self._sync_status_st.setObjectName( 'HydrusValid' )
else:
self._sync_status_st.setObjectName( 'HydrusWarning' )
self._sync_status_st.style().polish( self._sync_status_st )
self._count_st.setText( 'Starting with '+HydrusData.ToHumanInt(len(original_statuses_to_pairs[HC.CONTENT_STATUS_CURRENT]))+' pairs.' )
self._old_input.setEnabled( True )
self._new_input.setEnabled( True )
if tags is None:
self._UpdateListCtrlData()
else:
self.EnterOlds( tags )
original_statuses_to_pairs = HG.client_controller.Read( 'tag_siblings', service_key )
( master_service_keys_to_sibling_applicable_service_keys, master_service_keys_to_parent_applicable_service_keys ) = HG.client_controller.Read( 'tag_display_application' )
service_keys_we_care_about = { s_k for ( s_k, s_ks ) in master_service_keys_to_sibling_applicable_service_keys.items() if service_key in s_ks }
service_keys_to_work_to_do = {}
for s_k in service_keys_we_care_about:
status = HG.client_controller.Read( 'tag_display_maintenance_status', s_k )
work_to_do = status[ 'num_siblings_to_sync' ] > 0
service_keys_to_work_to_do[ s_k ] = work_to_do
current_statuses_to_pairs = collections.defaultdict( set )
current_statuses_to_pairs.update( { key : set( value ) for ( key, value ) in original_statuses_to_pairs.items() } )
QP.CallAfter( qt_code, original_statuses_to_pairs, current_statuses_to_pairs, service_keys_to_work_to_do )
class ReviewTagDisplayMaintenancePanel( ClientGUIScrolledPanels.ReviewPanel ):
def __init__( self, parent ):
ClientGUIScrolledPanels.ReviewPanel.__init__( self, parent )
self._tag_services_notebook = ClientGUICommon.BetterNotebook( self )
min_width = ClientGUIFunctions.ConvertTextToPixelWidth( self._tag_services_notebook, 100 )
self._tag_services_notebook.setMinimumWidth( min_width )
services = list( HG.client_controller.services_manager.GetServices( HC.REAL_TAG_SERVICES ) )
select_service_key = services[0].GetServiceKey()
for service in services:
service_key = service.GetServiceKey()
name = service.GetName()
page = self._Panel( self._tag_services_notebook, service_key )
self._tag_services_notebook.addTab( page, name )
if service_key == select_service_key:
self._tag_services_notebook.setCurrentWidget( page )
vbox = QP.VBoxLayout()
message = 'Figuring out how tags should appear according to sibling and parent application rules takes time. When you set new rules, the changes do not happen immediately--the client catches up in the background. You can review current progress and force faster sync here.'
self._message = ClientGUICommon.BetterStaticText( self, label = message )
self._message.setWordWrap( True )
self._sync_status = ClientGUICommon.BetterStaticText( self )
self._sync_status.setWordWrap( True )
self._UpdateStatusText()
QP.AddToLayout( vbox, self._message, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, self._sync_status, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, self._tag_services_notebook, CC.FLAGS_EXPAND_BOTH_WAYS )
self.widget().setLayout( vbox )
HG.client_controller.sub( self, '_UpdateStatusText', 'notify_new_menu_option' )
def _UpdateStatusText( self ):
if HG.client_controller.new_options.GetBoolean( 'tag_display_maintenance_during_active' ):
self._sync_status.setText( 'Siblings and parents are set to sync all the time. If there is work to do here, it should be cleared out in real time as you watch.' )
self._sync_status.setObjectName( 'HydrusValid' )
else:
if HG.client_controller.new_options.GetBoolean( 'tag_display_maintenance_during_idle' ):
self._sync_status.setText( 'Siblings and parents are only set to sync during idle time. If there is work to do here, it should be cleared out when you are not using the client.' )
else:
self._sync_status.setText( 'Siblings and parents are not set to sync in the background at any time. If there is work to do here, you can force it now by clicking \'work now!\' button.' )
self._sync_status.setObjectName( 'HydrusWarning' )
self._sync_status.style().polish( self._sync_status )
class _Panel( QW.QWidget ):
def __init__( self, parent, service_key ):
QW.QWidget.__init__( self, parent )
self._service_key = service_key
self._siblings_and_parents_st = ClientGUICommon.BetterStaticText( self )
self._progress = ClientGUICommon.TextAndGauge( self )
self._refresh_button = ClientGUICommon.BetterBitmapButton( self, CC.global_pixmaps().refresh, self._StartRefresh )
self._go_faster_button = ClientGUICommon.BetterButton( self, 'work hard now!', self._SyncFaster )
button_hbox = QP.HBoxLayout()
QP.AddToLayout( button_hbox, self._refresh_button, CC.FLAGS_CENTER )
QP.AddToLayout( button_hbox, self._go_faster_button, CC.FLAGS_CENTER )
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, self._siblings_and_parents_st, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, self._progress, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, button_hbox, CC.FLAGS_ON_RIGHT )
vbox.addStretch( 1 )
self.setLayout( vbox )
self._refresh_values_updater = self._InitialiseRefreshValuesUpdater()
HG.client_controller.sub( self, 'NotifyRefresh', 'notify_new_tag_display_sync_status' )
HG.client_controller.sub( self, '_StartRefresh', 'notify_new_tag_display_application' )
self._StartRefresh()
def _InitialiseRefreshValuesUpdater( self ):
service_key = self._service_key
def loading_callable():
self._progress.SetText( 'refreshing\u2026' )
self._refresh_button.setEnabled( False )
running_fast_and_button_is_slow = HG.client_controller.tag_display_maintenance_manager.CurrentlyGoingFaster( self._service_key ) and 'slow' in self._go_faster_button.text()
if not running_fast_and_button_is_slow:
self._go_faster_button.setEnabled( False )
def work_callable():
status = HG.client_controller.Read( 'tag_display_maintenance_status', service_key )
time.sleep( 0.1 )
return status
def publish_callable( result ):
status = result
num_siblings_to_sync = status[ 'num_siblings_to_sync' ]
num_parents_to_sync = status[ 'num_parents_to_sync' ]
num_items_to_regen = num_siblings_to_sync + num_parents_to_sync
if num_items_to_regen == 0:
message = 'All synced!'
elif num_parents_to_sync == 0:
message = '{} siblings to sync.'.format( HydrusData.ToHumanInt( num_siblings_to_sync ) )
elif num_siblings_to_sync == 0:
message = '{} parents to sync.'.format( HydrusData.ToHumanInt( num_parents_to_sync ) )
else:
message = '{} siblings and {} parents to sync.'.format( HydrusData.ToHumanInt( num_siblings_to_sync ), HydrusData.ToHumanInt( num_parents_to_sync ) )
self._siblings_and_parents_st.setText( message )
num_actual_rows = status[ 'num_actual_rows' ]
num_ideal_rows = status[ 'num_ideal_rows' ]
if num_items_to_regen == 0:
if num_ideal_rows == 0:
message = 'No siblings/parents applying to this service.'
else:
message = '{} rules, all synced!'.format( HydrusData.ToHumanInt( num_ideal_rows ) )
value = 1
range = 1
sync_possible = False
else:
value = None
range = None
if num_ideal_rows == 0:
message = 'Removing all siblings/parents, {} rules remaining.'.format( HydrusData.ToHumanInt( num_actual_rows ) )
else:
message = '{} rules applied now, moving to {}.'.format( HydrusData.ToHumanInt( num_actual_rows ), HydrusData.ToHumanInt( num_ideal_rows ) )
if num_actual_rows <= num_ideal_rows:
value = num_actual_rows
range = num_ideal_rows
sync_possible = True
self._progress.SetValue( message, value, range )
self._refresh_button.setEnabled( True )
self._go_faster_button.setVisible( sync_possible )
self._go_faster_button.setEnabled( sync_possible )
if HG.client_controller.tag_display_maintenance_manager.CurrentlyGoingFaster( self._service_key ):
self._go_faster_button.setText( 'slow down!' )
else:
if not HG.client_controller.new_options.GetBoolean( 'tag_display_maintenance_during_active' ):
self._go_faster_button.setText( 'work now!' )
else:
self._go_faster_button.setText( 'work hard now!' )
return ClientGUIAsync.AsyncQtUpdater( self, loading_callable, work_callable, publish_callable )
def _StartRefresh( self ):
self._refresh_values_updater.update()
def _SyncFaster( self ):
HG.client_controller.tag_display_maintenance_manager.FlipSyncFaster( self._service_key )
self._StartRefresh()
def NotifyRefresh( self, service_key ):
if service_key == self._service_key:
self._StartRefresh()
class TagFilterButton( ClientGUICommon.BetterButton ):
def __init__( self, parent, message, tag_filter, only_show_blacklist = False, label_prefix = None ):
ClientGUICommon.BetterButton.__init__( self, parent, 'tag filter', self._EditTagFilter )
self._message = message
self._tag_filter = tag_filter
self._only_show_blacklist = only_show_blacklist
self._label_prefix = label_prefix
self._UpdateLabel()
def _EditTagFilter( self ):
if self._only_show_blacklist:
title = 'edit blacklist'
else:
title = 'edit tag filter'
with ClientGUITopLevelWindowsPanels.DialogEdit( self, title ) as dlg:
namespaces = HG.client_controller.network_engine.domain_manager.GetParserNamespaces()
panel = EditTagFilterPanel( dlg, self._tag_filter, only_show_blacklist = self._only_show_blacklist, namespaces = namespaces, message = self._message )
dlg.SetPanel( panel )
if dlg.exec() == QW.QDialog.Accepted:
self._tag_filter = panel.GetValue()
self._UpdateLabel()
def _UpdateLabel( self ):
if self._only_show_blacklist:
tt = self._tag_filter.ToBlacklistString()
else:
tt = self._tag_filter.ToPermittedString()
if self._label_prefix is not None:
tt = self._label_prefix + tt
button_text = HydrusText.ElideText( tt, 45 )
self.setText( button_text )
self.setToolTip( tt )
def GetValue( self ):
return self._tag_filter
def SetValue( self, tag_filter ):
self._tag_filter = tag_filter
self._UpdateLabel()
class TagSummaryGenerator( HydrusSerialisable.SerialisableBase ):
SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_TAG_SUMMARY_GENERATOR
SERIALISABLE_NAME = 'Tag Summary Generator'
SERIALISABLE_VERSION = 2
def __init__( self, background_colour = None, text_colour = None, namespace_info = None, separator = None, example_tags = None, show = True ):
if background_colour is None:
background_colour = QG.QColor( 223, 227, 230, 255 )
if text_colour is None:
text_colour = QG.QColor( 1, 17, 26, 255 )
if namespace_info is None:
namespace_info = []
namespace_info.append( ( 'creator', '', ', ' ) )
namespace_info.append( ( 'series', '', ', ' ) )
namespace_info.append( ( 'title', '', ', ' ) )
if separator is None:
separator = ' - '
if example_tags is None:
example_tags = []
self._background_colour = background_colour
self._text_colour = text_colour
self._namespace_info = namespace_info
self._separator = separator
self._example_tags = list( example_tags )
self._show = show
self._UpdateNamespaceLookup()
def _GetSerialisableInfo( self ):
bc = self._background_colour
background_colour_rgba = [ bc.red(), bc.green(), bc.blue(), bc.alpha() ]
tc = self._text_colour
text_colour_rgba = [ tc.red(), tc.green(), tc.blue(), tc.alpha() ]
return ( background_colour_rgba, text_colour_rgba, self._namespace_info, self._separator, self._example_tags, self._show )
def _InitialiseFromSerialisableInfo( self, serialisable_info ):
( background_rgba, text_rgba, self._namespace_info, self._separator, self._example_tags, self._show ) = serialisable_info
( r, g, b, a ) = background_rgba
self._background_colour = QG.QColor( r, g, b, a )
( r, g, b, a ) = text_rgba
self._text_colour = QG.QColor( r, g, b, a )
self._namespace_info = [ tuple( row ) for row in self._namespace_info ]
self._UpdateNamespaceLookup()
def _UpdateNamespaceLookup( self ):
self._interesting_namespaces = { namespace for ( namespace, prefix, separator ) in self._namespace_info }
def _UpdateSerialisableInfo( self, version, old_serialisable_info ):
if version == 1:
( namespace_info, separator, example_tags ) = old_serialisable_info
background_rgba = ( 223, 227, 230, 255 )
text_rgba = ( 1, 17, 26, 255 )
show = True
new_serialisable_info = ( background_rgba, text_rgba, namespace_info, separator, example_tags, show )
return ( 2, new_serialisable_info )
def GenerateExampleSummary( self ):
if not self._show:
return 'not showing'
else:
return self.GenerateSummary( self._example_tags )
def GenerateSummary( self, tags, max_length = None ):
if not self._show:
return ''
namespaces_to_subtags = collections.defaultdict( list )
for tag in tags:
( namespace, subtag ) = HydrusTags.SplitTag( tag )
if namespace in self._interesting_namespaces:
namespaces_to_subtags[ namespace ].append( subtag )
for ( namespace, unsorted_l ) in list( namespaces_to_subtags.items() ):
sorted_l = HydrusTags.SortNumericTags( unsorted_l )
sorted_l = HydrusTags.CollapseMultipleSortedNumericTagsToMinMax( sorted_l )
namespaces_to_subtags[ namespace ] = sorted_l
namespace_texts = []
for ( namespace, prefix, separator ) in self._namespace_info:
subtags = namespaces_to_subtags[ namespace ]
if len( subtags ) > 0:
namespace_text = prefix + separator.join( namespaces_to_subtags[ namespace ] )
namespace_texts.append( namespace_text )
summary = self._separator.join( namespace_texts )
if max_length is not None:
summary = summary[:max_length]
return summary
def GetBackgroundColour( self ):
return self._background_colour
def GetTextColour( self ):
return self._text_colour
def ToTuple( self ):
return ( self._background_colour, self._text_colour, self._namespace_info, self._separator, self._example_tags, self._show )
HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_TAG_SUMMARY_GENERATOR ] = TagSummaryGenerator
class EditTagSummaryGeneratorPanel( ClientGUIScrolledPanels.EditPanel ):
def __init__( self, parent: QW.QWidget, tag_summary_generator: TagSummaryGenerator ):
ClientGUIScrolledPanels.EditPanel.__init__( self, parent )
show_panel = ClientGUICommon.StaticBox( self, 'shows' )
self._show = QW.QCheckBox( show_panel )
edit_panel = ClientGUICommon.StaticBox( self, 'edit' )
self._background_colour = ClientGUICommon.AlphaColourControl( edit_panel )
self._text_colour = ClientGUICommon.AlphaColourControl( edit_panel )
self._namespaces_listbox = ClientGUIListBoxes.QueueListBox( edit_panel, 8, self._ConvertNamespaceToListBoxString, self._AddNamespaceInfo, self._EditNamespaceInfo )
self._separator = QW.QLineEdit( edit_panel )
example_panel = ClientGUICommon.StaticBox( self, 'example' )
self._example_tags = QW.QPlainTextEdit( example_panel )
self._test_result = QW.QLineEdit( example_panel )
self._test_result.setReadOnly( True )
( background_colour, text_colour, namespace_info, separator, example_tags, show ) = tag_summary_generator.ToTuple()
self._show.setChecked( show )
self._background_colour.SetValue( background_colour )
self._text_colour.SetValue( text_colour )
self._namespaces_listbox.AddDatas( namespace_info )
self._separator.setText( separator )
self._example_tags.setPlainText( os.linesep.join( example_tags ) )
self._UpdateTest()
rows = []
rows.append( ( 'currently shows (turn off to hide): ', self._show ) )
gridbox = ClientGUICommon.WrapInGrid( show_panel, rows )
show_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
rows = []
rows.append( ( 'background colour: ', self._background_colour ) )
rows.append( ( 'text colour: ', self._text_colour ) )
gridbox = ClientGUICommon.WrapInGrid( edit_panel, rows )
edit_panel.Add( ClientGUICommon.BetterStaticText( edit_panel, 'The colours only work for the thumbnails right now!' ), CC.FLAGS_EXPAND_PERPENDICULAR )
edit_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
edit_panel.Add( self._namespaces_listbox, CC.FLAGS_EXPAND_BOTH_WAYS )
edit_panel.Add( ClientGUICommon.WrapInText( self._separator, edit_panel, 'separator' ), CC.FLAGS_EXPAND_PERPENDICULAR )
example_panel.Add( ClientGUICommon.BetterStaticText( example_panel, 'Enter some newline-separated tags here to see what your current object would generate.' ), CC.FLAGS_EXPAND_PERPENDICULAR )
example_panel.Add( self._example_tags, CC.FLAGS_EXPAND_BOTH_WAYS )
example_panel.Add( self._test_result, CC.FLAGS_EXPAND_PERPENDICULAR )
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, show_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, edit_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
QP.AddToLayout( vbox, example_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
self.widget().setLayout( vbox )
self._show.clicked.connect( self._UpdateTest )
self._separator.textChanged.connect( self._UpdateTest )
self._example_tags.textChanged.connect( self._UpdateTest )
self._namespaces_listbox.listBoxChanged.connect( self._UpdateTest )
def _AddNamespaceInfo( self ):
namespace = ''
prefix = ''
separator = ', '
namespace_info = ( namespace, prefix, separator )
return self._EditNamespaceInfo( namespace_info )
def _ConvertNamespaceToListBoxString( self, namespace_info ):
( namespace, prefix, separator ) = namespace_info
if namespace == '':
pretty_namespace = 'unnamespaced'
else:
pretty_namespace = namespace
pretty_prefix = prefix
pretty_separator = separator
return pretty_namespace + ' | prefix: "' + pretty_prefix + '" | separator: "' + pretty_separator + '"'
def _EditNamespaceInfo( self, namespace_info ):
( namespace, prefix, separator ) = namespace_info
message = 'Edit namespace.'
with ClientGUIDialogs.DialogTextEntry( self, message, namespace, allow_blank = True ) as dlg:
if dlg.exec() == QW.QDialog.Accepted:
namespace = dlg.GetValue()
else:
raise HydrusExceptions.VetoException()
message = 'Edit prefix.'
with ClientGUIDialogs.DialogTextEntry( self, message, prefix, allow_blank = True ) as dlg:
if dlg.exec() == QW.QDialog.Accepted:
prefix = dlg.GetValue()
else:
raise HydrusExceptions.VetoException()
message = 'Edit separator.'
with ClientGUIDialogs.DialogTextEntry( self, message, separator, allow_blank = True ) as dlg:
if dlg.exec() == QW.QDialog.Accepted:
separator = dlg.GetValue()
namespace_info = ( namespace, prefix, separator )
return namespace_info
else:
raise HydrusExceptions.VetoException()
def _UpdateTest( self ):
tag_summary_generator = self.GetValue()
self._test_result.setText( tag_summary_generator.GenerateExampleSummary() )
def GetValue( self ) -> TagSummaryGenerator:
show = self._show.isChecked()
background_colour = self._background_colour.GetValue()
text_colour = self._text_colour.GetValue()
namespace_info = self._namespaces_listbox.GetData()
separator = self._separator.text()
example_tags = HydrusTags.CleanTags( HydrusText.DeserialiseNewlinedTexts( self._example_tags.toPlainText() ) )
return TagSummaryGenerator( background_colour, text_colour, namespace_info, separator, example_tags, show )
class TagSummaryGeneratorButton( ClientGUICommon.BetterButton ):
def __init__( self, parent: QW.QWidget, tag_summary_generator: TagSummaryGenerator ):
label = tag_summary_generator.GenerateExampleSummary()
ClientGUICommon.BetterButton.__init__( self, parent, label, self._Edit )
self._tag_summary_generator = tag_summary_generator
def _Edit( self ):
with ClientGUITopLevelWindowsPanels.DialogEdit( self, 'edit tag summary' ) as dlg:
panel = EditTagSummaryGeneratorPanel( dlg, self._tag_summary_generator )
dlg.SetPanel( panel )
if dlg.exec() == QW.QDialog.Accepted:
self._tag_summary_generator = panel.GetValue()
self.setText( self._tag_summary_generator.GenerateExampleSummary() )
def GetValue( self ) -> TagSummaryGenerator:
return self._tag_summary_generator
| true | true |
f7250c8113f5c4b5fe8357a30be38ead88265b94 | 139 | py | Python | aos_sw_api/globel_models/network_host.py | KennethSoelberg/AOS-Switch | a5a2c54917bbb69fab044bf0b313bcf795642d30 | [
"MIT"
] | null | null | null | aos_sw_api/globel_models/network_host.py | KennethSoelberg/AOS-Switch | a5a2c54917bbb69fab044bf0b313bcf795642d30 | [
"MIT"
] | 1 | 2020-12-24T15:36:56.000Z | 2021-01-28T23:19:57.000Z | aos_sw_api/globel_models/network_host.py | KennethSoelberg/AOS-Switch | a5a2c54917bbb69fab044bf0b313bcf795642d30 | [
"MIT"
] | 1 | 2021-02-16T23:26:28.000Z | 2021-02-16T23:26:28.000Z | from pydantic import BaseModel
from .ip_address import IpAddressModel
class NetworkHostModel(BaseModel):
ip_address: IpAddressModel
| 17.375 | 38 | 0.827338 | from pydantic import BaseModel
from .ip_address import IpAddressModel
class NetworkHostModel(BaseModel):
ip_address: IpAddressModel
| true | true |
f7250f700383b7cc2166cc898173234aba8a6194 | 301 | py | Python | photo/urls.py | firdausa7/MY-GALLERY | 5d2fe2727d760929800c14c11b0ff4c6d081584b | [
"MIT"
] | null | null | null | photo/urls.py | firdausa7/MY-GALLERY | 5d2fe2727d760929800c14c11b0ff4c6d081584b | [
"MIT"
] | 3 | 2020-06-05T23:24:25.000Z | 2021-06-10T22:02:41.000Z | photo/urls.py | firdausa7/MY-GALLERY | 5d2fe2727d760929800c14c11b0ff4c6d081584b | [
"MIT"
] | null | null | null | from django.conf.urls import url
from . import views
urlpatterns=[
#index path
url('^$', views.index,name='index'),
url('location/', views.category, name='location'),
url('category', views.category, name='category'),
url('search/', views.search_results, name='search_results'),
]
| 27.363636 | 64 | 0.671096 | from django.conf.urls import url
from . import views
urlpatterns=[
url('^$', views.index,name='index'),
url('location/', views.category, name='location'),
url('category', views.category, name='category'),
url('search/', views.search_results, name='search_results'),
]
| true | true |
f7250ff72bb64a4cd0a0a78f2a6db54775d4f74e | 3,253 | py | Python | Python/Police_Car_Game/Main.py | wilsonandusa/wilsonwu | 512214c187550f05497732e943f3323c15caeee0 | [
"Unlicense"
] | null | null | null | Python/Police_Car_Game/Main.py | wilsonandusa/wilsonwu | 512214c187550f05497732e943f3323c15caeee0 | [
"Unlicense"
] | null | null | null | Python/Police_Car_Game/Main.py | wilsonandusa/wilsonwu | 512214c187550f05497732e943f3323c15caeee0 | [
"Unlicense"
] | null | null | null | '''copyright Xiaosheng Wu Python game 12/31/2015'''
import pygame, sys
from classes import *
from process import *
pygame.init()
SCREENWIDTH,SCREENHEIGHT = 767,1257
screen = pygame.display.set_mode((SCREENWIDTH,SCREENHEIGHT)) #zero for the flag 32 for color
BackGround = pygame.image.load("images/bg.png")
Header = pygame.image.load("images/Header.png")
clock = pygame.time.Clock()
FPS = 24 #frames per sec
flag = 2 #randint(0,2) # if 1
total_frames = 0#fivesecondinterval = FPS*5
if flag == 0:
car1 = Car(500,750,64,32,"images/car1.png")#if flag == 0: # both car horizontal movement
car2 = Car(300,1000,64,32,"images/car2.png")
bus = Bus(300,300,100,34,"images/bus.png")
copcar = Cop(SCREENWIDTH-90,SCREENHEIGHT-90,90,45,"images/cop.png")
elif flag==1:
car1 = Car(0,700,64,32,"images/car1_down.png")#if flag = 1 # both cars vertical movement
car2 = Car(200,350,64,32,"images/car2_down.png")
copcar = Cop(SCREENWIDTH-90,SCREENHEIGHT-90,90,45,"images/cop.png")
bus = Bus(300,300,100,34,"images/bus_down.png")
elif flag == 2:
car1 = Car(200,100,64,32,"images/car1.png")#blue car vertical red car horizontal
car2 = Car(400,300,64,32,"images/car2_down.png")
car3 = Car(600,500,64,32,"images/car1.png")
car4 = Car(100,700,64,32,"images/car2_down.png")
car5 = Car(200,900,64,32,"images/car1.png")
car6 = Car(300,1100,64,32,"images/car2_down.png")
car7 = Car(200,900,64,32,"images/car1.png")
car8 = Car(300,1100,64,32,"images/car2_down.png")
car9 = Car(200,900,64,32,"images/car1.png")
car10 = Car(300,1100,64,32,"images/car2_down.png")
bus1 = Bus(300,300,100,34,"images/bus.png")
bus2 = Bus(600,300,100,34,"images/bus_down.png")
bus3 = Bus(100,450,100,34,"images/bus_down.png")
copcar = Cop(SCREENWIDTH-90,SCREENHEIGHT-90,90,45,"images/cop.png")
#---------------Main Program Loop------------------
while True:
#PROCESS
process_onecar(copcar,FPS,total_frames,flag)
copProjectile.movement()
#LOGIC
if flag==0:
copcar.motion(SCREENWIDTH,SCREENHEIGHT)
Car.update_all(SCREENWIDTH,SCREENHEIGHT)
Car.bothmovement_x(SCREENWIDTH,SCREENHEIGHT)
Bus.bothmovement_x(SCREENWIDTH,SCREENHEIGHT)
elif flag==1:
copcar.motion(SCREENWIDTH,SCREENHEIGHT)
Car.update_all(SCREENWIDTH,SCREENHEIGHT)
Car.bothmovement_y(SCREENWIDTH,SCREENHEIGHT)
Bus.bothmovement_y(SCREENWIDTH,SCREENHEIGHT)
elif flag == 2:
copcar.motion(SCREENWIDTH,SCREENHEIGHT)
Car.update_all(SCREENWIDTH,SCREENHEIGHT)
car1.car_motion_x(SCREENWIDTH,SCREENHEIGHT)
car2.car_motion_y(SCREENWIDTH,SCREENHEIGHT)
car3.car_motion_x(SCREENWIDTH,SCREENHEIGHT)
car4.car_motion_y(SCREENWIDTH,SCREENHEIGHT)
car5.car_motion_x(SCREENWIDTH,SCREENHEIGHT)
car6.car_motion_y(SCREENWIDTH,SCREENHEIGHT)
car7.car_motion_x(SCREENWIDTH,SCREENHEIGHT)
car8.car_motion_y(SCREENWIDTH,SCREENHEIGHT)
car9.car_motion_x(SCREENWIDTH,SCREENHEIGHT)
car10.car_motion_y(SCREENWIDTH,SCREENHEIGHT)
bus1.bus_motion_x(SCREENWIDTH,SCREENHEIGHT)
bus2.bus_motion_y(SCREENWIDTH,SCREENHEIGHT)
bus3.bus_motion_y(SCREENWIDTH,SCREENHEIGHT)
#LOGIC
total_frames+=1
#DRAW
#screen.fill([255,255,255])aaaa
screen.blit(BackGround,(0,0))
screen.blit(Header,(0,0))
BaseClass.allsprites.draw(screen)
copProjectile.List.draw(screen)
pygame.display.flip()
#DRAW
clock.tick(FPS)
| 37.390805 | 93 | 0.748232 | import pygame, sys
from classes import *
from process import *
pygame.init()
SCREENWIDTH,SCREENHEIGHT = 767,1257
screen = pygame.display.set_mode((SCREENWIDTH,SCREENHEIGHT))
BackGround = pygame.image.load("images/bg.png")
Header = pygame.image.load("images/Header.png")
clock = pygame.time.Clock()
FPS = 24
flag = 2 frames = 0
if flag == 0:
car1 = Car(500,750,64,32,"images/car1.png")mages/car2.png")
bus = Bus(300,300,100,34,"images/bus.png")
copcar = Cop(SCREENWIDTH-90,SCREENHEIGHT-90,90,45,"images/cop.png")
elif flag==1:
car1 = Car(0,700,64,32,"images/car1_down.png")mages/car2_down.png")
copcar = Cop(SCREENWIDTH-90,SCREENHEIGHT-90,90,45,"images/cop.png")
bus = Bus(300,300,100,34,"images/bus_down.png")
elif flag == 2:
car1 = Car(200,100,64,32,"images/car1.png")
car2 = Car(400,300,64,32,"images/car2_down.png")
car3 = Car(600,500,64,32,"images/car1.png")
car4 = Car(100,700,64,32,"images/car2_down.png")
car5 = Car(200,900,64,32,"images/car1.png")
car6 = Car(300,1100,64,32,"images/car2_down.png")
car7 = Car(200,900,64,32,"images/car1.png")
car8 = Car(300,1100,64,32,"images/car2_down.png")
car9 = Car(200,900,64,32,"images/car1.png")
car10 = Car(300,1100,64,32,"images/car2_down.png")
bus1 = Bus(300,300,100,34,"images/bus.png")
bus2 = Bus(600,300,100,34,"images/bus_down.png")
bus3 = Bus(100,450,100,34,"images/bus_down.png")
copcar = Cop(SCREENWIDTH-90,SCREENHEIGHT-90,90,45,"images/cop.png")
while True:
process_onecar(copcar,FPS,total_frames,flag)
copProjectile.movement()
if flag==0:
copcar.motion(SCREENWIDTH,SCREENHEIGHT)
Car.update_all(SCREENWIDTH,SCREENHEIGHT)
Car.bothmovement_x(SCREENWIDTH,SCREENHEIGHT)
Bus.bothmovement_x(SCREENWIDTH,SCREENHEIGHT)
elif flag==1:
copcar.motion(SCREENWIDTH,SCREENHEIGHT)
Car.update_all(SCREENWIDTH,SCREENHEIGHT)
Car.bothmovement_y(SCREENWIDTH,SCREENHEIGHT)
Bus.bothmovement_y(SCREENWIDTH,SCREENHEIGHT)
elif flag == 2:
copcar.motion(SCREENWIDTH,SCREENHEIGHT)
Car.update_all(SCREENWIDTH,SCREENHEIGHT)
car1.car_motion_x(SCREENWIDTH,SCREENHEIGHT)
car2.car_motion_y(SCREENWIDTH,SCREENHEIGHT)
car3.car_motion_x(SCREENWIDTH,SCREENHEIGHT)
car4.car_motion_y(SCREENWIDTH,SCREENHEIGHT)
car5.car_motion_x(SCREENWIDTH,SCREENHEIGHT)
car6.car_motion_y(SCREENWIDTH,SCREENHEIGHT)
car7.car_motion_x(SCREENWIDTH,SCREENHEIGHT)
car8.car_motion_y(SCREENWIDTH,SCREENHEIGHT)
car9.car_motion_x(SCREENWIDTH,SCREENHEIGHT)
car10.car_motion_y(SCREENWIDTH,SCREENHEIGHT)
bus1.bus_motion_x(SCREENWIDTH,SCREENHEIGHT)
bus2.bus_motion_y(SCREENWIDTH,SCREENHEIGHT)
bus3.bus_motion_y(SCREENWIDTH,SCREENHEIGHT)
total_frames+=1
screen.blit(BackGround,(0,0))
screen.blit(Header,(0,0))
BaseClass.allsprites.draw(screen)
copProjectile.List.draw(screen)
pygame.display.flip()
clock.tick(FPS)
| true | true |
f7251086cbee9232ee1a4c2ae76bb737b8cda266 | 1,378 | py | Python | backend-project/small_eod/letters/migrations/0004_auto_20200221_1956.py | WlodzimierzKorza/small_eod | 027022bd71122a949a2787d0fb86518df80e48cd | [
"MIT"
] | 64 | 2019-12-30T11:24:03.000Z | 2021-06-24T01:04:56.000Z | backend-project/small_eod/letters/migrations/0004_auto_20200221_1956.py | WlodzimierzKorza/small_eod | 027022bd71122a949a2787d0fb86518df80e48cd | [
"MIT"
] | 465 | 2018-06-13T21:43:43.000Z | 2022-01-04T23:33:56.000Z | backend-project/small_eod/letters/migrations/0004_auto_20200221_1956.py | WlodzimierzKorza/small_eod | 027022bd71122a949a2787d0fb86518df80e48cd | [
"MIT"
] | 72 | 2018-12-02T19:47:03.000Z | 2022-01-04T22:54:49.000Z | # Generated by Django 3.0.3 on 2020-02-21 19:56
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('letters', '0003_auto_20200110_0200'),
]
operations = [
migrations.AlterField(
model_name='letter',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='letter_created_by', to=settings.AUTH_USER_MODEL, verbose_name='Created by'),
),
migrations.AlterField(
model_name='letter',
name='created_on',
field=models.DateTimeField(auto_now_add=True, verbose_name='Date of creation'),
),
migrations.AlterField(
model_name='letter',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='letter_modified_by', to=settings.AUTH_USER_MODEL, verbose_name='Modified by'),
),
migrations.AlterField(
model_name='letter',
name='modified_on',
field=models.DateTimeField(auto_now=True, verbose_name='Date of the modification'),
),
]
| 37.243243 | 199 | 0.659652 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('letters', '0003_auto_20200110_0200'),
]
operations = [
migrations.AlterField(
model_name='letter',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='letter_created_by', to=settings.AUTH_USER_MODEL, verbose_name='Created by'),
),
migrations.AlterField(
model_name='letter',
name='created_on',
field=models.DateTimeField(auto_now_add=True, verbose_name='Date of creation'),
),
migrations.AlterField(
model_name='letter',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='letter_modified_by', to=settings.AUTH_USER_MODEL, verbose_name='Modified by'),
),
migrations.AlterField(
model_name='letter',
name='modified_on',
field=models.DateTimeField(auto_now=True, verbose_name='Date of the modification'),
),
]
| true | true |
f725114cc0cb5e35486379975f0d3386787546b9 | 1,736 | py | Python | Labs/Lab 1 Midway RCC and mpi4py/mpi_rand_walk.py | cindychu/LargeScaleComputing_S20 | 913b0155f47914c258b503df677067a510dd23f5 | [
"MIT"
] | null | null | null | Labs/Lab 1 Midway RCC and mpi4py/mpi_rand_walk.py | cindychu/LargeScaleComputing_S20 | 913b0155f47914c258b503df677067a510dd23f5 | [
"MIT"
] | null | null | null | Labs/Lab 1 Midway RCC and mpi4py/mpi_rand_walk.py | cindychu/LargeScaleComputing_S20 | 913b0155f47914c258b503df677067a510dd23f5 | [
"MIT"
] | null | null | null |
from mpi4py import MPI
import matplotlib.pyplot as plt
import numpy as np
import time
def sim_rand_walks_parallel(n_runs):
# Get rank of process and overall size of communicator:
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
# Start time:
t0 = time.time()
# Evenly distribute number of simulation runs across processes
N = int(n_runs/size)
# Simulate N random walks and specify as a NumPy Array
r_walks = []
for i in range(N):
steps = np.random.normal(loc=0, scale=1, size=100)
steps[0] = 0
r_walks.append(100 + np.cumsum(steps))
r_walks_array = np.array(r_walks)
# Gather all simulation arrays to buffer of expected size/dtype on rank 0
r_walks_all = None
if rank == 0:
r_walks_all = np.empty([N*size, 100], dtype='float')
comm.Gather(sendbuf = r_walks_array, recvbuf = r_walks_all, root=0)
# Print/plot simulation results on rank 0
if rank == 0:
# Calculate time elapsed after computing mean and std
average_finish = np.mean(r_walks_all[:,-1])
std_finish = np.std(r_walks_all[:,-1])
time_elapsed = time.time() - t0
# Print time elapsed + simulation results
print("Simulated %d Random Walks in: %f seconds on %d MPI processes"
% (n_runs, time_elapsed, size))
print("Average final position: %f, Standard Deviation: %f"
% (average_finish, std_finish))
# Plot Simulations and save to file
plt.plot(r_walks_all.transpose())
plt.savefig("r_walk_nprocs%d_nruns%d.png" % (size, n_runs))
return
def main():
sim_rand_walks_parallel(n_runs = 10000)
if __name__ == '__main__':
main()
| 30.45614 | 77 | 0.645161 |
from mpi4py import MPI
import matplotlib.pyplot as plt
import numpy as np
import time
def sim_rand_walks_parallel(n_runs):
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
t0 = time.time()
N = int(n_runs/size)
r_walks = []
for i in range(N):
steps = np.random.normal(loc=0, scale=1, size=100)
steps[0] = 0
r_walks.append(100 + np.cumsum(steps))
r_walks_array = np.array(r_walks)
r_walks_all = None
if rank == 0:
r_walks_all = np.empty([N*size, 100], dtype='float')
comm.Gather(sendbuf = r_walks_array, recvbuf = r_walks_all, root=0)
if rank == 0:
average_finish = np.mean(r_walks_all[:,-1])
std_finish = np.std(r_walks_all[:,-1])
time_elapsed = time.time() - t0
print("Simulated %d Random Walks in: %f seconds on %d MPI processes"
% (n_runs, time_elapsed, size))
print("Average final position: %f, Standard Deviation: %f"
% (average_finish, std_finish))
plt.plot(r_walks_all.transpose())
plt.savefig("r_walk_nprocs%d_nruns%d.png" % (size, n_runs))
return
def main():
sim_rand_walks_parallel(n_runs = 10000)
if __name__ == '__main__':
main()
| true | true |
f72511a3099af2e0476081a70e6b3d479159a8c0 | 1,950 | py | Python | tests/testhelpers/override_testhelper_err2.py | dbarnett/pytypes | da056359a8d1dad174316195830a1cb0574893af | [
"Apache-2.0"
] | 189 | 2016-09-17T13:45:58.000Z | 2022-03-12T10:53:42.000Z | tests/testhelpers/override_testhelper_err2.py | dbarnett/pytypes | da056359a8d1dad174316195830a1cb0574893af | [
"Apache-2.0"
] | 104 | 2017-02-23T16:43:18.000Z | 2022-03-17T17:36:18.000Z | tests/testhelpers/override_testhelper_err2.py | dbarnett/pytypes | da056359a8d1dad174316195830a1cb0574893af | [
"Apache-2.0"
] | 21 | 2017-02-17T08:05:12.000Z | 2021-12-08T11:22:15.000Z | # Copyright 2017 Stefan Richthofer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Created on 01.12.2016
"""
Designed to cause a NameError on import.
(unless typechecker.check_override_at_runtime == False)
"""
from pytypes import override
class TestClass():
def test_meth0(self, a):
# type: (int) -> str
pass
def test_meth1(self, a):
# type: (TestArg2) -> str
pass
def test_meth2(self, a):
# type: (int) -> TestResult1
pass
class TestClass2(TestClass):
@override
def test_meth0(self, a):
# type: (int) -> str
pass
@override
def test_meth1(self, a):
# type: (TestArg1) -> str
pass
@override
def test_meth2(self, a):
# type: (int) -> TestResult2
pass
class TestClass3(TestClass):
@override
def test_meth1(self, a):
# type: (TestArg1) -> str
pass
@override
def test_meth2(self, a):
# type: (int) -> TestResult2
pass
class TestArg1():
pass
class TestResult1():
pass
class TestClass3(TestClass):
@override
def test_meth1(self,
a # type: TestArg1
):
# type: (...) -> strr
pass
@override
def test_meth2(self,
a # type: int
):
# type: (...) -> TestResult2
pass
class TestArg2(TestArg1):
pass
class TestResult2(TestResult1):
pass
| 21.428571 | 74 | 0.606667 |
from pytypes import override
class TestClass():
def test_meth0(self, a):
pass
def test_meth1(self, a):
pass
def test_meth2(self, a):
pass
class TestClass2(TestClass):
@override
def test_meth0(self, a):
pass
@override
def test_meth1(self, a):
pass
@override
def test_meth2(self, a):
pass
class TestClass3(TestClass):
@override
def test_meth1(self, a):
pass
@override
def test_meth2(self, a):
pass
class TestArg1():
pass
class TestResult1():
pass
class TestClass3(TestClass):
@override
def test_meth1(self,
a
):
pass
@override
def test_meth2(self,
a
):
pass
class TestArg2(TestArg1):
pass
class TestResult2(TestResult1):
pass
| true | true |
f72512165bd2c1034b3a55e9374f6cdaed5ced1b | 2,873 | py | Python | release/stubs.min/System/Windows/Forms/__init___parts/LinkClickedEventHandler.py | htlcnn/ironpython-stubs | 780d829e2104b2789d5f4d6f32b0ec9f2930ca03 | [
"MIT"
] | 182 | 2017-06-27T02:26:15.000Z | 2022-03-30T18:53:43.000Z | release/stubs.min/System/Windows/Forms/__init___parts/LinkClickedEventHandler.py | htlcnn/ironpython-stubs | 780d829e2104b2789d5f4d6f32b0ec9f2930ca03 | [
"MIT"
] | 28 | 2017-06-27T13:38:23.000Z | 2022-03-15T11:19:44.000Z | release/stubs.min/System/Windows/Forms/__init___parts/LinkClickedEventHandler.py | htlcnn/ironpython-stubs | 780d829e2104b2789d5f4d6f32b0ec9f2930ca03 | [
"MIT"
] | 67 | 2017-06-28T09:43:59.000Z | 2022-03-20T21:17:10.000Z | class LinkClickedEventHandler(MulticastDelegate,ICloneable,ISerializable):
"""
Represents the method that will handle the System.Windows.Forms.RichTextBox.LinkClicked event of a System.Windows.Forms.RichTextBox.
LinkClickedEventHandler(object: object,method: IntPtr)
"""
def BeginInvoke(self,sender,e,callback,object):
""" BeginInvoke(self: LinkClickedEventHandler,sender: object,e: LinkClickedEventArgs,callback: AsyncCallback,object: object) -> IAsyncResult """
pass
def CombineImpl(self,*args):
"""
CombineImpl(self: MulticastDelegate,follow: Delegate) -> Delegate
Combines this System.Delegate with the specified System.Delegate to form a new delegate.
follow: The delegate to combine with this delegate.
Returns: A delegate that is the new root of the System.MulticastDelegate invocation list.
"""
pass
def DynamicInvokeImpl(self,*args):
"""
DynamicInvokeImpl(self: Delegate,args: Array[object]) -> object
Dynamically invokes (late-bound) the method represented by the current delegate.
args: An array of objects that are the arguments to pass to the method represented by the current
delegate.-or- null,if the method represented by the current delegate does not require
arguments.
Returns: The object returned by the method represented by the delegate.
"""
pass
def EndInvoke(self,result):
""" EndInvoke(self: LinkClickedEventHandler,result: IAsyncResult) """
pass
def GetMethodImpl(self,*args):
"""
GetMethodImpl(self: MulticastDelegate) -> MethodInfo
Returns a static method represented by the current System.MulticastDelegate.
Returns: A static method represented by the current System.MulticastDelegate.
"""
pass
def Invoke(self,sender,e):
""" Invoke(self: LinkClickedEventHandler,sender: object,e: LinkClickedEventArgs) """
pass
def RemoveImpl(self,*args):
"""
RemoveImpl(self: MulticastDelegate,value: Delegate) -> Delegate
Removes an element from the invocation list of this System.MulticastDelegate that is equal to
the specified delegate.
value: The delegate to search for in the invocation list.
Returns: If value is found in the invocation list for this instance,then a new System.Delegate without
value in its invocation list; otherwise,this instance with its original invocation list.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,object,method):
""" __new__(cls: type,object: object,method: IntPtr) """
pass
def __reduce_ex__(self,*args):
pass
| 30.242105 | 215 | 0.719457 | class LinkClickedEventHandler(MulticastDelegate,ICloneable,ISerializable):
def BeginInvoke(self,sender,e,callback,object):
pass
def CombineImpl(self,*args):
pass
def DynamicInvokeImpl(self,*args):
pass
def EndInvoke(self,result):
pass
def GetMethodImpl(self,*args):
pass
def Invoke(self,sender,e):
pass
def RemoveImpl(self,*args):
pass
def __init__(self,*args):
pass
@staticmethod
def __new__(self,object,method):
pass
def __reduce_ex__(self,*args):
pass
| true | true |
f72513565d42f73aae9ae75dc0d14b21b6416c46 | 318 | py | Python | hossein/university/595/595_v2.py | mhdehghan/quera-answers | 28dc0a9dbe3697593d7cbe05c9f05db2d3b01790 | [
"MIT"
] | null | null | null | hossein/university/595/595_v2.py | mhdehghan/quera-answers | 28dc0a9dbe3697593d7cbe05c9f05db2d3b01790 | [
"MIT"
] | null | null | null | hossein/university/595/595_v2.py | mhdehghan/quera-answers | 28dc0a9dbe3697593d7cbe05c9f05db2d3b01790 | [
"MIT"
] | null | null | null | plus = lambda x, y: x + y
current_list = [0, 1]
next_list = []
n = int(input())
if n > 0: print(1)
for i in range(n-1):
current_list.append(0)
next_list = list(map(plus, current_list[1:], current_list))
print(*next_list,sep=' ')
current_list = next_list
current_list.insert(0, 0)
next_list = [] | 26.5 | 63 | 0.628931 | plus = lambda x, y: x + y
current_list = [0, 1]
next_list = []
n = int(input())
if n > 0: print(1)
for i in range(n-1):
current_list.append(0)
next_list = list(map(plus, current_list[1:], current_list))
print(*next_list,sep=' ')
current_list = next_list
current_list.insert(0, 0)
next_list = [] | true | true |
f7251431a4069a8242c3b58bab2e52b693aa37b9 | 810 | py | Python | safe_transaction_service/tokens/tasks.py | cryptopossum/safe-transaction-service | 38069e5f4514be51c6f14e395a135d03f0c03887 | [
"MIT"
] | null | null | null | safe_transaction_service/tokens/tasks.py | cryptopossum/safe-transaction-service | 38069e5f4514be51c6f14e395a135d03f0c03887 | [
"MIT"
] | null | null | null | safe_transaction_service/tokens/tasks.py | cryptopossum/safe-transaction-service | 38069e5f4514be51c6f14e395a135d03f0c03887 | [
"MIT"
] | 1 | 2021-06-09T06:20:49.000Z | 2021-06-09T06:20:49.000Z | from typing import Optional
from celery import app
from celery.utils.log import get_task_logger
from gnosis.eth import EthereumClientProvider
from gnosis.eth.ethereum_client import EthereumNetwork
from safe_transaction_service.history.utils import close_gevent_db_connection
from .models import Token
logger = get_task_logger(__name__)
@app.shared_task()
def fix_pool_tokens_task() -> Optional[int]:
ethereum_client = EthereumClientProvider()
ethereum_network = ethereum_client.get_network()
if ethereum_network == EthereumNetwork.MAINNET:
try:
number = Token.pool_tokens.fix_all_pool_tokens()
if number:
logger.info('%d pool token names were fixed', number)
return number
finally:
close_gevent_db_connection()
| 28.928571 | 77 | 0.738272 | from typing import Optional
from celery import app
from celery.utils.log import get_task_logger
from gnosis.eth import EthereumClientProvider
from gnosis.eth.ethereum_client import EthereumNetwork
from safe_transaction_service.history.utils import close_gevent_db_connection
from .models import Token
logger = get_task_logger(__name__)
@app.shared_task()
def fix_pool_tokens_task() -> Optional[int]:
ethereum_client = EthereumClientProvider()
ethereum_network = ethereum_client.get_network()
if ethereum_network == EthereumNetwork.MAINNET:
try:
number = Token.pool_tokens.fix_all_pool_tokens()
if number:
logger.info('%d pool token names were fixed', number)
return number
finally:
close_gevent_db_connection()
| true | true |
f7251634c09abfd3f03813bfef073fd95ca209ef | 9,885 | py | Python | amqpstorm/tests/functional/management/test_queue.py | ZygusPatryk/amqpstorm | 0f3ad84a529f12769d34638a88c38f3055cb05cd | [
"MIT"
] | 140 | 2016-06-07T18:53:57.000Z | 2022-03-23T01:50:15.000Z | amqpstorm/tests/functional/management/test_queue.py | ZygusPatryk/amqpstorm | 0f3ad84a529f12769d34638a88c38f3055cb05cd | [
"MIT"
] | 85 | 2016-04-11T23:32:32.000Z | 2022-03-19T07:21:21.000Z | amqpstorm/tests/functional/management/test_queue.py | ZygusPatryk/amqpstorm | 0f3ad84a529f12769d34638a88c38f3055cb05cd | [
"MIT"
] | 38 | 2016-04-20T20:21:13.000Z | 2022-03-23T05:31:58.000Z | from amqpstorm.management import ApiError
from amqpstorm.management import ManagementApi
from amqpstorm.tests import HTTP_URL
from amqpstorm.tests import PASSWORD
from amqpstorm.tests import USERNAME
from amqpstorm.tests.functional.utility import TestFunctionalFramework
from amqpstorm.tests.functional.utility import setup
class ApiQueueFunctionalTests(TestFunctionalFramework):
    """Functional tests for the management HTTP API queue endpoints.

    Each test talks to a live RabbitMQ management API at ``HTTP_URL``.
    The ``@setup`` decorator handles per-test queue creation/cleanup
    depending on its ``queue``/``new_connection`` arguments.
    """

    @setup(queue=False)
    def test_api_queue_get(self):
        """A declared queue is retrievable via ``queue.get``."""
        api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
        api.queue.declare(self.queue_name)

        queue = api.queue.get(self.queue_name)
        self.assertIsInstance(queue, dict)
        self.assertIn('name', queue)
        self.assertIn('auto_delete', queue)

    @setup(queue=False)
    def test_api_queue_list(self):
        """``queue.list`` returns dicts carrying the expected keys."""
        api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
        api.queue.declare(self.queue_name)

        queues = api.queue.list()
        self.assertIsInstance(queues, list)
        self.assertGreater(len(queues), 0)

        for queue in queues:
            self.assertIsInstance(queue, dict)
            self.assertIn('name', queue)
            self.assertIn('vhost', queue)
            self.assertIn('node', queue)
            self.assertIn('durable', queue)
            self.assertIn('arguments', queue)
            self.assertIn('auto_delete', queue)

    @setup(queue=False)
    def test_api_queue_list_pagination(self):
        """Listing with a small page_size still returns every match.

        Declares more queues than fit on one page (33 with page_size=3)
        inside a throw-away virtual host, so the client must paginate.
        """
        queues_to_create = 33
        api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
        api.virtual_host.create(self.queue_name)
        try:
            for index in range(queues_to_create):
                api.queue.declare(
                    'pagination-%d' % (index + 1), virtual_host=self.queue_name
                )
            queues = api.queue.list(
                name='pagination-',
                page_size=3,
                virtual_host=self.queue_name
            )
        finally:
            # Always remove the temporary queues and vhost, even on failure.
            for index in range(queues_to_create):
                api.queue.delete(
                    'pagination-%d' % (index + 1), virtual_host=self.queue_name
                )
            self.api.virtual_host.delete(self.queue_name)

        self.assertIsInstance(queues, list)
        self.assertEqual(len(queues), queues_to_create)

    @setup(queue=False)
    def test_api_queue_list_no_pagination(self):
        """``page_size=None`` disables pagination and returns all queues."""
        api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
        api.virtual_host.create(self.queue_name)
        try:
            api.queue.declare('abc', virtual_host=self.queue_name)
            api.queue.declare('def', virtual_host=self.queue_name)
            api.queue.declare('ghi', virtual_host=self.queue_name)
            queues = api.queue.list(
                page_size=None, virtual_host=self.queue_name
            )
        finally:
            api.queue.delete('abc', virtual_host=self.queue_name)
            api.queue.delete('def', virtual_host=self.queue_name)
            api.queue.delete('ghi', virtual_host=self.queue_name)
            self.api.virtual_host.delete(self.queue_name)

        self.assertIsInstance(queues, list)
        self.assertEqual(len(queues), 3)

    @setup(queue=False)
    def test_api_queue_list_filter_with_regex(self):
        """Name filtering honours regex when ``use_regex='true'`` (string)."""
        api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
        api.virtual_host.create(self.queue_name)
        try:
            api.queue.declare('abc', virtual_host=self.queue_name)
            api.queue.declare('def', virtual_host=self.queue_name)
            api.queue.declare('ghi', virtual_host=self.queue_name)
            queues = api.queue.list(name='^ab', use_regex='true',
                                    virtual_host=self.queue_name)
        finally:
            api.queue.delete('abc', virtual_host=self.queue_name)
            api.queue.delete('def', virtual_host=self.queue_name)
            api.queue.delete('ghi', virtual_host=self.queue_name)
            self.api.virtual_host.delete(self.queue_name)

        self.assertIsInstance(queues, list)
        self.assertEqual(len(queues), 1)

    @setup(queue=False)
    def test_api_queue_list_filter_with_regex_boolean(self):
        """Name filtering honours regex when ``use_regex=True`` (bool)."""
        api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
        api.virtual_host.create(self.queue_name)
        try:
            api.queue.declare('abc', virtual_host=self.queue_name)
            api.queue.declare('def', virtual_host=self.queue_name)
            api.queue.declare('ghi', virtual_host=self.queue_name)
            queues = api.queue.list(name='^ab', use_regex=True,
                                    virtual_host=self.queue_name)
        finally:
            api.queue.delete('abc', virtual_host=self.queue_name)
            api.queue.delete('def', virtual_host=self.queue_name)
            api.queue.delete('ghi', virtual_host=self.queue_name)
            self.api.virtual_host.delete(self.queue_name)

        self.assertIsInstance(queues, list)
        self.assertEqual(len(queues), 1)

    @setup(queue=False)
    def test_api_queue_list_filter_without_regex(self):
        """With ``use_regex=False`` the name filter is a substring match."""
        api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
        api.virtual_host.create(self.queue_name)
        try:
            api.queue.declare('abc', virtual_host=self.queue_name)
            api.queue.declare('def', virtual_host=self.queue_name)
            api.queue.declare('ghi', virtual_host=self.queue_name)
            queues = api.queue.list(name='ab', use_regex=False,
                                    virtual_host=self.queue_name)
        finally:
            api.queue.delete('abc', virtual_host=self.queue_name)
            api.queue.delete('def', virtual_host=self.queue_name)
            api.queue.delete('ghi', virtual_host=self.queue_name)
            self.api.virtual_host.delete(self.queue_name)

        self.assertIsInstance(queues, list)
        self.assertEqual(len(queues), 1)

    @setup(queue=False)
    def test_api_queue_list_all(self):
        """``show_all=True`` lists queues across every virtual host."""
        api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
        api.queue.declare(self.queue_name)

        queues = api.queue.list(show_all=True)

        self.assertIsInstance(queues, list)
        self.assertGreater(len(queues), 0)

        for queue in queues:
            self.assertIsInstance(queue, dict)
            self.assertIn('name', queue)
            self.assertIn('vhost', queue)
            self.assertIn('node', queue)
            self.assertIn('durable', queue)
            self.assertIn('arguments', queue)
            self.assertIn('auto_delete', queue)

    @setup(queue=False)
    def test_api_queue_declare(self):
        """Declaring a durable queue stores the expected attributes."""
        api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
        self.assertIsNone(api.queue.declare(self.queue_name, durable=True))

        result = api.queue.get(self.queue_name)
        self.assertIsInstance(result, dict)
        self.assertEqual(result['name'], self.queue_name)
        self.assertEqual(result['auto_delete'], False)
        self.assertEqual(result['durable'], True)

    @setup(new_connection=False)
    def test_api_queue_declare_passive(self):
        """A passive declare of a missing queue raises a 404 ApiError."""
        expected_error_message = (
            'NOT-FOUND - The client attempted to work '
            'with a server entity that does not exist.'
        )
        api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
        try:
            api.queue.declare(self.queue_name, passive=True)
        except ApiError as why:
            self.assertEqual(str(why), expected_error_message)
            self.assertEqual(why.error_type, 'NOT-FOUND')
            self.assertEqual(why.error_code, 404)
        else:
            # Previously the test passed silently when no exception was
            # raised; the whole point is that the declare must fail.
            self.fail('passive declare of a missing queue did not raise '
                      'ApiError')

    @setup(new_connection=False)
    def test_api_queue_declare_passive_exists(self):
        """A passive declare of an existing queue succeeds."""
        api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
        api.queue.declare(self.queue_name)
        self.assertIsNotNone(api.queue.declare(self.queue_name, passive=True))

    @setup(new_connection=False)
    def test_api_queue_delete(self):
        """Deleting a queue removes it: a later passive declare 404s."""
        api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
        try:
            api.queue.declare(self.queue_name, durable=True)
            self.assertIsInstance(api.queue.get(self.queue_name), dict)
        finally:
            api.queue.delete(self.queue_name)

        try:
            api.queue.declare(self.queue_name, passive=True)
        except ApiError as why:
            self.assertEqual(why.error_code, 404)
        else:
            # Previously the test passed silently when no exception was
            # raised; a successful passive declare means delete failed.
            self.fail('queue still exists after delete')

    @setup(queue=True)
    def test_api_queue_purge(self):
        """``queue.purge`` on an existing queue returns None."""
        api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
        api.queue.declare(self.queue_name)
        self.assertIsNone(api.queue.purge(self.queue_name))

    @setup(queue=True)
    def test_api_queue_bind(self):
        """Binding a queue to an exchange adds exactly one binding."""
        exchange_name = 'amq.direct'
        api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
        api.queue.declare(self.queue_name)
        bindings = len(api.queue.bindings(self.queue_name))

        self.assertIsNone(api.queue.bind(queue=self.queue_name,
                                         exchange=exchange_name,
                                         routing_key=self.queue_name,
                                         arguments=None))

        self.assertEqual(len(api.queue.bindings(self.queue_name)),
                         bindings + 1)

    @setup(queue=True)
    def test_api_queue_unbind(self):
        """Unbinding removes the binding previously added by bind."""
        exchange_name = 'amq.direct'
        api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
        api.queue.declare(self.queue_name)
        bindings = len(api.queue.bindings(self.queue_name))

        api.queue.bind(queue=self.queue_name, exchange=exchange_name,
                       routing_key=self.queue_name, arguments=None)

        self.assertEqual(len(api.queue.bindings(self.queue_name)),
                         bindings + 1)

        self.assertIsNone(api.queue.unbind(queue=self.queue_name,
                                           exchange=exchange_name,
                                           routing_key=self.queue_name))

        self.assertEqual(len(api.queue.bindings(self.queue_name)), bindings)
from amqpstorm.management import ManagementApi
from amqpstorm.tests import HTTP_URL
from amqpstorm.tests import PASSWORD
from amqpstorm.tests import USERNAME
from amqpstorm.tests.functional.utility import TestFunctionalFramework
from amqpstorm.tests.functional.utility import setup
class ApiQueueFunctionalTests(TestFunctionalFramework):
@setup(queue=False)
def test_api_queue_get(self):
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
api.queue.declare(self.queue_name)
queue = api.queue.get(self.queue_name)
self.assertIsInstance(queue, dict)
self.assertIn('name', queue)
self.assertIn('auto_delete', queue)
@setup(queue=False)
def test_api_queue_list(self):
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
api.queue.declare(self.queue_name)
queues = api.queue.list()
self.assertIsInstance(queues, list)
self.assertGreater(len(queues), 0)
for queue in queues:
self.assertIsInstance(queue, dict)
self.assertIn('name', queue)
self.assertIn('vhost', queue)
self.assertIn('node', queue)
self.assertIn('durable', queue)
self.assertIn('arguments', queue)
self.assertIn('auto_delete', queue)
@setup(queue=False)
def test_api_queue_list_pagination(self):
queues_to_create = 33
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
api.virtual_host.create(self.queue_name)
try:
for index in range(queues_to_create):
api.queue.declare(
'pagination-%d' % (index + 1), virtual_host=self.queue_name
)
queues = api.queue.list(
name='pagination-',
page_size=3,
virtual_host=self.queue_name
)
finally:
for index in range(queues_to_create):
api.queue.delete(
'pagination-%d' % (index + 1), virtual_host=self.queue_name
)
self.api.virtual_host.delete(self.queue_name)
self.assertIsInstance(queues, list)
self.assertEqual(len(queues), queues_to_create)
@setup(queue=False)
def test_api_queue_list_no_pagination(self):
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
api.virtual_host.create(self.queue_name)
try:
api.queue.declare('abc', virtual_host=self.queue_name)
api.queue.declare('def', virtual_host=self.queue_name)
api.queue.declare('ghi', virtual_host=self.queue_name)
queues = api.queue.list(
page_size=None, virtual_host=self.queue_name
)
finally:
api.queue.delete('abc', virtual_host=self.queue_name)
api.queue.delete('def', virtual_host=self.queue_name)
api.queue.delete('ghi', virtual_host=self.queue_name)
self.api.virtual_host.delete(self.queue_name)
self.assertIsInstance(queues, list)
self.assertEqual(len(queues), 3)
@setup(queue=False)
def test_api_queue_list_filter_with_regex(self):
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
api.virtual_host.create(self.queue_name)
try:
api.queue.declare('abc', virtual_host=self.queue_name)
api.queue.declare('def', virtual_host=self.queue_name)
api.queue.declare('ghi', virtual_host=self.queue_name)
queues = api.queue.list(name='^ab', use_regex='true',
virtual_host=self.queue_name)
finally:
api.queue.delete('abc', virtual_host=self.queue_name)
api.queue.delete('def', virtual_host=self.queue_name)
api.queue.delete('ghi', virtual_host=self.queue_name)
self.api.virtual_host.delete(self.queue_name)
self.assertIsInstance(queues, list)
self.assertEqual(len(queues), 1)
@setup(queue=False)
def test_api_queue_list_filter_with_regex_boolean(self):
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
api.virtual_host.create(self.queue_name)
try:
api.queue.declare('abc', virtual_host=self.queue_name)
api.queue.declare('def', virtual_host=self.queue_name)
api.queue.declare('ghi', virtual_host=self.queue_name)
queues = api.queue.list(name='^ab', use_regex=True,
virtual_host=self.queue_name)
finally:
api.queue.delete('abc', virtual_host=self.queue_name)
api.queue.delete('def', virtual_host=self.queue_name)
api.queue.delete('ghi', virtual_host=self.queue_name)
self.api.virtual_host.delete(self.queue_name)
self.assertIsInstance(queues, list)
self.assertEqual(len(queues), 1)
@setup(queue=False)
def test_api_queue_list_filter_without_regex(self):
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
api.virtual_host.create(self.queue_name)
try:
api.queue.declare('abc', virtual_host=self.queue_name)
api.queue.declare('def', virtual_host=self.queue_name)
api.queue.declare('ghi', virtual_host=self.queue_name)
queues = api.queue.list(name='ab', use_regex=False,
virtual_host=self.queue_name)
finally:
api.queue.delete('abc', virtual_host=self.queue_name)
api.queue.delete('def', virtual_host=self.queue_name)
api.queue.delete('ghi', virtual_host=self.queue_name)
self.api.virtual_host.delete(self.queue_name)
self.assertIsInstance(queues, list)
self.assertEqual(len(queues), 1)
@setup(queue=False)
def test_api_queue_list_all(self):
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
api.queue.declare(self.queue_name)
queues = api.queue.list(show_all=True)
self.assertIsInstance(queues, list)
self.assertGreater(len(queues), 0)
for queue in queues:
self.assertIsInstance(queue, dict)
self.assertIn('name', queue)
self.assertIn('vhost', queue)
self.assertIn('node', queue)
self.assertIn('durable', queue)
self.assertIn('arguments', queue)
self.assertIn('auto_delete', queue)
    @setup(queue=False)
    def test_api_queue_declare(self):
        """Declare a durable queue and verify its attributes via get()."""
        api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
        # declare returns None on success
        self.assertIsNone(api.queue.declare(self.queue_name, durable=True))
        result = api.queue.get(self.queue_name)
        self.assertIsInstance(result, dict)
        self.assertEqual(result['name'], self.queue_name)
        self.assertEqual(result['auto_delete'], False)
        self.assertEqual(result['durable'], True)
@setup(new_connection=False)
def test_api_queue_declare_passive(self):
expected_error_message = (
'NOT-FOUND - The client attempted to work '
'with a server entity that does not exist.'
)
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
try:
api.queue.declare(self.queue_name, passive=True)
except ApiError as why:
self.assertEqual(str(why), expected_error_message)
self.assertEqual(why.error_type, 'NOT-FOUND')
self.assertEqual(why.error_code, 404)
    @setup(new_connection=False)
    def test_api_queue_declare_passive_exists(self):
        """Passive declare of an existing queue succeeds and returns data."""
        api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
        api.queue.declare(self.queue_name)
        # passive declare on an existing queue returns the queue description
        self.assertIsNotNone(api.queue.declare(self.queue_name, passive=True))
@setup(new_connection=False)
def test_api_queue_delete(self):
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
try:
api.queue.declare(self.queue_name, durable=True)
self.assertIsInstance(api.queue.get(self.queue_name), dict)
finally:
api.queue.delete(self.queue_name)
try:
api.queue.declare(self.queue_name, passive=True)
except ApiError as why:
self.assertEqual(why.error_code, 404)
    @setup(queue=True)
    def test_api_queue_purge(self):
        """Purging an existing queue succeeds (purge returns None)."""
        api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
        api.queue.declare(self.queue_name)
        self.assertIsNone(api.queue.purge(self.queue_name))
    @setup(queue=True)
    def test_api_queue_bind(self):
        """Binding a queue to amq.direct adds exactly one binding."""
        exchange_name = 'amq.direct'
        api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
        api.queue.declare(self.queue_name)
        # count bindings before so the assertion is relative, not absolute
        # (a freshly declared queue already has a default binding)
        bindings = len(api.queue.bindings(self.queue_name))
        self.assertIsNone(api.queue.bind(queue=self.queue_name,
                                         exchange=exchange_name,
                                         routing_key=self.queue_name,
                                         arguments=None))
        self.assertEqual(len(api.queue.bindings(self.queue_name)),
                         bindings + 1)
    @setup(queue=True)
    def test_api_queue_unbind(self):
        """Unbinding restores the queue's binding count to its prior value."""
        exchange_name = 'amq.direct'
        api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
        api.queue.declare(self.queue_name)
        # baseline binding count before adding the test binding
        bindings = len(api.queue.bindings(self.queue_name))
        api.queue.bind(queue=self.queue_name, exchange=exchange_name,
                       routing_key=self.queue_name, arguments=None)
        self.assertEqual(len(api.queue.bindings(self.queue_name)),
                         bindings + 1)
        self.assertIsNone(api.queue.unbind(queue=self.queue_name,
                                           exchange=exchange_name,
                                           routing_key=self.queue_name))
        # back to the baseline after unbinding
        self.assertEqual(len(api.queue.bindings(self.queue_name)), bindings)
| true | true |
f72516a6c5f55d8207f7aef5e97d7acd0c0e1e7d | 350 | py | Python | BOOK/MAIN/05-file-handling/chapter-5-examples/07-count-vowels-consonants.py | kabirsrivastava3/python-practice | f56a4a0764031d3723b0ba4cd1418a1a83b1e4f5 | [
"MIT"
] | null | null | null | BOOK/MAIN/05-file-handling/chapter-5-examples/07-count-vowels-consonants.py | kabirsrivastava3/python-practice | f56a4a0764031d3723b0ba4cd1418a1a83b1e4f5 | [
"MIT"
] | null | null | null | BOOK/MAIN/05-file-handling/chapter-5-examples/07-count-vowels-consonants.py | kabirsrivastava3/python-practice | f56a4a0764031d3723b0ba4cd1418a1a83b1e4f5 | [
"MIT"
] | null | null | null |
# Count vowels and consonants in answer.txt, reading one character at a time.
#
# Bug fix: the original initialised ch = "" and looped with `while ch:`,
# so the loop body never executed and both counts were always printed as 0.
# Also, every non-vowel character (including newlines and punctuation) was
# counted as a consonant; only alphabetic characters are counted now.
fileObj = open('answer.txt',"r")
vCount = 0
cCount = 0
while True:
    ch = fileObj.read(1)  # one character read from file
    if not ch:            # empty string signals end of file
        break
    if ch in ['A','a','E','e','I','i','O','o','U','u']:
        vCount+=1
    elif ch.isalpha():    # consonant = any other letter
        cCount+=1
print("Vowels in the file: ", vCount)
print("Consonants in the file: ",cCount)
#close the file
fileObj.close()
# Count vowels and consonants in answer.txt, one character at a time.
# Bug fix: `ch = ""` followed by `while ch:` meant the loop never ran and
# both counters stayed at 0; non-letters were also miscounted as consonants.
fileObj = open('answer.txt',"r")
vCount = 0
cCount = 0
while True:
    ch = fileObj.read(1)  # single-character read; '' means end of file
    if not ch:
        break
    if ch in ['A','a','E','e','I','i','O','o','U','u']:
        vCount+=1
    elif ch.isalpha():  # count only letters as consonants
        cCount+=1
print("Vowels in the file: ", vCount)
print("Consonants in the file: ",cCount)
fileObj.close()
f72517727d88232198a9d0d468e299f69e2a632b | 4,416 | py | Python | venv/Lib/site-packages/ipyparallel/controller/mongodb.py | BoxicaLion/BasicMathFormulas | 4d9782f2c0c75ecccf4c0ea995f324f93e4fb6e2 | [
"MIT"
] | 69 | 2019-02-18T12:07:35.000Z | 2022-03-12T10:38:32.000Z | ipyparallel/controller/mongodb.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 12 | 2018-12-06T22:06:49.000Z | 2022-02-25T17:40:44.000Z | ipyparallel/controller/mongodb.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 28 | 2019-03-22T01:07:13.000Z | 2022-02-21T16:38:27.000Z | """A TaskRecord backend using mongodb
Authors:
* Min RK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
try:
from pymongo import MongoClient
except ImportError:
from pymongo import Connection as MongoClient
# bson.Binary import moved
try:
from bson.binary import Binary
except ImportError:
from bson import Binary
from traitlets import Dict, List, Unicode, Instance
from .dictdb import BaseDB
#-----------------------------------------------------------------------------
# MongoDB class
#-----------------------------------------------------------------------------
class MongoDB(BaseDB):
    """MongoDB TaskRecord backend.

    Stores task records in a ``task_records`` collection, keyed by
    ``msg_id``. Message buffers are wrapped in BSON ``Binary`` before
    being written so arbitrary bytes survive the round trip.
    """

    connection_args = List(config=True,
        help="""Positional arguments to be passed to pymongo.MongoClient. Only
        necessary if the default mongodb configuration does not point to your
        mongod instance.""")
    connection_kwargs = Dict(config=True,
        help="""Keyword arguments to be passed to pymongo.MongoClient. Only
        necessary if the default mongodb configuration does not point to your
        mongod instance."""
    )
    database = Unicode("ipython-tasks", config=True,
        help="""The MongoDB database name to use for storing tasks for this session. If unspecified,
        a new database will be created with the Hub's IDENT. Specifying the database will result
        in tasks from previous sessions being available via Clients' db_query and
        get_result methods.""")

    _connection = Instance(MongoClient, allow_none=True)  # pymongo connection

    def __init__(self, **kwargs):
        super(MongoDB, self).__init__(**kwargs)
        if self._connection is None:
            self._connection = MongoClient(*self.connection_args, **self.connection_kwargs)
        if not self.database:
            # fall back to a per-session database name (the Hub's IDENT)
            self.database = self.session
        self._db = self._connection[self.database]
        self._records = self._db['task_records']
        # ensure_index is kept (rather than create_index) for compatibility
        # with the old pymongo versions this module still supports via the
        # Connection fallback import at the top of the file
        self._records.ensure_index('msg_id', unique=True)
        self._records.ensure_index('submitted')  # for sorting history

    def _binary_buffers(self, rec):
        """Wrap any buffer lists in *rec* in BSON Binary for safe storage."""
        for key in ('buffers', 'result_buffers'):
            if rec.get(key, None):
                rec[key] = list(map(Binary, rec[key]))
        return rec

    def add_record(self, msg_id, rec):
        """Add a new Task Record, by msg_id."""
        rec = self._binary_buffers(rec)
        self._records.insert(rec)

    def get_record(self, msg_id):
        """Get a specific Task Record, by msg_id.

        Raises KeyError if no record with that msg_id exists.
        """
        r = self._records.find_one({'msg_id': msg_id})
        if not r:
            # find_one returns None when nothing matches
            raise KeyError(msg_id)
        return r

    def update_record(self, msg_id, rec):
        """Update the data in an existing record."""
        rec = self._binary_buffers(rec)
        self._records.update({'msg_id':msg_id}, {'$set': rec})

    def drop_matching_records(self, check):
        """Remove all records matching the query dict *check*."""
        self._records.remove(check)

    def drop_record(self, msg_id):
        """Remove a single record from the DB, by msg_id."""
        self._records.remove({'msg_id':msg_id})

    def find_records(self, check, keys=None):
        """Find records matching a query dict, optionally extracting subset of keys.

        Returns list of matching records.

        Parameters
        ----------

        check: dict
            mongodb-style query argument
        keys: list of strs [optional]
            if specified, the subset of keys to extract. msg_id will *always* be
            included.
        """
        if keys:
            # Bug fix: work on a copy so the caller's list is not mutated
            # by the msg_id append below.
            keys = list(keys)
            if 'msg_id' not in keys:
                keys.append('msg_id')
        matches = list(self._records.find(check, keys))
        for rec in matches:
            # drop mongo's internal object id; callers key on msg_id
            rec.pop('_id')
        return matches

    def get_history(self):
        """get all msg_ids, ordered by time submitted."""
        cursor = self._records.find({},{'msg_id':1}).sort('submitted')
        return [ rec['msg_id'] for rec in cursor ]
| 35.047619 | 100 | 0.585824 |
try:
from pymongo import MongoClient
except ImportError:
from pymongo import Connection as MongoClient
try:
from bson.binary import Binary
except ImportError:
from bson import Binary
from traitlets import Dict, List, Unicode, Instance
from .dictdb import BaseDB
class MongoDB(BaseDB):
connection_args = List(config=True,
help="""Positional arguments to be passed to pymongo.MongoClient. Only
necessary if the default mongodb configuration does not point to your
mongod instance.""")
connection_kwargs = Dict(config=True,
help="""Keyword arguments to be passed to pymongo.MongoClient. Only
necessary if the default mongodb configuration does not point to your
mongod instance."""
)
database = Unicode("ipython-tasks", config=True,
help="""The MongoDB database name to use for storing tasks for this session. If unspecified,
a new database will be created with the Hub's IDENT. Specifying the database will result
in tasks from previous sessions being available via Clients' db_query and
get_result methods.""")
_connection = Instance(MongoClient, allow_none=True)
def __init__(self, **kwargs):
super(MongoDB, self).__init__(**kwargs)
if self._connection is None:
self._connection = MongoClient(*self.connection_args, **self.connection_kwargs)
if not self.database:
self.database = self.session
self._db = self._connection[self.database]
self._records = self._db['task_records']
self._records.ensure_index('msg_id', unique=True)
self._records.ensure_index('submitted')
def _binary_buffers(self, rec):
for key in ('buffers', 'result_buffers'):
if rec.get(key, None):
rec[key] = list(map(Binary, rec[key]))
return rec
def add_record(self, msg_id, rec):
rec = self._binary_buffers(rec)
self._records.insert(rec)
def get_record(self, msg_id):
r = self._records.find_one({'msg_id': msg_id})
if not r:
raise KeyError(msg_id)
return r
def update_record(self, msg_id, rec):
rec = self._binary_buffers(rec)
self._records.update({'msg_id':msg_id}, {'$set': rec})
def drop_matching_records(self, check):
self._records.remove(check)
def drop_record(self, msg_id):
self._records.remove({'msg_id':msg_id})
def find_records(self, check, keys=None):
if keys and 'msg_id' not in keys:
keys.append('msg_id')
matches = list(self._records.find(check,keys))
for rec in matches:
rec.pop('_id')
return matches
def get_history(self):
cursor = self._records.find({},{'msg_id':1}).sort('submitted')
return [ rec['msg_id'] for rec in cursor ]
| true | true |
f7251940c8d1976a314e9a83de4640eaf7110298 | 1,134 | py | Python | tools/gen_shake_256_sum.py | dpensi/insights-data-schemas | a60d673ce4053b8554e09b7bd08e518f9546727c | [
"Apache-2.0"
] | 1 | 2020-12-07T09:19:32.000Z | 2020-12-07T09:19:32.000Z | tools/gen_shake_256_sum.py | dpensi/insights-data-schemas | a60d673ce4053b8554e09b7bd08e518f9546727c | [
"Apache-2.0"
] | 36 | 2020-12-31T10:02:44.000Z | 2022-02-21T12:09:56.000Z | tools/gen_shake_256_sum.py | dpensi/insights-data-schemas | a60d673ce4053b8554e09b7bd08e518f9546727c | [
"Apache-2.0"
] | 6 | 2020-12-07T09:19:35.000Z | 2022-02-01T14:39:22.000Z | #!/usr/bin/env python3
# Copyright © 2021 Pavel Tisnovsky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generator of SHAKE-256 sum values."""
import hashlib
with open("input.txt", "r") as fin:
    for input_string in fin:
        # remove the trailing newline.
        # Bug fix: the original sliced off the last character unconditionally
        # ([:-1]), which corrupted the final line when the file does not end
        # with a newline; rstrip("\n") removes at most the newline itself.
        input_string = input_string.rstrip("\n")
        # compute hash
        shake_256_sum = hashlib.shake_256()
        shake_256_sum.update(input_string.encode("UTF-8"))
        # prepare special chars for output
        input_string = input_string.replace("\t", "<Tab>")
        # generate output (32-byte digest, hex-encoded)
        print('    "{}", # "{}"'.format(shake_256_sum.hexdigest(32), input_string))
| 32.4 | 84 | 0.689594 |
import hashlib
with open("input.txt", "r") as fin:
for input_string in fin:
input_string = input_string[:-1]
shake_256_sum = hashlib.shake_256()
shake_256_sum.update(input_string.encode("UTF-8"))
input_string = input_string.replace("\t", "<Tab>")
print(' "{}", # "{}"'.format(shake_256_sum.hexdigest(32), input_string))
| true | true |
f725194997751cabcf7176a1909560de88b4ee0e | 8,176 | py | Python | src/train.py | ahernandez1801/donkey_rl_mqtt | 02bbfc3d036220a4061b95e50780984e657aff43 | [
"BSD-3-Clause"
] | null | null | null | src/train.py | ahernandez1801/donkey_rl_mqtt | 02bbfc3d036220a4061b95e50780984e657aff43 | [
"BSD-3-Clause"
] | null | null | null | src/train.py | ahernandez1801/donkey_rl_mqtt | 02bbfc3d036220a4061b95e50780984e657aff43 | [
"BSD-3-Clause"
] | null | null | null | '''
Train
Train your nerual network
Author: Tawn Kramer
'''
from __future__ import print_function
import os
import sys
import glob
import time
import fnmatch
import argparse
import numpy as np
from PIL import Image
import keras
import conf
import random
import augment
import models
'''
matplotlib can be a pain to setup. So handle the case where it is absent. When present,
use it to generate a plot of training results.
'''
try:
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
do_plot = True
except:
do_plot = False
def shuffle(samples):
    '''
    Return a new list containing the elements of samples in random order.

    Bug fix: the previous implementation deleted every element from the
    caller's list while building the result (emptying it as a side effect,
    contrary to its own docstring) and ran in O(n^2) time because of the
    repeated del on a list. random.sample produces an independent shuffled
    copy in O(n) and leaves the input untouched.
    '''
    return random.sample(samples, len(samples))
def parse_img_filepath(filepath):
    """Extract the steering and throttle values encoded in an image filename.

    After dropping the 4-character extension and splitting the basename on
    underscores, field 3 holds the steering value and field 5 the throttle
    value. Returns a dict with 'steering' and 'throttle' floats.
    """
    stem = os.path.basename(filepath)[:-4]  # strip the ".jpg" suffix
    fields = stem.split('_')
    return {
        'steering': float(fields[3]),
        'throttle': float(fields[5]),
    }
def generator(samples, batch_size=32, perc_to_augment=0.5):
    '''
    Rather than keep all data in memory, we will make a function that keeps
    it's state and returns just the latest batch required via the yield command.

    As we load images, we can optionally augment them in some manner that doesn't
    change their underlying meaning or features. This is a combination of
    brightness, contrast, sharpness, and color PIL image filters applied with random
    settings. Optionally a shadow image may be overlayed with some random rotation and
    opacity.

    We flip each image horizontally and supply it as a another sample with the steering
    negated.
    '''
    num_samples = len(samples)
    # shadow overlays used for augmentation; may be empty if none are found
    shadows = augment.load_shadow_images('./shadows/*.png')

    while 1: # Loop forever so the generator never terminates
        # reshuffle the sample order at the start of every epoch
        samples = shuffle(samples)
        #divide batch_size in half, because we double each output by flipping image.
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset+batch_size]

            images = []
            controls = []
            for fullpath in batch_samples:
                try:
                    # steering/throttle labels are encoded in the filename
                    data = parse_img_filepath(fullpath)
                    steering = data["steering"]
                    throttle = data["throttle"]

                    try:
                        image = Image.open(fullpath)
                    except:
                        image = None

                    if image is None:
                        # unreadable image: skip it but keep the batch going
                        print('failed to open', fullpath)
                        continue

                    #PIL Image as a numpy array
                    image = np.array(image)

                    # randomly augment a fraction of the images
                    if len(shadows) > 0 and random.uniform(0.0, 1.0) < perc_to_augment:
                        image = augment.augment_image(image, shadows)

                    center_angle = steering
                    images.append(image)

                    if conf.num_outputs == 2:
                        controls.append([center_angle, throttle])
                    elif conf.num_outputs == 1:
                        controls.append([center_angle])
                    else:
                        print("expected 1 or 2 ouputs")

                except:
                    # NOTE(review): this bare except yields an empty batch
                    # ([], []) mid-iteration and then keeps looping, which
                    # hands Keras an empty array and discards the partial
                    # batch built so far — presumably unintended; verify.
                    print("we threw an exception on:", fullpath)
                    yield [], []

            # final np array to submit to training
            X_train = np.array(images)
            y_train = np.array(controls)
            yield X_train, y_train
def get_files(filemask):
    """Recursively search the directory part of *filemask* for files whose
    basename matches its glob-pattern part, returning full paths."""
    directory, pattern = os.path.split(filemask)
    return [
        os.path.join(dirpath, name)
        for dirpath, _dirnames, filenames in os.walk(directory)
        for name in fnmatch.filter(filenames, pattern)
    ]
def train_test_split(lines, test_perc):
    """Randomly partition *lines* into a (train, test) pair of lists.

    Each element independently lands in the test list with probability
    *test_perc*, otherwise in the train list. Order within each list
    follows the input order.
    """
    train, test = [], []
    for item in lines:
        target = test if random.uniform(0.0, 1.0) < test_perc else train
        target.append(item)
    return train, test
def make_generators(inputs, limit=None, batch_size=32, aug_perc=0.0):
    '''
    Gather image files matching the *inputs* filemask, split them into
    train/validation sets, and build a batch generator for each.

    Returns (train_generator, validation_generator, n_train, n_val).
    Augmentation (aug_perc) is applied to the training generator only.
    '''
    # get the image files whose names encode the steering/throttle labels
    lines = get_files(inputs)
    print("found %d files" % len(lines))

    if limit is not None:
        # cap the dataset size for quick experiments
        lines = lines[:limit]
        print("limiting to %d files" % len(lines))

    # fixed 80/20 train/validation split
    train_samples, validation_samples = train_test_split(lines, test_perc=0.2)

    print("num train/val", len(train_samples), len(validation_samples))

    # compile and train the model using the generator function
    train_generator = generator(train_samples, batch_size=batch_size, perc_to_augment=aug_perc)
    validation_generator = generator(validation_samples, batch_size=batch_size, perc_to_augment=0.0)

    n_train = len(train_samples)
    n_val = len(validation_samples)

    return train_generator, validation_generator, n_train, n_val
def go(model_name, epochs=50, inputs='./log/*.jpg', limit=None, aug_mult=1, aug_perc=0.0):
    """Train the NVIDIA-style model on images matching *inputs* and save the
    best checkpoint (by validation loss) to *model_name*.

    NOTE(review): aug_mult is accepted but never referenced in this body —
    presumably the image-multiplication augmentation was removed or never
    wired up; confirm before relying on the flag.
    """
    print('working on model', model_name)

    '''
    modify config.json to select the model to train.
    '''
    model = models.get_nvidia_model(conf.num_outputs)

    '''
    display layer summary and weights info
    '''
    models.show_model_summary(model)

    # stop early when val_loss stalls; checkpoint only on improvement
    callbacks = [
        keras.callbacks.EarlyStopping(monitor='val_loss', patience=conf.training_patience, verbose=0),
        keras.callbacks.ModelCheckpoint(model_name, monitor='val_loss', save_best_only=True, verbose=0),
    ]

    batch_size = conf.training_batch_size

    #Train on session images
    train_generator, validation_generator, n_train, n_val = make_generators(inputs, limit=limit, batch_size=batch_size, aug_perc=aug_perc)

    if n_train == 0:
        print('no training data found')
        return

    # integer division: partial trailing batches are dropped
    steps_per_epoch = n_train // batch_size
    validation_steps = n_val // batch_size

    print("steps_per_epoch", steps_per_epoch, "validation_steps", validation_steps)

    history = model.fit_generator(train_generator,
                        steps_per_epoch = steps_per_epoch,
                        validation_data = validation_generator,
                        validation_steps = validation_steps,
                        epochs=epochs,
                        verbose=1,
                        callbacks=callbacks)

    # plotting is best-effort: matplotlib may be absent (see do_plot flag)
    try:
        if do_plot:
            # summarize history for loss
            plt.plot(history.history['loss'])
            plt.plot(history.history['val_loss'])
            plt.title('model loss')
            plt.ylabel('loss')
            plt.xlabel('epoch')
            plt.legend(['train', 'test'], loc='upper left')
            plt.savefig('loss.png')
    except:
        print("problems with loss graph")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='train script')
parser.add_argument('model', type=str, help='model name')
parser.add_argument('--epochs', type=int, default=conf.training_default_epochs, help='number of epochs')
parser.add_argument('--inputs', default='../dataset/log/*.jpg', help='input mask to gather images')
parser.add_argument('--limit', type=int, default=None, help='max number of images to train with')
parser.add_argument('--aug_mult', type=int, default=conf.training_default_aug_mult, help='how many more images to augment')
parser.add_argument('--aug_perc', type=float, default=conf.training_default_aug_percent, help='what percentage of images to augment 0 - 1')
args = parser.parse_args()
go(args.model, epochs=args.epochs, limit=args.limit, inputs=args.inputs, aug_mult=args.aug_mult, aug_perc=args.aug_perc)
#python train.py mymodel_aug_90_x4_e200 --epochs=200 --aug_mult=4 --aug_perc=0.9
| 32.316206 | 143 | 0.632583 | from __future__ import print_function
import os
import sys
import glob
import time
import fnmatch
import argparse
import numpy as np
from PIL import Image
import keras
import conf
import random
import augment
import models
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
do_plot = True
except:
do_plot = False
def shuffle(samples):
    """Return a new list with the elements of *samples* in random order.

    Bug fix: the previous version emptied the caller's list as a side
    effect (repeated `del` on the input) and was O(n^2); random.sample
    builds an independent shuffled copy and leaves the input intact.
    """
    return random.sample(samples, len(samples))
def parse_img_filepath(filepath):
basename = os.path.basename(filepath)
f = basename[:-4]
f = f.split('_')
steering = float(f[3])
throttle = float(f[5])
data = {'steering':steering, 'throttle':throttle }
return data
def generator(samples, batch_size=32, perc_to_augment=0.5):
num_samples = len(samples)
shadows = augment.load_shadow_images('./shadows/*.png')
while 1:
samples = shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
images = []
controls = []
for fullpath in batch_samples:
try:
data = parse_img_filepath(fullpath)
steering = data["steering"]
throttle = data["throttle"]
try:
image = Image.open(fullpath)
except:
image = None
if image is None:
print('failed to open', fullpath)
continue
image = np.array(image)
if len(shadows) > 0 and random.uniform(0.0, 1.0) < perc_to_augment:
image = augment.augment_image(image, shadows)
center_angle = steering
images.append(image)
if conf.num_outputs == 2:
controls.append([center_angle, throttle])
elif conf.num_outputs == 1:
controls.append([center_angle])
else:
print("expected 1 or 2 ouputs")
except:
print("we threw an exception on:", fullpath)
yield [], []
X_train = np.array(images)
y_train = np.array(controls)
yield X_train, y_train
def get_files(filemask):
path, mask = os.path.split(filemask)
matches = []
for root, dirnames, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, mask):
matches.append(os.path.join(root, filename))
return matches
def train_test_split(lines, test_perc):
train = []
test = []
for line in lines:
if random.uniform(0.0, 1.0) < test_perc:
test.append(line)
else:
train.append(line)
return train, test
def make_generators(inputs, limit=None, batch_size=32, aug_perc=0.0):
lines = get_files(inputs)
print("found %d files" % len(lines))
if limit is not None:
lines = lines[:limit]
print("limiting to %d files" % len(lines))
train_samples, validation_samples = train_test_split(lines, test_perc=0.2)
print("num train/val", len(train_samples), len(validation_samples))
train_generator = generator(train_samples, batch_size=batch_size, perc_to_augment=aug_perc)
validation_generator = generator(validation_samples, batch_size=batch_size, perc_to_augment=0.0)
n_train = len(train_samples)
n_val = len(validation_samples)
return train_generator, validation_generator, n_train, n_val
def go(model_name, epochs=50, inputs='./log/*.jpg', limit=None, aug_mult=1, aug_perc=0.0):
print('working on model', model_name)
model = models.get_nvidia_model(conf.num_outputs)
models.show_model_summary(model)
callbacks = [
keras.callbacks.EarlyStopping(monitor='val_loss', patience=conf.training_patience, verbose=0),
keras.callbacks.ModelCheckpoint(model_name, monitor='val_loss', save_best_only=True, verbose=0),
]
batch_size = conf.training_batch_size
train_generator, validation_generator, n_train, n_val = make_generators(inputs, limit=limit, batch_size=batch_size, aug_perc=aug_perc)
if n_train == 0:
print('no training data found')
return
steps_per_epoch = n_train // batch_size
validation_steps = n_val // batch_size
print("steps_per_epoch", steps_per_epoch, "validation_steps", validation_steps)
history = model.fit_generator(train_generator,
steps_per_epoch = steps_per_epoch,
validation_data = validation_generator,
validation_steps = validation_steps,
epochs=epochs,
verbose=1,
callbacks=callbacks)
try:
if do_plot:
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('loss.png')
except:
print("problems with loss graph")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='train script')
parser.add_argument('model', type=str, help='model name')
parser.add_argument('--epochs', type=int, default=conf.training_default_epochs, help='number of epochs')
parser.add_argument('--inputs', default='../dataset/log/*.jpg', help='input mask to gather images')
parser.add_argument('--limit', type=int, default=None, help='max number of images to train with')
parser.add_argument('--aug_mult', type=int, default=conf.training_default_aug_mult, help='how many more images to augment')
parser.add_argument('--aug_perc', type=float, default=conf.training_default_aug_percent, help='what percentage of images to augment 0 - 1')
args = parser.parse_args()
go(args.model, epochs=args.epochs, limit=args.limit, inputs=args.inputs, aug_mult=args.aug_mult, aug_perc=args.aug_perc)
| true | true |
f7251a2ca8385d7a240cf8759dc50191209cbf05 | 2,647 | py | Python | frappe/website/context.py | gangadhar-kadam/lgnlvefrape | 6c72c134d358030d3737ff63e5a4b8187e802f17 | [
"MIT"
] | 1 | 2022-03-05T16:02:39.000Z | 2022-03-05T16:02:39.000Z | frappe/website/context.py | gangadhar-kadam/lgnlvefrape | 6c72c134d358030d3737ff63e5a4b8187e802f17 | [
"MIT"
] | null | null | null | frappe/website/context.py | gangadhar-kadam/lgnlvefrape | 6c72c134d358030d3737ff63e5a4b8187e802f17 | [
"MIT"
] | null | null | null | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.website.doctype.website_settings.website_settings import get_website_settings
from frappe.website.template import render_blocks
from frappe.website.router import get_route_info
from frappe.website.utils import can_cache
from frappe.website.permissions import get_access
def get_context(path):
	"""Resolve and build the website render context for *path*.

	When caching is permitted, the fully built context is stored under a
	per-path cache key; cache hits skip route resolution, permission
	lookup and template-block rendering entirely.
	"""
	context = None
	cache_key = "page_context:{}".format(path)

	def add_data_path(context):
		# record the originally requested path on context.data so it is
		# available to templates even when the context came from cache
		if not context.data:
			context.data = {}

		context.data["path"] = path

	# try from memcache
	if can_cache():
		context = frappe.cache().get_value(cache_key)

	if not context:
		context = get_route_info(path)

		# permission may be required for rendering
		if context.doc and context.doc.doctype=="Website Group":
			context["access"] = get_access(context.doc, context.pathname)
		else:
			# non-group pages are treated as fully public
			context["access"] = frappe._dict({"public_read":1, "public_write":1})

		context = build_context(context)
		add_data_path(context)

		if can_cache(context.no_cache):
			frappe.cache().set_value(cache_key, context)

	else:
		# NOTE(review): on a cache hit the access flags are reset to fully
		# public regardless of what was computed (and cached) above —
		# looks intentional for cached pages but worth confirming.
		context["access"] = frappe._dict({"public_read":1, "public_write":1})
		add_data_path(context)

	context.update(context.data or {})

	return context
def build_context(sitemap_options):
	"""get_context method of doc or module is supposed to render content templates and push it into context"""
	context = frappe._dict(sitemap_options)
	context.update(get_website_settings())

	# provide doc
	if context.doc:
		# document-backed page: expose the doc's fields and let the doc
		# customize its own context if it defines get_context
		context.update(context.doc.as_dict())
		if hasattr(context.doc, "get_context"):
			context.update(context.doc.get_context(context) or {})
	elif context.controller:
		# module-backed page: delegate to the controller module's hooks
		module = frappe.get_module(context.controller)

		if module:
			if hasattr(module, "get_context"):
				context.update(module.get_context(context) or {})
			if hasattr(module, "get_children"):
				context.get_children = module.get_children

	add_metatags(context)

	# render template blocks unless this is the base template itself or the
	# page has already been rendered upstream
	if context.get("base_template_path") != context.get("template") and not context.get("rendered"):
		context.data = render_blocks(context)

	return context
def add_metatags(context):
	"""Fill in Open Graph / Twitter card metatags from existing entries.

	No-op when the context carries no metatags. Existing twitter:card /
	og:type values are preserved; og/twitter title, description and image
	are mirrored from the plain name/description/image entries.
	"""
	tags = context.get("metatags")
	if not tags:
		return

	tags.setdefault("twitter:card", "summary")
	tags.setdefault("og:type", "article")

	if tags.get("name"):
		tags["og:title"] = tags["twitter:title"] = tags["name"]
	if tags.get("description"):
		tags["og:description"] = tags["twitter:description"] = tags["description"]
	if tags.get("image"):
		tags["og:image"] = tags["twitter:image:src"] = tags["image"]
| 28.771739 | 107 | 0.735172 |
from __future__ import unicode_literals
import frappe
from frappe.website.doctype.website_settings.website_settings import get_website_settings
from frappe.website.template import render_blocks
from frappe.website.router import get_route_info
from frappe.website.utils import can_cache
from frappe.website.permissions import get_access
def get_context(path):
context = None
cache_key = "page_context:{}".format(path)
def add_data_path(context):
if not context.data:
context.data = {}
context.data["path"] = path
if can_cache():
context = frappe.cache().get_value(cache_key)
if not context:
context = get_route_info(path)
if context.doc and context.doc.doctype=="Website Group":
context["access"] = get_access(context.doc, context.pathname)
else:
context["access"] = frappe._dict({"public_read":1, "public_write":1})
context = build_context(context)
add_data_path(context)
if can_cache(context.no_cache):
frappe.cache().set_value(cache_key, context)
else:
context["access"] = frappe._dict({"public_read":1, "public_write":1})
add_data_path(context)
context.update(context.data or {})
return context
def build_context(sitemap_options):
context = frappe._dict(sitemap_options)
context.update(get_website_settings())
if context.doc:
context.update(context.doc.as_dict())
if hasattr(context.doc, "get_context"):
context.update(context.doc.get_context(context) or {})
elif context.controller:
module = frappe.get_module(context.controller)
if module:
if hasattr(module, "get_context"):
context.update(module.get_context(context) or {})
if hasattr(module, "get_children"):
context.get_children = module.get_children
add_metatags(context)
if context.get("base_template_path") != context.get("template") and not context.get("rendered"):
context.data = render_blocks(context)
return context
def add_metatags(context):
tags = context.get("metatags")
if tags:
if not "twitter:card" in tags:
tags["twitter:card"] = "summary"
if not "og:type" in tags:
tags["og:type"] = "article"
if tags.get("name"):
tags["og:title"] = tags["twitter:title"] = tags["name"]
if tags.get("description"):
tags["og:description"] = tags["twitter:description"] = tags["description"]
if tags.get("image"):
tags["og:image"] = tags["twitter:image:src"] = tags["image"]
| true | true |
f7251ad863b8884ed1b5f58106eecd8cd3a5a1ce | 2,503 | py | Python | tomodachi/protocol/json_base.py | 0x1EE7/tomodachi | 8147b16d8be19b80b3bd7c5d8ed21c9863eaaa83 | [
"MIT"
] | null | null | null | tomodachi/protocol/json_base.py | 0x1EE7/tomodachi | 8147b16d8be19b80b3bd7c5d8ed21c9863eaaa83 | [
"MIT"
] | null | null | null | tomodachi/protocol/json_base.py | 0x1EE7/tomodachi | 8147b16d8be19b80b3bd7c5d8ed21c9863eaaa83 | [
"MIT"
] | null | null | null | import ujson
import uuid
import time
import zlib
import base64
from typing import Any, Dict, Tuple, Union
PROTOCOL_VERSION = 'tomodachi-json-base--1.0.0'


class JsonBase(object):
    """JSON envelope message protocol.

    Wraps payloads in an envelope carrying the sending service's identity
    and metadata (message uuid, protocol version, timestamp, topic). Large
    payloads are zlib-compressed and base64-encoded to stay within
    transport message-size limits.
    """

    @classmethod
    async def build_message(cls, service: Any, topic: str, data: Any, **kwargs: Any) -> str:
        """Serialize *data* into the envelope format for *topic*.

        Payloads whose JSON encoding is >= 60000 characters are compressed
        and the envelope's data_encoding is set to 'base64_gzip_json';
        otherwise the data is embedded as-is with encoding 'raw'.
        """
        data_encoding = 'raw'
        if len(ujson.dumps(data)) >= 60000:
            data = base64.b64encode(zlib.compress(ujson.dumps(data).encode('utf-8'))).decode('utf-8')
            data_encoding = 'base64_gzip_json'

        message = {
            'service': {
                'name': getattr(service, 'name', None),
                'uuid': getattr(service, 'uuid', None)
            },
            'metadata': {
                'message_uuid': '{}.{}'.format(getattr(service, 'uuid', ''), str(uuid.uuid4())),
                'protocol_version': PROTOCOL_VERSION,
                'compatible_protocol_versions': ['json_base-wip'],  # deprecated
                'timestamp': time.time(),
                'topic': topic,
                'data_encoding': data_encoding
            },
            'data': data
        }

        return ujson.dumps(message)

    @classmethod
    async def parse_message(cls, payload: str, **kwargs: Any) -> Union[Dict, Tuple]:
        """Parse an envelope produced by build_message.

        Returns a (message_dict, message_uuid, timestamp) tuple, decoding
        compressed payloads transparently.

        Raises:
            ValueError: if the envelope advertises an unknown data_encoding.
                (Bug fix: the previous implementation left ``data`` unbound
                in that case and crashed with a NameError.)
        """
        message = ujson.loads(payload)
        metadata = message.get('metadata', {})

        message_uuid = metadata.get('message_uuid')
        timestamp = metadata.get('timestamp')

        data_encoding = metadata.get('data_encoding')
        if data_encoding == 'raw':
            data = message.get('data')
        elif data_encoding == 'base64_gzip_json':
            data = ujson.loads(zlib.decompress(base64.b64decode(message.get('data').encode('utf-8'))).decode('utf-8'))
        else:
            raise ValueError('Unsupported data_encoding: {}'.format(data_encoding))

        return {
            'service': {
                'name': message.get('service', {}).get('name'),
                'uuid': message.get('service', {}).get('uuid')
            },
            'metadata': {
                'message_uuid': message_uuid,
                'protocol_version': metadata.get('protocol_version'),
                'timestamp': timestamp,
                'topic': metadata.get('topic'),
                'data_encoding': data_encoding
            },
            'data': data
        }, message_uuid, timestamp
| 39.730159 | 118 | 0.548542 | import ujson
import uuid
import time
import zlib
import base64
from typing import Any, Dict, Tuple, Union
PROTOCOL_VERSION = 'tomodachi-json-base--1.0.0'
class JsonBase(object):
    """Envelope protocol that serializes messages as JSON (via ujson).

    Payloads whose JSON form is >= 60000 characters are gzip-compressed and
    base64-encoded so they stay within transport size limits.
    """

    @classmethod
    async def build_message(cls, service: Any, topic: str, data: Any, **kwargs: Any) -> str:
        """Wrap ``data`` in the protocol envelope and return a JSON string."""
        data_encoding = 'raw'
        if len(ujson.dumps(data)) >= 60000:
            # Large payload: compress and base64-encode the JSON body.
            data = base64.b64encode(zlib.compress(ujson.dumps(data).encode('utf-8'))).decode('utf-8')
            data_encoding = 'base64_gzip_json'
        message = {
            'service': {
                'name': getattr(service, 'name', None),
                'uuid': getattr(service, 'uuid', None)
            },
            'metadata': {
                'message_uuid': '{}.{}'.format(getattr(service, 'uuid', ''), str(uuid.uuid4())),
                'protocol_version': PROTOCOL_VERSION,
                'compatible_protocol_versions': ['json_base-wip'],
                'timestamp': time.time(),
                'topic': topic,
                'data_encoding': data_encoding
            },
            'data': data
        }
        return ujson.dumps(message)

    @classmethod
    async def parse_message(cls, payload: str, **kwargs: Any) -> Union[Dict, Tuple]:
        """Parse an envelope produced by :meth:`build_message`.

        Returns a ``(message_dict, message_uuid, timestamp)`` tuple.

        Raises
        ------
        ValueError
            If the envelope declares an unknown ``data_encoding``.
        """
        message = ujson.loads(payload)
        metadata = message.get('metadata', {})
        message_uuid = metadata.get('message_uuid')
        timestamp = metadata.get('timestamp')
        data_encoding = metadata.get('data_encoding')
        if data_encoding == 'raw':
            data = message.get('data')
        elif data_encoding == 'base64_gzip_json':
            data = ujson.loads(zlib.decompress(base64.b64decode(message.get('data').encode('utf-8'))).decode('utf-8'))
        else:
            # Previously an unknown encoding left `data` unbound and crashed
            # with an opaque NameError; fail with an explicit error instead.
            raise ValueError('Unsupported data_encoding: {}'.format(data_encoding))
        return {
            'service': {
                'name': message.get('service', {}).get('name'),
                'uuid': message.get('service', {}).get('uuid')
            },
            'metadata': {
                'message_uuid': message_uuid,
                'protocol_version': metadata.get('protocol_version'),
                'timestamp': timestamp,
                'topic': metadata.get('topic'),
                'data_encoding': data_encoding
            },
            'data': data
        }, message_uuid, timestamp
f7251c3cfff5728cee204b97993228189eefc64e | 2,801 | py | Python | planning/path_generator/search_path_generator.py | HybridRobotics/cbf | d8a1b376e7e910de71df60cdf3619f68c40ab3ed | [
"Apache-2.0"
] | 9 | 2022-03-07T09:12:29.000Z | 2022-03-25T01:41:49.000Z | planning/path_generator/search_path_generator.py | HybridRobotics/cbf | d8a1b376e7e910de71df60cdf3619f68c40ab3ed | [
"Apache-2.0"
] | null | null | null | planning/path_generator/search_path_generator.py | HybridRobotics/cbf | d8a1b376e7e910de71df60cdf3619f68c40ab3ed | [
"Apache-2.0"
] | null | null | null | import sys
import numpy as np
from planning.path_generator.astar import *
def plot_global_map(path, obstacles):
    """Visualize the planned path and the obstacle patches.

    Blocks on ``plt.show()``; axis limits are fixed to the grid extent
    (cell size 0.15) used by the planners.
    """
    _, axes = plt.subplots()
    for obstacle in obstacles:
        axes.add_patch(obstacle.get_plot_patch())
    axes.plot(path[:, 0], path[:, 1])
    plt.xlim([-1 * 0.15, 11 * 0.15])
    plt.ylim([0 * 0.15, 8 * 0.15])
    plt.show()
class AstarPathGenerator:
    """Global path planner based on grid A* search."""

    def __init__(self, grid, quad, margin):
        self._global_path = None
        self._grid = GridMap(bounds=grid[0], cell_size=grid[1], quad=quad)
        self._margin = margin

    def generate_path(self, sys, obstacles, goal_pos):
        """Plan a path from the system's current position to ``goal_pos``.

        NOTE: the ``sys`` parameter (the robot system object) shadows the
        stdlib ``sys`` module, so the module is re-imported under an alias
        for the failure exit below.
        """
        import sys as _sys
        graph = GraphSearch(graph=self._grid, obstacles=obstacles, margin=self._margin)
        path = graph.a_star(sys.get_state()[:2], goal_pos)
        self._global_path = np.array([p.pos for p in path])
        print(self._global_path)
        # `array == []` never evaluates truthy, so the original guard could
        # never fire; check the number of waypoints explicitly.
        if len(self._global_path) == 0:
            print("Global Path not found.")
            _sys.exit(1)
        if True:  # debug visualization
            plot_global_map(self._global_path, obstacles)
        return self._global_path

    def logging(self, logger):
        logger._paths.append(self._global_path)
class AstarLoSPathGenerator:
    """Grid A* planner whose result is reduced to line-of-sight waypoints."""

    def __init__(self, grid, quad, margin):
        self._global_path = None
        self._grid = GridMap(bounds=grid[0], cell_size=grid[1], quad=quad)
        self._margin = margin

    def generate_path(self, sys, obstacles, goal_pos):
        """Plan a path and prune intermediate nodes with line-of-sight checks.

        NOTE: the ``sys`` parameter (the robot system object) shadows the
        stdlib ``sys`` module, so the module is re-imported under an alias
        for the failure exit below.
        """
        import sys as _sys
        graph = GraphSearch(graph=self._grid, obstacles=obstacles, margin=self._margin)
        path = graph.a_star(sys.get_state()[:2], goal_pos)
        path = graph.reduce_path(path)
        self._global_path = np.array([p.pos for p in path])
        print(self._global_path)
        # `array == []` never evaluates truthy, so the original guard could
        # never fire; check the number of waypoints explicitly.
        if len(self._global_path) == 0:
            print("Global Path not found.")
            _sys.exit(1)
        if False:  # debug visualization (disabled)
            plot_global_map(self._global_path, obstacles)
        return self._global_path

    def logging(self, logger):
        logger._paths.append(self._global_path)
class ThetaStarPathGenerator:
    """Global path planner based on grid Theta* (any-angle) search."""

    def __init__(self, grid, quad, margin):
        self._global_path = None
        # NOTE: the `quad` argument is intentionally ignored here; Theta*
        # always uses a non-quad grid.
        self._grid = GridMap(bounds=grid[0], cell_size=grid[1], quad=False)
        self._margin = margin

    def generate_path(self, sys, obstacles, goal_pos):
        """Plan an any-angle path from the system's position to ``goal_pos``.

        NOTE: the ``sys`` parameter (the robot system object) shadows the
        stdlib ``sys`` module, so the module is re-imported under an alias
        for the failure exit below.
        """
        import sys as _sys
        graph = GraphSearch(graph=self._grid, obstacles=obstacles, margin=self._margin)
        path = graph.theta_star(sys.get_state()[:2], goal_pos)
        self._global_path = np.array([p.pos for p in path])
        print(self._global_path)
        # `array == []` never evaluates truthy, so the original guard could
        # never fire; check the number of waypoints explicitly.
        if len(self._global_path) == 0:
            print("Global Path not found.")
            _sys.exit(1)
        if True:  # debug visualization
            plot_global_map(self._global_path, obstacles)
        return self._global_path

    def logging(self, logger):
        logger._paths.append(self._global_path)
| 33.345238 | 87 | 0.633345 | import sys
import numpy as np
from planning.path_generator.astar import *
def plot_global_map(path, obstacles):
    """Visualize the planned path and the obstacle patches.

    Blocks on ``plt.show()``; axis limits are fixed to the grid extent
    (cell size 0.15) used by the planners.
    """
    _, axes = plt.subplots()
    for obstacle in obstacles:
        axes.add_patch(obstacle.get_plot_patch())
    axes.plot(path[:, 0], path[:, 1])
    plt.xlim([-1 * 0.15, 11 * 0.15])
    plt.ylim([0 * 0.15, 8 * 0.15])
    plt.show()
class AstarPathGenerator:
    """Global path planner based on grid A* search."""

    def __init__(self, grid, quad, margin):
        self._global_path = None
        self._grid = GridMap(bounds=grid[0], cell_size=grid[1], quad=quad)
        self._margin = margin

    def generate_path(self, sys, obstacles, goal_pos):
        """Plan a path from the system's current position to ``goal_pos``.

        NOTE: the ``sys`` parameter (the robot system object) shadows the
        stdlib ``sys`` module, so the module is re-imported under an alias
        for the failure exit below.
        """
        import sys as _sys
        graph = GraphSearch(graph=self._grid, obstacles=obstacles, margin=self._margin)
        path = graph.a_star(sys.get_state()[:2], goal_pos)
        self._global_path = np.array([p.pos for p in path])
        print(self._global_path)
        # `array == []` never evaluates truthy, so the original guard could
        # never fire; check the number of waypoints explicitly.
        if len(self._global_path) == 0:
            print("Global Path not found.")
            _sys.exit(1)
        if True:  # debug visualization
            plot_global_map(self._global_path, obstacles)
        return self._global_path

    def logging(self, logger):
        logger._paths.append(self._global_path)
class AstarLoSPathGenerator:
    """Grid A* planner whose result is reduced to line-of-sight waypoints."""

    def __init__(self, grid, quad, margin):
        self._global_path = None
        self._grid = GridMap(bounds=grid[0], cell_size=grid[1], quad=quad)
        self._margin = margin

    def generate_path(self, sys, obstacles, goal_pos):
        """Plan a path and prune intermediate nodes with line-of-sight checks.

        NOTE: the ``sys`` parameter (the robot system object) shadows the
        stdlib ``sys`` module, so the module is re-imported under an alias
        for the failure exit below.
        """
        import sys as _sys
        graph = GraphSearch(graph=self._grid, obstacles=obstacles, margin=self._margin)
        path = graph.a_star(sys.get_state()[:2], goal_pos)
        path = graph.reduce_path(path)
        self._global_path = np.array([p.pos for p in path])
        print(self._global_path)
        # `array == []` never evaluates truthy, so the original guard could
        # never fire; check the number of waypoints explicitly.
        if len(self._global_path) == 0:
            print("Global Path not found.")
            _sys.exit(1)
        if False:  # debug visualization (disabled)
            plot_global_map(self._global_path, obstacles)
        return self._global_path

    def logging(self, logger):
        logger._paths.append(self._global_path)
class ThetaStarPathGenerator:
    """Global path planner based on grid Theta* (any-angle) search."""

    def __init__(self, grid, quad, margin):
        self._global_path = None
        # NOTE: the `quad` argument is intentionally ignored here; Theta*
        # always uses a non-quad grid.
        self._grid = GridMap(bounds=grid[0], cell_size=grid[1], quad=False)
        self._margin = margin

    def generate_path(self, sys, obstacles, goal_pos):
        """Plan an any-angle path from the system's position to ``goal_pos``.

        NOTE: the ``sys`` parameter (the robot system object) shadows the
        stdlib ``sys`` module, so the module is re-imported under an alias
        for the failure exit below.
        """
        import sys as _sys
        graph = GraphSearch(graph=self._grid, obstacles=obstacles, margin=self._margin)
        path = graph.theta_star(sys.get_state()[:2], goal_pos)
        self._global_path = np.array([p.pos for p in path])
        print(self._global_path)
        # `array == []` never evaluates truthy, so the original guard could
        # never fire; check the number of waypoints explicitly.
        if len(self._global_path) == 0:
            print("Global Path not found.")
            _sys.exit(1)
        if True:  # debug visualization
            plot_global_map(self._global_path, obstacles)
        return self._global_path

    def logging(self, logger):
        logger._paths.append(self._global_path)
| true | true |
f7251d422b29b0275ce1c312bda2c4763835c059 | 33,303 | py | Python | pytorch/pytorchcv/models/common.py | HyperGAN/imgclsmob | 88b9776a5a927dc9a54e85e31978c4a9ec5ecbf3 | [
"MIT"
] | null | null | null | pytorch/pytorchcv/models/common.py | HyperGAN/imgclsmob | 88b9776a5a927dc9a54e85e31978c4a9ec5ecbf3 | [
"MIT"
] | null | null | null | pytorch/pytorchcv/models/common.py | HyperGAN/imgclsmob | 88b9776a5a927dc9a54e85e31978c4a9ec5ecbf3 | [
"MIT"
] | null | null | null | """
Common routines for models in PyTorch.
"""
__all__ = ['HSwish', 'get_activation_layer', 'conv1x1', 'conv3x3', 'depthwise_conv3x3', 'ConvBlock', 'conv1x1_block',
'conv3x3_block', 'conv7x7_block', 'dwconv3x3_block', 'dwconv5x5_block', 'PreConvBlock', 'pre_conv1x1_block',
'pre_conv3x3_block', 'ChannelShuffle', 'ChannelShuffle2', 'SEBlock', 'IBN', 'Identity', 'DualPathSequential',
'Concurrent', 'ParametricSequential', 'ParametricConcurrent', 'Hourglass', 'SesquialteralHourglass',
'MultiOutputSequential', 'Flatten']
import math
from inspect import isfunction
import torch
import torch.nn as nn
import torch.nn.functional as F
class Swish(nn.Module):
    """
    Swish activation, ``x * sigmoid(x)``, from 'Searching for Activation
    Functions,' https://arxiv.org/abs/1710.05941.
    """
    def forward(self, x):
        return torch.sigmoid(x) * x
class HSigmoid(nn.Module):
    """
    Hard sigmoid, ``relu6(x + 3) / 6`` -- the piecewise-linear approximation
    of sigmoid from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
    """
    def forward(self, x):
        shifted = F.relu6(x + 3.0, inplace=True)
        return shifted / 6.0
class HSwish(nn.Module):
    """
    Hard swish, ``x * relu6(x + 3) / 6``, from 'Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244.

    Parameters:
    ----------
    inplace : bool
        Whether the inner relu6 may operate in place.
    """
    def __init__(self, inplace=False):
        super(HSwish, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        gate = F.relu6(x + 3.0, inplace=self.inplace)
        return x * gate / 6.0
def get_activation_layer(activation):
    """
    Resolve an activation specification into a concrete layer.

    Parameters:
    ----------
    activation : function, or str, or nn.Module
        A zero-argument factory, one of the names 'relu'/'relu6'/'swish'/
        'hswish', or an already-built module (returned unchanged).

    Returns
    -------
    nn.Module
        Activation layer.
    """
    assert (activation is not None)
    if isfunction(activation):
        return activation()
    if isinstance(activation, str):
        # Factories are lazy so unknown names fail before any construction.
        factories = {
            "relu": lambda: nn.ReLU(inplace=True),
            "relu6": lambda: nn.ReLU6(inplace=True),
            "swish": lambda: Swish(),
            "hswish": lambda: HSwish(inplace=True),
        }
        if activation not in factories:
            raise NotImplementedError()
        return factories[activation]()
    assert (isinstance(activation, nn.Module))
    return activation
def conv1x1(in_channels,
            out_channels,
            stride=1,
            groups=1,
            bias=False):
    """
    Pointwise (1x1) convolution layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    """
    return nn.Conv2d(in_channels, out_channels, kernel_size=1,
                     stride=stride, groups=groups, bias=bias)
def conv3x3(in_channels,
            out_channels,
            stride=1,
            padding=1,
            dilation=1,
            groups=1,
            bias=False):
    """
    3x3 convolution layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    """
    return nn.Conv2d(in_channels, out_channels, kernel_size=3,
                     stride=stride, padding=padding, dilation=dilation,
                     groups=groups, bias=bias)
def depthwise_conv3x3(channels,
                      stride):
    """
    Depthwise 3x3 convolution layer (``groups == channels``).

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    """
    return nn.Conv2d(channels, channels, kernel_size=3, stride=stride,
                     padding=1, groups=channels, bias=False)
class ConvBlock(nn.Module):
    """
    Standard convolution block: Conv2d -> (BatchNorm2d) -> (activation).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function; None disables it.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 groups=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: nn.ReLU(inplace=True))):
        super(ConvBlock, self).__init__()
        self.activate = activation is not None
        self.use_bn = use_bn
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        if use_bn:
            self.bn = nn.BatchNorm2d(num_features=out_channels, eps=bn_eps)
        if self.activate:
            self.activ = get_activation_layer(activation)

    def forward(self, x):
        out = self.conv(x)
        if self.use_bn:
            out = self.bn(out)
        if self.activate:
            out = self.activ(out)
        return out
def conv1x1_block(in_channels,
                  out_channels,
                  stride=1,
                  padding=0,
                  groups=1,
                  bias=False,
                  bn_eps=1e-5,
                  activation=(lambda: nn.ReLU(inplace=True))):
    """
    1x1 convolution block (Conv -> BN -> activation).

    See :class:`ConvBlock` for the meaning of the parameters; the kernel
    size is fixed to 1 and padding defaults to 0.
    """
    return ConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        stride=stride,
        padding=padding,
        groups=groups,
        bias=bias,
        bn_eps=bn_eps,
        activation=activation)
def conv3x3_block(in_channels,
                  out_channels,
                  stride=1,
                  padding=1,
                  dilation=1,
                  groups=1,
                  bias=False,
                  use_bn=True,
                  bn_eps=1e-5,
                  activation=(lambda: nn.ReLU(inplace=True))):
    """
    3x3 convolution block (Conv -> BN -> activation).

    See :class:`ConvBlock` for the meaning of the parameters; the kernel
    size is fixed to 3 and padding defaults to 1.
    """
    return ConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
def conv5x5_block(in_channels,
                  out_channels,
                  stride=1,
                  padding=2,
                  dilation=1,
                  groups=1,
                  bias=False,
                  bn_eps=1e-5,
                  activation=(lambda: nn.ReLU(inplace=True))):
    """
    5x5 convolution block (Conv -> BN -> activation).

    See :class:`ConvBlock` for the meaning of the parameters; the kernel
    size is fixed to 5 and padding defaults to 2.
    """
    return ConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=5,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=bias,
        bn_eps=bn_eps,
        activation=activation)
def conv7x7_block(in_channels,
                  out_channels,
                  stride=1,
                  padding=3,
                  bias=False,
                  activation=(lambda: nn.ReLU(inplace=True))):
    """
    7x7 convolution block (Conv -> BN -> activation).

    See :class:`ConvBlock` for the meaning of the parameters; the kernel
    size is fixed to 7 and padding defaults to 3.
    """
    return ConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=7,
        stride=stride,
        padding=padding,
        bias=bias,
        activation=activation)
def dwconv3x3_block(in_channels,
                    out_channels,
                    stride=1,
                    padding=1,
                    dilation=1,
                    bias=False,
                    bn_eps=1e-5,
                    activation=(lambda: nn.ReLU(inplace=True))):
    """
    Depthwise 3x3 convolution block (``groups == out_channels``).

    See :func:`conv3x3_block` for the meaning of the parameters.
    """
    return conv3x3_block(
        in_channels=in_channels,
        out_channels=out_channels,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=out_channels,
        bias=bias,
        bn_eps=bn_eps,
        activation=activation)
def dwconv5x5_block(in_channels,
                    out_channels,
                    stride=1,
                    padding=2,
                    dilation=1,
                    bias=False,
                    bn_eps=1e-5,
                    activation=(lambda: nn.ReLU(inplace=True))):
    """
    Depthwise 5x5 convolution block (``groups == out_channels``).

    See :func:`conv5x5_block` for the meaning of the parameters.
    """
    return conv5x5_block(
        in_channels=in_channels,
        out_channels=out_channels,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=out_channels,
        bias=bias,
        bn_eps=bn_eps,
        activation=activation)
class PreConvBlock(nn.Module):
    """
    Pre-activation convolution block: BatchNorm -> ReLU -> Conv2d.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    bias : bool, default False
        Whether the layer uses a bias vector.
    return_preact : bool, default False
        Whether to also return the pre-activated tensor (used by PreResNet).
    activate : bool, default True
        Whether to apply the ReLU.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 bias=False,
                 return_preact=False,
                 activate=True):
        super(PreConvBlock, self).__init__()
        self.return_preact = return_preact
        self.activate = activate
        self.bn = nn.BatchNorm2d(num_features=in_channels)
        if activate:
            self.activ = nn.ReLU(inplace=True)
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)

    def forward(self, x):
        x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        if self.return_preact:
            # Captured after BN/ReLU but before the convolution.
            x_pre_activ = x
        x = self.conv(x)
        if self.return_preact:
            return x, x_pre_activ
        return x
def pre_conv1x1_block(in_channels,
                      out_channels,
                      stride=1,
                      bias=False,
                      return_preact=False,
                      activate=True):
    """
    1x1 pre-activation convolution block (BN -> ReLU -> Conv).

    See :class:`PreConvBlock` for the meaning of the parameters; the kernel
    size is fixed to 1 and padding to 0.
    """
    return PreConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        stride=stride,
        padding=0,
        bias=bias,
        return_preact=return_preact,
        activate=activate)
def pre_conv3x3_block(in_channels,
                      out_channels,
                      stride=1,
                      padding=1,
                      dilation=1,
                      return_preact=False,
                      activate=True):
    """
    3x3 pre-activation convolution block (BN -> ReLU -> Conv).

    See :class:`PreConvBlock` for the meaning of the parameters; the kernel
    size is fixed to 3 and padding defaults to 1.
    """
    return PreConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        return_preact=return_preact,
        activate=activate)
def channel_shuffle(x,
                    groups):
    """
    Channel shuffle operation from 'ShuffleNet: An Extremely Efficient
    Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    x : Tensor
        Input tensor of shape (batch, channels, height, width).
    groups : int
        Number of groups; channels must be divisible by it.

    Returns
    -------
    Tensor
        Tensor with channels interleaved across groups.
    """
    batch, channels, height, width = x.size()
    per_group = channels // groups
    # Reshape to (batch, groups, per_group, H, W), swap the two channel
    # axes, and flatten back -- this interleaves channels across groups.
    shuffled = x.view(batch, groups, per_group, height, width)
    shuffled = shuffled.transpose(1, 2).contiguous()
    return shuffled.view(batch, channels, height, width)
class ChannelShuffle(nn.Module):
    """
    Channel shuffle layer: a module wrapper around :func:`channel_shuffle`
    that stores the group count.

    Parameters:
    ----------
    channels : int
        Number of channels; must be divisible by ``groups``.
    groups : int
        Number of groups.

    Raises
    ------
    ValueError
        If ``channels`` is not divisible by ``groups``.
    """
    def __init__(self,
                 channels,
                 groups):
        super(ChannelShuffle, self).__init__()
        if channels % groups != 0:
            raise ValueError('channels must be divisible by groups')
        self.groups = groups

    def forward(self, x):
        return channel_shuffle(x, self.groups)
def channel_shuffle2(x,
                     groups):
    """
    Alternative channel shuffle operation ('ShuffleNet,'
    https://arxiv.org/abs/1707.01083): the group axis is placed second in
    the reshape, giving the inverse interleaving of :func:`channel_shuffle`.

    Parameters:
    ----------
    x : Tensor
        Input tensor of shape (batch, channels, height, width).
    groups : int
        Number of groups; channels must be divisible by it.

    Returns
    -------
    Tensor
        Tensor with channels re-interleaved.
    """
    batch, channels, height, width = x.size()
    per_group = channels // groups
    shuffled = x.view(batch, per_group, groups, height, width)
    shuffled = shuffled.transpose(1, 2).contiguous()
    return shuffled.view(batch, channels, height, width)
class ChannelShuffle2(nn.Module):
    """
    Channel shuffle layer: a module wrapper around :func:`channel_shuffle2`
    that stores the group count.

    Parameters:
    ----------
    channels : int
        Number of channels; must be divisible by ``groups``.
    groups : int
        Number of groups.

    Raises
    ------
    ValueError
        If ``channels`` is not divisible by ``groups``.
    """
    def __init__(self,
                 channels,
                 groups):
        super(ChannelShuffle2, self).__init__()
        if channels % groups != 0:
            raise ValueError('channels must be divisible by groups')
        self.groups = groups

    def forward(self, x):
        return channel_shuffle2(x, self.groups)
class SEBlock(nn.Module):
    """
    Squeeze-and-Excitation block from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    reduction : int, default 16
        Channel reduction factor of the bottleneck.
    approx_sigmoid : bool, default False
        Use the hard-sigmoid approximation for the gate.
    activation : function, or str, or nn.Module
        Activation for the bottleneck.
    """
    def __init__(self,
                 channels,
                 reduction=16,
                 approx_sigmoid=False,
                 activation=(lambda: nn.ReLU(inplace=True))):
        super(SEBlock, self).__init__()
        mid_channels = channels // reduction
        self.pool = nn.AdaptiveAvgPool2d(output_size=1)
        self.conv1 = conv1x1(
            in_channels=channels,
            out_channels=mid_channels,
            bias=True)
        self.activ = get_activation_layer(activation)
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=channels,
            bias=True)
        self.sigmoid = HSigmoid() if approx_sigmoid else nn.Sigmoid()

    def forward(self, x):
        # Squeeze to per-channel statistics, excite through the bottleneck,
        # then rescale the input channel-wise.
        gate = self.pool(x)
        gate = self.conv1(gate)
        gate = self.activ(gate)
        gate = self.conv2(gate)
        gate = self.sigmoid(gate)
        return x * gate
class IBN(nn.Module):
    """
    Instance-Batch Normalization block from 'Two at Once: Enhancing Learning
    and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441. Splits the channels in two and
    normalizes one half with InstanceNorm and the other with BatchNorm.

    Parameters:
    ----------
    channels : int
        Number of channels.
    first_fraction : float, default 0.5
        Fraction of channels in the first split.
    inst_first : bool, default True
        Whether instance normalization is applied to the first split.
    """
    def __init__(self,
                 channels,
                 first_fraction=0.5,
                 inst_first=True):
        super(IBN, self).__init__()
        self.inst_first = inst_first
        h1_channels = int(math.floor(channels * first_fraction))
        h2_channels = channels - h1_channels
        self.split_sections = [h1_channels, h2_channels]
        if inst_first:
            self.inst_norm = nn.InstanceNorm2d(
                num_features=h1_channels,
                affine=True)
            self.batch_norm = nn.BatchNorm2d(num_features=h2_channels)
        else:
            self.batch_norm = nn.BatchNorm2d(num_features=h1_channels)
            self.inst_norm = nn.InstanceNorm2d(
                num_features=h2_channels,
                affine=True)

    def forward(self, x):
        x1, x2 = torch.split(x, split_size_or_sections=self.split_sections, dim=1)
        if self.inst_first:
            out1 = self.inst_norm(x1.contiguous())
            out2 = self.batch_norm(x2.contiguous())
        else:
            out1 = self.batch_norm(x1.contiguous())
            out2 = self.inst_norm(x2.contiguous())
        return torch.cat((out1, out2), dim=1)
class Identity(nn.Module):
    """
    Pass-through block: returns its input unchanged.
    """
    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, x):
        return x
class DualPathSequential(nn.Sequential):
    """
    A sequential container for modules carrying two data streams.
    Modules are executed in registration order.

    Parameters:
    ----------
    return_two : bool, default True
        Whether to return both streams after execution.
    first_ordinals : int, default 0
        Number of leading modules treated as single-stream.
    last_ordinals : int, default 0
        Number of trailing modules treated as single-stream.
    dual_path_scheme : function
        How a dual-stream module consumes (x1, x2).
    dual_path_scheme_ordinal : function
        How a single-stream ("ordinal") module consumes (x1, x2).
    """
    def __init__(self,
                 return_two=True,
                 first_ordinals=0,
                 last_ordinals=0,
                 dual_path_scheme=(lambda module, x1, x2: module(x1, x2)),
                 dual_path_scheme_ordinal=(lambda module, x1, x2: (module(x1), x2))):
        super(DualPathSequential, self).__init__()
        self.return_two = return_two
        self.first_ordinals = first_ordinals
        self.last_ordinals = last_ordinals
        self.dual_path_scheme = dual_path_scheme
        self.dual_path_scheme_ordinal = dual_path_scheme_ordinal

    def forward(self, x1, x2=None):
        total = len(self._modules.values())
        for index, module in enumerate(self._modules.values()):
            # Leading/trailing "ordinal" modules only see the first stream.
            is_ordinal = (index < self.first_ordinals) or (index >= total - self.last_ordinals)
            scheme = self.dual_path_scheme_ordinal if is_ordinal else self.dual_path_scheme
            x1, x2 = scheme(module, x1, x2)
        return (x1, x2) if self.return_two else x1
class Concurrent(nn.Sequential):
    """
    A container that feeds its input to every child module and concatenates
    (or stacks) the outputs along one axis.

    Parameters:
    ----------
    axis : int, default 1
        The axis on which to concatenate the outputs.
    stack : bool, default False
        Whether to stack along a new dimension instead of concatenating.
    """
    def __init__(self,
                 axis=1,
                 stack=False):
        super(Concurrent, self).__init__()
        self.axis = axis
        self.stack = stack

    def forward(self, x):
        branch_outputs = [branch(x) for branch in self._modules.values()]
        combine = torch.stack if self.stack else torch.cat
        return combine(tuple(branch_outputs), dim=self.axis)
class ParametricSequential(nn.Sequential):
    """
    A sequential container whose children all receive the same extra keyword
    arguments. Modules are executed in registration order.
    """
    def __init__(self, *args):
        super(ParametricSequential, self).__init__(*args)

    def forward(self, x, **kwargs):
        for stage in self._modules.values():
            x = stage(x, **kwargs)
        return x
class ParametricConcurrent(nn.Sequential):
    """
    A container that feeds its input (plus shared keyword arguments) to every
    child module and concatenates the outputs along one axis.

    Parameters:
    ----------
    axis : int, default 1
        The axis on which to concatenate the outputs.
    """
    def __init__(self, axis=1):
        super(ParametricConcurrent, self).__init__()
        self.axis = axis

    def forward(self, x, **kwargs):
        branch_outputs = [branch(x, **kwargs) for branch in self._modules.values()]
        return torch.cat(tuple(branch_outputs), dim=self.axis)
class Hourglass(nn.Module):
    """
    An hourglass block: an encoder (down) path, a decoder (up) path, and a
    skip branch merged in at every resolution.

    Parameters:
    ----------
    down_seq : nn.Sequential
        Down modules as sequential.
    up_seq : nn.Sequential
        Up modules as sequential.
    skip_seq : nn.Sequential
        Skip connection modules as sequential.
    merge_type : str, default 'add'
        Type of concatenation of up and skip outputs (only 'add' supported).
    return_first_skip : bool, default False
        Whether return the first skip connection output. Used in ResAttNet.
    """
    def __init__(self,
                 down_seq,
                 up_seq,
                 skip_seq,
                 merge_type="add",
                 return_first_skip=False):
        super(Hourglass, self).__init__()
        # All three branches must have one module per resolution level.
        assert (len(up_seq) == len(down_seq))
        assert (len(skip_seq) == len(down_seq))
        assert (merge_type in ["add"])
        self.merge_type = merge_type
        self.return_first_skip = return_first_skip
        self.depth = len(down_seq)
        self.down_seq = down_seq
        self.up_seq = up_seq
        self.skip_seq = skip_seq

    def forward(self, x, **kwargs):
        y = None
        # Encoder pass: record the input and every downsampled feature map.
        down_outs = [x]
        for down_module in self.down_seq._modules.values():
            x = down_module(x)
            down_outs.append(x)
        # Decoder pass: from the deepest output back up, merging the skip
        # branch of each recorded resolution into the main stream.
        for i in range(len(down_outs)):
            if i != 0:
                # Skip of the (depth - i)-th encoder output; at i == 0 the
                # deepest feature map is the stream itself, so no merge.
                y = down_outs[self.depth - i]
                skip_module = self.skip_seq[self.depth - i]
                y = skip_module(y)
                if (y is not None) and (self.merge_type == "add"):
                    x = x + y
            if i != len(down_outs) - 1:
                # Not at the top resolution yet: upsample one level.
                up_module = self.up_seq[self.depth - 1 - i]
                x = up_module(x)
        if self.return_first_skip:
            # `y` is the last skip output, i.e. the top-resolution one.
            return x, y
        else:
            return x
class SesquialteralHourglass(nn.Module):
    """
    A sesquialteral hourglass block.

    Parameters:
    ----------
    down1_seq : nn.Sequential
        The first down modules as sequential.
    skip1_seq : nn.Sequential
        The first skip connection modules as sequential.
    up_seq : nn.Sequential
        Up modules as sequential.
    skip2_seq : nn.Sequential
        The second skip connection modules as sequential.
    down2_seq : nn.Sequential
        The second down modules as sequential.
    merge_type : str, default 'con'
        Type of concatenation of up and skip outputs.
    """
    def __init__(self,
                 down1_seq,
                 skip1_seq,
                 up_seq,
                 skip2_seq,
                 down2_seq,
                 merge_type="cat"):
        super(SesquialteralHourglass, self).__init__()
        # down1/up/down2 operate per level; the skip branches have one extra
        # module (applied before/after the level loop).
        assert (len(down1_seq) == len(up_seq))
        assert (len(down1_seq) == len(down2_seq))
        assert (len(skip1_seq) == len(skip2_seq))
        assert (len(down1_seq) == len(skip1_seq) - 1)
        assert (merge_type in ["cat", "add"])
        self.merge_type = merge_type
        self.depth = len(down1_seq)
        self.down1_seq = down1_seq
        self.skip1_seq = skip1_seq
        self.up_seq = up_seq
        self.skip2_seq = skip2_seq
        self.down2_seq = down2_seq
    def _merge(self, x, y):
        # Combine a main-path tensor with a skip tensor; `y is None` is a
        # no-op so callers can merge unconditionally.
        if y is not None:
            if self.merge_type == "cat":
                x = torch.cat((x, y), dim=1)
            elif self.merge_type == "add":
                x = x + y
        return x
    def forward(self, x, **kwargs):
        # Phase 1: first down path, storing a skip1-transformed feature at
        # every level (including the input level).
        y = self.skip1_seq[0](x)
        skip1_outs = [y]
        for i in range(self.depth):
            x = self.down1_seq[i](x)
            y = self.skip1_seq[i + 1](x)
            skip1_outs.append(y)
        # Phase 2: up path starting from the deepest skip1 feature, merging
        # the same-level skip1 output and storing skip2-transformed features.
        x = skip1_outs[self.depth]
        y = self.skip2_seq[0](x)
        skip2_outs = [y]
        for i in range(self.depth):
            x = self.up_seq[i](x)
            y = skip1_outs[self.depth - 1 - i]
            x = self._merge(x, y)
            y = self.skip2_seq[i + 1](x)
            skip2_outs.append(y)
        # Phase 3: second down path, merging the stored skip2 features.
        x = self.skip2_seq[self.depth](x)
        for i in range(self.depth):
            x = self.down2_seq[i](x)
            y = skip2_outs[self.depth - 1 - i]
            x = self._merge(x, y)
        return x
class MultiOutputSequential(nn.Sequential):
    """
    A sequential chain that, besides its final output, also collects the
    output of every child flagged with ``do_output = True``.
    ``forward`` returns ``[final_output] + flagged_outputs``.
    """
    def __init__(self):
        super(MultiOutputSequential, self).__init__()

    def forward(self, x):
        collected = []
        for stage in self._modules.values():
            x = stage(x)
            # A child opts into the output list by carrying a truthy
            # `do_output` attribute.
            if getattr(stage, "do_output", False):
                collected.append(x)
        return [x] + collected
class Flatten(nn.Module):
    """
    Reshape a (N, ...) tensor into a (N, -1) matrix, keeping the batch axis.
    """
    def forward(self, x):
        batch_size = x.size(0)
        return x.view(batch_size, -1)
| 30.111212 | 120 | 0.58283 |
# Public API of this module: activations, convolution helpers, and the
# container modules shared by the model definitions.
__all__ = ['HSwish', 'get_activation_layer', 'conv1x1', 'conv3x3', 'depthwise_conv3x3', 'ConvBlock', 'conv1x1_block',
           'conv3x3_block', 'conv7x7_block', 'dwconv3x3_block', 'dwconv5x5_block', 'PreConvBlock', 'pre_conv1x1_block',
           'pre_conv3x3_block', 'ChannelShuffle', 'ChannelShuffle2', 'SEBlock', 'IBN', 'Identity', 'DualPathSequential',
           'Concurrent', 'ParametricSequential', 'ParametricConcurrent', 'Hourglass', 'SesquialteralHourglass',
           'MultiOutputSequential', 'Flatten']
import math
from inspect import isfunction
import torch
import torch.nn as nn
import torch.nn.functional as F
class Swish(nn.Module):
    """Swish activation: ``x * sigmoid(x)``."""
    def forward(self, x):
        return x * torch.sigmoid(x)
class HSigmoid(nn.Module):
    """Approximated ("hard") sigmoid: ``relu6(x + 3) / 6``."""
    def forward(self, x):
        return F.relu6(x + 3.0, inplace=True) / 6.0
class HSwish(nn.Module):
    """Approximated ("hard") swish: ``x * relu6(x + 3) / 6``.

    ``inplace`` controls whether the internal ReLU6 runs in place.
    """
    def __init__(self, inplace=False):
        super(HSwish, self).__init__()
        self.inplace = inplace
    def forward(self, x):
        return x * F.relu6(x + 3.0, inplace=self.inplace) / 6.0
def get_activation_layer(activation):
    """Resolve an activation spec into an ``nn.Module``.

    ``activation`` may be a zero-argument factory function, one of the name
    strings 'relu' / 'relu6' / 'swish' / 'hswish', or an already-built
    ``nn.Module`` (returned unchanged). Unknown names raise
    ``NotImplementedError``.
    """
    assert (activation is not None)
    if isfunction(activation):
        return activation()
    elif isinstance(activation, str):
        if activation == "relu":
            return nn.ReLU(inplace=True)
        elif activation == "relu6":
            return nn.ReLU6(inplace=True)
        elif activation == "swish":
            return Swish()
        elif activation == "hswish":
            return HSwish(inplace=True)
        else:
            raise NotImplementedError()
    else:
        assert (isinstance(activation, nn.Module))
        return activation
def conv1x1(in_channels,
            out_channels,
            stride=1,
            groups=1,
            bias=False):
    """1x1 (pointwise) version of ``nn.Conv2d``."""
    return nn.Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        stride=stride,
        groups=groups,
        bias=bias)
def conv3x3(in_channels,
            out_channels,
            stride=1,
            padding=1,
            dilation=1,
            groups=1,
            bias=False):
    """3x3 ``nn.Conv2d`` with configurable padding, dilation and groups."""
    return nn.Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=bias)
def depthwise_conv3x3(channels,
                      stride):
    """Depthwise 3x3 convolution: one filter per channel (groups == channels)."""
    return nn.Conv2d(
        in_channels=channels,
        out_channels=channels,
        kernel_size=3,
        stride=stride,
        padding=1,
        groups=channels,
        bias=False)
class ConvBlock(nn.Module):
    """Standard convolution block: Conv2d -> optional BatchNorm2d -> optional
    activation.

    ``activation`` accepts anything ``get_activation_layer`` understands
    (factory function, name string, or nn.Module); pass None to disable the
    activation.  Set ``use_bn=False`` to drop the batch normalization and
    ``bn_eps`` to tune its epsilon.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 groups=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: nn.ReLU(inplace=True))):
        super(ConvBlock, self).__init__()
        # Resolve the optional stages once so forward() only branches on flags.
        self.activate = (activation is not None)
        self.use_bn = use_bn
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        if self.use_bn:
            self.bn = nn.BatchNorm2d(
                num_features=out_channels,
                eps=bn_eps)
        if self.activate:
            self.activ = get_activation_layer(activation)
    def forward(self, x):
        x = self.conv(x)
        if self.use_bn:
            x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        return x
def conv1x1_block(in_channels,
                  out_channels,
                  stride=1,
                  padding=0,
                  groups=1,
                  bias=False,
                  bn_eps=1e-5,
                  activation=(lambda: nn.ReLU(inplace=True))):
    """1x1 ConvBlock (pointwise conv + batch norm + activation)."""
    return ConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        stride=stride,
        padding=padding,
        groups=groups,
        bias=bias,
        bn_eps=bn_eps,
        activation=activation)
def conv3x3_block(in_channels,
                  out_channels,
                  stride=1,
                  padding=1,
                  dilation=1,
                  groups=1,
                  bias=False,
                  use_bn=True,
                  bn_eps=1e-5,
                  activation=(lambda: nn.ReLU(inplace=True))):
    """3x3 ConvBlock (conv + optional batch norm + activation)."""
    return ConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
def conv5x5_block(in_channels,
                  out_channels,
                  stride=1,
                  padding=2,
                  dilation=1,
                  groups=1,
                  bias=False,
                  bn_eps=1e-5,
                  activation=(lambda: nn.ReLU(inplace=True))):
    """5x5 ConvBlock.

    Not listed in ``__all__``; used internally by ``dwconv5x5_block``.
    """
    return ConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=5,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=bias,
        bn_eps=bn_eps,
        activation=activation)
def conv7x7_block(in_channels,
                  out_channels,
                  stride=1,
                  padding=3,
                  bias=False,
                  activation=(lambda: nn.ReLU(inplace=True))):
    """7x7 ConvBlock; batch norm is always on with the default epsilon."""
    return ConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=7,
        stride=stride,
        padding=padding,
        bias=bias,
        activation=activation)
def dwconv3x3_block(in_channels,
                    out_channels,
                    stride=1,
                    padding=1,
                    dilation=1,
                    bias=False,
                    bn_eps=1e-5,
                    activation=(lambda: nn.ReLU(inplace=True))):
    """Depthwise 3x3 ConvBlock: ``conv3x3_block`` with ``groups=out_channels``."""
    return conv3x3_block(
        in_channels=in_channels,
        out_channels=out_channels,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=out_channels,
        bias=bias,
        bn_eps=bn_eps,
        activation=activation)
def dwconv5x5_block(in_channels,
                    out_channels,
                    stride=1,
                    padding=2,
                    dilation=1,
                    bias=False,
                    bn_eps=1e-5,
                    activation=(lambda: nn.ReLU(inplace=True))):
    """Depthwise 5x5 ConvBlock: ``conv5x5_block`` with ``groups=out_channels``."""
    return conv5x5_block(
        in_channels=in_channels,
        out_channels=out_channels,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=out_channels,
        bias=bias,
        bn_eps=bn_eps,
        activation=activation)
class PreConvBlock(nn.Module):
    """Pre-activation convolution block: BatchNorm2d -> (optional) ReLU ->
    Conv2d.

    If ``return_preact`` is True, ``forward`` also returns the
    pre-activation tensor (the conv input) alongside the conv output.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 bias=False,
                 return_preact=False,
                 activate=True):
        super(PreConvBlock, self).__init__()
        self.return_preact = return_preact
        self.activate = activate
        self.bn = nn.BatchNorm2d(num_features=in_channels)
        if self.activate:
            self.activ = nn.ReLU(inplace=True)
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
    def forward(self, x):
        x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        if self.return_preact:
            # Capture the tensor right before the conv for callers that need it.
            x_pre_activ = x
        x = self.conv(x)
        if self.return_preact:
            return x, x_pre_activ
        else:
            return x
def pre_conv1x1_block(in_channels,
                      out_channels,
                      stride=1,
                      bias=False,
                      return_preact=False,
                      activate=True):
    """1x1 pre-activation block (BN -> ReLU -> pointwise conv)."""
    return PreConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        stride=stride,
        padding=0,
        bias=bias,
        return_preact=return_preact,
        activate=activate)
def pre_conv3x3_block(in_channels,
                      out_channels,
                      stride=1,
                      padding=1,
                      dilation=1,
                      return_preact=False,
                      activate=True):
    """3x3 pre-activation block (BN -> ReLU -> conv)."""
    return PreConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        return_preact=return_preact,
        activate=activate)
def channel_shuffle(x,
                    groups):
    """Interleave the channels of ``x`` across ``groups`` groups.

    Factors the channel axis into (groups, channels_per_group), swaps the two
    factors, and folds them back, so channels from different groups alternate.
    """
    batch_size, num_channels, h, w = x.size()
    per_group = num_channels // groups
    grouped = x.view(batch_size, groups, per_group, h, w)
    swapped = grouped.transpose(1, 2).contiguous()
    return swapped.view(batch_size, num_channels, h, w)
class ChannelShuffle(nn.Module):
    """Module wrapper around :func:`channel_shuffle`.

    Raises ``ValueError`` at construction if ``channels`` is not divisible
    by ``groups``.
    """
    def __init__(self,
                 channels,
                 groups):
        super(ChannelShuffle, self).__init__()
        if channels % groups != 0:
            raise ValueError('channels must be divisible by groups')
        self.groups = groups
    def forward(self, x):
        return channel_shuffle(x, self.groups)
def channel_shuffle2(x,
                     groups):
    """Variant channel shuffle that factors the channel axis as
    (channels_per_group, groups) before swapping — the inverse grouping
    order of :func:`channel_shuffle`.
    """
    batch_size, num_channels, h, w = x.size()
    per_group = num_channels // groups
    regrouped = x.view(batch_size, per_group, groups, h, w)
    regrouped = torch.transpose(regrouped, 1, 2).contiguous()
    return regrouped.view(batch_size, num_channels, h, w)
class ChannelShuffle2(nn.Module):
    """Module wrapper around :func:`channel_shuffle2`.

    Raises ``ValueError`` at construction if ``channels`` is not divisible
    by ``groups``.
    """
    def __init__(self,
                 channels,
                 groups):
        super(ChannelShuffle2, self).__init__()
        if channels % groups != 0:
            raise ValueError('channels must be divisible by groups')
        self.groups = groups
    def forward(self, x):
        return channel_shuffle2(x, self.groups)
class SEBlock(nn.Module):
    """Squeeze-and-Excitation block: global average pool -> 1x1 conv
    (reduce channels by ``reduction``) -> activation -> 1x1 conv (restore)
    -> sigmoid gate, multiplied element-wise onto the input.

    ``approx_sigmoid=True`` swaps the exact sigmoid for the cheaper HSigmoid.
    """
    def __init__(self,
                 channels,
                 reduction=16,
                 approx_sigmoid=False,
                 activation=(lambda: nn.ReLU(inplace=True))):
        super(SEBlock, self).__init__()
        # NOTE(review): 'mid_cannels' is a typo for 'mid_channels' (local only).
        mid_cannels = channels // reduction
        self.pool = nn.AdaptiveAvgPool2d(output_size=1)
        self.conv1 = conv1x1(
            in_channels=channels,
            out_channels=mid_cannels,
            bias=True)
        self.activ = get_activation_layer(activation)
        self.conv2 = conv1x1(
            in_channels=mid_cannels,
            out_channels=channels,
            bias=True)
        self.sigmoid = HSigmoid() if approx_sigmoid else nn.Sigmoid()
    def forward(self, x):
        # w is a per-channel gate in (0, 1), broadcast over H and W.
        w = self.pool(x)
        w = self.conv1(w)
        w = self.activ(w)
        w = self.conv2(w)
        w = self.sigmoid(w)
        x = x * w
        return x
class IBN(nn.Module):
    """Instance-Batch Normalization: split the channels into two parts,
    apply InstanceNorm2d to one and BatchNorm2d to the other, then
    re-concatenate.

    ``first_fraction`` sets the size of the first split; ``inst_first``
    selects whether that first split gets the instance norm (True) or the
    batch norm (False).
    """
    def __init__(self,
                 channels,
                 first_fraction=0.5,
                 inst_first=True):
        super(IBN, self).__init__()
        self.inst_first = inst_first
        h1_channels = int(math.floor(channels * first_fraction))
        h2_channels = channels - h1_channels
        self.split_sections = [h1_channels, h2_channels]
        if self.inst_first:
            self.inst_norm = nn.InstanceNorm2d(
                num_features=h1_channels,
                affine=True)
            self.batch_norm = nn.BatchNorm2d(num_features=h2_channels)
        else:
            self.batch_norm = nn.BatchNorm2d(num_features=h1_channels)
            self.inst_norm = nn.InstanceNorm2d(
                num_features=h2_channels,
                affine=True)
    def forward(self, x):
        x1, x2 = torch.split(x, split_size_or_sections=self.split_sections, dim=1)
        if self.inst_first:
            x1 = self.inst_norm(x1.contiguous())
            x2 = self.batch_norm(x2.contiguous())
        else:
            x1 = self.batch_norm(x1.contiguous())
            x2 = self.inst_norm(x2.contiguous())
        x = torch.cat((x1, x2), dim=1)
        return x
class Identity(nn.Module):
    """Pass-through module: ``forward`` returns its input unchanged."""
    def __init__(self):
        super(Identity, self).__init__()
    def forward(self, x):
        return x
class DualPathSequential(nn.Sequential):
    """Sequential container whose children operate on a pair of tensors.

    The first ``first_ordinals`` and last ``last_ordinals`` children are
    'ordinal' modules applied (by default) to the first tensor only, with the
    second passed through; the remaining children receive both tensors via
    ``dual_path_scheme``.  ``return_two`` selects whether ``forward`` returns
    the pair or just the first tensor.
    """
    def __init__(self,
                 return_two=True,
                 first_ordinals=0,
                 last_ordinals=0,
                 dual_path_scheme=(lambda module, x1, x2: module(x1, x2)),
                 dual_path_scheme_ordinal=(lambda module, x1, x2: (module(x1), x2))):
        super(DualPathSequential, self).__init__()
        self.return_two = return_two
        self.first_ordinals = first_ordinals
        self.last_ordinals = last_ordinals
        self.dual_path_scheme = dual_path_scheme
        self.dual_path_scheme_ordinal = dual_path_scheme_ordinal
    def forward(self, x1, x2=None):
        length = len(self._modules.values())
        for i, module in enumerate(self._modules.values()):
            if (i < self.first_ordinals) or (i >= length - self.last_ordinals):
                x1, x2 = self.dual_path_scheme_ordinal(module, x1, x2)
            else:
                x1, x2 = self.dual_path_scheme(module, x1, x2)
        if self.return_two:
            return x1, x2
        else:
            return x1
class Concurrent(nn.Sequential):
    """Feed the same input to every child module and join the outputs along
    ``axis`` — with ``torch.cat`` by default, or ``torch.stack`` when
    ``stack=True``.
    """
    def __init__(self,
                 axis=1,
                 stack=False):
        super(Concurrent, self).__init__()
        self.axis = axis
        self.stack = stack
    def forward(self, x):
        out = []
        for module in self._modules.values():
            out.append(module(x))
        if self.stack:
            out = torch.stack(tuple(out), dim=self.axis)
        else:
            out = torch.cat(tuple(out), dim=self.axis)
        return out
class ParametricSequential(nn.Sequential):
    """Sequential container that forwards extra keyword arguments to every
    child module, in insertion order.
    """
    def __init__(self, *args):
        super(ParametricSequential, self).__init__(*args)
    def forward(self, x, **kwargs):
        for module in self._modules.values():
            x = module(x, **kwargs)
        return x
class ParametricConcurrent(nn.Sequential):
    """Concurrent container whose children also receive ``forward``'s keyword
    arguments; child outputs are concatenated along ``axis``.
    """
    def __init__(self, axis=1):
        super(ParametricConcurrent, self).__init__()
        self.axis = axis
    def forward(self, x, **kwargs):
        out = []
        for module in self._modules.values():
            out.append(module(x, **kwargs))
        out = torch.cat(tuple(out), dim=self.axis)
        return out
class Hourglass(nn.Module):
    """Hourglass block: a down path, an up path, and per-level skip modules
    whose outputs are added back in ('add' is the only supported merge).

    ``return_first_skip=True`` additionally returns the last processed skip
    output (that of ``skip_seq[0]``).
    """
    def __init__(self,
                 down_seq,
                 up_seq,
                 skip_seq,
                 merge_type="add",
                 return_first_skip=False):
        super(Hourglass, self).__init__()
        # All three branches must pair one-to-one at every depth level.
        assert (len(up_seq) == len(down_seq))
        assert (len(skip_seq) == len(down_seq))
        assert (merge_type in ["add"])
        self.merge_type = merge_type
        self.return_first_skip = return_first_skip
        self.depth = len(down_seq)
        self.down_seq = down_seq
        self.up_seq = up_seq
        self.skip_seq = skip_seq
    def forward(self, x, **kwargs):
        y = None
        # Down pass: record the input and every intermediate output.
        down_outs = [x]
        for down_module in self.down_seq._modules.values():
            x = down_module(x)
            down_outs.append(x)
        # Up pass: add the skip-transformed same-level feature, then upsample.
        for i in range(len(down_outs)):
            if i != 0:
                y = down_outs[self.depth - i]
                skip_module = self.skip_seq[self.depth - i]
                y = skip_module(y)
                if (y is not None) and (self.merge_type == "add"):
                    x = x + y
            if i != len(down_outs) - 1:
                up_module = self.up_seq[self.depth - 1 - i]
                x = up_module(x)
        if self.return_first_skip:
            return x, y
        else:
            return x
class SesquialteralHourglass(nn.Module):
    """Sesquialteral hourglass: down path 1 -> up path -> down path 2, with
    two families of skip modules bridging consecutive phases.  Skip features
    are merged by concatenation ('cat', default) or addition ('add').
    """
    def __init__(self,
                 down1_seq,
                 skip1_seq,
                 up_seq,
                 skip2_seq,
                 down2_seq,
                 merge_type="cat"):
        super(SesquialteralHourglass, self).__init__()
        # Per-level branches match in length; skip branches carry one extra
        # module applied outside the level loop.
        assert (len(down1_seq) == len(up_seq))
        assert (len(down1_seq) == len(down2_seq))
        assert (len(skip1_seq) == len(skip2_seq))
        assert (len(down1_seq) == len(skip1_seq) - 1)
        assert (merge_type in ["cat", "add"])
        self.merge_type = merge_type
        self.depth = len(down1_seq)
        self.down1_seq = down1_seq
        self.skip1_seq = skip1_seq
        self.up_seq = up_seq
        self.skip2_seq = skip2_seq
        self.down2_seq = down2_seq
    def _merge(self, x, y):
        # `y is None` is a no-op so callers can merge unconditionally.
        if y is not None:
            if self.merge_type == "cat":
                x = torch.cat((x, y), dim=1)
            elif self.merge_type == "add":
                x = x + y
        return x
    def forward(self, x, **kwargs):
        # Phase 1: first down path, storing skip1 features at every level.
        y = self.skip1_seq[0](x)
        skip1_outs = [y]
        for i in range(self.depth):
            x = self.down1_seq[i](x)
            y = self.skip1_seq[i + 1](x)
            skip1_outs.append(y)
        # Phase 2: up path from the deepest level, merging skip1 features and
        # storing skip2 features.
        x = skip1_outs[self.depth]
        y = self.skip2_seq[0](x)
        skip2_outs = [y]
        for i in range(self.depth):
            x = self.up_seq[i](x)
            y = skip1_outs[self.depth - 1 - i]
            x = self._merge(x, y)
            y = self.skip2_seq[i + 1](x)
            skip2_outs.append(y)
        # Phase 3: second down path, merging the stored skip2 features.
        x = self.skip2_seq[self.depth](x)
        for i in range(self.depth):
            x = self.down2_seq[i](x)
            y = skip2_outs[self.depth - 1 - i]
            x = self._merge(x, y)
        return x
class MultiOutputSequential(nn.Sequential):
    """Sequential chain that also collects the output of every child flagged
    with a truthy ``do_output`` attribute; returns
    ``[final_output] + flagged_outputs``.
    """
    def __init__(self):
        super(MultiOutputSequential, self).__init__()
    def forward(self, x):
        outs = []
        for module in self._modules.values():
            x = module(x)
            if hasattr(module, "do_output") and module.do_output:
                outs.append(x)
        return [x] + outs
class Flatten(nn.Module):
    """Reshape a (N, ...) tensor into a (N, -1) matrix, keeping the batch axis."""
    def forward(self, x):
        return x.view(x.size(0), -1)
| true | true |
f7251e850c38e0f28697e00d751ee3f8dca92056 | 7,888 | py | Python | dynamic_image_networks/hmdb51/training_scripts/train_resnext50_hmdb51.py | DoranLyong/dynamic-images-for-action-recognition | 06a68c2337b45c44a8c7ec50e94585a9b9615ad0 | [
"MIT"
] | 22 | 2018-09-14T00:32:41.000Z | 2020-10-23T11:19:12.000Z | dynamic_image_networks/hmdb51/training_scripts/train_resnext50_hmdb51.py | DoranLyong/dynamic-images-for-action-recognition | 06a68c2337b45c44a8c7ec50e94585a9b9615ad0 | [
"MIT"
] | 1 | 2021-04-30T04:09:40.000Z | 2021-04-30T04:09:40.000Z | dynamic_image_networks/hmdb51/training_scripts/train_resnext50_hmdb51.py | DoranLyong/dynamic-images-for-action-recognition | 06a68c2337b45c44a8c7ec50e94585a9b9615ad0 | [
"MIT"
] | 7 | 2018-11-01T02:32:09.000Z | 2020-10-03T12:19:02.000Z | # import apex - !!!! INCLUDE THIS IMPORT IF YOU WANT TO USE MIXED PRECISION TRAINING !!!!
import torch
import os
import sys
import torch.optim as optim
import torch.nn as nn
from datetime import datetime
from tqdm import tqdm
from pathlib import Path
# Make sure that the project root is in your PATH (i.e., the parent folder containing 'dynamic_image_networks').
sys.path.append(str(Path('../../..').resolve()))
# ---------------------------------------------------------------
# Model / dataset choice
# ---------------------------------------------------------------
from dynamic_image_networks.hmdb51.models.resnext50_temppool import get_model
from dynamic_image_networks.hmdb51.dataloaders.hmdb51_dataloader import get_train_loader
from dynamic_image_networks.hmdb51.utilities.calculate_training_metrics import calculate_accuracy
from dynamic_image_networks.hmdb51.utilities.logger import initialize_logger
from dynamic_image_networks.hmdb51.utilities.meters import AverageMeter
def main():
    """Train ResNeXt-50 (temporal pooling) on HMDB51 fold 1.

    Builds the model, optimizer (SGD + ReduceLROnPlateau), and dataloaders,
    then runs ``num_epochs`` train/validation epochs, checkpointing the
    weights whenever validation loss or validation accuracy improves.
    Requires a CUDA device ("cuda:0").
    """
    # ============================================================================================
    # Setup
    # ============================================================================================
    # ---------------------------------------------------------------
    # Random seeds
    # ---------------------------------------------------------------
    torch.manual_seed(590238490)
    torch.backends.cudnn.benchmark = True
    # ---------------------------------------------------------------
    # GPU
    # ---------------------------------------------------------------
    device = torch.device("cuda:0")
    fp16 = False
    if fp16:
        print('!!! MIXED PRECISION TRAINING IS ENABLED -- ONLY USE FOR VOLTA AND TURING GPUs!!!')
    # ---------------------------------------------------------------
    # Training settings
    # ---------------------------------------------------------------
    batch_size = 32
    num_epochs = 60
    num_workers = 6
    max_segment_size = 10
    save_best_models = True
    image_augmentation = False
    # ----------------------------------------------------------------------------
    # Get the model
    # ----------------------------------------------------------------------------
    net = get_model(num_classes=51)
    net.to(device)
    # ----------------------------------------------------------------------------
    # Initialize optimizer and loss function
    # ----------------------------------------------------------------------------
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=3e-3)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=5, verbose=True)
    if fp16:
        net, optimizer = apex.amp.initialize(net, optimizer, opt_level="O1")
    # ---------------------------------------------------------------
    # Logging set-up
    # ---------------------------------------------------------------
    # File-name (this script's basename, without the .py suffix).
    file_name = ''.join(os.path.basename(__file__).split('.py')[:-1])
    logger = initialize_logger(file_name, log_dir='./logs/')
    # ============================================================================================
    # Train
    # ============================================================================================
    time_start = datetime.now()
    fold_i = 1
    # ---------------------------------------------------------------
    # Load dataloaders
    # ---------------------------------------------------------------
    # NOTE(review): 'image_augmenation' matches the dataloader's (misspelled)
    # keyword parameter — do not "fix" it here without changing the loader.
    train_loader, validation_loader = get_train_loader(fold_id=fold_i,
                                                       batch_size=batch_size,
                                                       num_workers=num_workers,
                                                       image_augmenation=image_augmentation,
                                                       segment_size=max_segment_size)
    logger.info('Starting Training on Fold: {}\n'.format(fold_i))
    best_val_loss = float('inf')
    best_val_acc = 0
    for epoch_i in range(num_epochs):
        # ---------------------------------------------------------------
        # Training and validation loop
        # ---------------------------------------------------------------
        avg_loss, avg_acc = training_loop('train', net, device, train_loader,
                                          optimizer, criterion, fp16)
        avg_val_loss, avg_val_acc = training_loop('val', net, device, validation_loader,
                                                  None, criterion, fp16)
        if scheduler:
            scheduler.step(avg_val_loss)
        # ---------------------------------------------------------------
        # Track the best model (separate checkpoints for best loss / best acc)
        # ---------------------------------------------------------------
        if avg_val_loss < best_val_loss:
            best_val_loss = avg_val_loss
            if save_best_models:
                logger.info('Saving model because of best loss...')
                os.makedirs('./saved_models/', exist_ok=True)
                torch.save(net.state_dict(),
                           './saved_models/{}_fold_{}_best_loss_state.pt'.format(file_name, fold_i))
        if avg_val_acc > best_val_acc:
            best_val_acc = avg_val_acc
            if save_best_models:
                logger.info('Saving model because of best acc...')
                os.makedirs('./saved_models/', exist_ok=True)
                torch.save(net.state_dict(),
                           './saved_models/{}_fold_{}_best_acc_state.pt'.format(file_name, fold_i))
        # ---------------------------------------------------------------
        # Log the training status
        # ---------------------------------------------------------------
        time_elapsed = datetime.now() - time_start
        output_msg = 'Fold {}, Epoch: {}/{}\n' \
                     '---------------------\n' \
                     'train loss: {:.6f}, val loss: {:.6f}\n' \
                     'train acc: {:.6f}, val acc: {:.6f}\n' \
                     'best val loss: {:.6f}, best val acc: {:.6f}\n' \
                     'time elapsed: {}\n'. \
            format(fold_i, epoch_i, num_epochs - 1,
                   avg_loss, avg_val_loss,
                   avg_acc, avg_val_acc,
                   best_val_loss, best_val_acc,
                   str(time_elapsed).split('.')[0])
        logger.info(output_msg)
    logger.info('Finished Training')
def training_loop(phase, net, device, dataloader, optimizer, criterion, fp16):
    """Run one epoch of training ('train') or evaluation ('val').

    Returns ``(average_loss, average_accuracy)`` over the dataloader.
    ``optimizer`` is only used when ``phase == 'train'`` (may be None
    otherwise); ``fp16`` enables apex mixed-precision loss scaling.
    """
    loss_meter = AverageMeter()
    acc_meter = AverageMeter()
    # Set the model into the appropriate mode.
    if phase == 'train':
        net.train()
    elif phase == 'val':
        net.eval()
    else:
        raise ValueError
    # Enable gradient accumulation only for the training phase.
    with torch.set_grad_enabled(phase == 'train'):
        for i, data in tqdm(enumerate(dataloader), total=len(dataloader)):
            x, y, = data
            x, y = x.to(device, non_blocking=True), y.to(device, non_blocking=True)
            # Prediction.
            y_pred = net(x).float()
            # Loss and step.
            loss = criterion(y_pred, y)
            if phase == 'train':
                optimizer.zero_grad()
                if fp16 is True:
                    with apex.amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()
                optimizer.step()
            # Metrics (weighted by batch size so partial batches average correctly).
            batch_size = len(y)
            loss_meter.add(loss.item(), batch_size)
            acc_meter.add(calculate_accuracy(y_pred, y), batch_size)
    avg_loss = loss_meter.get_average()
    avg_acc = acc_meter.get_average()
    return avg_loss, avg_acc
# Script entry point: start training when executed directly.
if __name__ == '__main__':
    main()
| 41.083333 | 112 | 0.439021 |
import torch
import os
import sys
import torch.optim as optim
import torch.nn as nn
from datetime import datetime
from tqdm import tqdm
from pathlib import Path
sys.path.append(str(Path('../../..').resolve()))
from dynamic_image_networks.hmdb51.models.resnext50_temppool import get_model
from dynamic_image_networks.hmdb51.dataloaders.hmdb51_dataloader import get_train_loader
from dynamic_image_networks.hmdb51.utilities.calculate_training_metrics import calculate_accuracy
from dynamic_image_networks.hmdb51.utilities.logger import initialize_logger
from dynamic_image_networks.hmdb51.utilities.meters import AverageMeter
def main():
    """Train ResNeXt-50 (temporal pooling) on HMDB51 fold 1.

    Builds the model, optimizer (SGD + ReduceLROnPlateau), and dataloaders,
    then runs ``num_epochs`` train/validation epochs, checkpointing the
    weights whenever validation loss or validation accuracy improves.
    Requires a CUDA device ("cuda:0").
    """
    # Reproducibility / cudnn autotuning.
    torch.manual_seed(590238490)
    torch.backends.cudnn.benchmark = True
    device = torch.device("cuda:0")
    fp16 = False
    if fp16:
        print('!!! MIXED PRECISION TRAINING IS ENABLED -- ONLY USE FOR VOLTA AND TURING GPUs!!!')
    # Training hyperparameters.
    batch_size = 32
    num_epochs = 60
    num_workers = 6
    max_segment_size = 10
    save_best_models = True
    image_augmentation = False
    # Model, loss, optimizer, LR scheduler.
    net = get_model(num_classes=51)
    net.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=3e-3)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=5, verbose=True)
    if fp16:
        net, optimizer = apex.amp.initialize(net, optimizer, opt_level="O1")
    # Log under this script's basename (without the .py suffix).
    file_name = ''.join(os.path.basename(__file__).split('.py')[:-1])
    logger = initialize_logger(file_name, log_dir='./logs/')
    time_start = datetime.now()
    fold_i = 1
    # NOTE(review): 'image_augmenation' matches the dataloader's (misspelled)
    # keyword parameter — do not "fix" it here without changing the loader.
    train_loader, validation_loader = get_train_loader(fold_id=fold_i,
                                                       batch_size=batch_size,
                                                       num_workers=num_workers,
                                                       image_augmenation=image_augmentation,
                                                       segment_size=max_segment_size)
    logger.info('Starting Training on Fold: {}\n'.format(fold_i))
    best_val_loss = float('inf')
    best_val_acc = 0
    for epoch_i in range(num_epochs):
        # One training epoch, then one validation epoch.
        avg_loss, avg_acc = training_loop('train', net, device, train_loader,
                                          optimizer, criterion, fp16)
        avg_val_loss, avg_val_acc = training_loop('val', net, device, validation_loader,
                                                  None, criterion, fp16)
        if scheduler:
            scheduler.step(avg_val_loss)
        # Checkpoint on best validation loss / best validation accuracy.
        if avg_val_loss < best_val_loss:
            best_val_loss = avg_val_loss
            if save_best_models:
                logger.info('Saving model because of best loss...')
                os.makedirs('./saved_models/', exist_ok=True)
                torch.save(net.state_dict(),
                           './saved_models/{}_fold_{}_best_loss_state.pt'.format(file_name, fold_i))
        if avg_val_acc > best_val_acc:
            best_val_acc = avg_val_acc
            if save_best_models:
                logger.info('Saving model because of best acc...')
                os.makedirs('./saved_models/', exist_ok=True)
                torch.save(net.state_dict(),
                           './saved_models/{}_fold_{}_best_acc_state.pt'.format(file_name, fold_i))
        # Epoch summary.
        time_elapsed = datetime.now() - time_start
        output_msg = 'Fold {}, Epoch: {}/{}\n' \
                     '---------------------\n' \
                     'train loss: {:.6f}, val loss: {:.6f}\n' \
                     'train acc: {:.6f}, val acc: {:.6f}\n' \
                     'best val loss: {:.6f}, best val acc: {:.6f}\n' \
                     'time elapsed: {}\n'. \
            format(fold_i, epoch_i, num_epochs - 1,
                   avg_loss, avg_val_loss,
                   avg_acc, avg_val_acc,
                   best_val_loss, best_val_acc,
                   str(time_elapsed).split('.')[0])
        logger.info(output_msg)
    logger.info('Finished Training')
def training_loop(phase, net, device, dataloader, optimizer, criterion, fp16):
    """Run one epoch of training ('train') or evaluation ('val').

    Returns ``(average_loss, average_accuracy)`` over the dataloader.
    ``optimizer`` is only used when ``phase == 'train'`` (may be None
    otherwise); ``fp16`` enables apex mixed-precision loss scaling.
    """
    loss_meter = AverageMeter()
    acc_meter = AverageMeter()
    # Select train/eval mode; any other phase is a programming error.
    if phase == 'train':
        net.train()
    elif phase == 'val':
        net.eval()
    else:
        raise ValueError
    # Gradients are only tracked during the training phase.
    with torch.set_grad_enabled(phase == 'train'):
        for i, data in tqdm(enumerate(dataloader), total=len(dataloader)):
            x, y, = data
            x, y = x.to(device, non_blocking=True), y.to(device, non_blocking=True)
            y_pred = net(x).float()
            loss = criterion(y_pred, y)
            if phase == 'train':
                optimizer.zero_grad()
                if fp16 is True:
                    with apex.amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()
                optimizer.step()
            # Weight the running averages by batch size so a smaller final
            # batch does not skew the epoch metrics.
            batch_size = len(y)
            loss_meter.add(loss.item(), batch_size)
            acc_meter.add(calculate_accuracy(y_pred, y), batch_size)
    avg_loss = loss_meter.get_average()
    avg_acc = acc_meter.get_average()
    return avg_loss, avg_acc
# Script entry point: start training when executed directly.
if __name__ == '__main__':
    main()
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.