| field | value |
|---|---|
| blob_id | 4acaa351f88448965dc205002d3a57d51c5f629c |
| directory_id | 3daa53a2190f365ee2e2acae39ca4e84919f2f50 |
| path | /test/functional/tests.py |
| content_id | ff3cf5c934e20a0e67c88c72186671bdeea8b867 |
| detected_licenses | ["Apache-2.0"] |
| license_type | permissive |
| repo_name | openstack/swift |
| snapshot_id | 4c8e4a14c1c6f7efb049f983ede28e89bd2e9140 |
| revision_id | f06e5369579599648cc78e4b556887bc6d978c2b |
| branch_name | refs/heads/master |
| visit_date | 2023-08-28T15:04:33.200849 |
| revision_date | 2023-08-24T20:35:07 |
| committer_date | 2023-08-24T21:05:48 |
| github_id | 790,019 |
| star_events_count | 2,370 |
| fork_events_count | 957 |
| gha_license_id | Apache-2.0 |
| gha_event_created_at | 2023-06-22T02:45:53 |
| gha_created_at | 2010-07-22T01:50:07 |
| gha_language | Python |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 125,316 |
| extension | py |
| filename | tests.py |
| content | (Python source below) |

#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import io
import locale
import random
import six
from six.moves import urllib
import time
import unittest
import uuid
from copy import deepcopy
import eventlet
from swift.common.http import is_success, is_client_error
from swift.common.swob import normalize_etag
from swift.common.utils import md5
from email.utils import parsedate
if six.PY2:
from email.parser import FeedParser
else:
from email.parser import BytesFeedParser as FeedParser
import mock
from test.functional import normalized_urls, load_constraint, cluster_info
from test.functional import check_response, retry
import test.functional as tf
from test.functional.swift_test_client import Account, Connection, File, \
ResponseError, SkipTest
def setUpModule():
tf.setup_package()
def tearDownModule():
tf.teardown_package()
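# Name factories for test containers/objects. Utils.create_name defaults to
# the ASCII factory; the Base2 mixin below swaps in the UTF-8 factory to
# rerun whole suites with non-ASCII names.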
class Utils(object):
@classmethod
def create_ascii_name(cls, length=None):
return uuid.uuid4().hex
@classmethod
def create_utf8_name(cls, length=None):
if length is None:
length = 15
else:
length = int(length)
utf8_chars = u'\uF10F\uD20D\uB30B\u9409\u8508\u5605\u3703\u1801'\
u'\u0900\uF110\uD20E\uB30C\u940A\u8509\u5606\u3704'\
u'\u1802\u0901\uF111\uD20F\uB30D\u940B\u850A\u5607'\
u'\u3705\u1803\u0902\uF112\uD210\uB30E\u940C\u850B'\
u'\u5608\u3706\u1804\u0903\u03A9\u2603'
ustr = u''.join([random.choice(utf8_chars)
for x in range(length)])
if six.PY2:
return ustr.encode('utf-8')
return ustr
create_name = create_ascii_name
class BaseEnv(object):
account = conn = None
@classmethod
def setUp(cls):
cls.conn = Connection(tf.config)
cls.conn.authenticate()
cls.account = Account(cls.conn, tf.config.get('account',
tf.config['username']))
cls.account.delete_containers()
@classmethod
def tearDown(cls):
pass
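# Base binds an environment class to unittest's class-level fixtures:
# setUpClass authenticates a connection and empties the test account once
# per test class.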
class Base(unittest.TestCase):
env = BaseEnv
@classmethod
def tearDownClass(cls):
cls.env.tearDown()
@classmethod
def setUpClass(cls):
cls.env.setUp()
def setUp(self):
if tf.in_process:
tf.skip_if_no_xattrs()
def assert_body(self, body):
if not isinstance(body, bytes):
body = body.encode('utf-8')
response_body = self.env.conn.response.read()
self.assertEqual(response_body, body,
'Body returned: %s' % (response_body))
def assert_status(self, status_or_statuses):
self.assertTrue(
self.env.conn.response.status == status_or_statuses or
(hasattr(status_or_statuses, '__iter__') and
self.env.conn.response.status in status_or_statuses),
'Status returned: %d Expected: %s' %
(self.env.conn.response.status, status_or_statuses))
def assert_header(self, header_name, expected_value):
try:
actual_value = self.env.conn.response.getheader(header_name)
except KeyError:
self.fail(
'Expected header name %r not found in response.' % header_name)
self.assertEqual(expected_value, actual_value)
def assert_etag(self, unquoted_value):
if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
expected = '"%s"' % unquoted_value
else:
expected = unquoted_value
self.assert_header('etag', expected)
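# Mixin that reruns an entire test class with UTF-8 names, restoring the
# ASCII name factory afterwards.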
class Base2(object):
@classmethod
def setUpClass(cls):
Utils.create_name = Utils.create_utf8_name
super(Base2, cls).setUpClass()
@classmethod
def tearDownClass(cls):
Utils.create_name = Utils.create_ascii_name
class TestAccountEnv(BaseEnv):
@classmethod
def setUp(cls):
super(TestAccountEnv, cls).setUp()
cls.containers = []
for i in range(10):
cont = cls.account.container(Utils.create_name())
if not cont.create():
raise ResponseError(cls.conn.response)
cls.containers.append(cont)
class TestAccountDev(Base):
env = TestAccountEnv
class TestAccountDevUTF8(Base2, TestAccountDev):
pass
class TestAccount(Base):
env = TestAccountEnv
def testNoAuthToken(self):
self.assertRaises(ResponseError, self.env.account.info,
cfg={'no_auth_token': True})
self.assert_status([401, 412])
self.assertRaises(ResponseError, self.env.account.containers,
cfg={'no_auth_token': True})
self.assert_status([401, 412])
def testInvalidUTF8Path(self):
valid_utf8 = Utils.create_utf8_name()
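        # reversing the encoded bytes of a multi-byte UTF-8 name reliably
        # yields an invalid UTF-8 sequence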
if six.PY2:
invalid_utf8 = valid_utf8[::-1]
else:
invalid_utf8 = (valid_utf8.encode('utf8')[::-1]).decode(
'utf-8', 'surrogateescape')
container = self.env.account.container(invalid_utf8)
self.assertFalse(container.create(cfg={'no_path_quote': True}))
self.assert_status(412)
self.assert_body('Invalid UTF8 or contains NULL')
def testVersionOnlyPath(self):
self.env.account.conn.make_request('PUT',
cfg={'version_only_path': True})
self.assert_status(412)
self.assert_body('Bad URL')
def testInvalidPath(self):
was_path = self.env.account.conn.storage_path
        if normalized_urls:
self.env.account.conn.storage_path = '/'
else:
self.env.account.conn.storage_path = "/%s" % was_path
try:
self.env.account.conn.make_request('GET')
self.assert_status(404)
finally:
self.env.account.conn.storage_path = was_path
def testPUTError(self):
if load_constraint('allow_account_management'):
raise SkipTest("Allow account management is enabled")
self.env.account.conn.make_request('PUT')
self.assert_status([403, 405])
def testAccountHead(self):
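        # account stats are updated asynchronously, so retry briefly until
        # the container count converges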
try_count = 0
while try_count < 5:
try_count += 1
info = self.env.account.info()
for field in ['object_count', 'container_count', 'bytes_used']:
self.assertGreaterEqual(info[field], 0)
if info['container_count'] == len(self.env.containers):
break
if try_count < 5:
time.sleep(1)
self.assertEqual(info['container_count'], len(self.env.containers))
self.assert_status(204)
def testContainerSerializedInfo(self):
container_info = {}
for container in self.env.containers:
info = {'bytes': 0}
info['count'] = random.randint(10, 30)
for i in range(info['count']):
file_item = container.file(Utils.create_name())
bytes = random.randint(1, 32768)
file_item.write_random(bytes)
info['bytes'] += bytes
container_info[container.name] = info
for format_type in ['json', 'xml']:
for a in self.env.account.containers(
parms={'format': format_type}):
self.assertGreaterEqual(a['count'], 0)
self.assertGreaterEqual(a['bytes'], 0)
headers = dict((k.lower(), v)
for k, v in self.env.conn.response.getheaders())
if format_type == 'json':
self.assertEqual(headers['content-type'],
'application/json; charset=utf-8')
elif format_type == 'xml':
self.assertEqual(headers['content-type'],
'application/xml; charset=utf-8')
def testListingLimit(self):
limit = load_constraint('account_listing_limit')
        for lim in (1, 100, limit // 2, limit - 1, limit, limit + 1,
                    limit * 2):
p = {'limit': lim}
if lim <= limit:
self.assertLessEqual(len(self.env.account.containers(parms=p)),
lim)
self.assert_status(200)
else:
self.assertRaises(ResponseError,
self.env.account.containers, parms=p)
self.assert_status(412)
def testContainerListing(self):
a = sorted([c.name for c in self.env.containers])
for format_type in [None, 'json', 'xml']:
b = self.env.account.containers(parms={'format': format_type})
if isinstance(b[0], dict):
b = [x['name'] for x in b]
self.assertEqual(a, b)
def testListDelimiter(self):
delimiter = '-'
containers = ['test', delimiter.join(['test', 'bar']),
delimiter.join(['test', 'foo'])]
for c in containers:
cont = self.env.account.container(c)
self.assertTrue(cont.create())
results = self.env.account.containers(parms={'delimiter': delimiter})
expected = ['test', 'test-']
results = [r for r in results if r in expected]
self.assertEqual(expected, results)
results = self.env.account.containers(parms={'delimiter': delimiter,
'reverse': 'yes'})
expected.reverse()
results = [r for r in results if r in expected]
self.assertEqual(expected, results)
def testListMultiCharDelimiter(self):
delimiter = '-&'
containers = ['test', delimiter.join(['test', 'bar']),
delimiter.join(['test', 'foo'])]
for c in containers:
cont = self.env.account.container(c)
self.assertTrue(cont.create())
results = self.env.account.containers(parms={'delimiter': delimiter})
expected = ['test', 'test-&']
results = [r for r in results if r in expected]
self.assertEqual(expected, results)
results = self.env.account.containers(parms={'delimiter': delimiter,
'reverse': 'yes'})
expected.reverse()
results = [r for r in results if r in expected]
self.assertEqual(expected, results)
def testListDelimiterAndPrefix(self):
delimiter = 'a'
containers = ['bar', 'bazar']
for c in containers:
cont = self.env.account.container(c)
self.assertTrue(cont.create())
results = self.env.account.containers(parms={'delimiter': delimiter,
'prefix': 'ba'})
expected = ['bar', 'baza']
results = [r for r in results if r in expected]
self.assertEqual(expected, results)
results = self.env.account.containers(parms={'delimiter': delimiter,
'prefix': 'ba',
'reverse': 'yes'})
expected.reverse()
results = [r for r in results if r in expected]
self.assertEqual(expected, results)
def testContainerListingLastModified(self):
expected = {}
for container in self.env.containers:
res = container.info()
expected[container.name] = time.mktime(
parsedate(res['last_modified']))
for format_type in ['json', 'xml']:
actual = {}
containers = self.env.account.containers(
parms={'format': format_type})
if isinstance(containers[0], dict):
for container in containers:
self.assertIn('name', container) # sanity
self.assertIn('last_modified', container) # sanity
                    # ceil to the next whole second by hand
                    # (an easier way would be nice!)
datetime_str, micro_sec_str = \
container['last_modified'].split('.')
timestamp = time.mktime(
time.strptime(datetime_str,
"%Y-%m-%dT%H:%M:%S"))
if int(micro_sec_str):
timestamp += 1
actual[container['name']] = timestamp
self.assertEqual(expected, actual)
def testInvalidAuthToken(self):
hdrs = {'X-Auth-Token': 'bogus_auth_token'}
self.assertRaises(ResponseError, self.env.account.info, hdrs=hdrs)
self.assert_status(401)
def testLastContainerMarker(self):
for format_type in [None, 'json', 'xml']:
containers = self.env.account.containers(parms={
'format': format_type})
self.assertEqual(len(containers), len(self.env.containers))
self.assert_status(200)
marker = (containers[-1] if format_type is None
else containers[-1]['name'])
containers = self.env.account.containers(
parms={'format': format_type, 'marker': marker})
self.assertEqual(len(containers), 0)
if format_type is None:
self.assert_status(204)
else:
self.assert_status(200)
def testMarkerLimitContainerList(self):
for format_type in [None, 'json', 'xml']:
for marker in ['0', 'A', 'I', 'R', 'Z', 'a', 'i', 'r', 'z',
'abc123', 'mnop', 'xyz']:
limit = random.randint(2, 9)
containers = self.env.account.containers(
parms={'format': format_type,
'marker': marker,
'limit': limit})
self.assertLessEqual(len(containers), limit)
if containers:
if isinstance(containers[0], dict):
containers = [x['name'] for x in containers]
self.assertGreater(locale.strcoll(containers[0], marker),
0)
def testContainersOrderedByName(self):
for format_type in [None, 'json', 'xml']:
containers = self.env.account.containers(
parms={'format': format_type})
if isinstance(containers[0], dict):
containers = [x['name'] for x in containers]
self.assertEqual(sorted(containers, key=locale.strxfrm),
containers)
def testQuotedWWWAuthenticateHeader(self):
# check that the www-authenticate header value with the swift realm
# is correctly quoted.
conn = Connection(tf.config)
conn.authenticate()
inserted_html = '<b>Hello World'
hax = 'AUTH_haxx"\nContent-Length: %d\n\n%s' % (len(inserted_html),
inserted_html)
quoted_hax = urllib.parse.quote(hax)
conn.connection.request('GET', '/v1/' + quoted_hax, None, {})
resp = conn.connection.getresponse()
resp_headers = {}
for h, v in resp.getheaders():
h = h.lower()
if h in resp_headers:
# py2 would do this for us, but py3 apparently keeps them
# separate? Not sure which I like more...
resp_headers[h] += ',' + v
else:
resp_headers[h] = v
self.assertIn('www-authenticate', resp_headers)
actual = resp_headers['www-authenticate']
expected = 'Swift realm="%s"' % quoted_hax
# other middleware e.g. auth_token may also set www-authenticate
# headers in which case actual values will be a comma separated list.
# check that expected value is among the actual values
self.assertIn(expected, actual)
class TestAccountUTF8(Base2, TestAccount):
pass
class TestAccountNoContainers(Base):
def testGetRequest(self):
for format_type in [None, 'json', 'xml']:
self.assertFalse(self.env.account.containers(
parms={'format': format_type}))
if format_type is None:
self.assert_status(204)
else:
self.assert_status(200)
class TestAccountNoContainersUTF8(Base2, TestAccountNoContainers):
pass
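# Mixed-case names exercise byte-order sorting: uppercase letters collate
# before lowercase ones, so e.g. 'B2...' sorts ahead of 'a1...'.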
class TestAccountSortingEnv(BaseEnv):
@classmethod
def setUp(cls):
super(TestAccountSortingEnv, cls).setUp()
postfix = Utils.create_name()
cls.cont_items = ('a1', 'a2', 'A3', 'b1', 'B2', 'a10', 'b10', 'zz')
cls.cont_items = ['%s%s' % (x, postfix) for x in cls.cont_items]
for container in cls.cont_items:
c = cls.account.container(container)
if not c.create():
raise ResponseError(cls.conn.response)
class TestAccountSorting(Base):
env = TestAccountSortingEnv
def testAccountContainerListSorting(self):
# name (byte order) sorting.
cont_list = sorted(self.env.cont_items)
for reverse in ('false', 'no', 'off', '', 'garbage'):
cont_listing = self.env.account.containers(
parms={'reverse': reverse})
self.assert_status(200)
self.assertEqual(cont_list, cont_listing,
'Expected %s but got %s with reverse param %r'
% (cont_list, cont_listing, reverse))
def testAccountContainerListSortingReverse(self):
# name (byte order) sorting.
cont_list = sorted(self.env.cont_items)
cont_list.reverse()
for reverse in ('true', '1', 'yes', 'on', 't', 'y'):
cont_listing = self.env.account.containers(
parms={'reverse': reverse})
self.assert_status(200)
self.assertEqual(cont_list, cont_listing,
'Expected %s but got %s with reverse param %r'
% (cont_list, cont_listing, reverse))
def testAccountContainerListSortingByPrefix(self):
cont_list = sorted(c for c in self.env.cont_items if c.startswith('a'))
cont_list.reverse()
cont_listing = self.env.account.containers(parms={
'reverse': 'on', 'prefix': 'a'})
self.assert_status(200)
self.assertEqual(cont_list, cont_listing)
def testAccountContainerListSortingByMarkersExclusive(self):
first_item = self.env.cont_items[3] # 'b1' + postfix
last_item = self.env.cont_items[4] # 'B2' + postfix
cont_list = sorted(c for c in self.env.cont_items
if last_item < c < first_item)
cont_list.reverse()
cont_listing = self.env.account.containers(parms={
'reverse': 'on', 'marker': first_item, 'end_marker': last_item})
self.assert_status(200)
self.assertEqual(cont_list, cont_listing)
def testAccountContainerListSortingByMarkersInclusive(self):
first_item = self.env.cont_items[3] # 'b1' + postfix
last_item = self.env.cont_items[4] # 'B2' + postfix
cont_list = sorted(c for c in self.env.cont_items
if last_item <= c <= first_item)
cont_list.reverse()
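        # markers are exclusive; first_item + '\x00' is the smallest name
        # above first_item, and decrementing the last byte of last_item
        # gives a name just below it, so both endpoints end up included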
cont_listing = self.env.account.containers(parms={
'reverse': 'on', 'marker': first_item + '\x00',
'end_marker': last_item[:-1] + chr(ord(last_item[-1]) - 1)})
self.assert_status(200)
self.assertEqual(cont_list, cont_listing)
def testAccountContainerListSortingByReversedMarkers(self):
cont_listing = self.env.account.containers(parms={
'reverse': 'on', 'marker': 'B', 'end_marker': 'b1'})
self.assert_status(204)
self.assertEqual([], cont_listing)
class TestContainerEnv(BaseEnv):
@classmethod
def setUp(cls):
super(TestContainerEnv, cls).setUp()
cls.container = cls.account.container(Utils.create_name())
if not cls.container.create():
raise ResponseError(cls.conn.response)
cls.file_count = 10
cls.file_size = 128
cls.files = list()
for x in range(cls.file_count):
file_item = cls.container.file(Utils.create_name())
file_item.write_random(cls.file_size)
cls.files.append(file_item.name)
class TestContainerDev(Base):
env = TestContainerEnv
class TestContainerDevUTF8(Base2, TestContainerDev):
pass
class TestContainer(Base):
env = TestContainerEnv
def testContainerNameLimit(self):
limit = load_constraint('max_container_name_length')
for lim in (limit - 100, limit - 10, limit - 1, limit,
limit + 1, limit + 10, limit + 100):
cont = self.env.account.container('a' * lim)
if lim <= limit:
self.assertTrue(cont.create())
self.assert_status((201, 202))
else:
self.assertFalse(cont.create())
self.assert_status(400)
def testFileThenContainerDelete(self):
cont = self.env.account.container(Utils.create_name())
self.assertTrue(cont.create())
file_item = cont.file(Utils.create_name())
self.assertTrue(file_item.write_random())
self.assertTrue(file_item.delete())
self.assert_status(204)
self.assertNotIn(file_item.name, cont.files())
self.assertTrue(cont.delete())
self.assert_status(204)
self.assertNotIn(cont.name, self.env.account.containers())
def testFileListingLimitMarkerPrefix(self):
cont = self.env.account.container(Utils.create_name())
self.assertTrue(cont.create())
files = sorted([Utils.create_name() for x in range(10)])
for f in files:
file_item = cont.file(f)
self.assertTrue(file_item.write_random())
for i in range(len(files)):
f = files[i]
for j in range(1, len(files) - i):
self.assertEqual(cont.files(parms={'limit': j, 'marker': f}),
files[i + 1: i + j + 1])
self.assertEqual(cont.files(parms={'marker': f}), files[i + 1:])
self.assertEqual(cont.files(parms={'marker': f, 'prefix': f}), [])
self.assertEqual(cont.files(parms={'prefix': f}), [f])
def testPrefixAndLimit(self):
load_constraint('container_listing_limit')
cont = self.env.account.container(Utils.create_name())
self.assertTrue(cont.create())
prefix_file_count = 10
limit_count = 2
prefixs = ['alpha/', 'beta/', 'kappa/']
prefix_files = {}
for prefix in prefixs:
prefix_files[prefix] = []
for i in range(prefix_file_count):
file_item = cont.file(prefix + Utils.create_name())
file_item.write()
prefix_files[prefix].append(file_item.name)
for format_type in [None, 'json', 'xml']:
for prefix in prefixs:
files = cont.files(parms={'prefix': prefix,
'format': format_type})
if isinstance(files[0], dict):
files = [x.get('name', x.get('subdir')) for x in files]
self.assertEqual(files, sorted(prefix_files[prefix]))
for format_type in [None, 'json', 'xml']:
for prefix in prefixs:
files = cont.files(parms={'limit': limit_count,
'prefix': prefix,
'format': format_type})
if isinstance(files[0], dict):
files = [x.get('name', x.get('subdir')) for x in files]
self.assertEqual(len(files), limit_count)
for file_item in files:
self.assertTrue(file_item.startswith(prefix))
def testListDelimiter(self):
cont = self.env.account.container(Utils.create_name())
self.assertTrue(cont.create())
delimiter = '-'
files = ['test', delimiter.join(['test', 'bar']),
delimiter.join(['test', 'foo'])]
for f in files:
file_item = cont.file(f)
self.assertTrue(file_item.write_random())
for format_type in [None, 'json', 'xml']:
results = cont.files(parms={'format': format_type})
if isinstance(results[0], dict):
results = [x.get('name', x.get('subdir')) for x in results]
self.assertEqual(results, ['test', 'test-bar', 'test-foo'])
results = cont.files(parms={'delimiter': delimiter,
'format': format_type})
if isinstance(results[0], dict):
results = [x.get('name', x.get('subdir')) for x in results]
self.assertEqual(results, ['test', 'test-'])
results = cont.files(parms={'delimiter': delimiter,
'format': format_type,
'reverse': 'yes'})
if isinstance(results[0], dict):
results = [x.get('name', x.get('subdir')) for x in results]
self.assertEqual(results, ['test-', 'test'])
def testListMultiCharDelimiter(self):
cont = self.env.account.container(Utils.create_name())
self.assertTrue(cont.create())
delimiter = '-&'
files = ['test', delimiter.join(['test', 'bar']),
delimiter.join(['test', 'foo']), "test-'baz"]
for f in files:
file_item = cont.file(f)
self.assertTrue(file_item.write_random())
for format_type in [None, 'json', 'xml']:
results = cont.files(parms={'format': format_type})
if isinstance(results[0], dict):
results = [x.get('name', x.get('subdir')) for x in results]
self.assertEqual(results, ['test', 'test-&bar', 'test-&foo',
"test-'baz"])
results = cont.files(parms={'delimiter': delimiter,
'format': format_type})
if isinstance(results[0], dict):
results = [x.get('name', x.get('subdir')) for x in results]
self.assertEqual(results, ['test', 'test-&', "test-'baz"])
results = cont.files(parms={'delimiter': delimiter,
'format': format_type,
'reverse': 'yes'})
if isinstance(results[0], dict):
results = [x.get('name', x.get('subdir')) for x in results]
self.assertEqual(results, ["test-'baz", 'test-&', 'test'])
def testListDelimiterAndPrefix(self):
cont = self.env.account.container(Utils.create_name())
self.assertTrue(cont.create())
delimiter = 'a'
files = ['bar', 'bazar']
for f in files:
file_item = cont.file(f)
self.assertTrue(file_item.write_random())
results = cont.files(parms={'delimiter': delimiter, 'prefix': 'ba'})
self.assertEqual(results, ['bar', 'baza'])
results = cont.files(parms={'delimiter': delimiter,
'prefix': 'ba',
'reverse': 'yes'})
self.assertEqual(results, ['baza', 'bar'])
def testLeadingDelimiter(self):
cont = self.env.account.container(Utils.create_name())
self.assertTrue(cont.create())
delimiter = '/'
files = ['test', delimiter.join(['', 'test', 'bar']),
delimiter.join(['', 'test', 'bar', 'foo'])]
for f in files:
file_item = cont.file(f)
self.assertTrue(file_item.write_random())
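        # all names with a leading '/' collapse into the single subdir '/'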
results = cont.files(parms={'delimiter': delimiter})
self.assertEqual(results, [delimiter, 'test'])
def testCreate(self):
cont = self.env.account.container(Utils.create_name())
self.assertTrue(cont.create())
self.assert_status(201)
self.assertIn(cont.name, self.env.account.containers())
def testContainerFileListOnContainerThatDoesNotExist(self):
for format_type in [None, 'json', 'xml']:
container = self.env.account.container(Utils.create_name())
self.assertRaises(ResponseError, container.files,
parms={'format': format_type})
self.assert_status(404)
def testUtf8Container(self):
valid_utf8 = Utils.create_utf8_name()
if six.PY2:
invalid_utf8 = valid_utf8[::-1]
else:
invalid_utf8 = (valid_utf8.encode('utf8')[::-1]).decode(
'utf-8', 'surrogateescape')
container = self.env.account.container(valid_utf8)
self.assertTrue(container.create(cfg={'no_path_quote': True}))
self.assertIn(container.name, self.env.account.containers())
self.assertEqual(container.files(), [])
self.assertTrue(container.delete())
container = self.env.account.container(invalid_utf8)
self.assertFalse(container.create(cfg={'no_path_quote': True}))
self.assert_status(412)
self.assertRaises(ResponseError, container.files,
cfg={'no_path_quote': True})
self.assert_status(412)
def testCreateOnExisting(self):
cont = self.env.account.container(Utils.create_name())
self.assertTrue(cont.create())
self.assert_status(201)
self.assertTrue(cont.create())
self.assert_status(202)
def testSlashInName(self):
if six.PY2:
cont_name = list(Utils.create_name().decode('utf-8'))
else:
cont_name = list(Utils.create_name())
cont_name[random.randint(2, len(cont_name) - 2)] = '/'
cont_name = ''.join(cont_name)
if six.PY2:
cont_name = cont_name.encode('utf-8')
cont = self.env.account.container(cont_name)
self.assertFalse(cont.create(cfg={'no_path_quote': True}),
'created container with name %s' % (cont_name))
self.assert_status(404)
self.assertNotIn(cont.name, self.env.account.containers())
def testDelete(self):
cont = self.env.account.container(Utils.create_name())
self.assertTrue(cont.create())
self.assert_status(201)
self.assertTrue(cont.delete())
self.assert_status(204)
self.assertNotIn(cont.name, self.env.account.containers())
def testDeleteOnContainerThatDoesNotExist(self):
cont = self.env.account.container(Utils.create_name())
self.assertFalse(cont.delete())
self.assert_status(404)
def testDeleteOnContainerWithFiles(self):
cont = self.env.account.container(Utils.create_name())
self.assertTrue(cont.create())
file_item = cont.file(Utils.create_name())
file_item.write_random(self.env.file_size)
self.assertIn(file_item.name, cont.files())
self.assertFalse(cont.delete())
self.assert_status(409)
def testFileCreateInContainerThatDoesNotExist(self):
file_item = File(self.env.conn, self.env.account, Utils.create_name(),
Utils.create_name())
self.assertRaises(ResponseError, file_item.write)
self.assert_status(404)
def testLastFileMarker(self):
for format_type in [None, 'json', 'xml']:
files = self.env.container.files(parms={'format': format_type})
self.assertEqual(len(files), len(self.env.files))
self.assert_status(200)
marker = files[-1] if format_type is None else files[-1]['name']
files = self.env.container.files(
parms={'format': format_type, 'marker': marker})
self.assertEqual(len(files), 0)
if format_type is None:
self.assert_status(204)
else:
self.assert_status(200)
def testContainerFileList(self):
for format_type in [None, 'json', 'xml']:
files = self.env.container.files(parms={'format': format_type})
self.assert_status(200)
if isinstance(files[0], dict):
files = [x['name'] for x in files]
for file_item in self.env.files:
self.assertIn(file_item, files)
for file_item in files:
self.assertIn(file_item, self.env.files)
def _testContainerFormattedFileList(self, format_type):
expected = {}
for name in self.env.files:
expected[name] = self.env.container.file(name).info()
file_list = self.env.container.files(parms={'format': format_type})
self.assert_status(200)
for actual in file_list:
name = actual['name']
self.assertIn(name, expected)
if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
self.assertEqual(expected[name]['etag'],
'"%s"' % actual['hash'])
else:
self.assertEqual(expected[name]['etag'], actual['hash'])
self.assertEqual(
expected[name]['content_type'], actual['content_type'])
self.assertEqual(
expected[name]['content_length'], actual['bytes'])
expected.pop(name)
self.assertFalse(expected) # sanity check
def testContainerJsonFileList(self):
self._testContainerFormattedFileList('json')
def testContainerXmlFileList(self):
self._testContainerFormattedFileList('xml')
def testMarkerLimitFileList(self):
for format_type in [None, 'json', 'xml']:
for marker in ['0', 'A', 'I', 'R', 'Z', 'a', 'i', 'r', 'z',
'abc123', 'mnop', 'xyz']:
limit = random.randint(2, self.env.file_count - 1)
files = self.env.container.files(parms={'format': format_type,
'marker': marker,
'limit': limit})
if not files:
continue
if isinstance(files[0], dict):
files = [x['name'] for x in files]
self.assertLessEqual(len(files), limit)
if files:
if isinstance(files[0], dict):
files = [x['name'] for x in files]
self.assertGreater(locale.strcoll(files[0], marker), 0)
def testFileOrder(self):
for format_type in [None, 'json', 'xml']:
files = self.env.container.files(parms={'format': format_type})
if isinstance(files[0], dict):
files = [x['name'] for x in files]
self.assertEqual(sorted(files, key=locale.strxfrm), files)
def testContainerInfo(self):
info = self.env.container.info()
self.assert_status(204)
self.assertEqual(info['object_count'], self.env.file_count)
self.assertEqual(info['bytes_used'],
self.env.file_count * self.env.file_size)
def testContainerInfoOnContainerThatDoesNotExist(self):
container = self.env.account.container(Utils.create_name())
self.assertRaises(ResponseError, container.info)
self.assert_status(404)
def testContainerFileListWithLimit(self):
for format_type in [None, 'json', 'xml']:
files = self.env.container.files(parms={'format': format_type,
'limit': 2})
self.assertEqual(len(files), 2)
def testContainerExistenceCachingProblem(self):
cont = self.env.account.container(Utils.create_name())
self.assertRaises(ResponseError, cont.files)
self.assertTrue(cont.create())
self.assertEqual(cont.files(), [])
cont = self.env.account.container(Utils.create_name())
self.assertRaises(ResponseError, cont.files)
self.assertTrue(cont.create())
# NB: no GET! Make sure the PUT cleared the cached 404
file_item = cont.file(Utils.create_name())
file_item.write_random()
def testContainerLastModified(self):
container = self.env.account.container(Utils.create_name())
self.assertTrue(container.create())
info = container.info()
t0 = info['last_modified']
        # the last-modified header has only one-second resolution, so wait
        # long enough for its value to be able to change
eventlet.sleep(1)
        # a POST to the container changes its last-modified timestamp
self.assertTrue(
container.update_metadata({'x-container-meta-japan': 'mitaka'}))
info = container.info()
t1 = info['last_modified']
self.assertNotEqual(t0, t1)
eventlet.sleep(1)
        # a PUT (overwrite) of the container also changes last modified
self.assertTrue(container.create())
info = container.info()
t2 = info['last_modified']
self.assertNotEqual(t1, t2)
eventlet.sleep(1)
        # a PUT of an object doesn't change the container's last modified
obj = container.file(Utils.create_name())
self.assertTrue(
obj.write(b"aaaaa", hdrs={'Content-Type': 'text/plain'}))
info = container.info()
t3 = info['last_modified']
self.assertEqual(t2, t3)
        # a POST to an object also doesn't change the container's
        # last modified
self.assertTrue(
obj.sync_metadata({'us': 'austin'}))
info = container.info()
t4 = info['last_modified']
self.assertEqual(t2, t4)
class TestContainerUTF8(Base2, TestContainer):
pass
class TestContainerSortingEnv(BaseEnv):
@classmethod
def setUp(cls):
super(TestContainerSortingEnv, cls).setUp()
cls.container = cls.account.container(Utils.create_name())
if not cls.container.create():
raise ResponseError(cls.conn.response)
cls.file_items = ('a1', 'a2', 'A3', 'b1', 'B2', 'a10', 'b10', 'zz')
cls.files = list()
cls.file_size = 128
for name in cls.file_items:
file_item = cls.container.file(name)
file_item.write_random(cls.file_size)
cls.files.append(file_item.name)
class TestContainerSorting(Base):
env = TestContainerSortingEnv
def testContainerFileListSortingReversed(self):
file_list = list(sorted(self.env.file_items))
file_list.reverse()
for reverse in ('true', '1', 'yes', 'on', 't', 'y'):
cont_files = self.env.container.files(parms={'reverse': reverse})
self.assert_status(200)
self.assertEqual(file_list, cont_files,
'Expected %s but got %s with reverse param %r'
% (file_list, cont_files, reverse))
def testContainerFileSortingByPrefixReversed(self):
cont_list = sorted(c for c in self.env.file_items if c.startswith('a'))
cont_list.reverse()
cont_listing = self.env.container.files(parms={
'reverse': 'on', 'prefix': 'a'})
self.assert_status(200)
self.assertEqual(cont_list, cont_listing)
def testContainerFileSortingByMarkersExclusiveReversed(self):
        first_item = self.env.file_items[3]  # 'b1'
        last_item = self.env.file_items[4]  # 'B2'
cont_list = sorted(c for c in self.env.file_items
if last_item < c < first_item)
cont_list.reverse()
cont_listing = self.env.container.files(parms={
'reverse': 'on', 'marker': first_item, 'end_marker': last_item})
self.assert_status(200)
self.assertEqual(cont_list, cont_listing)
def testContainerFileSortingByMarkersInclusiveReversed(self):
        first_item = self.env.file_items[3]  # 'b1'
        last_item = self.env.file_items[4]  # 'B2'
cont_list = sorted(c for c in self.env.file_items
if last_item <= c <= first_item)
cont_list.reverse()
cont_listing = self.env.container.files(parms={
'reverse': 'on', 'marker': first_item + '\x00',
'end_marker': last_item[:-1] + chr(ord(last_item[-1]) - 1)})
self.assert_status(200)
self.assertEqual(cont_list, cont_listing)
def testContainerFileSortingByReversedMarkersReversed(self):
cont_listing = self.env.container.files(parms={
'reverse': 'on', 'marker': 'B', 'end_marker': 'b1'})
self.assert_status(204)
self.assertEqual([], cont_listing)
def testContainerFileListSorting(self):
file_list = list(sorted(self.env.file_items))
cont_files = self.env.container.files()
self.assert_status(200)
self.assertEqual(file_list, cont_files)
        # Let's try again, but with reverse specifically turned off
cont_files = self.env.container.files(parms={'reverse': 'off'})
self.assert_status(200)
self.assertEqual(file_list, cont_files)
cont_files = self.env.container.files(parms={'reverse': 'false'})
self.assert_status(200)
self.assertEqual(file_list, cont_files)
cont_files = self.env.container.files(parms={'reverse': 'no'})
self.assert_status(200)
self.assertEqual(file_list, cont_files)
cont_files = self.env.container.files(parms={'reverse': ''})
self.assert_status(200)
self.assertEqual(file_list, cont_files)
        # Let's try again, but with incorrect reverse values
cont_files = self.env.container.files(parms={'reverse': 'foo'})
self.assert_status(200)
self.assertEqual(file_list, cont_files)
cont_files = self.env.container.files(parms={'reverse': 'hai'})
self.assert_status(200)
self.assertEqual(file_list, cont_files)
cont_files = self.env.container.files(parms={'reverse': 'o=[]::::>'})
self.assert_status(200)
self.assertEqual(file_list, cont_files)
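# The 'path' listing tests below build a pseudo-directory tree: names with
# '/' separators, plus zero-byte 'application/directory' marker objects for
# each directory.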
class TestContainerPathsEnv(BaseEnv):
@classmethod
def setUp(cls):
super(TestContainerPathsEnv, cls).setUp()
cls.file_size = 8
cls.container = cls.account.container(Utils.create_name())
if not cls.container.create():
raise ResponseError(cls.conn.response)
cls.files = [
'/file1',
'/file A',
'/dir1/',
'/dir2/',
'/dir1/file2',
'/dir1/subdir1/',
'/dir1/subdir2/',
'/dir1/subdir1/file2',
'/dir1/subdir1/file3',
'/dir1/subdir1/file4',
'/dir1/subdir1/subsubdir1/',
'/dir1/subdir1/subsubdir1/file5',
'/dir1/subdir1/subsubdir1/file6',
'/dir1/subdir1/subsubdir1/file7',
'/dir1/subdir1/subsubdir1/file8',
'/dir1/subdir1/subsubdir2/',
'/dir1/subdir1/subsubdir2/file9',
'/dir1/subdir1/subsubdir2/file0',
'file1',
'dir1/',
'dir2/',
'dir1/file2',
'dir1/subdir1/',
'dir1/subdir2/',
'dir1/subdir1/file2',
'dir1/subdir1/file3',
'dir1/subdir1/file4',
'dir1/subdir1/subsubdir1/',
'dir1/subdir1/subsubdir1/file5',
'dir1/subdir1/subsubdir1/file6',
'dir1/subdir1/subsubdir1/file7',
'dir1/subdir1/subsubdir1/file8',
'dir1/subdir1/subsubdir2/',
'dir1/subdir1/subsubdir2/file9',
'dir1/subdir1/subsubdir2/file0',
'dir1/subdir with spaces/',
'dir1/subdir with spaces/file B',
'dir1/subdir+with{whatever/',
'dir1/subdir+with{whatever/file D',
]
stored_files = set()
for f in cls.files:
file_item = cls.container.file(f)
if f.endswith('/'):
file_item.write(hdrs={'Content-Type': 'application/directory'})
else:
file_item.write_random(cls.file_size,
hdrs={'Content-Type':
'application/directory'})
            if normalized_urls:
                nfile = '/'.join(filter(None, f.split('/')))
                if f[-1] == '/':
                    nfile += '/'
stored_files.add(nfile)
else:
stored_files.add(f)
cls.stored_files = sorted(stored_files)
class TestContainerPaths(Base):
env = TestContainerPathsEnv
def testTraverseContainer(self):
found_files = []
found_dirs = []
def recurse_path(path, count=0):
if count > 10:
raise ValueError('too deep recursion')
for file_item in self.env.container.files(parms={'path': path}):
self.assertTrue(file_item.startswith(path))
if file_item.endswith('/'):
recurse_path(file_item, count + 1)
found_dirs.append(file_item)
else:
found_files.append(file_item)
recurse_path('')
for file_item in self.env.stored_files:
if file_item.startswith('/'):
self.assertNotIn(file_item, found_dirs)
self.assertNotIn(file_item, found_files)
elif file_item.endswith('/'):
self.assertIn(file_item, found_dirs)
self.assertNotIn(file_item, found_files)
else:
self.assertIn(file_item, found_files)
self.assertNotIn(file_item, found_dirs)
found_files = []
found_dirs = []
recurse_path('/')
for file_item in self.env.stored_files:
if not file_item.startswith('/'):
self.assertNotIn(file_item, found_dirs)
self.assertNotIn(file_item, found_files)
elif file_item.endswith('/'):
self.assertIn(file_item, found_dirs)
self.assertNotIn(file_item, found_files)
else:
self.assertIn(file_item, found_files)
self.assertNotIn(file_item, found_dirs)
def testContainerListing(self):
for format_type in (None, 'json', 'xml'):
files = self.env.container.files(parms={'format': format_type})
if isinstance(files[0], dict):
files = [str(x['name']) for x in files]
self.assertEqual(files, self.env.stored_files)
for format_type in ('json', 'xml'):
for file_item in self.env.container.files(parms={'format':
format_type}):
self.assertGreaterEqual(int(file_item['bytes']), 0)
self.assertIn('last_modified', file_item)
if file_item['name'].endswith('/'):
self.assertEqual(file_item['content_type'],
'application/directory')
def testStructure(self):
def assert_listing(path, file_list):
files = self.env.container.files(parms={'path': path})
self.assertEqual(sorted(file_list, key=locale.strxfrm), files)
if not normalized_urls:
assert_listing('/', ['/dir1/', '/dir2/', '/file1', '/file A'])
assert_listing('/dir1',
['/dir1/file2', '/dir1/subdir1/', '/dir1/subdir2/'])
assert_listing('/dir1/',
['/dir1/file2', '/dir1/subdir1/', '/dir1/subdir2/'])
assert_listing('/dir1/subdir1',
['/dir1/subdir1/subsubdir2/', '/dir1/subdir1/file2',
'/dir1/subdir1/file3', '/dir1/subdir1/file4',
'/dir1/subdir1/subsubdir1/'])
assert_listing('/dir1/subdir2', [])
assert_listing('', ['file1', 'dir1/', 'dir2/'])
else:
assert_listing('', ['file1', 'dir1/', 'dir2/', 'file A'])
assert_listing('dir1', ['dir1/file2', 'dir1/subdir1/',
'dir1/subdir2/', 'dir1/subdir with spaces/',
'dir1/subdir+with{whatever/'])
assert_listing('dir1/subdir1',
['dir1/subdir1/file4', 'dir1/subdir1/subsubdir2/',
'dir1/subdir1/file2', 'dir1/subdir1/file3',
'dir1/subdir1/subsubdir1/'])
assert_listing('dir1/subdir1/subsubdir1',
['dir1/subdir1/subsubdir1/file7',
'dir1/subdir1/subsubdir1/file5',
'dir1/subdir1/subsubdir1/file8',
'dir1/subdir1/subsubdir1/file6'])
assert_listing('dir1/subdir1/subsubdir1/',
['dir1/subdir1/subsubdir1/file7',
'dir1/subdir1/subsubdir1/file5',
'dir1/subdir1/subsubdir1/file8',
'dir1/subdir1/subsubdir1/file6'])
assert_listing('dir1/subdir with spaces/',
['dir1/subdir with spaces/file B'])
class TestFileEnv(BaseEnv):
@classmethod
def setUp(cls):
super(TestFileEnv, cls).setUp()
if not tf.skip2:
# creating another account and connection
# for account to account copy tests
config2 = deepcopy(tf.config)
config2['account'] = tf.config['account2']
config2['username'] = tf.config['username2']
config2['password'] = tf.config['password2']
cls.conn2 = Connection(config2)
cls.conn2.authenticate()
cls.account2 = cls.conn2.get_account()
cls.account2.delete_containers()
cls.container = cls.account.container(Utils.create_name())
if not cls.container.create():
raise ResponseError(cls.conn.response)
cls.file_size = 128
# With keystoneauth we need the accounts to have had the project
# domain id persisted as sysmeta prior to testing ACLs. This may
# not be the case if, for example, the account was created using
# a request with reseller_admin role, when project domain id may
# not have been known. So we ensure that the project domain id is
# in sysmeta by making a POST to the accounts using an admin role.
cls.account.update_metadata()
if not tf.skip2:
cls.account2.update_metadata()
class TestFileDev(Base):
env = TestFileEnv
class TestFileDevUTF8(Base2, TestFileDev):
pass
class TestFile(Base):
env = TestFileEnv
def testGetResponseHeaders(self):
obj_data = b'test_body'
def do_test(put_hdrs, get_hdrs, expected_hdrs, unexpected_hdrs):
filename = Utils.create_name()
file_item = self.env.container.file(filename)
resp = file_item.write(
data=obj_data, hdrs=put_hdrs, return_resp=True)
# put then get an object
resp.read()
read_data = file_item.read(hdrs=get_hdrs)
self.assertEqual(obj_data, read_data) # sanity check
resp_headers = file_item.conn.response.getheaders()
# check the *list* of all header (name, value) pairs rather than
# constructing a dict in case of repeated names in the list
errors = []
for k, v in resp_headers:
if k.lower() in unexpected_hdrs:
errors.append('Found unexpected header %s: %s' % (k, v))
for k, v in expected_hdrs.items():
matches = [hdr for hdr in resp_headers if hdr[0].lower() == k]
if not matches:
errors.append('Missing expected header %s' % k)
for (got_k, got_v) in matches:
                    # The Connection: header is parsed by the cluster's LB
                    # and may come back either in the original lowercase or
                    # camel-cased.
if k == 'connection':
got_v = got_v.lower()
if got_v != v:
errors.append('Expected %s but got %s for %s' %
(v, got_v, k))
if errors:
self.fail(
'Errors in response headers:\n %s' % '\n '.join(errors))
put_headers = {'X-Object-Meta-Fruit': 'Banana',
'X-Delete-After': '10000',
'Content-Type': 'application/test'}
expected_headers = {'content-length': str(len(obj_data)),
'x-object-meta-fruit': 'Banana',
'accept-ranges': 'bytes',
'content-type': 'application/test',
'etag': md5(
obj_data, usedforsecurity=False).hexdigest(),
'last-modified': mock.ANY,
'date': mock.ANY,
'x-delete-at': mock.ANY,
'x-trans-id': mock.ANY,
'x-openstack-request-id': mock.ANY}
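        # mock.ANY compares equal to anything, so unpredictable headers
        # (timestamps, transaction ids) only need to be present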
if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
expected_headers['etag'] = '"%s"' % expected_headers['etag']
unexpected_headers = ['connection', 'x-delete-after']
do_test(put_headers, {}, expected_headers, unexpected_headers)
get_headers = {'Connection': 'keep-alive'}
expected_headers['connection'] = 'keep-alive'
unexpected_headers = ['x-delete-after']
do_test(put_headers, get_headers, expected_headers, unexpected_headers)
def testCopy(self):
# makes sure to test encoded characters
source_filename = 'dealde%2Fl04 011e%204c8df/flash.png'
file_item = self.env.container.file(source_filename)
metadata = {}
metadata[Utils.create_ascii_name()] = Utils.create_name()
put_headers = {'Content-Type': 'application/test',
'Content-Encoding': 'gzip',
'Content-Disposition': 'attachment; filename=myfile'}
file_item.metadata = metadata
data = file_item.write_random(hdrs=put_headers)
# the allowed headers are configurable in object server, so we cannot
# assert that content-encoding and content-disposition get *copied*
# unless they were successfully set on the original PUT, so populate
# expected_headers by making a HEAD on the original object
file_item.initialize()
self.assertEqual('application/test', file_item.content_type)
resp_headers = dict(file_item.conn.response.getheaders())
expected_headers = {}
for k, v in put_headers.items():
if k.lower() in resp_headers:
expected_headers[k] = v
dest_cont = self.env.account.container(Utils.create_name())
self.assertTrue(dest_cont.create())
# copy both from within and across containers
for cont in (self.env.container, dest_cont):
# copy both with and without initial slash
for prefix in ('', '/'):
dest_filename = Utils.create_name()
extra_hdrs = {'X-Object-Meta-Extra': 'fresh'}
self.assertTrue(file_item.copy(
'%s%s' % (prefix, cont), dest_filename, hdrs=extra_hdrs))
# verify container listing for copy
listing = cont.files(parms={'format': 'json'})
for obj in listing:
if obj['name'] == dest_filename:
break
else:
self.fail('Failed to find %s in listing' % dest_filename)
self.assertEqual(file_item.size, obj['bytes'])
self.assertEqual(normalize_etag(file_item.etag), obj['hash'])
self.assertEqual(file_item.content_type, obj['content_type'])
file_copy = cont.file(dest_filename)
self.assertEqual(data, file_copy.read())
self.assertTrue(file_copy.initialize())
expected_metadata = dict(metadata)
# new metadata should be merged with existing
expected_metadata['extra'] = 'fresh'
self.assertDictEqual(expected_metadata, file_copy.metadata)
resp_headers = dict(file_copy.conn.response.getheaders())
for k, v in expected_headers.items():
self.assertIn(k.lower(), resp_headers)
self.assertEqual(v, resp_headers[k.lower()])
# repeat copy with updated content-type, content-encoding and
# content-disposition, which should get updated
extra_hdrs = {
'X-Object-Meta-Extra': 'fresher',
'Content-Type': 'application/test-changed',
'Content-Encoding': 'not_gzip',
'Content-Disposition': 'attachment; filename=notmyfile'}
self.assertTrue(file_item.copy(
'%s%s' % (prefix, cont), dest_filename, hdrs=extra_hdrs))
self.assertIn(dest_filename, cont.files())
file_copy = cont.file(dest_filename)
self.assertEqual(data, file_copy.read())
self.assertTrue(file_copy.initialize())
expected_metadata['extra'] = 'fresher'
self.assertDictEqual(expected_metadata, file_copy.metadata)
resp_headers = dict(file_copy.conn.response.getheaders())
# if k is in expected_headers then we can assert its new value
for k, v in expected_headers.items():
v = extra_hdrs.get(k, v)
self.assertIn(k.lower(), resp_headers)
self.assertEqual(v, resp_headers[k.lower()])
# verify container listing for copy
listing = cont.files(parms={'format': 'json'})
for obj in listing:
if obj['name'] == dest_filename:
break
else:
self.fail('Failed to find %s in listing' % dest_filename)
self.assertEqual(file_item.size, obj['bytes'])
self.assertEqual(normalize_etag(file_item.etag), obj['hash'])
self.assertEqual(
'application/test-changed', obj['content_type'])
# repeat copy with X-Fresh-Metadata header - existing user
# metadata should not be copied, new completely replaces it.
extra_hdrs = {'Content-Type': 'application/test-updated',
'X-Object-Meta-Extra': 'fresher',
'X-Fresh-Metadata': 'true'}
self.assertTrue(file_item.copy(
'%s%s' % (prefix, cont), dest_filename, hdrs=extra_hdrs))
self.assertIn(dest_filename, cont.files())
file_copy = cont.file(dest_filename)
self.assertEqual(data, file_copy.read())
self.assertTrue(file_copy.initialize())
self.assertEqual('application/test-updated',
file_copy.content_type)
expected_metadata = {'extra': 'fresher'}
self.assertDictEqual(expected_metadata, file_copy.metadata)
resp_headers = dict(file_copy.conn.response.getheaders())
for k in ('Content-Disposition', 'Content-Encoding'):
self.assertNotIn(k.lower(), resp_headers)
# verify container listing for copy
listing = cont.files(parms={'format': 'json'})
for obj in listing:
if obj['name'] == dest_filename:
break
else:
self.fail('Failed to find %s in listing' % dest_filename)
self.assertEqual(file_item.size, obj['bytes'])
self.assertEqual(normalize_etag(file_item.etag), obj['hash'])
self.assertEqual(
'application/test-updated', obj['content_type'])
def testCopyRange(self):
# makes sure to test encoded characters
source_filename = 'dealde%2Fl04 011e%204c8df/flash.png'
file_item = self.env.container.file(source_filename)
metadata = {Utils.create_ascii_name(): Utils.create_name()}
data = file_item.write_random(1024)
file_item.sync_metadata(metadata)
file_item.initialize()
dest_cont = self.env.account.container(Utils.create_name())
self.assertTrue(dest_cont.create())
expected_body = data[100:201]
expected_etag = md5(expected_body, usedforsecurity=False)
# copy both from within and across containers
for cont in (self.env.container, dest_cont):
# copy both with and without initial slash
for prefix in ('', '/'):
dest_filename = Utils.create_name()
file_item.copy('%s%s' % (prefix, cont), dest_filename,
hdrs={'Range': 'bytes=100-200'})
self.assertEqual(201, file_item.conn.response.status)
# verify container listing for copy
listing = cont.files(parms={'format': 'json'})
for obj in listing:
if obj['name'] == dest_filename:
break
else:
self.fail('Failed to find %s in listing' % dest_filename)
self.assertEqual(101, obj['bytes'])
self.assertEqual(expected_etag.hexdigest(), obj['hash'])
self.assertEqual(file_item.content_type, obj['content_type'])
# verify copy object
copy_file_item = cont.file(dest_filename)
self.assertEqual(expected_body, copy_file_item.read())
self.assertTrue(copy_file_item.initialize())
self.assertEqual(metadata, copy_file_item.metadata)
def testCopyAccount(self):
# makes sure to test encoded characters
source_filename = 'dealde%2Fl04 011e%204c8df/flash.png'
file_item = self.env.container.file(source_filename)
metadata = {Utils.create_ascii_name(): Utils.create_name()}
data = file_item.write_random()
file_item.sync_metadata(metadata)
dest_cont = self.env.account.container(Utils.create_name())
self.assertTrue(dest_cont.create())
acct = self.env.conn.account_name
# copy both from within and across containers
for cont in (self.env.container, dest_cont):
# copy both with and without initial slash
for prefix in ('', '/'):
dest_filename = Utils.create_name()
file_item = self.env.container.file(source_filename)
file_item.copy_account(acct,
'%s%s' % (prefix, cont),
dest_filename)
self.assertIn(dest_filename, cont.files())
file_item = cont.file(dest_filename)
self.assertEqual(data, file_item.read())
self.assertTrue(file_item.initialize())
self.assertEqual(metadata, file_item.metadata)
if not tf.skip2:
dest_cont = self.env.account2.container(Utils.create_name())
self.assertTrue(dest_cont.create(hdrs={
'X-Container-Write': self.env.conn.user_acl
}))
acct = self.env.conn2.account_name
# copy both with and without initial slash
for prefix in ('', '/'):
dest_filename = Utils.create_name()
file_item = self.env.container.file(source_filename)
file_item.copy_account(acct,
'%s%s' % (prefix, dest_cont),
dest_filename)
self.assertIn(dest_filename, dest_cont.files())
file_item = dest_cont.file(dest_filename)
self.assertEqual(data, file_item.read())
self.assertTrue(file_item.initialize())
self.assertEqual(metadata, file_item.metadata)
def testCopy404s(self):
source_filename = Utils.create_name()
file_item = self.env.container.file(source_filename)
file_item.write_random()
dest_cont = self.env.account.container(Utils.create_name())
self.assertTrue(dest_cont.create())
for prefix in ('', '/'):
# invalid source container
source_cont = self.env.account.container(Utils.create_name())
file_item = source_cont.file(source_filename)
self.assertRaises(ResponseError, file_item.copy,
'%s%s' % (prefix, self.env.container),
Utils.create_name())
self.assert_status(404)
self.assertRaises(ResponseError, file_item.copy,
'%s%s' % (prefix, dest_cont),
Utils.create_name())
self.assert_status(404)
# invalid source object
file_item = self.env.container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.copy,
'%s%s' % (prefix, self.env.container),
Utils.create_name())
self.assert_status(404)
self.assertRaises(ResponseError, file_item.copy,
'%s%s' % (prefix, dest_cont),
Utils.create_name())
self.assert_status(404)
# invalid destination container
file_item = self.env.container.file(source_filename)
self.assertRaises(ResponseError, file_item.copy,
'%s%s' % (prefix, Utils.create_name()),
Utils.create_name())
def testCopyAccount404s(self):
if tf.skip2:
raise SkipTest('Account2 not set')
acct = self.env.conn.account_name
acct2 = self.env.conn2.account_name
source_filename = Utils.create_name()
file_item = self.env.container.file(source_filename)
file_item.write_random()
dest_cont = self.env.account.container(Utils.create_name())
self.assertTrue(dest_cont.create(hdrs={
'X-Container-Read': self.env.conn2.user_acl
}))
dest_cont2 = self.env.account2.container(Utils.create_name())
self.assertTrue(dest_cont2.create(hdrs={
'X-Container-Write': self.env.conn.user_acl,
'X-Container-Read': self.env.conn.user_acl
}))
for acct, cont in ((acct, dest_cont), (acct2, dest_cont2)):
for prefix in ('', '/'):
# invalid source container
source_cont = self.env.account.container(Utils.create_name())
file_item = source_cont.file(source_filename)
self.assertRaises(ResponseError, file_item.copy_account,
acct,
'%s%s' % (prefix, self.env.container),
Utils.create_name())
                # there is no such source container, but the user has
                # permission to do a GET (done internally via COPY) for
                # objects in his own account.
self.assert_status(404)
self.assertRaises(ResponseError, file_item.copy_account,
acct,
'%s%s' % (prefix, cont),
Utils.create_name())
self.assert_status(404)
# invalid source object
file_item = self.env.container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.copy_account,
acct,
'%s%s' % (prefix, self.env.container),
Utils.create_name())
                # there is no such source object, but the user has
                # permission to do a GET (done internally via COPY) for
                # objects in his own account.
self.assert_status(404)
self.assertRaises(ResponseError, file_item.copy_account,
acct,
'%s%s' % (prefix, cont),
Utils.create_name())
self.assert_status(404)
# invalid destination container
file_item = self.env.container.file(source_filename)
self.assertRaises(ResponseError, file_item.copy_account,
acct,
'%s%s' % (prefix, Utils.create_name()),
Utils.create_name())
if acct == acct2:
# there is no such destination container
# and foreign user can have no permission to write there
self.assert_status(403)
else:
self.assert_status(404)
def testCopyNoDestinationHeader(self):
source_filename = Utils.create_name()
file_item = self.env.container.file(source_filename)
file_item.write_random()
file_item = self.env.container.file(source_filename)
self.assertRaises(ResponseError, file_item.copy, Utils.create_name(),
Utils.create_name(),
cfg={'no_destination': True})
self.assert_status(412)
def testCopyDestinationSlashProblems(self):
source_filename = Utils.create_name()
file_item = self.env.container.file(source_filename)
file_item.write_random()
# no slash
self.assertRaises(ResponseError, file_item.copy, Utils.create_name(),
Utils.create_name(),
cfg={'destination': Utils.create_name()})
self.assert_status(412)
# too many slashes
self.assertRaises(ResponseError, file_item.copy, Utils.create_name(),
Utils.create_name(),
cfg={'destination': '//%s' % Utils.create_name()})
self.assert_status(412)
def testCopyFromHeader(self):
source_filename = Utils.create_name()
file_item = self.env.container.file(source_filename)
metadata = {}
for i in range(1):
metadata[Utils.create_ascii_name()] = Utils.create_name()
file_item.metadata = metadata
data = file_item.write_random()
dest_cont = self.env.account.container(Utils.create_name())
self.assertTrue(dest_cont.create())
# copy both from within and across containers
for cont in (self.env.container, dest_cont):
# copy both with and without initial slash
for prefix in ('', '/'):
dest_filename = Utils.create_name()
file_item = cont.file(dest_filename)
file_item.write(hdrs={'X-Copy-From': '%s%s/%s' % (
prefix, self.env.container.name, source_filename)})
self.assertIn(dest_filename, cont.files())
file_item = cont.file(dest_filename)
self.assertEqual(data, file_item.read())
self.assertTrue(file_item.initialize())
self.assertEqual(metadata, file_item.metadata)
def testCopyFromAccountHeader(self):
if tf.skip2:
raise SkipTest('Account2 not set')
acct = self.env.conn.account_name
src_cont = self.env.account.container(Utils.create_name())
self.assertTrue(src_cont.create(hdrs={
'X-Container-Read': self.env.conn2.user_acl
}))
source_filename = Utils.create_name()
file_item = src_cont.file(source_filename)
metadata = {}
for i in range(1):
metadata[Utils.create_ascii_name()] = Utils.create_name()
file_item.metadata = metadata
data = file_item.write_random()
dest_cont = self.env.account.container(Utils.create_name())
self.assertTrue(dest_cont.create())
dest_cont2 = self.env.account2.container(Utils.create_name())
self.assertTrue(dest_cont2.create(hdrs={
'X-Container-Write': self.env.conn.user_acl
}))
for cont in (src_cont, dest_cont, dest_cont2):
# copy both with and without initial slash
for prefix in ('', '/'):
dest_filename = Utils.create_name()
file_item = cont.file(dest_filename)
file_item.write(hdrs={'X-Copy-From-Account': acct,
'X-Copy-From': '%s%s/%s' % (
prefix,
src_cont.name,
source_filename)})
self.assertIn(dest_filename, cont.files())
file_item = cont.file(dest_filename)
self.assertEqual(data, file_item.read())
self.assertTrue(file_item.initialize())
self.assertEqual(metadata, file_item.metadata)
def testCopyFromHeader404s(self):
source_filename = Utils.create_name()
file_item = self.env.container.file(source_filename)
file_item.write_random()
for prefix in ('', '/'):
# invalid source container
file_item = self.env.container.file(Utils.create_name())
copy_from = ('%s%s/%s'
% (prefix, Utils.create_name(), source_filename))
self.assertRaises(ResponseError, file_item.write,
hdrs={'X-Copy-From': copy_from})
self.assert_status(404)
# invalid source object
copy_from = ('%s%s/%s'
% (prefix, self.env.container.name,
Utils.create_name()))
file_item = self.env.container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.write,
hdrs={'X-Copy-From': copy_from})
self.assert_status(404)
# invalid destination container
dest_cont = self.env.account.container(Utils.create_name())
file_item = dest_cont.file(Utils.create_name())
copy_from = ('%s%s/%s'
% (prefix, self.env.container.name, source_filename))
self.assertRaises(ResponseError, file_item.write,
hdrs={'X-Copy-From': copy_from})
self.assert_status(404)
def testCopyFromAccountHeader404s(self):
if tf.skip2:
raise SkipTest('Account2 not set')
acct = self.env.conn2.account_name
src_cont = self.env.account2.container(Utils.create_name())
self.assertTrue(src_cont.create(hdrs={
'X-Container-Read': self.env.conn.user_acl
}))
source_filename = Utils.create_name()
file_item = src_cont.file(source_filename)
file_item.write_random()
dest_cont = self.env.account.container(Utils.create_name())
self.assertTrue(dest_cont.create())
for prefix in ('', '/'):
# invalid source container
file_item = dest_cont.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.write,
hdrs={'X-Copy-From-Account': acct,
'X-Copy-From': '%s%s/%s' %
(prefix,
Utils.create_name(),
source_filename)})
self.assert_status(403)
# invalid source object
file_item = self.env.container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.write,
hdrs={'X-Copy-From-Account': acct,
'X-Copy-From': '%s%s/%s' %
(prefix,
src_cont,
Utils.create_name())})
self.assert_status(404)
# invalid destination container
dest_cont = self.env.account.container(Utils.create_name())
file_item = dest_cont.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.write,
hdrs={'X-Copy-From-Account': acct,
'X-Copy-From': '%s%s/%s' %
(prefix,
src_cont,
source_filename)})
self.assert_status(404)
def testCopyFromAccountHeader403s(self):
if tf.skip2:
raise SkipTest('Account2 not set')
acct = self.env.conn2.account_name
src_cont = self.env.account2.container(Utils.create_name())
self.assertTrue(src_cont.create()) # Primary user has no access
source_filename = Utils.create_name()
file_item = src_cont.file(source_filename)
file_item.write_random()
dest_cont = self.env.account.container(Utils.create_name())
self.assertTrue(dest_cont.create())
for prefix in ('', '/'):
# invalid source container
file_item = dest_cont.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.write,
hdrs={'X-Copy-From-Account': acct,
'X-Copy-From': '%s%s/%s' %
(prefix,
Utils.create_name(),
source_filename)})
self.assert_status(403)
# invalid source object
file_item = self.env.container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.write,
hdrs={'X-Copy-From-Account': acct,
'X-Copy-From': '%s%s/%s' %
(prefix,
src_cont,
Utils.create_name())})
self.assert_status(403)
# invalid destination container
dest_cont = self.env.account.container(Utils.create_name())
file_item = dest_cont.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.write,
hdrs={'X-Copy-From-Account': acct,
'X-Copy-From': '%s%s/%s' %
(prefix,
src_cont,
source_filename)})
self.assert_status(403)
def testNameLimit(self):
limit = load_constraint('max_object_name_length')
for lim in (1, 10, limit // 2, limit - 1, limit, limit + 1, limit * 2):
file_item = self.env.container.file('a' * lim)
if lim <= limit:
self.assertTrue(file_item.write())
self.assert_status(201)
else:
self.assertRaises(ResponseError, file_item.write)
self.assert_status(400)
def testQuestionMarkInName(self):
if Utils.create_name == Utils.create_ascii_name:
file_name = list(Utils.create_name())
file_name[random.randint(2, len(file_name) - 2)] = '?'
file_name = "".join(file_name)
else:
file_name = Utils.create_name(6) + '?' + Utils.create_name(6)
file_item = self.env.container.file(file_name)
self.assertTrue(file_item.write(cfg={'no_path_quote': True}))
self.assertNotIn(file_name, self.env.container.files())
self.assertIn(file_name.split('?')[0], self.env.container.files())
def testDeleteThen404s(self):
file_item = self.env.container.file(Utils.create_name())
self.assertTrue(file_item.write_random())
self.assert_status(201)
self.assertTrue(file_item.delete())
self.assert_status(204)
file_item.metadata = {Utils.create_ascii_name(): Utils.create_name()}
for method in (file_item.info,
file_item.read,
file_item.sync_metadata,
file_item.delete):
self.assertRaises(ResponseError, method)
self.assert_status(404)
def testBlankMetadataName(self):
file_item = self.env.container.file(Utils.create_name())
file_item.metadata = {'': Utils.create_name()}
self.assertRaises(ResponseError, file_item.write_random)
self.assert_status(400)
def testMetadataNumberLimit(self):
number_limit = load_constraint('max_meta_count')
size_limit = load_constraint('max_meta_overall_size')
for i in (number_limit - 10, number_limit - 1, number_limit,
number_limit + 1, number_limit + 10, number_limit + 100):
j = size_limit // (i * 2)
metadata = {}
while len(metadata.keys()) < i:
key = Utils.create_ascii_name()
val = Utils.create_name()
if len(key) > j:
key = key[:j]
# NB: we'll likely write object metadata that's *not* UTF-8
if six.PY2:
val = val[:j]
else:
val = val.encode('utf8')[:j].decode(
'utf8', 'surrogateescape')
metadata[key] = val
file_item = self.env.container.file(Utils.create_name())
file_item.metadata = metadata
if i <= number_limit:
self.assertTrue(file_item.write())
self.assert_status(201)
self.assertTrue(file_item.sync_metadata())
self.assert_status(202)
else:
self.assertRaises(ResponseError, file_item.write)
self.assert_status(400)
file_item.metadata = {}
self.assertTrue(file_item.write())
self.assert_status(201)
file_item.metadata = metadata
self.assertRaises(ResponseError, file_item.sync_metadata)
self.assert_status(400)
def testContentTypeGuessing(self):
file_types = {'wav': 'audio/x-wav', 'txt': 'text/plain',
'zip': 'application/zip'}
container = self.env.account.container(Utils.create_name())
self.assertTrue(container.create())
for i in file_types.keys():
file_item = container.file(Utils.create_name() + '.' + i)
file_item.write(b'', cfg={'no_content_type': True})
file_types_read = {}
for i in container.files(parms={'format': 'json'}):
file_types_read[i['name'].split('.')[1]] = i['content_type']
self.assertEqual(file_types, file_types_read)
def testRangedGets(self):
# We set the file_length to a strange multiple here. This is to check
# that ranges still work in the EC case when the requested range
# spans EC segment boundaries. The 1 MiB base value is chosen because
# that's a common EC segment size. The 1.33 multiple is to ensure we
# aren't aligned on segment boundaries
file_length = int(1048576 * 1.33)
range_size = file_length // 10
file_item = self.env.container.file(Utils.create_name())
data = file_item.write_random(file_length)
for i in range(0, file_length, range_size):
range_string = 'bytes=%d-%d' % (i, i + range_size - 1)
hdrs = {'Range': range_string}
self.assertEqual(
data[i: i + range_size], file_item.read(hdrs=hdrs),
range_string)
range_string = 'bytes=-%d' % (i)
hdrs = {'Range': range_string}
if i == 0:
# RFC 2616 14.35.1
# "If a syntactically valid byte-range-set includes ... at
# least one suffix-byte-range-spec with a NON-ZERO
# suffix-length, then the byte-range-set is satisfiable.
                # Otherwise, the byte-range-set is unsatisfiable."
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(416)
self.assert_header('content-range', 'bytes */%d' % file_length)
else:
self.assertEqual(file_item.read(hdrs=hdrs), data[-i:])
self.assert_header('content-range', 'bytes %d-%d/%d' % (
file_length - i, file_length - 1, file_length))
self.assert_etag(file_item.md5)
self.assert_header('accept-ranges', 'bytes')
range_string = 'bytes=%d-' % (i)
hdrs = {'Range': range_string}
self.assertEqual(
file_item.read(hdrs=hdrs), data[i - file_length:],
range_string)
range_string = 'bytes=%d-%d' % (file_length + 1000, file_length + 2000)
hdrs = {'Range': range_string}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(416)
self.assert_header('content-range', 'bytes */%d' % file_length)
self.assert_etag(file_item.md5)
self.assert_header('accept-ranges', 'bytes')
range_string = 'bytes=%d-%d' % (file_length - 1000, file_length + 2000)
hdrs = {'Range': range_string}
self.assertEqual(file_item.read(hdrs=hdrs), data[-1000:], range_string)
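        # A Range value without the "bytes=" prefix is not a valid range
        # header, so it is ignored and the full object is returned.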
hdrs = {'Range': '0-4'}
self.assertEqual(file_item.read(hdrs=hdrs), data, '0-4')
# RFC 2616 14.35.1
# "If the entity is shorter than the specified suffix-length, the
# entire entity-body is used."
range_string = 'bytes=-%d' % (file_length + 10)
hdrs = {'Range': range_string}
self.assertEqual(file_item.read(hdrs=hdrs), data, range_string)
def testMultiRangeGets(self):
file_length = 10000
range_size = file_length // 10
subrange_size = range_size // 10
file_item = self.env.container.file(Utils.create_name())
data = file_item.write_random(
file_length, hdrs={"Content-Type":
"lovecraft/rugose; squamous=true"})
for i in range(0, file_length, range_size):
range_string = 'bytes=%d-%d,%d-%d,%d-%d' % (
i, i + subrange_size - 1,
i + 2 * subrange_size, i + 3 * subrange_size - 1,
i + 4 * subrange_size, i + 5 * subrange_size - 1)
hdrs = {'Range': range_string}
fetched = file_item.read(hdrs=hdrs)
self.assert_status(206)
content_type = file_item.content_type
self.assertTrue(content_type.startswith("multipart/byteranges"))
self.assertIsNone(file_item.content_range)
# email.parser.FeedParser wants a message with headers on the
# front, then two CRLFs, and then a body (like emails have but
# HTTP response bodies don't). We fake it out by constructing a
# one-header preamble containing just the Content-Type, then
# feeding in the response body.
parser = FeedParser()
parser.feed(b"Content-Type: %s\r\n\r\n" % content_type.encode())
parser.feed(fetched)
root_message = parser.close()
self.assertTrue(root_message.is_multipart())
byteranges = root_message.get_payload()
self.assertEqual(len(byteranges), 3)
self.assertEqual(byteranges[0]['Content-Type'],
"lovecraft/rugose; squamous=true")
self.assertEqual(
byteranges[0]['Content-Range'],
"bytes %d-%d/%d" % (i, i + subrange_size - 1, file_length))
self.assertEqual(
byteranges[0].get_payload(decode=True),
data[i:(i + subrange_size)])
self.assertEqual(byteranges[1]['Content-Type'],
"lovecraft/rugose; squamous=true")
self.assertEqual(
byteranges[1]['Content-Range'],
"bytes %d-%d/%d" % (i + 2 * subrange_size,
i + 3 * subrange_size - 1, file_length))
self.assertEqual(
byteranges[1].get_payload(decode=True),
data[(i + 2 * subrange_size):(i + 3 * subrange_size)])
self.assertEqual(byteranges[2]['Content-Type'],
"lovecraft/rugose; squamous=true")
self.assertEqual(
byteranges[2]['Content-Range'],
"bytes %d-%d/%d" % (i + 4 * subrange_size,
i + 5 * subrange_size - 1, file_length))
self.assertEqual(
byteranges[2].get_payload(decode=True),
data[(i + 4 * subrange_size):(i + 5 * subrange_size)])
# The first two ranges are satisfiable but the third is not; the
# result is a multipart/byteranges response containing only the two
# satisfiable byteranges.
range_string = 'bytes=%d-%d,%d-%d,%d-%d' % (
0, subrange_size - 1,
2 * subrange_size, 3 * subrange_size - 1,
file_length, file_length + subrange_size - 1)
hdrs = {'Range': range_string}
fetched = file_item.read(hdrs=hdrs)
self.assert_status(206)
content_type = file_item.content_type
self.assertTrue(content_type.startswith("multipart/byteranges"))
self.assertIsNone(file_item.content_range)
parser = FeedParser()
parser.feed(b"Content-Type: %s\r\n\r\n" % content_type.encode())
parser.feed(fetched)
root_message = parser.close()
self.assertTrue(root_message.is_multipart())
byteranges = root_message.get_payload()
self.assertEqual(len(byteranges), 2)
self.assertEqual(byteranges[0]['Content-Type'],
"lovecraft/rugose; squamous=true")
self.assertEqual(
byteranges[0]['Content-Range'],
"bytes %d-%d/%d" % (0, subrange_size - 1, file_length))
self.assertEqual(byteranges[0].get_payload(decode=True),
data[:subrange_size])
self.assertEqual(byteranges[1]['Content-Type'],
"lovecraft/rugose; squamous=true")
self.assertEqual(
byteranges[1]['Content-Range'],
"bytes %d-%d/%d" % (2 * subrange_size, 3 * subrange_size - 1,
file_length))
self.assertEqual(
byteranges[1].get_payload(decode=True),
data[(2 * subrange_size):(3 * subrange_size)])
# The first range is satisfiable but the second is not; the
# result is either a multipart/byteranges response containing one
# byterange or a normal, non-MIME 206 response.
range_string = 'bytes=%d-%d,%d-%d' % (
0, subrange_size - 1,
file_length, file_length + subrange_size - 1)
hdrs = {'Range': range_string}
fetched = file_item.read(hdrs=hdrs)
self.assert_status(206)
content_type = file_item.content_type
if content_type.startswith("multipart/byteranges"):
self.assertIsNone(file_item.content_range)
parser = FeedParser()
parser.feed(b"Content-Type: %s\r\n\r\n" % content_type.encode())
parser.feed(fetched)
root_message = parser.close()
self.assertTrue(root_message.is_multipart())
byteranges = root_message.get_payload()
self.assertEqual(len(byteranges), 1)
self.assertEqual(byteranges[0]['Content-Type'],
"lovecraft/rugose; squamous=true")
self.assertEqual(
byteranges[0]['Content-Range'],
"bytes %d-%d/%d" % (0, subrange_size - 1, file_length))
self.assertEqual(byteranges[0].get_payload(decode=True),
data[:subrange_size])
else:
self.assertEqual(
file_item.content_range,
"bytes %d-%d/%d" % (0, subrange_size - 1, file_length))
self.assertEqual(content_type, "lovecraft/rugose; squamous=true")
self.assertEqual(fetched, data[:subrange_size])
# No byterange is satisfiable, so we get a 416 response.
range_string = 'bytes=%d-%d,%d-%d' % (
file_length, file_length + 2,
file_length + 100, file_length + 102)
hdrs = {'Range': range_string}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(416)
self.assert_header('content-range', 'bytes */%d' % file_length)
def testRangedGetsWithLWSinHeader(self):
file_length = 10000
file_item = self.env.container.file(Utils.create_name())
data = file_item.write_random(file_length)
for r in ('BYTES=0-999', 'bytes = 0-999', 'BYTES = 0 - 999',
'bytes = 0 - 999', 'bytes=0 - 999', 'bytes=0-999 '):
self.assertEqual(file_item.read(hdrs={'Range': r}), data[0:1000])
def testFileSizeLimit(self):
limit = load_constraint('max_file_size')
tsecs = 3
def timeout(seconds, method, *args, **kwargs):
try:
with eventlet.Timeout(seconds):
method(*args, **kwargs)
except eventlet.Timeout:
return True
else:
return False
# This loop will result in fallocate calls for 4x the limit
# (minus 111 bytes). With fallocate turned on in the object servers,
# this may fail if you don't have 4x the limit available on your
# data drives.
# Note that this test does not actually send any data to the system.
# All it does is ensure that a response (success or failure) comes
# back within 3 seconds. For the successful tests (size smaller
# than limit), the cluster will log a 499.
for i in (limit - 100, limit - 10, limit - 1, limit, limit + 1,
limit + 10, limit + 100):
file_item = self.env.container.file(Utils.create_name())
if i <= limit:
self.assertTrue(timeout(tsecs, file_item.write,
cfg={'set_content_length': i}))
else:
self.assertRaises(ResponseError, timeout, tsecs,
file_item.write,
cfg={'set_content_length': i})
def testNoContentLengthForPut(self):
file_item = self.env.container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.write, b'testing',
cfg={'no_content_length': True})
self.assert_status(411)
def testDelete(self):
file_item = self.env.container.file(Utils.create_name())
file_item.write_random(self.env.file_size)
self.assertIn(file_item.name, self.env.container.files())
self.assertTrue(file_item.delete())
self.assertNotIn(file_item.name, self.env.container.files())
def testBadHeaders(self):
file_length = 100
# no content type on puts should be ok
file_item = self.env.container.file(Utils.create_name())
file_item.write_random(file_length, cfg={'no_content_type': True})
self.assert_status(201)
# content length x
self.assertRaises(ResponseError, file_item.write_random, file_length,
hdrs={'Content-Length': 'X'},
cfg={'no_content_length': True})
self.assert_status(400)
# no content-length
self.assertRaises(ResponseError, file_item.write_random, file_length,
cfg={'no_content_length': True})
self.assert_status(411)
self.assertRaises(ResponseError, file_item.write_random, file_length,
hdrs={'transfer-encoding': 'gzip,chunked'},
cfg={'no_content_length': True})
self.assert_status(501)
# bad request types
# for req in ('LICK', 'GETorHEAD_base', 'container_info',
# 'best_response'):
for req in ('LICK', 'GETorHEAD_base'):
self.env.account.conn.make_request(req)
self.assert_status(405)
# bad range headers
self.assertEqual(
len(file_item.read(hdrs={'Range': 'parsecs=8-12'})),
file_length)
self.assert_status(200)
def testMetadataLengthLimits(self):
key_limit = load_constraint('max_meta_name_length')
value_limit = load_constraint('max_meta_value_length')
lengths = [[key_limit, value_limit], [key_limit, value_limit + 1],
[key_limit + 1, value_limit], [key_limit, 0],
[key_limit, value_limit * 10],
[key_limit * 10, value_limit]]
for l in lengths:
metadata = {'a' * l[0]: 'b' * l[1]}
file_item = self.env.container.file(Utils.create_name())
file_item.metadata = metadata
if l[0] <= key_limit and l[1] <= value_limit:
self.assertTrue(file_item.write())
self.assert_status(201)
self.assertTrue(file_item.sync_metadata())
else:
self.assertRaises(ResponseError, file_item.write)
self.assert_status(400)
file_item.metadata = {}
self.assertTrue(file_item.write())
self.assert_status(201)
file_item.metadata = metadata
self.assertRaises(ResponseError, file_item.sync_metadata)
self.assert_status(400)
def testEtagWayoff(self):
file_item = self.env.container.file(Utils.create_name())
hdrs = {'etag': 'reallylonganddefinitelynotavalidetagvalue'}
self.assertRaises(ResponseError, file_item.write_random, hdrs=hdrs)
self.assert_status(422)
def testFileCreate(self):
for i in range(10):
file_item = self.env.container.file(Utils.create_name())
data = file_item.write_random()
self.assert_status(201)
self.assertEqual(data, file_item.read())
self.assert_status(200)
def testHead(self):
file_name = Utils.create_name()
content_type = Utils.create_name()
file_item = self.env.container.file(file_name)
file_item.content_type = content_type
file_item.write_random(self.env.file_size)
expected_etag = file_item.md5
if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
expected_etag = '"%s"' % expected_etag
file_item = self.env.container.file(file_name)
info = file_item.info()
self.assert_status(200)
self.assertEqual(info['content_length'], self.env.file_size)
self.assertEqual(info['etag'], expected_etag)
self.assertEqual(info['content_type'], content_type)
self.assertIn('last_modified', info)
def testDeleteOfFileThatDoesNotExist(self):
# in container that exists
file_item = self.env.container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.delete)
self.assert_status(404)
# in container that does not exist
container = self.env.account.container(Utils.create_name())
file_item = container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.delete)
self.assert_status(404)
def testHeadOnFileThatDoesNotExist(self):
# in container that exists
file_item = self.env.container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.info)
self.assert_status(404)
# in container that does not exist
container = self.env.account.container(Utils.create_name())
file_item = container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.info)
self.assert_status(404)
def testMetadataOnPost(self):
file_item = self.env.container.file(Utils.create_name())
file_item.write_random(self.env.file_size)
for i in range(10):
metadata = {}
for j in range(10):
metadata[Utils.create_ascii_name()] = Utils.create_name()
file_item.metadata = metadata
self.assertTrue(file_item.sync_metadata())
self.assert_status(202)
file_item = self.env.container.file(file_item.name)
self.assertTrue(file_item.initialize())
self.assert_status(200)
self.assertEqual(file_item.metadata, metadata)
def testGetContentType(self):
file_name = Utils.create_name()
content_type = Utils.create_name()
file_item = self.env.container.file(file_name)
file_item.content_type = content_type
file_item.write_random()
file_item = self.env.container.file(file_name)
file_item.read()
self.assertEqual(content_type, file_item.content_type)
def testGetOnFileThatDoesNotExist(self):
# in container that exists
file_item = self.env.container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.read)
self.assert_status(404)
# in container that does not exist
container = self.env.account.container(Utils.create_name())
file_item = container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.read)
self.assert_status(404)
def testPostOnFileThatDoesNotExist(self):
# in container that exists
file_item = self.env.container.file(Utils.create_name())
file_item.metadata['Field'] = 'Value'
self.assertRaises(ResponseError, file_item.sync_metadata)
self.assert_status(404)
# in container that does not exist
container = self.env.account.container(Utils.create_name())
file_item = container.file(Utils.create_name())
file_item.metadata['Field'] = 'Value'
self.assertRaises(ResponseError, file_item.sync_metadata)
self.assert_status(404)
def testMetadataOnPut(self):
for i in range(10):
metadata = {}
for j in range(10):
metadata[Utils.create_ascii_name()] = Utils.create_name()
file_item = self.env.container.file(Utils.create_name())
file_item.metadata = metadata
file_item.write_random(self.env.file_size)
file_item = self.env.container.file(file_item.name)
self.assertTrue(file_item.initialize())
self.assert_status(200)
self.assertEqual(file_item.metadata, metadata)
def testSerialization(self):
container = self.env.account.container(Utils.create_name())
self.assertTrue(container.create())
files = []
for i in (0, 1, 10, 100, 1000, 10000):
files.append({'name': Utils.create_name(),
'content_type': Utils.create_name(), 'bytes': i})
write_time = time.time()
for f in files:
file_item = container.file(f['name'])
file_item.content_type = f['content_type']
file_item.write_random(f['bytes'])
f['hash'] = file_item.md5
f['json'] = False
f['xml'] = False
write_time = time.time() - write_time
for format_type in ['json', 'xml']:
for file_item in container.files(parms={'format': format_type}):
found = False
for f in files:
if f['name'] != file_item['name']:
continue
self.assertEqual(file_item['content_type'],
f['content_type'])
self.assertEqual(int(file_item['bytes']), f['bytes'])
d = datetime.strptime(
file_item['last_modified'].split('.')[0],
"%Y-%m-%dT%H:%M:%S")
lm = time.mktime(d.timetuple())
if 'last_modified' in f:
self.assertEqual(f['last_modified'], lm)
else:
f['last_modified'] = lm
f[format_type] = True
found = True
self.assertTrue(
found, 'Unexpected file %s found in '
'%s listing' % (file_item['name'], format_type))
headers = dict((h.lower(), v)
for h, v in self.env.conn.response.getheaders())
if format_type == 'json':
self.assertEqual(headers['content-type'],
'application/json; charset=utf-8')
elif format_type == 'xml':
self.assertEqual(headers['content-type'],
'application/xml; charset=utf-8')
lm_diff = max([f['last_modified'] for f in files]) -\
min([f['last_modified'] for f in files])
self.assertLess(lm_diff, write_time + 1,
'Diff in last modified times '
'should be less than time to write files')
for f in files:
for format_type in ['json', 'xml']:
self.assertTrue(
f[format_type], 'File %s not found in %s listing'
% (f['name'], format_type))
def testStackedOverwrite(self):
file_item = self.env.container.file(Utils.create_name())
for i in range(1, 11):
data = file_item.write_random(512)
file_item.write(data)
self.assertEqual(file_item.read(), data)
def testZeroByteFile(self):
file_item = self.env.container.file(Utils.create_name())
self.assertTrue(file_item.write(b''))
self.assertIn(file_item.name, self.env.container.files())
self.assertEqual(file_item.read(), b'')
def testEtagResponse(self):
file_item = self.env.container.file(Utils.create_name())
data = io.BytesIO(file_item.write_random(512))
self.assert_etag(File.compute_md5sum(data))
def testChunkedPut(self):
if (tf.web_front_end == 'apache2'):
raise SkipTest("Chunked PUT cannot be tested with apache2 web "
"front end")
def chunks(s, length=3):
i, j = 0, length
while i < len(s):
yield s[i:j]
i, j = j, j + length
data = File.random_data(10000)
etag = File.compute_md5sum(data)
for i in (1, 10, 100, 1000):
file_item = self.env.container.file(Utils.create_name())
for j in chunks(data, i):
file_item.chunked_write(j)
self.assertTrue(file_item.chunked_write())
self.assertEqual(data, file_item.read())
info = file_item.info()
self.assertEqual(normalize_etag(info['etag']), etag)
def test_POST(self):
# verify consistency between object and container listing metadata
file_name = Utils.create_name()
file_item = self.env.container.file(file_name)
file_item.content_type = 'text/foobar'
file_item.write_random(1024)
# sanity check
file_item = self.env.container.file(file_name)
file_item.initialize()
self.assertEqual('text/foobar', file_item.content_type)
self.assertEqual(1024, file_item.size)
etag = file_item.etag
# check container listing is consistent
listing = self.env.container.files(parms={'format': 'json'})
for f_dict in listing:
if f_dict['name'] == file_name:
break
else:
self.fail('Failed to find file %r in listing' % file_name)
self.assertEqual(1024, f_dict['bytes'])
self.assertEqual('text/foobar', f_dict['content_type'])
if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
self.assertEqual(etag, '"%s"' % f_dict['hash'])
else:
self.assertEqual(etag, f_dict['hash'])
put_last_modified = f_dict['last_modified']
# now POST updated content-type to each file
file_item = self.env.container.file(file_name)
file_item.content_type = 'image/foobarbaz'
file_item.sync_metadata({'Test': 'blah'})
# sanity check object metadata
file_item = self.env.container.file(file_name)
file_item.initialize()
self.assertEqual(1024, file_item.size)
self.assertEqual('image/foobarbaz', file_item.content_type)
self.assertEqual(etag, file_item.etag)
self.assertIn('test', file_item.metadata)
# check for consistency between object and container listing
listing = self.env.container.files(parms={'format': 'json'})
for f_dict in listing:
if f_dict['name'] == file_name:
break
else:
self.fail('Failed to find file %r in listing' % file_name)
self.assertEqual(1024, f_dict['bytes'])
self.assertEqual('image/foobarbaz', f_dict['content_type'])
self.assertLess(put_last_modified, f_dict['last_modified'])
if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
self.assertEqual(etag, '"%s"' % f_dict['hash'])
else:
self.assertEqual(etag, f_dict['hash'])
class TestFileUTF8(Base2, TestFile):
pass
class TestFileComparisonEnv(BaseEnv):
@classmethod
def setUp(cls):
super(TestFileComparisonEnv, cls).setUp()
cls.container = cls.account.container(Utils.create_name())
if not cls.container.create():
raise ResponseError(cls.conn.response)
cls.file_count = 20
cls.file_size = 128
cls.files = list()
for x in range(cls.file_count):
file_item = cls.container.file(Utils.create_name())
file_item.write_random(cls.file_size)
cls.files.append(file_item)
cls.time_old_f1 = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
time.gmtime(time.time() - 86400))
cls.time_old_f2 = time.strftime("%A, %d-%b-%y %H:%M:%S GMT",
time.gmtime(time.time() - 86400))
cls.time_old_f3 = time.strftime("%a %b %d %H:%M:%S %Y",
time.gmtime(time.time() - 86400))
cls.time_new = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
time.gmtime(time.time() + 86400))
class TestFileComparison(Base):
env = TestFileComparisonEnv
def testIfMatch(self):
for file_item in self.env.files:
hdrs = {'If-Match': file_item.md5}
self.assertTrue(file_item.read(hdrs=hdrs))
hdrs = {'If-Match': 'bogus'}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
self.assert_etag(file_item.md5)
def testIfMatchMultipleEtags(self):
for file_item in self.env.files:
hdrs = {'If-Match': '"bogus1", "%s", "bogus2"' % file_item.md5}
self.assertTrue(file_item.read(hdrs=hdrs))
hdrs = {'If-Match': '"bogus1", "bogus2", "bogus3"'}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
self.assert_etag(file_item.md5)
def testIfNoneMatch(self):
for file_item in self.env.files:
hdrs = {'If-None-Match': 'bogus'}
self.assertTrue(file_item.read(hdrs=hdrs))
hdrs = {'If-None-Match': file_item.md5}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(304)
self.assert_etag(file_item.md5)
self.assert_header('accept-ranges', 'bytes')
def testIfNoneMatchMultipleEtags(self):
for file_item in self.env.files:
hdrs = {'If-None-Match': '"bogus1", "bogus2", "bogus3"'}
self.assertTrue(file_item.read(hdrs=hdrs))
hdrs = {'If-None-Match':
'"bogus1", "bogus2", "%s"' % file_item.md5}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(304)
self.assert_etag(file_item.md5)
self.assert_header('accept-ranges', 'bytes')
def testIfModifiedSince(self):
for file_item in self.env.files:
hdrs = {'If-Modified-Since': self.env.time_old_f1}
self.assertTrue(file_item.read(hdrs=hdrs))
self.assertTrue(file_item.info(hdrs=hdrs))
hdrs = {'If-Modified-Since': self.env.time_new}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(304)
self.assert_etag(file_item.md5)
self.assert_header('accept-ranges', 'bytes')
self.assertRaises(ResponseError, file_item.info, hdrs=hdrs)
self.assert_status(304)
self.assert_etag(file_item.md5)
self.assert_header('accept-ranges', 'bytes')
def testIfUnmodifiedSince(self):
for file_item in self.env.files:
hdrs = {'If-Unmodified-Since': self.env.time_new}
self.assertTrue(file_item.read(hdrs=hdrs))
self.assertTrue(file_item.info(hdrs=hdrs))
hdrs = {'If-Unmodified-Since': self.env.time_old_f2}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
self.assert_etag(file_item.md5)
self.assertRaises(ResponseError, file_item.info, hdrs=hdrs)
self.assert_status(412)
self.assert_etag(file_item.md5)
def testIfMatchAndUnmodified(self):
for file_item in self.env.files:
hdrs = {'If-Match': file_item.md5,
'If-Unmodified-Since': self.env.time_new}
self.assertTrue(file_item.read(hdrs=hdrs))
hdrs = {'If-Match': 'bogus',
'If-Unmodified-Since': self.env.time_new}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
self.assert_etag(file_item.md5)
hdrs = {'If-Match': file_item.md5,
'If-Unmodified-Since': self.env.time_old_f3}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
self.assert_etag(file_item.md5)
def testLastModified(self):
file_name = Utils.create_name()
content_type = Utils.create_name()
file_item = self.env.container.file(file_name)
file_item.content_type = content_type
resp = file_item.write_random_return_resp(self.env.file_size)
put_last_modified = resp.getheader('last-modified')
etag = file_item.md5
file_item = self.env.container.file(file_name)
info = file_item.info()
self.assertIn('last_modified', info)
last_modified = info['last_modified']
self.assertEqual(put_last_modified, info['last_modified'])
hdrs = {'If-Modified-Since': last_modified}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(304)
self.assert_etag(etag)
self.assert_header('accept-ranges', 'bytes')
hdrs = {'If-Unmodified-Since': last_modified}
self.assertTrue(file_item.read(hdrs=hdrs))
class TestFileComparisonUTF8(Base2, TestFileComparison):
pass
class TestServiceToken(unittest.TestCase):
def setUp(self):
if tf.skip_service_tokens:
raise SkipTest
if tf.in_process:
tf.skip_if_no_xattrs()
self.SET_TO_USERS_TOKEN = 1
self.SET_TO_SERVICE_TOKEN = 2
# keystoneauth and tempauth differ in allowing PUT account
# Even if keystoneauth allows it, the proxy-server uses
# allow_account_management to decide if accounts can be created
self.put_account_expect = is_client_error
if tf.swift_test_auth_version != '1':
if cluster_info.get('swift').get('allow_account_management'):
self.put_account_expect = is_success
def _scenario_generator(self):
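        # Yield create/read style methods walking account -> container ->
        # object, then DELETE in reverse path order so objects are removed
        # before their container and the container before the account.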
paths = ((None, None), ('c', None), ('c', 'o'))
for path in paths:
for method in ('PUT', 'POST', 'HEAD', 'GET', 'OPTIONS'):
yield method, path[0], path[1]
for path in reversed(paths):
yield 'DELETE', path[0], path[1]
def _assert_is_authed_response(self, method, container, object, resp):
resp.read()
expect = is_success
if method == 'DELETE' and not container:
expect = is_client_error
if method == 'PUT' and not container:
expect = self.put_account_expect
self.assertTrue(expect(resp.status), 'Unexpected %s for %s %s %s'
% (resp.status, method, container, object))
def _assert_not_authed_response(self, method, container, object, resp):
resp.read()
expect = is_client_error
if method == 'OPTIONS':
expect = is_success
self.assertTrue(expect(resp.status), 'Unexpected %s for %s %s %s'
% (resp.status, method, container, object))
def prepare_request(self, method, use_service_account=False,
container=None, obj=None, body=None, headers=None,
x_auth_token=None,
x_service_token=None, dbg=False):
"""
Setup for making the request
When retry() calls the do_request() function, it calls it the
test user's token, the parsed path, a connection and (optionally)
a token from the test service user. We save options here so that
do_request() can make the appropriate request.
:param method: The operation (e.g. 'HEAD')
:param use_service_account: Optional. Set True to change the path to
be the service account
:param container: Optional. Adds a container name to the path
:param obj: Optional. Adds an object name to the path
:param body: Optional. Adds a body (string) in the request
:param headers: Optional. Adds additional headers.
:param x_auth_token: Optional. Default is SET_TO_USERS_TOKEN. One of:
SET_TO_USERS_TOKEN Put the test user's token in
X-Auth-Token
SET_TO_SERVICE_TOKEN Put the service token in X-Auth-Token
:param x_service_token: Optional. Default is to not set X-Service-Token
to any value. If specified, is one of following:
SET_TO_USERS_TOKEN Put the test user's token in
X-Service-Token
SET_TO_SERVICE_TOKEN Put the service token in
X-Service-Token
:param dbg: Optional. Set true to check request arguments
"""
self.method = method
self.use_service_account = use_service_account
self.container = container
self.obj = obj
self.body = body
self.headers = headers
if x_auth_token:
self.x_auth_token = x_auth_token
else:
self.x_auth_token = self.SET_TO_USERS_TOKEN
self.x_service_token = x_service_token
self.dbg = dbg
def do_request(self, url, token, parsed, conn, service_token=''):
if self.use_service_account:
path = self._service_account(parsed.path)
else:
path = parsed.path
if self.container:
path += '/%s' % self.container
if self.obj:
path += '/%s' % self.obj
headers = {}
if self.body:
headers.update({'Content-Length': len(self.body)})
if self.x_auth_token == self.SET_TO_USERS_TOKEN:
headers.update({'X-Auth-Token': token})
elif self.x_auth_token == self.SET_TO_SERVICE_TOKEN:
headers.update({'X-Auth-Token': service_token})
if self.x_service_token == self.SET_TO_USERS_TOKEN:
headers.update({'X-Service-Token': token})
elif self.x_service_token == self.SET_TO_SERVICE_TOKEN:
headers.update({'X-Service-Token': service_token})
if self.dbg:
print('DEBUG: conn.request: method:%s path:%s'
' body:%s headers:%s' % (self.method, path, self.body,
headers))
conn.request(self.method, path, self.body, headers=headers)
return check_response(conn)
def _service_account(self, path):
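        # Swap the account component of the path (e.g. "/v1/AUTH_<project>")
        # for the service account by re-prefixing the project id.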
parts = path.split('/', 3)
account = parts[2]
try:
project_id = account[account.index('_') + 1:]
except ValueError:
project_id = account
parts[2] = '%s%s' % (tf.swift_test_service_prefix, project_id)
return '/'.join(parts)
def test_user_access_own_auth_account(self):
# This covers ground tested elsewhere (tests a user doing HEAD
# on own account). However, if this fails, none of the remaining
# tests will work
self.prepare_request('HEAD')
resp = retry(self.do_request)
resp.read()
self.assertIn(resp.status, (200, 204))
def test_user_cannot_access_service_account(self):
for method, container, obj in self._scenario_generator():
self.prepare_request(method, use_service_account=True,
container=container, obj=obj)
resp = retry(self.do_request)
self._assert_not_authed_response(method, container, obj, resp)
def test_service_user_denied_with_x_auth_token(self):
for method, container, obj in self._scenario_generator():
self.prepare_request(method, use_service_account=True,
container=container, obj=obj,
x_auth_token=self.SET_TO_SERVICE_TOKEN)
resp = retry(self.do_request, service_user=5)
self._assert_not_authed_response(method, container, obj, resp)
def test_service_user_denied_with_x_service_token(self):
for method, container, obj in self._scenario_generator():
self.prepare_request(method, use_service_account=True,
container=container, obj=obj,
x_auth_token=self.SET_TO_SERVICE_TOKEN,
x_service_token=self.SET_TO_SERVICE_TOKEN)
resp = retry(self.do_request, service_user=5)
self._assert_not_authed_response(method, container, obj, resp)
def test_user_plus_service_can_access_service_account(self):
for method, container, obj in self._scenario_generator():
self.prepare_request(method, use_service_account=True,
container=container, obj=obj,
x_auth_token=self.SET_TO_USERS_TOKEN,
x_service_token=self.SET_TO_SERVICE_TOKEN)
resp = retry(self.do_request, service_user=5)
self._assert_is_authed_response(method, container, obj, resp)
if __name__ == '__main__':
unittest.main()
# --- /soccer_common/src/soccer_common/transformation2d.py (utra-robosoccer/soccerbot, BSD-3-Clause) ---
"""EXPERIMENTAL, USE AT YOUR OWN RISK!"""
import math
import numpy as np
class Transformation2D(np.ndarray):
def __new__(cls, matrix=None, pos_theta=None, *args, **kwargs):
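        # Build a 3x3 identity matrix and view it as this subclass, so the
        # instance is a homogeneous 2D transform that still acts like ndarray.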
cls = np.eye(3).view(cls)
if matrix is not None:
cls.matrix = matrix
else:
cls.pos_theta = pos_theta
return cls
@property
def position(self) -> np.ndarray:
# Position in form [x y]
return np.array(self[0:2, 2])
@position.setter
def position(self, position: [float]):
self[0:2, 2] = position
@property
def yaw(self) -> float:
return math.atan2(self[1, 0], self[0, 0])
@yaw.setter
def yaw(self, yaw: float):
self[0, 0] = math.cos(yaw)
self[0, 1] = -math.sin(yaw)
self[1, 0] = math.sin(yaw)
self[1, 1] = math.cos(yaw)
@property
def matrix(self) -> np.ndarray:
return np.array(self)
@matrix.setter
def matrix(self, matrix: np.array):
self[0:3, 0:3] = matrix
@property
def pos_theta(self):
# Field in form [x, y, yaw]
return np.array([self.position[0], self.position[1], self.yaw])
@pos_theta.setter
def pos_theta(self, pos_theta: [float]):
self.position = (pos_theta[0], pos_theta[1])
self.yaw = pos_theta[2]
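# A minimal usage sketch (not part of the original module; the values below
# are illustrative only):
if __name__ == "__main__":
    t = Transformation2D(pos_theta=[1.0, 2.0, math.pi / 2])
    print(t.position)   # [1. 2.]
    print(t.yaw)        # 1.5707963267948966
    print(t.pos_theta)  # [1.         2.         1.57079633]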
# --- /confidnet/learners/selfconfid_learner.py (valeoai/ConfidNet, MIT/Apache-2.0/BSD-3-Clause) ---
import os
from collections import OrderedDict
import numpy as np
import torch
from tqdm import tqdm
from confidnet.learners.learner import AbstractLeaner
from confidnet.utils import misc
from confidnet.utils.logger import get_logger
from confidnet.utils.metrics import Metrics
LOGGER = get_logger(__name__, level="DEBUG")
class SelfConfidLearner(AbstractLeaner):
def __init__(self, config_args, train_loader, val_loader, test_loader, start_epoch, device):
super().__init__(config_args, train_loader, val_loader, test_loader, start_epoch, device)
self.freeze_layers()
self.disable_bn(verbose=True)
if self.config_args["model"].get("uncertainty", None):
self.disable_dropout(verbose=True)
def train(self, epoch):
self.model.train()
self.disable_bn()
if self.config_args["model"].get("uncertainty", None):
self.disable_dropout()
metrics = Metrics(
self.metrics, self.prod_train_len, self.num_classes
)
loss, confid_loss = 0, 0
len_steps, len_data = 0, 0
# Training loop
loop = tqdm(self.train_loader)
for batch_id, (data, target) in enumerate(loop):
data, target = data.to(self.device), target.to(self.device)
self.optimizer.zero_grad()
output = self.model(data)
# Potential temperature scaling
if self.temperature:
output = list(output)
output[0] = output[0] / self.temperature
output = tuple(output)
if self.task == "classification":
current_loss = self.criterion(output, target)
elif self.task == "segmentation":
current_loss = self.criterion(output, target.squeeze(dim=1))
current_loss.backward()
loss += current_loss
self.optimizer.step()
if self.task == "classification":
len_steps += len(data)
len_data = len_steps
elif self.task == "segmentation":
len_steps += len(data) * np.prod(data.shape[-2:])
len_data += len(data)
# Update metrics
pred = output[0].argmax(dim=1, keepdim=True)
confidence = torch.sigmoid(output[1])
metrics.update(pred, target, confidence)
# Update the average loss
loop.set_description(f"Epoch {epoch}/{self.nb_epochs}")
loop.set_postfix(
OrderedDict(
{
"loss_confid": f"{(loss / len_data):05.3e}",
"acc": f"{(metrics.accuracy / len_steps):05.2%}",
}
)
)
loop.update()
# Eval on epoch end
scores = metrics.get_scores(split="train")
logs_dict = OrderedDict(
{
"epoch": {"value": epoch, "string": f"{epoch:03}"},
"lr": {
"value": self.optimizer.param_groups[0]["lr"],
"string": f"{self.optimizer.param_groups[0]['lr']:05.1e}",
},
"train/loss_confid": {
"value": loss / len_data,
"string": f"{(loss / len_data):05.4e}",
},
}
)
for s in scores:
logs_dict[s] = scores[s]
# Val scores
val_losses, scores_val = self.evaluate(self.val_loader, self.prod_val_len, split="val")
logs_dict["val/loss_confid"] = {
"value": val_losses["loss_confid"].item() / self.nsamples_val,
"string": f"{(val_losses['loss_confid'].item() / self.nsamples_val):05.4e}",
}
for sv in scores_val:
logs_dict[sv] = scores_val[sv]
# Test scores
test_losses, scores_test = self.evaluate(self.test_loader, self.prod_test_len, split="test")
logs_dict["test/loss_confid"] = {
"value": test_losses["loss_confid"].item() / self.nsamples_test,
"string": f"{(test_losses['loss_confid'].item() / self.nsamples_test):05.4e}",
}
for st in scores_test:
logs_dict[st] = scores_test[st]
# Print metrics
misc.print_dict(logs_dict)
# Save the model checkpoint
self.save_checkpoint(epoch)
# CSV logging
misc.csv_writter(path=self.output_folder / "logs.csv", dic=OrderedDict(logs_dict))
# Tensorboard logging
self.save_tb(logs_dict)
# Scheduler step
if self.scheduler:
self.scheduler.step()
def evaluate(self, dloader, len_dataset, split="test", verbose=False, **args):
self.model.eval()
metrics = Metrics(self.metrics, len_dataset, self.num_classes)
loss = 0
# Evaluation loop
loop = tqdm(dloader, disable=not verbose)
for batch_id, (data, target) in enumerate(loop):
data, target = data.to(self.device), target.to(self.device)
with torch.no_grad():
output = self.model(data)
if self.task == "classification":
loss += self.criterion(output, target)
elif self.task == "segmentation":
loss += self.criterion(output, target.squeeze(dim=1))
# Update metrics
pred = output[0].argmax(dim=1, keepdim=True)
confidence = torch.sigmoid(output[1])
metrics.update(pred, target, confidence)
scores = metrics.get_scores(split=split)
losses = {"loss_confid": loss}
return losses, scores
def load_checkpoint(self, state_dict, uncertainty_state_dict=None, strict=True):
if not uncertainty_state_dict:
self.model.load_state_dict(state_dict, strict=strict)
else:
self.model.pred_network.load_state_dict(state_dict, strict=strict)
# 1. filter out unnecessary keys
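            # (the dropped keys are the prediction head weights, whose shapes
            # do not match the uncertainty network's single-unit confidence
            # output)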
if self.task == "classification":
state_dict = {
k: v
for k, v in uncertainty_state_dict.items()
if k not in ["fc2.weight", "fc2.bias"]
}
if self.task == "segmentation":
state_dict = {
k: v
for k, v in uncertainty_state_dict.items()
if k
not in [
"up1.conv2.cbr_unit.0.weight",
"up1.conv2.cbr_unit.0.bias",
"up1.conv2.cbr_unit.1.weight",
"up1.conv2.cbr_unit.1.bias",
"up1.conv2.cbr_unit.1.running_mean",
"up1.conv2.cbr_unit.1.running_var",
]
}
            # 2. overwrite matching entries in a copy of the existing state
            # dict (calling .update() on a fresh state_dict() is a no-op)
            model_dict = self.model.uncertainty_network.state_dict()
            model_dict.update(state_dict)
            # 3. load the merged state dict
            self.model.uncertainty_network.load_state_dict(model_dict,
                                                           strict=False)
def freeze_layers(self):
# Eventual fine-tuning for self-confid
LOGGER.info("Freezing every layer except uncertainty")
for param in self.model.named_parameters():
if "uncertainty" in param[0]:
print(param[0], "kept to training")
continue
param[1].requires_grad = False
def disable_bn(self, verbose=False):
# Freeze also BN running average parameters
if verbose:
LOGGER.info("Keeping original BN parameters")
for layer in self.model.named_modules():
if "bn" in layer[0] or "cbr_unit.1" in layer[0]:
if verbose:
print(layer[0], "original BN setting")
layer[1].momentum = 0
layer[1].eval()
def disable_dropout(self, verbose=False):
# Freeze also BN running average parameters
if verbose:
LOGGER.info("Disable dropout layers to reduce stochasticity")
for layer in self.model.named_modules():
if "dropout" in layer[0]:
if verbose:
print(layer[0], "set to eval mode")
layer[1].eval()
# --- /www/tests/test_sys.py (brython-dev/brython, BSD-3-Clause) ---
import sys
# issue 1218
x = "Outer stack frame"
def t():
x = "Inner stack frame"
my_locals = sys._getframe(0).f_locals
assert my_locals['x'] == "Inner stack frame"
outer_locals = sys._getframe(1).f_locals
assert outer_locals['x'] == "Outer stack frame"
t()
# trace functions
traces = []
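# sys.settrace() installs a global trace function; it is called with
# (frame, event, arg) on each 'call' event, and its return value is used as
# the local trace function for that frame.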
def f(frame, event, arg):
global first_line
if isinstance(arg, tuple):
arg = arg[0]
if not traces:
first_line = frame.f_lineno
trace = [event, frame.f_code.co_name,
frame.f_lineno - first_line, arg]
traces.append(trace)
return f
sys.settrace(f)
def g(x):
for i in range(2):
pass
try:
1/0
except:
pass # ignore exception
for car in 'abc':
pass
return x
g(44)
def h():
pass
h()
class A:
def f(self):
print("A.f")
return 4
A()
expected = [
['call', 'g', 0, None],
['line', 'g', 1, None],
['line', 'g', 2, None],
['line', 'g', 1, None],
['line', 'g', 2, None],
['line', 'g', 1, None],
['line', 'g', 3, None],
['line', 'g', 4, None],
['exception', 'g', 4, ZeroDivisionError],
['line', 'g', 5, None],
['line', 'g', 6, None],
['line', 'g', 7, None],
['line', 'g', 8, None],
['line', 'g', 7, None],
['line', 'g', 8, None],
['line', 'g', 7, None],
['line', 'g', 8, None],
['line', 'g', 7, None],
['line', 'g', 9, None],
['return', 'g', 9, 44],
['call', 'h', 13, None],
['line', 'h', 14, None],
['return', 'h', 14, None],
['call', 'A', 18, None],
['line', 'A', 18, None],
['line', 'A', 20, None],
['return', 'A', 20, None]
]
if traces != expected:
    for i, (line1, line2) in enumerate(zip(traces, expected)):
        if line1 == line2:
            print('same line', i, 'traces', line1, 'expected', line2)
        else:
            print('diff line', i, 'traces', line1, 'expected', line2)
    # print whatever one list has beyond the length of the other
    i = min(len(traces), len(expected))
    print('remaining in traces\n', traces[i:],
          '\nremaining in expected', expected[i:])
    raise AssertionError('result is not the same as expected')
# issue 2055
def f():
a = [i for i in range(10)]
argcounts = []
def traceFn(frame, event, arg):
if (event != 'call'): return
argcounts.append(frame.f_code.co_argcount)
return traceFn
sys.settrace(traceFn)
f()
assert argcounts == [0, 1]
# same for gen expr
def f1():
assert argcounts == [0] # for f()
a = (i for i in range(10))
argcounts = []
f1()
assert argcounts == [0, 1], argcounts
def f2():
assert argcounts == [0] # for f()
a = (i for i in range(10))
list(a)
argcounts = []
f2()
assert argcounts == [0, 1] + [1] * 10
# issue 2056
def f3():
for value in (0,):
pass
def traceFn(frame, event, arg):
traces.append(event)
return traceFn
sys.settrace(traceFn)
traces = []
f3()
assert traces == ['call', 'line', 'line', 'line', 'return']
# issue 2113
t = []
def traceFn(frame, event, arg):
t.append(event)
return traceFn
sys.settrace(traceFn)
[x for x in ()]
assert t == ['call', 'line', 'return']
# remove trace for next tests
sys.settrace(None)
# --- /pyclesperanto_prototype/_tier4/_dilate_labels.py (clEsperanto/pyclesperanto_prototype, Python-2.0/BSD-3-Clause) ---
from .._tier0 import Image
from .._tier0 import plugin_function
from .._tier0 import push
from .._tier0 import pull
from .._tier0 import create_like, create_labels_like
from .._tier1 import copy
from .._tier1 import set
from .._tier1 import onlyzero_overwrite_maximum_box
from .._tier1 import onlyzero_overwrite_maximum_diamond
import numpy as np
@plugin_function(categories=['label processing', 'in assistant'], output_creator=create_labels_like)
def dilate_labels(labeling_source : Image, labeling_destination : Image = None, radius: int = 2) -> Image:
"""Dilates labels to a larger size. No label overwrites another label.
Similar to the implementation in scikit-image [2] and MorpholibJ[3]
Notes
-----
* This operation assumes input images are isotropic.
Parameters
----------
    labeling_source : Image
        label image to dilate
    labeling_destination : Image, optional
        result
radius : int, optional
Returns
-------
labels_destination
See Also
--------
..[1] https://clij.github.io/clij2-docs/reference_dilateLabels
..[2] https://scikit-image.org/docs/dev/auto_examples/segmentation/plot_expand_labels.html?highlight=expand%20labels
..[3] https://github.com/ijpb/MorphoLibJ
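    Examples
    --------
    A minimal sketch (illustrative values; running it needs a working OpenCL
    device for pyclesperanto):

    >>> import numpy as np
    >>> import pyclesperanto_prototype as cle
    >>> labels = cle.push(np.asarray([[0, 1, 0], [0, 0, 0], [0, 0, 2]]))
    >>> dilated = cle.dilate_labels(labels, radius=1)
    >>> result = cle.pull(dilated)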
"""
flip = create_like(labeling_destination)
flop = create_like(labeling_destination)
flag = push(np.asarray([[[0]]]))
flag_value = 1
copy(labeling_source, flip)
iteration_count = 0
while flag_value > 0 and iteration_count < radius:
if (iteration_count % 2 == 0):
onlyzero_overwrite_maximum_box(flip, flag, flop)
else:
onlyzero_overwrite_maximum_diamond(flop, flag, flip)
flag_value = pull(flag)[0][0][0]
set(flag, 0)
iteration_count += 1
if (iteration_count % 2 == 0):
copy(flip, labeling_destination)
else:
copy(flop, labeling_destination)
return labeling_destination
# --- /src/lib/python/bundy/testutils/ccsession_mock.py (bundy-dns/bundy, ISC/BSL-1.0) ---
# Copyright (C) 2012 Internet Systems Consortium.
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
class MockModuleCCSession():
"""Fake ModuleCCSession with a minimal implementation as needed by the
tests. Currently this module only stores whether some methods have
been called on it (send_stopping(), and close())"""
def __init__(self):
"""Will be set to True when send_stopping() is called"""
self.stopped = False
"""Will be set to True when close() is called"""
self.closed = False
def send_stopping(self):
"""Fake send_stopping() call. No message is sent, but only stores
that this method has been called."""
self.stopped = True
def close(self):
"""Fake close() call. Nothing is closed, but only stores
that this method has been called."""
self.closed = True
# --- /landlab/components/flexure/funcs.py (landlab/landlab, MIT) ---
#!/usr/bin/env python
import numpy as np
import scipy.special
_POISSON = 0.25
_N_PROCS = 4
def get_flexure_parameter(h, E, n_dim, gamma_mantle=33000.0):
"""
Calculate the flexure parameter based on some physical constants. *h* is
the Effective elastic thickness of Earth's crust (m), *E* is Young's
Modulus, and *n_dim* is the number of spatial dimensions for which the
flexure parameter is used. The number of dimension must be either 1, or
2.
Examples
--------
>>> from landlab.components.flexure import get_flexure_parameter
>>> eet = 65000.
>>> youngs = 7e10
>>> alpha = get_flexure_parameter(eet, youngs, 1)
>>> print('%.3f' % round(alpha, 3))
119965.926
>>> alpha = get_flexure_parameter(eet, youngs, 2)
>>> print('%.2f' % alpha)
84828.72
"""
D = E * pow(h, 3) / 12.0 / (1.0 - pow(_POISSON, 2))
if n_dim not in (1, 2):
raise ValueError("n_dim must be either 1 or 2")
if n_dim == 2:
alpha = pow(D / gamma_mantle, 0.25)
else:
alpha = pow(4.0 * D / gamma_mantle, 0.25)
return alpha
def _calculate_distances(locs, coords):
r = pow(coords[0][:, np.newaxis] - locs[0], 2)
r += pow(coords[1][:, np.newaxis] - locs[1], 2)
return np.sqrt(r, out=r)
def _calculate_deflections(load, locs, coords, alpha, out=None, gamma_mantle=33000.0):
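    # Superpose the point-load plate-flexure solution
    # w(r) = -P / (2 * pi * gamma * alpha^2) * kei(r / alpha) over all loads.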
c = -load / (2.0 * np.pi * gamma_mantle * pow(alpha, 2.0))
r = _calculate_distances(locs, coords) / alpha
scipy.special.kei(r, out=r)
np.multiply(r, c[np.newaxis, :], out=r)
return np.sum(r, axis=1, out=out)
def subside_point_load(load, loc, coords, params=None, out=None):
"""Calculate deflection at points due a point load.
Calculate deflections on a grid, defined by the points in the *coords*
tuple, due to a point load of magnitude *load* applied at *loc*.
*x* and *y* are the x and y coordinates of each node of the solution
grid (in meters). The scalars *eet* and *youngs* define the crustal
properties.
Parameters
----------
load : float
Magnitude of the point load.
loc : float or tuple
Location of the load as either a scalar or as (*x*, *y*)
coords : ndarray
Array of points to calculate deflections at
params : dict-like
Physical parameters used for deflection calculation. Valid keys are
- *eet*: Effective elastic thickness
- *youngs*: Young's modulus
out : ndarray, optional
Array to put deflections into.
Returns
-------
out : ndarray
Array of deflections.
Examples
--------
>>> from landlab.components.flexure import subside_point_load
>>> params = dict(eet=65000., youngs=7e10)
>>> load = 1e9
    Define a uniform rectilinear grid.
    >>> import numpy as np
    >>> x = np.arange(0, 10000, 100.)
>>> y = np.arange(0, 5000, 100.)
>>> (x, y) = np.meshgrid(x, y)
>>> x.shape = (x.size, )
>>> y.shape = (y.size, )
Calculate deflections due to a load applied at position (5000., 2500.).
>>> x = np.arange(0, 10000, 1000.)
>>> y = np.arange(0, 5000, 1000.)
>>> (x, y) = np.meshgrid(x, y)
>>> x.shape = (x.size, )
>>> y.shape = (y.size, )
>>> dz = subside_point_load(load, (5000., 2500.), (x, y), params=params)
>>> print('%.5g' % round(dz.sum(), 9))
2.6267e-05
>>> print(round(dz.min(), 9))
5.24e-07
>>> print(round(dz.max(), 9))
5.26e-07
>>> dz = subside_point_load((1e9, 1e9), ((5000., 5000.), (2500., 2500.)),
... (x, y), params=params)
>>> print(round(dz.min(), 9) / 2.)
5.235e-07
>>> print(round(dz.max(), 9) / 2.)
5.265e-07
"""
params = params or {"eet": 6500.0, "youngs": 7.0e10}
eet, youngs = params["eet"], params["youngs"]
gamma_mantle = params.get("gamma_mantle", 33000.0)
load = np.asarray(load).reshape((-1,))
loc = np.asarray(loc).reshape((-1, len(load)))
coords = np.asarray(coords)
if coords.ndim == 1:
coords = np.expand_dims(coords, axis=0)
n_dim = len(loc)
if n_dim not in (1, 2):
raise ValueError("number of dimension must be 1 or 2")
if len(coords) != n_dim:
raise ValueError("number of dimensions in coordinates doesn't match loc")
if out is None:
out = np.empty(coords[0].size, dtype=float)
alpha = get_flexure_parameter(eet, youngs, n_dim, gamma_mantle=gamma_mantle)
if n_dim == 2:
_calculate_deflections(
load, loc, coords, alpha, out=out, gamma_mantle=gamma_mantle
)
else:
x, x0 = np.meshgrid(loc[0], coords[0])
c = load / (2.0 * alpha * gamma_mantle)
r = abs(x - x0) / alpha
out[:] = (c * np.exp(-r) * (np.cos(r) + np.sin(r))).sum(axis=1)
return out
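# Illustrative usage sketch (added here, not part of the original module):
# compute the deflection profile along a 1D transect for a single point
# load. The load, grid spacing, eet and youngs values below are arbitrary
# example inputs, not values from the original file.
if __name__ == "__main__":
    x = np.arange(0.0, 200e3, 1e3)
    dz = subside_point_load(
        1e9, (100e3,), (x,), params={"eet": 65000.0, "youngs": 7e10}
    )
    print("max deflection: %g m" % dz.max())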
|
01f17d99711027f73756df75f510bd400f297308
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/Spacy/source2.7/spacy/tests/regression/test_issue118.py
|
b4e1f02b2787a2814dde46393597b774033323e9
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,820
|
py
|
test_issue118.py
|
# coding: utf-8
from __future__ import unicode_literals
from ...matcher import Matcher
import pytest
pattern1 = [[{'LOWER': 'celtics'}], [{'LOWER': 'boston'}, {'LOWER': 'celtics'}]]
pattern2 = [[{'LOWER': 'boston'}, {'LOWER': 'celtics'}], [{'LOWER': 'celtics'}]]
pattern3 = [[{'LOWER': 'boston'}], [{'LOWER': 'boston'}, {'LOWER': 'celtics'}]]
pattern4 = [[{'LOWER': 'boston'}, {'LOWER': 'celtics'}], [{'LOWER': 'boston'}]]
@pytest.fixture
def doc(en_tokenizer):
text = "how many points did lebron james score against the boston celtics last night"
doc = en_tokenizer(text)
return doc
@pytest.mark.parametrize('pattern', [pattern1, pattern2])
def test_issue118(doc, pattern):
"""Test a bug that arose from having overlapping matches"""
ORG = doc.vocab.strings['ORG']
matcher = Matcher(doc.vocab)
matcher.add("BostonCeltics", None, *pattern)
assert len(list(doc.ents)) == 0
matches = [(ORG, start, end) for _, start, end in matcher(doc)]
assert matches == [(ORG, 9, 11), (ORG, 10, 11)]
doc.ents = matches[:1]
ents = list(doc.ents)
assert len(ents) == 1
assert ents[0].label == ORG
assert ents[0].start == 9
assert ents[0].end == 11
@pytest.mark.parametrize('pattern', [pattern3, pattern4])
def test_issue118_prefix_reorder(doc, pattern):
"""Test a bug that arose from having overlapping matches"""
ORG = doc.vocab.strings['ORG']
matcher = Matcher(doc.vocab)
matcher.add('BostonCeltics', None, *pattern)
assert len(list(doc.ents)) == 0
matches = [(ORG, start, end) for _, start, end in matcher(doc)]
doc.ents += tuple(matches)[1:]
assert matches == [(ORG, 9, 10), (ORG, 9, 11)]
ents = doc.ents
assert len(ents) == 1
assert ents[0].label == ORG
assert ents[0].start == 9
assert ents[0].end == 11
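# Note (added for context, not part of the original test file): the
# `matcher.add(name, None, *patterns)` signature used above is the old
# spaCy 1.x/2.x API, where the second argument is an optional on_match
# callback; in spaCy 3.x the equivalent call is `matcher.add(name, patterns)`.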
|
e25adfa7df9301833fe032517384c5d6637f73d7
|
c3493b2d99ea73b71d6a930482a906c11432c568
|
/muspy/schemas/utils.py
|
8c70def6a62680d2eb71579cc57ecdfa7c4a2c36
|
[
"MIT"
] |
permissive
|
salu133445/muspy
|
bdf1a1cc2d28e5fc8423ec7fe2e467fb0b67122c
|
b2d4265c6279e730903d8abe9dddda8484511903
|
refs/heads/main
| 2023-07-08T09:29:22.413086
| 2023-06-26T08:08:22
| 2023-06-26T08:08:22
| 247,167,654
| 380
| 54
|
MIT
| 2023-01-02T20:20:21
| 2020-03-13T21:53:38
|
Python
|
UTF-8
|
Python
| false
| false
| 2,526
|
py
|
utils.py
|
"""Utility functions for working with schemas."""
import json
from pathlib import Path
from typing import Union
try:
import jsonschema
_HAS_JSONSCHEMA = True
except ImportError:
_HAS_JSONSCHEMA = False
try:
import xmlschema
_HAS_XMLSCHEMA = True
except ImportError:
_HAS_XMLSCHEMA = False
try:
import yamale
_HAS_YAMALE = True
except ImportError:
_HAS_YAMALE = False
def get_json_schema_path() -> str:
"""Return the path to the JSON schema."""
return str(Path(__file__).resolve().parent / "music.schema.json")
def get_yaml_schema_path() -> str:
"""Return the path to the YAML schema."""
return str(Path(__file__).resolve().parent / "music.schema.yaml")
def get_musicxml_schema_path() -> str:
"""Return the path to the MusicXML schema."""
return str(Path(__file__).resolve().parent / "musicxml.xsd")
def validate_json(path: Union[str, Path]):
"""Validate a file against the JSON schema.
Parameters
----------
path : str or Path
Path to the file to validate.
"""
if not _HAS_JSONSCHEMA:
raise RuntimeError(
"The jsonschema library is required for JSON schema validation. "
"You could install it by `pip install muspy[schema]`."
)
with open(str(path), encoding="utf-8") as f:
data = json.load(f)
with open(str(get_json_schema_path()), encoding="utf-8") as f:
schema = json.load(f)
jsonschema.validate(data, schema)
def validate_yaml(path: Union[str, Path]):
"""Validate a file against the YAML schema.
Parameters
----------
path : str or Path
Path to the file to validate.
"""
if not _HAS_YAMALE:
raise RuntimeError(
"The Yamale library is required for YAML schema validation. "
"You could install it by `pip install muspy[schema]`."
)
data = yamale.make_data(str(path))
schema = yamale.make_schema(str(get_yaml_schema_path()))
yamale.validate(schema, data)
def validate_musicxml(path: Union[str, Path]):
"""Validate a file against the MusicXML schema.
Parameters
----------
path : str or Path
Path to the file to validate.
"""
if not _HAS_XMLSCHEMA:
raise RuntimeError(
"The xmlschema library is required for MusicXML schema "
"validation. You could install it by `pip install muspy[schema]`."
)
schema = xmlschema.XMLSchema(get_musicxml_schema_path())
schema.validate(str(path))
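# Illustrative usage sketch (added here, not part of the original file):
# validating a saved file with the helpers above. "song.json" is a
# hypothetical path; validate_json raises jsonschema.ValidationError if the
# file does not conform to the schema.
if __name__ == "__main__":
    try:
        validate_json("song.json")
        print("song.json conforms to the MusPy JSON schema")
    except FileNotFoundError:
        print("song.json not found -- replace with a real MusPy JSON file")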
|
c7d490a0e8c4dab97c52557750e50b7602b17b46
|
0be19cd3a3ec44141f03dd523da6fb770b08f569
|
/src/detext/layers/feature_normalizer.py
|
8916618e225472d47d1be7386faf848807cee16c
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
linkedin/detext
|
f2cdace77d4b3c6cc88d3992a67dde305e2b8a52
|
671d43c5ffc83cae635174ed15c58d0bc84b76ef
|
refs/heads/master
| 2023-09-01T00:53:57.571516
| 2023-03-02T22:27:06
| 2023-03-02T22:27:06
| 234,432,813
| 1,289
| 151
|
BSD-2-Clause
| 2023-03-02T22:03:34
| 2020-01-16T23:38:58
|
Python
|
UTF-8
|
Python
| false
| false
| 807
|
py
|
feature_normalizer.py
|
import tensorflow as tf
class FeatureNormalizer(tf.keras.layers.Layer):
"""Feature normalizer to normalize dense features
This layer improves numeric stability and is useful for network convergence
"""
def __init__(self, ftr_mean, ftr_std):
super(FeatureNormalizer, self).__init__()
self.ftr_mean = tf.constant(ftr_mean, dtype=tf.dtypes.float32)
self.ftr_std = tf.constant(ftr_std, dtype=tf.dtypes.float32)
def call(self, inputs, **kwargs):
""" Normalizes inputs to (inputs - self.ftr_mean) / self.ftr_std
:param inputs: Tensor(tf.float32). Shape=[..., num_ftrs]
        :param kwargs: Dummy kwargs to suppress warnings about method overriding
:return: Normalized input
"""
return (inputs - self.ftr_mean) / self.ftr_std
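# Illustrative usage sketch (added here, not part of the original file):
# normalizing a small batch of dense features. The mean/std values are
# arbitrary examples; in practice they come from training-set statistics.
if __name__ == "__main__":
    normalizer = FeatureNormalizer(ftr_mean=[1.0, 10.0], ftr_std=[0.5, 2.0])
    features = tf.constant([[1.5, 12.0], [0.5, 8.0]])
    print(normalizer(features))  # -> [[1., 1.], [-1., -1.]]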
|
8abaaf452754f809d599fbdf37edb934a27fd938
|
b3950a2a6912c9b494d22b9353322c3357df0110
|
/tock/projects/migrations/0026_auto_20200519_1616.py
|
46b99eea5cb557c8c2bab50cd3f51032025e3545
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain",
"MIT"
] |
permissive
|
18F/tock
|
df1fa5e817e690ce0bff315a15799e2f78915882
|
99005d8f6c4605a69fbb620c41f38447ecbee459
|
refs/heads/main
| 2023-08-31T01:34:55.299577
| 2023-08-23T18:49:10
| 2023-08-23T18:49:10
| 30,162,008
| 135
| 50
|
NOASSERTION
| 2023-09-07T18:40:30
| 2015-02-01T22:19:32
|
Python
|
UTF-8
|
Python
| false
| false
| 1,198
|
py
|
0026_auto_20200519_1616.py
|
# Generated by Django 2.2.12 on 2020-05-19 20:16
from django.db import migrations
def populate_project_organizations(apps, schema_editor):
Project = apps.get_model('projects', 'Project')
Organization = apps.get_model('organizations', 'Organization')
    # assignment of existing projects to orgs based on project name, per
    # discussions w/ Matt Spencer (2020-05-20)
org_project_mapping = {
'18F': ['18F', 'TTS Acq'],
'CoE': ['CoE'],
'cloud.gov': ['cloud.gov'],
'Login.gov': ['Login.gov'],
'OA': ['TTS OA'],
'PIF': ['PIF']
}
    for org_name, project_name_starts in org_project_mapping.items():
        try:
            org = Organization.objects.filter(name=org_name)[0]
            for project_name_start in project_name_starts:
Project.objects.filter(name__istartswith=project_name_start).update(organization=org)
except IndexError:
pass
class Migration(migrations.Migration):
dependencies = [
('projects', '0025_auto_20200303_1821'),
('organizations', '0006_unit_initial_data')
]
operations = [
migrations.RunPython(populate_project_organizations),
]
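# Note (illustrative, not part of the original migration): as written, the
# RunPython operation has no reverse function, so this migration cannot be
# unapplied. A common pattern is to pass ``migrations.RunPython.noop`` as
# the reverse operation:
#
#     operations = [
#         migrations.RunPython(populate_project_organizations,
#                              migrations.RunPython.noop),
#     ]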
|
c93052f13af0ebae53673d956159025c2b67bc85
|
cb4f118412a55c52d720bc79e4074606622920ac
|
/tests/unit/paths/test_line_of_sight.py
|
cace5d6da4722e3e3de389b056392b0fa636ee35
|
[
"MIT"
] |
permissive
|
pythonarcade/arcade
|
3e536306f0c44f911de149b58958d8b609ffad4b
|
908664efc256697d3098a347f63d217d97841782
|
refs/heads/development
| 2023-08-29T02:53:01.599145
| 2023-08-26T16:54:34
| 2023-08-26T16:54:34
| 49,003,082
| 786
| 215
|
NOASSERTION
| 2023-09-12T18:38:54
| 2016-01-04T14:46:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,276
|
py
|
test_line_of_sight.py
|
import arcade
def test_line_of_sight(window):
player = arcade.Sprite(":resources:images/animated_characters/female_person/femalePerson_idle.png")
player.center_x = 0
player.center_y = 350
enemy = arcade.Sprite(":resources:images/animated_characters/female_person/femalePerson_idle.png")
enemy.center_x = 250
enemy.center_y = 350
    wall_list = arcade.SpriteList(use_spatial_hash=True)
    # No walls yet: line of sight is clear at the default and at a large
    # check distance
    result = arcade.has_line_of_sight(player.position, enemy.position, wall_list)
    assert result
    result = arcade.has_line_of_sight(player.position, enemy.position, wall_list, 2000)
    assert result
    # A 20-pixel check distance is shorter than the 250-pixel gap
    result = arcade.has_line_of_sight(player.position, enemy.position, wall_list, 20)
    assert not result
    wall = arcade.Sprite(":resources:images/tiles/grassCenter.png")
    wall.center_x = 0
    wall.center_y = 0
    wall_list.append(wall)
    # A wall at the origin is not between the sprites
    result = arcade.has_line_of_sight(player.position, enemy.position, wall_list)
    assert result
    # A wall moved directly between the sprites blocks the line of sight
    wall.center_x = 100
    wall.center_y = 350
    result = arcade.has_line_of_sight(player.position, enemy.position, wall_list)
    assert not result
    # A wall offset above the sight line no longer blocks it
    wall.center_x = 100
    wall.center_y = 450
    result = arcade.has_line_of_sight(player.position, enemy.position, wall_list)
    assert result
|
75844a58ed98cffab6dd7a3b99e62cd87efc1c26
|
749af8e81d5ccd2d8714a34434a9c77772df551b
|
/statsmodels/stats/oneway.py
|
04dbd1d97dafc793bc5927cf9bb1727cfce84615
|
[
"BSD-3-Clause"
] |
permissive
|
statsmodels/statsmodels
|
98ca67192c08bcc611ed3a75edaded2c7181ab98
|
01b19d7d111b29c183f620ff0a949ef6391ff8ee
|
refs/heads/main
| 2023-09-05T13:05:49.497076
| 2023-09-01T10:54:50
| 2023-09-01T10:54:50
| 1,885,237
| 8,666
| 3,023
|
BSD-3-Clause
| 2023-09-13T17:51:48
| 2011-06-12T17:04:50
|
Python
|
UTF-8
|
Python
| false
| false
| 45,809
|
py
|
oneway.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 18 10:33:38 2020
Author: Josef Perktold
License: BSD-3
"""
import numpy as np
from scipy import stats
from scipy.special import ncfdtrinc
# functions that use scipy.special instead of the boost-based functions in
# scipy.stats
from statsmodels.stats.power import ncf_cdf, ncf_ppf
from statsmodels.stats.robust_compare import TrimmedMean, scale_transform
from statsmodels.tools.testing import Holder
from statsmodels.stats.base import HolderTuple
def effectsize_oneway(means, vars_, nobs, use_var="unequal", ddof_between=0):
"""
    Effect size corresponding to squared Cohen's f = nc / nobs for oneway anova
This contains adjustment for Welch and Brown-Forsythe Anova so that
effect size can be used with FTestAnovaPower.
Parameters
----------
means : array_like
Mean of samples to be compared
vars_ : float or array_like
Residual (within) variance of each sample or pooled
If ``vars_`` is scalar, then it is interpreted as pooled variance that
is the same for all samples, ``use_var`` will be ignored.
Otherwise, the variances are used depending on the ``use_var`` keyword.
nobs : int or array_like
Number of observations for the samples.
If nobs is scalar, then it is assumed that all samples have the same
number ``nobs`` of observation, i.e. a balanced sample case.
Otherwise, statistics will be weighted corresponding to nobs.
Only relative sizes are relevant, any proportional change to nobs does
not change the effect size.
use_var : {"unequal", "equal", "bf"}
If ``use_var`` is "unequal", then the variances can differ across
samples and the effect size for Welch anova will be computed.
ddof_between : int
Degrees of freedom correction for the weighted between sum of squares.
The denominator is ``nobs_total - ddof_between``
This can be used to match differences across reference literature.
Returns
-------
f2 : float
Effect size corresponding to squared Cohen's f, which is also equal
to the noncentrality divided by total number of observations.
Notes
-----
This currently handles the following cases for oneway anova
- balanced sample with homoscedastic variances
- samples with different number of observations and with homoscedastic
variances
- samples with different number of observations and with heteroskedastic
variances. This corresponds to Welch anova
In the case of "unequal" and "bf" methods for unequal variances, the
effect sizes do not directly correspond to the test statistic in Anova.
Both have correction terms dropped or added, so the effect sizes match up
with using FTestAnovaPower.
If all variances are equal, then all three methods result in the same
effect size. If variances are unequal, then the three methods produce
small differences in effect size.
Note, the effect size and power computation for BF Anova was not found in
the literature. The correction terms were added so that FTestAnovaPower
provides a good approximation to the power.
Status: experimental
We might add additional returns, if those are needed to support power
and sample size applications.
Examples
--------
The following shows how to compute effect size and power for each of the
three anova methods. The null hypothesis is that the means are equal which
corresponds to a zero effect size. Under the alternative, means differ
with two sample means at a distance delta from the mean. We assume the
variance is the same under the null and alternative hypothesis.
``nobs`` for the samples defines the fraction of observations in the
samples. ``nobs`` in the power method defines the total sample size.
    In simulations, the computed power for standard anova,
    i.e. ``use_var="equal"``, overestimates the simulated power by a few
    percent. The equal variance assumption does not hold in this example.
>>> from statsmodels.stats.oneway import effectsize_oneway
>>> from statsmodels.stats.power import FTestAnovaPower
>>>
>>> nobs = np.array([10, 12, 13, 15])
>>> delta = 0.5
>>> means_alt = np.array([-1, 0, 0, 1]) * delta
>>> vars_ = np.arange(1, len(means_alt) + 1)
>>>
>>> f2_alt = effectsize_oneway(means_alt, vars_, nobs, use_var="equal")
>>> f2_alt
0.04581300813008131
>>>
>>> kwds = {'effect_size': np.sqrt(f2_alt), 'nobs': 100, 'alpha': 0.05,
... 'k_groups': 4}
>>> power = FTestAnovaPower().power(**kwds)
>>> power
0.39165892158983273
>>>
>>> f2_alt = effectsize_oneway(means_alt, vars_, nobs, use_var="unequal")
>>> f2_alt
0.060640138408304504
>>>
>>> kwds['effect_size'] = np.sqrt(f2_alt)
>>> power = FTestAnovaPower().power(**kwds)
>>> power
0.5047366512800622
>>>
>>> f2_alt = effectsize_oneway(means_alt, vars_, nobs, use_var="bf")
>>> f2_alt
0.04391324307956788
>>>
>>> kwds['effect_size'] = np.sqrt(f2_alt)
>>> power = FTestAnovaPower().power(**kwds)
>>> power
0.3765792117047725
"""
    # the code here is largely a copy of anova_generic with adjustments
means = np.asarray(means)
n_groups = means.shape[0]
if np.size(nobs) == 1:
nobs = np.ones(n_groups) * nobs
nobs_t = nobs.sum()
if use_var == "equal":
if np.size(vars_) == 1:
var_resid = vars_
else:
vars_ = np.asarray(vars_)
var_resid = ((nobs - 1) * vars_).sum() / (nobs_t - n_groups)
vars_ = var_resid # scalar, if broadcasting works
weights = nobs / vars_
w_total = weights.sum()
w_rel = weights / w_total
# meanw_t = (weights * means).sum() / w_total
meanw_t = w_rel @ means
f2 = np.dot(weights, (means - meanw_t)**2) / (nobs_t - ddof_between)
if use_var.lower() == "bf":
weights = nobs
w_total = weights.sum()
w_rel = weights / w_total
meanw_t = w_rel @ means
# TODO: reuse general case with weights
tmp = ((1. - nobs / nobs_t) * vars_).sum()
statistic = 1. * (nobs * (means - meanw_t)**2).sum()
statistic /= tmp
f2 = statistic * (1. - nobs / nobs_t).sum() / nobs_t
# correction factor for df_num in BFM
df_num2 = n_groups - 1
df_num = tmp**2 / ((vars_**2).sum() +
(nobs / nobs_t * vars_).sum()**2 -
2 * (nobs / nobs_t * vars_**2).sum())
f2 *= df_num / df_num2
return f2
def convert_effectsize_fsqu(f2=None, eta2=None):
"""Convert squared effect sizes in f family
f2 is signal to noise ratio, var_explained / var_residual
eta2 is proportion of explained variance, var_explained / var_total
uses the relationship:
f2 = eta2 / (1 - eta2)
Parameters
----------
f2 : None or float
Squared Cohen's F effect size. If f2 is not None, then eta2 will be
computed.
eta2 : None or float
Squared eta effect size. If f2 is None and eta2 is not None, then f2 is
computed.
Returns
-------
res : Holder instance
An instance of the Holder class with f2 and eta2 as attributes.
"""
if f2 is not None:
eta2 = 1 / (1 + 1 / f2)
elif eta2 is not None:
f2 = eta2 / (1 - eta2)
res = Holder(f2=f2, eta2=eta2)
return res
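# Illustrative check (added here, not part of the original module): the two
# representations round-trip through convert_effectsize_fsqu, e.g.
#     convert_effectsize_fsqu(f2=0.25).eta2   # 0.25 / 1.25 -> 0.2
#     convert_effectsize_fsqu(eta2=0.2).f2    # 0.2  / 0.8  -> 0.25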
def _fstat2effectsize(f_stat, df):
"""Compute anova effect size from F-statistic
This might be combined with convert_effectsize_fsqu
Parameters
----------
f_stat : array_like
Test statistic of an F-test
df : tuple
degrees of freedom ``df = (df1, df2)`` where
- df1 : numerator degrees of freedom, number of constraints
- df2 : denominator degrees of freedom, df_resid
Returns
-------
res : Holder instance
This instance contains effect size measures f2, eta2, omega2 and eps2
as attributes.
Notes
-----
This uses the following definitions:
- f2 = f_stat * df1 / df2
- eta2 = f2 / (f2 + 1)
- omega2 = (f2 - df1 / df2) / (f2 + 2)
- eps2 = (f2 - df1 / df2) / (f2 + 1)
This differs from effect size measures in other function which define
``f2 = f_stat * df1 / nobs``
or an equivalent expression for power computation. The noncentrality
index for the hypothesis test is in those cases given by
``nc = f_stat * df1``.
Currently omega2 and eps2 are computed in two different ways. Those
values agree for regular cases but can show different behavior in corner
cases (e.g. zero division).
"""
df1, df2 = df
f2 = f_stat * df1 / df2
eta2 = f2 / (f2 + 1)
omega2_ = (f_stat - 1) / (f_stat + (df2 + 1) / df1)
omega2 = (f2 - df1 / df2) / (f2 + 1 + 1 / df2) # rewrite
eps2_ = (f_stat - 1) / (f_stat + df2 / df1)
eps2 = (f2 - df1 / df2) / (f2 + 1) # rewrite
return Holder(f2=f2, eta2=eta2, omega2=omega2, eps2=eps2, eps2_=eps2_,
omega2_=omega2_)
# conversion functions for Wellek's equivalence effect size
# these are mainly to compare with literature
def wellek_to_f2(eps, n_groups):
"""Convert Wellek's effect size (sqrt) to Cohen's f-squared
This computes the following effect size :
f2 = 1 / n_groups * eps**2
Parameters
----------
eps : float or ndarray
Wellek's effect size used in anova equivalence test
n_groups : int
Number of groups in oneway comparison
Returns
-------
f2 : effect size Cohen's f-squared
"""
f2 = 1 / n_groups * eps**2
return f2
def f2_to_wellek(f2, n_groups):
"""Convert Cohen's f-squared to Wellek's effect size (sqrt)
This computes the following effect size :
eps = sqrt(n_groups * f2)
Parameters
----------
f2 : float or ndarray
Effect size Cohen's f-squared
n_groups : int
Number of groups in oneway comparison
Returns
-------
eps : float or ndarray
Wellek's effect size used in anova equivalence test
"""
eps = np.sqrt(n_groups * f2)
return eps
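# Illustrative check (added here, not part of the original module): the two
# conversions above invert each other, e.g.
#     f2_to_wellek(wellek_to_f2(0.5, n_groups=3), n_groups=3)  # -> 0.5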
def fstat_to_wellek(f_stat, n_groups, nobs_mean):
"""Convert F statistic to wellek's effect size eps squared
This computes the following effect size :
es = f_stat * (n_groups - 1) / nobs_mean
Parameters
----------
f_stat : float or ndarray
Test statistic of an F-test.
n_groups : int
Number of groups in oneway comparison
nobs_mean : float or ndarray
Average number of observations across groups.
Returns
-------
eps : float or ndarray
Wellek's effect size used in anova equivalence test
"""
es = f_stat * (n_groups - 1) / nobs_mean
return es
def confint_noncentrality(f_stat, df, alpha=0.05,
alternative="two-sided"):
"""
Confidence interval for noncentrality parameter in F-test
    This does not yet handle the non-negativity constraint on nc.
    Currently only the two-sided alternative is supported.
Parameters
----------
f_stat : float
df : tuple
degrees of freedom ``df = (df1, df2)`` where
- df1 : numerator degrees of freedom, number of constraints
- df2 : denominator degrees of freedom, df_resid
alpha : float, default 0.05
alternative : {"two-sided"}
Other alternatives have not been implements.
Returns
-------
    ci : ndarray
        The lower and upper endpoints of the confidence interval for the
        noncentrality parameter.
Notes
-----
The algorithm inverts the cdf of the noncentral F distribution with
respect to the noncentrality parameters.
See Steiger 2004 and references cited in it.
References
----------
.. [1] Steiger, James H. 2004. “Beyond the F Test: Effect Size Confidence
Intervals and Tests of Close Fit in the Analysis of Variance and
Contrast Analysis.” Psychological Methods 9 (2): 164–82.
https://doi.org/10.1037/1082-989X.9.2.164.
See Also
--------
confint_effectsize_oneway
"""
df1, df2 = df
if alternative in ["two-sided", "2s", "ts"]:
alpha1s = alpha / 2
ci = ncfdtrinc(df1, df2, [1 - alpha1s, alpha1s], f_stat)
else:
raise NotImplementedError
return ci
def confint_effectsize_oneway(f_stat, df, alpha=0.05, nobs=None):
"""
Confidence interval for effect size in oneway anova for F distribution
    This does not yet handle the non-negativity constraint on nc.
    Currently only the two-sided alternative is supported.
Parameters
----------
f_stat : float
df : tuple
degrees of freedom ``df = (df1, df2)`` where
- df1 : numerator degrees of freedom, number of constraints
- df2 : denominator degrees of freedom, df_resid
alpha : float, default 0.05
nobs : int, default None
Returns
-------
Holder
Class with effect size and confidence attributes
Notes
-----
The confidence interval for the noncentrality parameter is obtained by
inverting the cdf of the noncentral F distribution. Confidence intervals
for other effect sizes are computed by endpoint transformation.
R package ``effectsize`` does not compute the confidence intervals in the
same way. Their confidence intervals can be replicated with
    >>> ci_nc = confint_noncentrality(f_stat, (df1, df2), alpha=0.1)
    >>> ci_es = smo._fstat2effectsize(ci_nc / df1, (df1, df2))
See Also
--------
confint_noncentrality
"""
df1, df2 = df
if nobs is None:
nobs = df1 + df2 + 1
ci_nc = confint_noncentrality(f_stat, df, alpha=alpha)
ci_f2 = ci_nc / nobs
ci_res = convert_effectsize_fsqu(f2=ci_f2)
ci_res.ci_omega2 = (ci_f2 - df1 / df2) / (ci_f2 + 1 + 1 / df2)
ci_res.ci_nc = ci_nc
ci_res.ci_f = np.sqrt(ci_res.f2)
ci_res.ci_eta = np.sqrt(ci_res.eta2)
ci_res.ci_f_corrected = np.sqrt(ci_res.f2 * (df1 + 1) / df1)
return ci_res
def anova_generic(means, variances, nobs, use_var="unequal",
welch_correction=True, info=None):
"""
Oneway Anova based on summary statistics
Parameters
----------
means : array_like
Mean of samples to be compared
variances : float or array_like
Residual (within) variance of each sample or pooled.
If ``variances`` is scalar, then it is interpreted as pooled variance
that is the same for all samples, ``use_var`` will be ignored.
Otherwise, the variances are used depending on the ``use_var`` keyword.
nobs : int or array_like
Number of observations for the samples.
If nobs is scalar, then it is assumed that all samples have the same
number ``nobs`` of observation, i.e. a balanced sample case.
Otherwise, statistics will be weighted corresponding to nobs.
Only relative sizes are relevant, any proportional change to nobs does
not change the effect size.
use_var : {"unequal", "equal", "bf"}
If ``use_var`` is "unequal", then the variances can differ across
samples and the effect size for Welch anova will be computed.
welch_correction : bool
If this is false, then the Welch correction to the test statistic is
not included. This allows the computation of an effect size measure
that corresponds more closely to Cohen's f.
info : not used yet
Returns
-------
res : results instance
This includes `statistic` and `pvalue`.
"""
options = {"use_var": use_var,
"welch_correction": welch_correction
}
    means = np.asarray(means)
    variances = np.asarray(variances)
    nobs = np.asarray(nobs)
    if means.ndim != 1:
        raise ValueError('data (means, ...) has to be one-dimensional')
    nobs_t = nobs.sum()
n_groups = len(means)
# mean_t = (nobs * means).sum() / nobs_t
if use_var == "unequal":
weights = nobs / variances
else:
weights = nobs
w_total = weights.sum()
w_rel = weights / w_total
# meanw_t = (weights * means).sum() / w_total
meanw_t = w_rel @ means
statistic = np.dot(weights, (means - meanw_t)**2) / (n_groups - 1.)
df_num = n_groups - 1.
if use_var == "unequal":
tmp = ((1 - w_rel)**2 / (nobs - 1)).sum() / (n_groups**2 - 1)
if welch_correction:
statistic /= 1 + 2 * (n_groups - 2) * tmp
df_denom = 1. / (3. * tmp)
elif use_var == "equal":
# variance of group demeaned total sample, pooled var_resid
tmp = ((nobs - 1) * variances).sum() / (nobs_t - n_groups)
statistic /= tmp
df_denom = nobs_t - n_groups
elif use_var == "bf":
tmp = ((1. - nobs / nobs_t) * variances).sum()
statistic = 1. * (nobs * (means - meanw_t)**2).sum()
statistic /= tmp
df_num2 = n_groups - 1
df_denom = tmp**2 / ((1. - nobs / nobs_t) ** 2 *
variances ** 2 / (nobs - 1)).sum()
df_num = tmp**2 / ((variances ** 2).sum() +
(nobs / nobs_t * variances).sum() ** 2 -
2 * (nobs / nobs_t * variances ** 2).sum())
pval2 = stats.f.sf(statistic, df_num2, df_denom)
options["df2"] = (df_num2, df_denom)
options["df_num2"] = df_num2
options["pvalue2"] = pval2
else:
        raise ValueError('use_var has to be one of "unequal", "equal" or "bf"')
pval = stats.f.sf(statistic, df_num, df_denom)
res = HolderTuple(statistic=statistic,
pvalue=pval,
df=(df_num, df_denom),
df_num=df_num,
df_denom=df_denom,
nobs_t=nobs_t,
n_groups=n_groups,
means=means,
nobs=nobs,
vars_=variances,
**options
)
return res
def anova_oneway(data, groups=None, use_var="unequal", welch_correction=True,
trim_frac=0):
"""Oneway Anova
This implements standard anova, Welch and Brown-Forsythe, and trimmed
(Yuen) variants of those.
Parameters
----------
data : tuple of array_like or DataFrame or Series
Data for k independent samples, with k >= 2.
The data can be provided as a tuple or list of arrays or in long
format with outcome observations in ``data`` and group membership in
``groups``.
groups : ndarray or Series
If data is in long format, then groups is needed as indicator to which
group or sample and observations belongs.
use_var : {"unequal", "equal" or "bf"}
`use_var` specified how to treat heteroscedasticity, unequal variance,
across samples. Three approaches are available
"unequal" : Variances are not assumed to be equal across samples.
Heteroscedasticity is taken into account with Welch Anova and
Satterthwaite-Welch degrees of freedom.
This is the default.
"equal" : Variances are assumed to be equal across samples.
This is the standard Anova.
"bf: Variances are not assumed to be equal across samples.
The method is Browne-Forsythe (1971) for testing equality of means
with the corrected degrees of freedom by Merothra. The original BF
degrees of freedom are available as additional attributes in the
results instance, ``df_denom2`` and ``p_value2``.
welch_correction : bool
If this is false, then the Welch correction to the test statistic is
not included. This allows the computation of an effect size measure
that corresponds more closely to Cohen's f.
trim_frac : float in [0, 0.5)
Optional trimming for Anova with trimmed mean and winsorized variances.
With the default trim_frac equal to zero, the oneway Anova statistics
are computed without trimming. If `trim_frac` is larger than zero,
then the largest and smallest observations in each sample are trimmed.
The number of trimmed observations is the fraction of number of
observations in the sample truncated to the next lower integer.
`trim_frac` has to be smaller than 0.5, however, if the fraction is
so large that there are not enough observations left over, then `nan`
will be returned.
Returns
-------
res : results instance
The returned HolderTuple instance has the following main attributes
and some additional information in other attributes.
statistic : float
Test statistic for k-sample mean comparison which is approximately
F-distributed.
pvalue : float
If ``use_var="bf"``, then the p-value is based on corrected
degrees of freedom following Mehrotra 1997.
pvalue2 : float
This is the p-value based on degrees of freedom as in
Brown-Forsythe 1974 and is only available if ``use_var="bf"``.
        df = (df_num, df_denom) : tuple of floats
            Degrees of freedom for the F-distribution depend on ``use_var``.
            If ``use_var="bf"``, then `df_denom` is for Mehrotra p-values;
            `df_denom2` is available for Brown-Forsythe 1974 p-values.
            `df_num` is the same numerator degrees of freedom for both
            p-values.
Notes
-----
Welch's anova is correctly sized (not liberal or conservative) in smaller
samples if the distribution of the samples is not very far away from the
normal distribution. The test can become liberal if the data is strongly
skewed. Welch's Anova can also be correctly sized for discrete
    distributions with finite support, like Likert scale data.
The trimmed version is robust to many non-normal distributions, it stays
correctly sized in many cases, and is more powerful in some cases with
skewness or heavy tails.
Trimming is currently based on the integer part of ``nobs * trim_frac``.
The default might change to including fractional observations as in the
original articles by Yuen.
See Also
--------
anova_generic
References
----------
Brown, Morton B., and Alan B. Forsythe. 1974. “The Small Sample Behavior
of Some Statistics Which Test the Equality of Several Means.”
Technometrics 16 (1) (February 1): 129–132. doi:10.2307/1267501.
Mehrotra, Devan V. 1997. “Improving the Brown-Forsythe Solution to the
Generalized Behrens-Fisher Problem.” Communications in Statistics -
Simulation and Computation 26 (3): 1139–1145.
doi:10.1080/03610919708813431.
"""
if groups is not None:
uniques = np.unique(groups)
data = [data[groups == uni] for uni in uniques]
else:
# uniques = None # not used yet, add to info?
pass
args = list(map(np.asarray, data))
if any([x.ndim != 1 for x in args]):
raise ValueError('data arrays have to be one-dimensional')
nobs = np.array([len(x) for x in args], float)
# n_groups = len(args) # not used
# means = np.array([np.mean(x, axis=0) for x in args], float)
# vars_ = np.array([np.var(x, ddof=1, axis=0) for x in args], float)
if trim_frac == 0:
means = np.array([x.mean() for x in args])
vars_ = np.array([x.var(ddof=1) for x in args])
else:
tms = [TrimmedMean(x, trim_frac) for x in args]
means = np.array([tm.mean_trimmed for tm in tms])
# R doesn't use uncorrected var_winsorized
# vars_ = np.array([tm.var_winsorized for tm in tms])
vars_ = np.array([tm.var_winsorized * (tm.nobs - 1) /
(tm.nobs_reduced - 1) for tm in tms])
# nobs_original = nobs # store just in case
nobs = np.array([tm.nobs_reduced for tm in tms])
res = anova_generic(means, vars_, nobs, use_var=use_var,
welch_correction=welch_correction)
return res
def equivalence_oneway_generic(f_stat, n_groups, nobs, equiv_margin, df,
alpha=0.05, margin_type="f2"):
"""Equivalence test for oneway anova (Wellek and extensions)
    This is a helper function for when summary statistics are available.
Use `equivalence_oneway` instead.
The null hypothesis is that the means differ by more than `equiv_margin`
in the anova distance measure.
If the Null is rejected, then the data supports that means are equivalent,
i.e. within a given distance.
Parameters
----------
f_stat : float
F-statistic
n_groups : int
Number of groups in oneway comparison.
nobs : ndarray
Array of number of observations in groups.
equiv_margin : float
Equivalence margin in terms of effect size. Effect size can be chosen
with `margin_type`. default is squared Cohen's f.
df : tuple
degrees of freedom ``df = (df1, df2)`` where
- df1 : numerator degrees of freedom, number of constraints
- df2 : denominator degrees of freedom, df_resid
alpha : float in (0, 1)
Significance level for the hypothesis test.
margin_type : "f2" or "wellek"
Type of effect size used for equivalence margin.
Returns
-------
results : instance of HolderTuple class
The two main attributes are test statistic `statistic` and p-value
`pvalue`.
Notes
-----
Equivalence in this function is defined in terms of a squared distance
measure similar to Mahalanobis distance.
Alternative definitions for the oneway case are based on maximum difference
between pairs of means or similar pairwise distances.
The equivalence margin is used for the noncentrality parameter in the
noncentral F distribution for the test statistic. In samples with unequal
variances estimated using Welch or Brown-Forsythe Anova, the f-statistic
depends on the unequal variances and corrections to the test statistic.
This means that the equivalence margins are not fully comparable across
methods for treating unequal variances.
References
----------
Wellek, Stefan. 2010. Testing Statistical Hypotheses of Equivalence and
Noninferiority. 2nd ed. Boca Raton: CRC Press.
Cribbie, Robert A., Chantal A. Arpin-Cribbie, and Jamie A. Gruman. 2009.
“Tests of Equivalence for One-Way Independent Groups Designs.” The Journal
of Experimental Education 78 (1): 1–13.
https://doi.org/10.1080/00220970903224552.
Jan, Show-Li, and Gwowen Shieh. 2019. “On the Extended Welch Test for
Assessing Equivalence of Standardized Means.” Statistics in
Biopharmaceutical Research 0 (0): 1–8.
https://doi.org/10.1080/19466315.2019.1654915.
"""
nobs_t = nobs.sum()
nobs_mean = nobs_t / n_groups
if margin_type == "wellek":
nc_null = nobs_mean * equiv_margin**2
es = f_stat * (n_groups - 1) / nobs_mean
type_effectsize = "Wellek's psi_squared"
elif margin_type in ["f2", "fsqu", "fsquared"]:
nc_null = nobs_t * equiv_margin
es = f_stat / nobs_t
type_effectsize = "Cohen's f_squared"
else:
raise ValueError('`margin_type` should be "f2" or "wellek"')
crit_f = ncf_ppf(alpha, df[0], df[1], nc_null)
if margin_type == "wellek":
# TODO: do we need a sqrt
crit_es = crit_f * (n_groups - 1) / nobs_mean
elif margin_type in ["f2", "fsqu", "fsquared"]:
crit_es = crit_f / nobs_t
reject = (es < crit_es)
pv = ncf_cdf(f_stat, df[0], df[1], nc_null)
pwr = ncf_cdf(crit_f, df[0], df[1], 1e-13) # scipy, cannot be 0
res = HolderTuple(statistic=f_stat,
pvalue=pv,
effectsize=es, # match es type to margin_type
crit_f=crit_f,
crit_es=crit_es,
reject=reject,
power_zero=pwr,
df=df,
f_stat=f_stat,
type_effectsize=type_effectsize
)
return res
def equivalence_oneway(data, equiv_margin, groups=None, use_var="unequal",
welch_correction=True, trim_frac=0, margin_type="f2"):
"""equivalence test for oneway anova (Wellek's Anova)
The null hypothesis is that the means differ by more than `equiv_margin`
in the anova distance measure.
If the Null is rejected, then the data supports that means are equivalent,
i.e. within a given distance.
Parameters
----------
data : tuple of array_like or DataFrame or Series
Data for k independent samples, with k >= 2.
The data can be provided as a tuple or list of arrays or in long
format with outcome observations in ``data`` and group membership in
``groups``.
equiv_margin : float
Equivalence margin in terms of effect size. Effect size can be chosen
with `margin_type`. default is squared Cohen's f.
groups : ndarray or Series
If data is in long format, then groups is needed as indicator to which
group or sample and observations belongs.
use_var : {"unequal", "equal" or "bf"}
`use_var` specified how to treat heteroscedasticity, unequal variance,
across samples. Three approaches are available
"unequal" : Variances are not assumed to be equal across samples.
Heteroscedasticity is taken into account with Welch Anova and
Satterthwaite-Welch degrees of freedom.
This is the default.
"equal" : Variances are assumed to be equal across samples.
This is the standard Anova.
"bf: Variances are not assumed to be equal across samples.
The method is Browne-Forsythe (1971) for testing equality of means
with the corrected degrees of freedom by Merothra. The original BF
degrees of freedom are available as additional attributes in the
results instance, ``df_denom2`` and ``p_value2``.
welch_correction : bool
If this is false, then the Welch correction to the test statistic is
not included. This allows the computation of an effect size measure
that corresponds more closely to Cohen's f.
trim_frac : float in [0, 0.5)
Optional trimming for Anova with trimmed mean and winsorized variances.
With the default trim_frac equal to zero, the oneway Anova statistics
are computed without trimming. If `trim_frac` is larger than zero,
then the largest and smallest observations in each sample are trimmed.
The number of trimmed observations is the fraction of number of
observations in the sample truncated to the next lower integer.
`trim_frac` has to be smaller than 0.5, however, if the fraction is
so large that there are not enough observations left over, then `nan`
will be returned.
margin_type : "f2" or "wellek"
Type of effect size used for equivalence margin, either squared
Cohen's f or Wellek's psi. Default is "f2".
Returns
-------
results : instance of HolderTuple class
The two main attributes are test statistic `statistic` and p-value
`pvalue`.
See Also
--------
anova_oneway
equivalence_scale_oneway
"""
# use anova to compute summary statistics and f-statistic
res0 = anova_oneway(data, groups=groups, use_var=use_var,
welch_correction=welch_correction,
trim_frac=trim_frac)
f_stat = res0.statistic
res = equivalence_oneway_generic(f_stat, res0.n_groups, res0.nobs_t,
equiv_margin, res0.df, alpha=0.05,
margin_type=margin_type)
return res
def _power_equivalence_oneway_emp(f_stat, n_groups, nobs, eps, df, alpha=0.05):
"""Empirical power of oneway equivalence test
This only returns post-hoc, empirical power.
    Warning: eps is currently the effect size margin as defined in Wellek, and
    not the signal to noise ratio (Cohen's f family).
Parameters
----------
f_stat : float
F-statistic from oneway anova, used to compute empirical effect size
n_groups : int
Number of groups in oneway comparison.
nobs : ndarray
Array of number of observations in groups.
eps : float
Equivalence margin in terms of effect size given by Wellek's psi.
df : tuple
Degrees of freedom for F distribution.
alpha : float in (0, 1)
Significance level for the hypothesis test.
Returns
-------
pow : float
Ex-post, post-hoc or empirical power at f-statistic of the equivalence
test.
"""
res = equivalence_oneway_generic(f_stat, n_groups, nobs, eps, df,
alpha=alpha, margin_type="wellek")
nobs_mean = nobs.sum() / n_groups
fn = f_stat # post-hoc power, empirical power at estimate
esn = fn * (n_groups - 1) / nobs_mean # Wellek psi
pow_ = ncf_cdf(res.crit_f, df[0], df[1], nobs_mean * esn)
return pow_
def power_equivalence_oneway(f2_alt, equiv_margin, nobs_t, n_groups=None,
df=None, alpha=0.05, margin_type="f2"):
"""
Power of oneway equivalence test
Parameters
----------
f2_alt : float
Effect size, squared Cohen's f, under the alternative.
equiv_margin : float
Equivalence margin in terms of effect size. Effect size can be chosen
with `margin_type`. default is squared Cohen's f.
nobs_t : ndarray
Total number of observations summed over all groups.
n_groups : int
Number of groups in oneway comparison. If margin_type is "wellek",
then either ``n_groups`` or ``df`` has to be given.
df : tuple
Degrees of freedom for F distribution,
``df = (n_groups - 1, nobs_t - n_groups)``
alpha : float in (0, 1)
Significance level for the hypothesis test.
margin_type : "f2" or "wellek"
Type of effect size used for equivalence margin, either squared
Cohen's f or Wellek's psi. Default is "f2".
Returns
-------
pow_alt : float
Power of the equivalence test at given equivalence effect size under
the alternative.
"""
# one of n_groups or df has to be specified
if df is None:
if n_groups is None:
raise ValueError("either df or n_groups has to be provided")
df = (n_groups - 1, nobs_t - n_groups)
# esn = fn * (n_groups - 1) / nobs_mean # Wellek psi
# fix for scipy, ncf does not allow nc == 0, fixed in scipy master
if f2_alt == 0:
f2_alt = 1e-13
# effect size, critical value at margin
# f2_null = equiv_margin
if margin_type in ["f2", "fsqu", "fsquared"]:
f2_null = equiv_margin
elif margin_type == "wellek":
if n_groups is None:
raise ValueError("If margin_type is wellek, then n_groups has "
"to be provided")
# f2_null = (n_groups - 1) * n_groups / nobs_t * equiv_margin**2
nobs_mean = nobs_t / n_groups
f2_null = nobs_mean * equiv_margin**2 / nobs_t
f2_alt = nobs_mean * f2_alt**2 / nobs_t
else:
raise ValueError('`margin_type` should be "f2" or "wellek"')
crit_f_margin = ncf_ppf(alpha, df[0], df[1], nobs_t * f2_null)
pwr_alt = ncf_cdf(crit_f_margin, df[0], df[1], nobs_t * f2_alt)
return pwr_alt
def simulate_power_equivalence_oneway(means, nobs, equiv_margin, vars_=None,
k_mc=1000, trim_frac=0,
options_var=None, margin_type="f2"
): # , anova_options=None): #TODO
"""Simulate Power for oneway equivalence test (Wellek's Anova)
This function is experimental and written to evaluate asymptotic power
function. This function will change without backwards compatibility
constraints. The only part that is stable is `pvalue` attribute in results.
Effect size for equivalence margin
"""
if options_var is None:
options_var = ["unequal", "equal", "bf"]
if vars_ is not None:
stds = np.sqrt(vars_)
else:
stds = np.ones(len(means))
nobs_mean = nobs.mean()
n_groups = len(nobs)
res_mc = []
f_mc = []
reject_mc = []
other_mc = []
    for _ in range(k_mc):
        # note: the tuple unpacking below assumes exactly four groups
        y0, y1, y2, y3 = [m + std * np.random.randn(n)
                          for (n, m, std) in zip(nobs, means, stds)]
res_i = []
f_i = []
reject_i = []
other_i = []
for uv in options_var:
# for welch in options_welch:
# res1 = sma.anova_generic(means, vars_, nobs, use_var=uv,
# welch_correction=welch)
res0 = anova_oneway([y0, y1, y2, y3], use_var=uv,
trim_frac=trim_frac)
f_stat = res0.statistic
res1 = equivalence_oneway_generic(f_stat, n_groups, nobs.sum(),
equiv_margin, res0.df,
alpha=0.05,
margin_type=margin_type)
res_i.append(res1.pvalue)
es_wellek = f_stat * (n_groups - 1) / nobs_mean
f_i.append(es_wellek)
reject_i.append(res1.reject)
other_i.extend([res1.crit_f, res1.crit_es, res1.power_zero])
res_mc.append(res_i)
f_mc.append(f_i)
reject_mc.append(reject_i)
other_mc.append(other_i)
f_mc = np.asarray(f_mc)
other_mc = np.asarray(other_mc)
res_mc = np.asarray(res_mc)
reject_mc = np.asarray(reject_mc)
res = Holder(f_stat=f_mc,
other=other_mc,
pvalue=res_mc,
reject=reject_mc
)
return res
def test_scale_oneway(data, method="bf", center="median", transform="abs",
trim_frac_mean=0.1, trim_frac_anova=0.0):
"""Oneway Anova test for equal scale, variance or dispersion
This hypothesis test performs a oneway anova test on transformed data and
includes Levene and Brown-Forsythe tests for equal variances as special
cases.
Parameters
----------
data : tuple of array_like or DataFrame or Series
Data for k independent samples, with k >= 2. The data can be provided
as a tuple or list of arrays or in long format with outcome
observations in ``data`` and group membership in ``groups``.
method : {"unequal", "equal" or "bf"}
How to treat heteroscedasticity across samples. This is used as
`use_var` option in `anova_oneway` and refers to the variance of the
transformed data, i.e. assumption is on 4th moment if squares are used
as transform.
Three approaches are available:
"unequal" : Variances are not assumed to be equal across samples.
Heteroscedasticity is taken into account with Welch Anova and
Satterthwaite-Welch degrees of freedom.
This is the default.
"equal" : Variances are assumed to be equal across samples.
This is the standard Anova.
"bf" : Variances are not assumed to be equal across samples.
The method is Browne-Forsythe (1971) for testing equality of means
with the corrected degrees of freedom by Merothra. The original BF
degrees of freedom are available as additional attributes in the
results instance, ``df_denom2`` and ``p_value2``.
center : "median", "mean", "trimmed" or float
Statistic used for centering observations. If a float, then this
value is used to center. Default is median.
transform : "abs", "square" or callable
Transformation for the centered observations. If a callable, then this
function is called on the centered data.
Default is absolute value.
    trim_frac_mean : float in [0, 0.5), default 0.1
Trim fraction for the trimmed mean when `center` is "trimmed"
trim_frac_anova : float in [0, 0.5)
Optional trimming for Anova with trimmed mean and Winsorized variances.
With the default trim_frac equal to zero, the oneway Anova statistics
are computed without trimming. If `trim_frac` is larger than zero,
then the largest and smallest observations in each sample are trimmed.
see ``trim_frac`` option in `anova_oneway`
Returns
-------
res : results instance
The returned HolderTuple instance has the following main attributes
and some additional information in other attributes.
statistic : float
Test statistic for k-sample mean comparison which is approximately
F-distributed.
pvalue : float
If ``method="bf"``, then the p-value is based on corrected
degrees of freedom following Mehrotra 1997.
pvalue2 : float
This is the p-value based on degrees of freedom as in
Brown-Forsythe 1974 and is only available if ``method="bf"``.
        df : (df_num, df_denom)
            Tuple containing degrees of freedom for the F-distribution; these
            depend on ``method``. If ``method="bf"``, then `df_denom` is for
            Mehrotra p-values; `df_denom2` is available for Brown-Forsythe
            1974 p-values.
`df_num` is the same numerator degrees of freedom for both
p-values.
See Also
--------
anova_oneway
scale_transform
"""
data = map(np.asarray, data)
xxd = [scale_transform(x, center=center, transform=transform,
trim_frac=trim_frac_mean) for x in data]
res = anova_oneway(xxd, groups=None, use_var=method,
welch_correction=True, trim_frac=trim_frac_anova)
res.data_transformed = xxd
return res
def equivalence_scale_oneway(data, equiv_margin, method='bf', center='median',
transform='abs', trim_frac_mean=0.,
trim_frac_anova=0.):
"""Oneway Anova test for equivalence of scale, variance or dispersion
This hypothesis test performs a oneway equivalence anova test on
transformed data.
Note, the interpretation of the equivalence margin `equiv_margin` will
depend on the transformation of the data. Transformations like
absolute deviation are not scaled to correspond to the variance under
normal distribution.
Parameters
----------
data : tuple of array_like or DataFrame or Series
Data for k independent samples, with k >= 2. The data can be provided
as a tuple or list of arrays or in long format with outcome
observations in ``data`` and group membership in ``groups``.
equiv_margin : float
Equivalence margin in terms of effect size. Effect size can be chosen
with `margin_type`. default is squared Cohen's f.
method : {"unequal", "equal" or "bf"}
How to treat heteroscedasticity across samples. This is used as
`use_var` option in `anova_oneway` and refers to the variance of the
transformed data, i.e. assumption is on 4th moment if squares are used
as transform.
Three approaches are available:
"unequal" : Variances are not assumed to be equal across samples.
Heteroscedasticity is taken into account with Welch Anova and
Satterthwaite-Welch degrees of freedom.
This is the default.
"equal" : Variances are assumed to be equal across samples.
This is the standard Anova.
"bf" : Variances are not assumed to be equal across samples.
            The method is Brown-Forsythe (1974) for testing equality of means
            with the corrected degrees of freedom by Mehrotra. The original BF
degrees of freedom are available as additional attributes in the
results instance, ``df_denom2`` and ``p_value2``.
center : "median", "mean", "trimmed" or float
Statistic used for centering observations. If a float, then this
value is used to center. Default is median.
transform : "abs", "square" or callable
Transformation for the centered observations. If a callable, then this
function is called on the centered data.
Default is absolute value.
trim_frac_mean : float in [0, 0.5)
Trim fraction for the trimmed mean when `center` is "trimmed"
trim_frac_anova : float in [0, 0.5)
Optional trimming for Anova with trimmed mean and Winsorized variances.
With the default trim_frac equal to zero, the oneway Anova statistics
are computed without trimming. If `trim_frac` is larger than zero,
then the largest and smallest observations in each sample are trimmed.
see ``trim_frac`` option in `anova_oneway`
Returns
-------
results : instance of HolderTuple class
The two main attributes are test statistic `statistic` and p-value
`pvalue`.
See Also
--------
anova_oneway
scale_transform
equivalence_oneway
"""
data = map(np.asarray, data)
xxd = [scale_transform(x, center=center, transform=transform,
trim_frac=trim_frac_mean) for x in data]
res = equivalence_oneway(xxd, equiv_margin, use_var=method,
welch_correction=True, trim_frac=trim_frac_anova)
res.x_transformed = xxd
return res
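# Illustrative usage sketch (added here, not part of the original module):
# Welch anova on three simulated samples, followed by the matching
# equivalence test. All sample sizes, means and margins below are arbitrary
# example values.
if __name__ == "__main__":
    rng = np.random.default_rng(12345)
    samples = [rng.normal(0.0, 1.0, size=20),
               rng.normal(0.2, 1.5, size=25),
               rng.normal(0.5, 2.0, size=30)]
    res = anova_oneway(samples, use_var="unequal")
    print("Welch anova: F =", round(res.statistic, 4),
          "p =", round(res.pvalue, 4))
    res_equiv = equivalence_oneway(samples, equiv_margin=0.5)
    print("equivalence test p =", round(res_equiv.pvalue, 4))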
|
ddf2c34c96889dfee06a108ef609935d4670d972
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/KoubeiRetailWmsOutboundworkCreateModel.py
|
637db579824b5b3c8c1ad184527640eaac28de46
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 4,334
|
py
|
KoubeiRetailWmsOutboundworkCreateModel.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.OperateContext import OperateContext
from alipay.aop.api.domain.WorkDetail import WorkDetail
class KoubeiRetailWmsOutboundworkCreateModel(object):
def __init__(self):
self._ext_info = None
self._notice_order_id = None
self._operate_context = None
self._out_biz_no = None
self._remark = None
self._work_details = None
@property
def ext_info(self):
return self._ext_info
@ext_info.setter
def ext_info(self, value):
self._ext_info = value
@property
def notice_order_id(self):
return self._notice_order_id
@notice_order_id.setter
def notice_order_id(self, value):
self._notice_order_id = value
@property
def operate_context(self):
return self._operate_context
@operate_context.setter
def operate_context(self, value):
if isinstance(value, OperateContext):
self._operate_context = value
else:
self._operate_context = OperateContext.from_alipay_dict(value)
@property
def out_biz_no(self):
return self._out_biz_no
@out_biz_no.setter
def out_biz_no(self, value):
self._out_biz_no = value
@property
def remark(self):
return self._remark
@remark.setter
def remark(self, value):
self._remark = value
@property
def work_details(self):
return self._work_details
@work_details.setter
def work_details(self, value):
if isinstance(value, list):
self._work_details = list()
for i in value:
if isinstance(i, WorkDetail):
self._work_details.append(i)
else:
self._work_details.append(WorkDetail.from_alipay_dict(i))
def to_alipay_dict(self):
params = dict()
if self.ext_info:
if hasattr(self.ext_info, 'to_alipay_dict'):
params['ext_info'] = self.ext_info.to_alipay_dict()
else:
params['ext_info'] = self.ext_info
if self.notice_order_id:
if hasattr(self.notice_order_id, 'to_alipay_dict'):
params['notice_order_id'] = self.notice_order_id.to_alipay_dict()
else:
params['notice_order_id'] = self.notice_order_id
if self.operate_context:
if hasattr(self.operate_context, 'to_alipay_dict'):
params['operate_context'] = self.operate_context.to_alipay_dict()
else:
params['operate_context'] = self.operate_context
if self.out_biz_no:
if hasattr(self.out_biz_no, 'to_alipay_dict'):
params['out_biz_no'] = self.out_biz_no.to_alipay_dict()
else:
params['out_biz_no'] = self.out_biz_no
if self.remark:
if hasattr(self.remark, 'to_alipay_dict'):
params['remark'] = self.remark.to_alipay_dict()
else:
params['remark'] = self.remark
if self.work_details:
if isinstance(self.work_details, list):
for i in range(0, len(self.work_details)):
element = self.work_details[i]
if hasattr(element, 'to_alipay_dict'):
self.work_details[i] = element.to_alipay_dict()
if hasattr(self.work_details, 'to_alipay_dict'):
params['work_details'] = self.work_details.to_alipay_dict()
else:
params['work_details'] = self.work_details
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KoubeiRetailWmsOutboundworkCreateModel()
if 'ext_info' in d:
o.ext_info = d['ext_info']
if 'notice_order_id' in d:
o.notice_order_id = d['notice_order_id']
if 'operate_context' in d:
o.operate_context = d['operate_context']
if 'out_biz_no' in d:
o.out_biz_no = d['out_biz_no']
if 'remark' in d:
o.remark = d['remark']
if 'work_details' in d:
o.work_details = d['work_details']
return o
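# Illustrative round-trip sketch (added here, not part of the original
# file): build the model from a plain dict and serialize it back. The field
# values are made-up examples.
if __name__ == "__main__":
    payload = {
        "notice_order_id": "NO20200101",
        "out_biz_no": "BIZ-0001",
        "remark": "example outbound work order",
    }
    model = KoubeiRetailWmsOutboundworkCreateModel.from_alipay_dict(payload)
    print(model.to_alipay_dict())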
|
f9db37baddb8202e84dbdf87ae3f65d6aa319741
|
08a8c973eaa984b96be2306c325c4a0b3f997aec
|
/py3status/modules/dpms.py
|
98469ec2ad3dce2f63b676bff4970f083e697615
|
[] |
permissive
|
ultrabug/py3status
|
889ec6679b7aa7d886bc98d86fc4051c7529b469
|
7ada9276ee12fe80491768d60603f8c5e1dc0639
|
refs/heads/master
| 2023-08-24T02:40:10.865393
| 2023-07-29T15:51:42
| 2023-07-29T15:51:42
| 8,292,338
| 934
| 426
|
BSD-3-Clause
| 2023-09-10T09:21:17
| 2013-02-19T14:59:13
|
Python
|
UTF-8
|
Python
| false
| false
| 2,732
|
py
|
dpms.py
|
"""
Turn on and off DPMS and screen saver blanking.
Configuration parameters:
button_off: mouse button to turn off screen (default None)
button_toggle: mouse button to toggle DPMS (default 1)
cache_timeout: refresh interval for this module (default 15)
format: display format for this module (default '{icon}')
icon_off: show when DPMS is disabled (default 'DPMS')
icon_on: show when DPMS is enabled (default 'DPMS')
Format placeholders:
{icon} DPMS icon
Color options:
color_on: Enabled, defaults to color_good
color_off: Disabled, defaults to color_bad
@author Andre Doser <dosera AT tf.uni-freiburg.de>
SAMPLE OUTPUT
{'color': '#00FF00', 'full_text': 'DPMS'}
off
{'color': '#FF0000', 'full_text': 'DPMS'}
"""
class Py3status:
""" """
# available configuration parameters
button_off = None
button_toggle = 1
cache_timeout = 15
format = "{icon}"
icon_off = "DPMS"
icon_on = "DPMS"
class Meta:
deprecated = {
"rename": [
{
"param": "format_on",
"new": "icon_on",
"msg": "obsolete parameter use `icon_on`",
},
{
"param": "format_off",
"new": "icon_off",
"msg": "obsolete parameter use `icon_off`",
},
]
}
def post_config_hook(self):
self.color_on = self.py3.COLOR_ON or self.py3.COLOR_GOOD
self.color_off = self.py3.COLOR_OFF or self.py3.COLOR_BAD
def dpms(self):
"""
Display a colorful state of DPMS.
"""
if "DPMS is Enabled" in self.py3.command_output("xset -q"):
_format = self.icon_on
color = self.color_on
else:
_format = self.icon_off
color = self.color_off
icon = self.py3.safe_format(_format)
return {
"cached_until": self.py3.time_in(self.cache_timeout),
"full_text": self.py3.safe_format(self.format, {"icon": icon}),
"color": color,
}
def on_click(self, event):
"""
Control DPMS with mouse clicks.
"""
if event["button"] == self.button_toggle:
if "DPMS is Enabled" in self.py3.command_output("xset -q"):
self.py3.command_run("xset -dpms s off")
else:
self.py3.command_run("xset +dpms s on")
if event["button"] == self.button_off:
self.py3.command_run("xset dpms force off")
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
|
b20d83cec8bfd0d638b02eb85faad65419a6b5cc
|
749af8e81d5ccd2d8714a34434a9c77772df551b
|
/statsmodels/stats/tests/test_nonparametric.py
|
5d874b43744e2623902601b10cbf4956d27e2485
|
[
"BSD-3-Clause"
] |
permissive
|
statsmodels/statsmodels
|
98ca67192c08bcc611ed3a75edaded2c7181ab98
|
01b19d7d111b29c183f620ff0a949ef6391ff8ee
|
refs/heads/main
| 2023-09-05T13:05:49.497076
| 2023-09-01T10:54:50
| 2023-09-01T10:54:50
| 1,885,237
| 8,666
| 3,023
|
BSD-3-Clause
| 2023-09-13T17:51:48
| 2011-06-12T17:04:50
|
Python
|
UTF-8
|
Python
| false
| false
| 18,623
|
py
|
test_nonparametric.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 05 14:05:24 2013
Aug 15 2020: add brunnermunzel, rank_compare_2indep
Author: Josef Perktold
"""
from statsmodels.compat.python import lzip
import numpy as np
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_approx_equal, assert_)
from scipy import stats
import pytest
from statsmodels.stats.contingency_tables import (
mcnemar, cochrans_q, SquareTable)
from statsmodels.sandbox.stats.runs import (Runs,
runstest_1samp, runstest_2samp)
from statsmodels.sandbox.stats.runs import mcnemar as sbmcnemar
from statsmodels.stats.nonparametric import (
rank_compare_2indep, rank_compare_2ordinal, prob_larger_continuous,
cohensd2problarger)
from statsmodels.tools.testing import Holder
def _expand_table(table):
    '''Expand a 2-by-2 contingency table into individual observations.
    '''
return np.repeat([[1, 1], [1, 0], [0, 1], [0, 0]], table.ravel(), axis=0)
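# Illustrative sketch of the expansion above (input values assumed):
# _expand_table(np.array([[2, 1], [0, 3]])) repeats the four cell patterns
# by their counts, giving [[1, 1], [1, 1], [1, 0], [0, 0], [0, 0], [0, 0]].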
def test_mcnemar_exact():
f_obs1 = np.array([[101, 121], [59, 33]])
f_obs2 = np.array([[101, 70], [59, 33]])
f_obs3 = np.array([[101, 80], [59, 33]])
f_obs4 = np.array([[101, 30], [60, 33]])
f_obs5 = np.array([[101, 10], [30, 33]])
f_obs6 = np.array([[101, 10], [10, 33]])
    # Vassar College online computation
res1 = 0.000004
res2 = 0.378688
res3 = 0.089452
res4 = 0.00206
res5 = 0.002221
res6 = 1.
stat = mcnemar(f_obs1, exact=True)
assert_almost_equal([stat.statistic, stat.pvalue], [59, res1], decimal=6)
stat = mcnemar(f_obs2, exact=True)
assert_almost_equal([stat.statistic, stat.pvalue], [59, res2], decimal=6)
stat = mcnemar(f_obs3, exact=True)
assert_almost_equal([stat.statistic, stat.pvalue], [59, res3], decimal=6)
stat = mcnemar(f_obs4, exact=True)
assert_almost_equal([stat.statistic, stat.pvalue], [30, res4], decimal=6)
stat = mcnemar(f_obs5, exact=True)
assert_almost_equal([stat.statistic, stat.pvalue], [10, res5], decimal=6)
stat = mcnemar(f_obs6, exact=True)
assert_almost_equal([stat.statistic, stat.pvalue], [10, res6], decimal=6)
def test_mcnemar_chisquare():
f_obs1 = np.array([[101, 121], [59, 33]])
f_obs2 = np.array([[101, 70], [59, 33]])
f_obs3 = np.array([[101, 80], [59, 33]])
#> mcn = mcnemar.test(matrix(c(101, 121, 59, 33),nrow=2))
res1 = [2.067222e01, 5.450095e-06]
res2 = [0.7751938, 0.3786151]
res3 = [2.87769784, 0.08981434]
stat = mcnemar(f_obs1, exact=False)
assert_allclose([stat.statistic, stat.pvalue], res1, rtol=1e-6)
stat = mcnemar(f_obs2, exact=False)
assert_allclose([stat.statistic, stat.pvalue], res2, rtol=1e-6)
stat = mcnemar(f_obs3, exact=False)
assert_allclose([stat.statistic, stat.pvalue], res3, rtol=1e-6)
# test correction = False
res1 = [2.135556e01, 3.815136e-06]
res2 = [0.9379845, 0.3327967]
res3 = [3.17266187, 0.07488031]
res = mcnemar(f_obs1, exact=False, correction=False)
assert_allclose([res.statistic, res.pvalue], res1, rtol=1e-6)
res = mcnemar(f_obs2, exact=False, correction=False)
assert_allclose([res.statistic, res.pvalue], res2, rtol=1e-6)
res = mcnemar(f_obs3, exact=False, correction=False)
assert_allclose([res.statistic, res.pvalue], res3, rtol=1e-6)
def test_mcnemar_vectorized(reset_randomstate):
ttk = np.random.randint(5,15, size=(2,2,3))
with pytest.warns(FutureWarning):
res = sbmcnemar(ttk, exact=False)
with pytest.warns(FutureWarning):
res1 = lzip(*[sbmcnemar(ttk[:, :, i], exact=False) for i in range(3)])
assert_allclose(res, res1, rtol=1e-13)
with pytest.warns(FutureWarning):
res = sbmcnemar(ttk, exact=False, correction=False)
with pytest.warns(FutureWarning):
res1 = lzip(*[sbmcnemar(ttk[:, :, i], exact=False, correction=False)
for i in range(3)])
assert_allclose(res, res1, rtol=1e-13)
with pytest.warns(FutureWarning):
res = sbmcnemar(ttk, exact=True)
with pytest.warns(FutureWarning):
res1 = lzip(*[sbmcnemar(ttk[:, :, i], exact=True) for i in range(3)])
assert_allclose(res, res1, rtol=1e-13)
def test_symmetry_bowker():
table = np.array([0, 3, 4, 4, 2, 4, 1, 2, 4, 3, 5, 3, 0, 0, 2, 2, 3, 0, 0,
1, 5, 5, 5, 5, 5]).reshape(5, 5)
res = SquareTable(table, shift_zeros=False).symmetry()
mcnemar5_1 = dict(statistic=7.001587, pvalue=0.7252951, parameters=(10,),
distr='chi2')
assert_allclose([res.statistic, res.pvalue],
[mcnemar5_1['statistic'], mcnemar5_1['pvalue']],
rtol=1e-7)
res = SquareTable(1 + table, shift_zeros=False).symmetry()
mcnemar5_1b = dict(statistic=5.355988, pvalue=0.8661652, parameters=(10,),
distr='chi2')
assert_allclose([res.statistic, res.pvalue],
[mcnemar5_1b['statistic'], mcnemar5_1b['pvalue']],
rtol=1e-7)
table = np.array([2, 2, 3, 6, 2, 3, 4, 3, 6, 6, 6, 7, 1, 9, 6, 7, 1, 1, 9,
8, 0, 1, 8, 9, 4]).reshape(5, 5)
res = SquareTable(table, shift_zeros=False).symmetry()
mcnemar5_2 = dict(statistic=18.76432, pvalue=0.04336035, parameters=(10,),
distr='chi2')
assert_allclose([res.statistic, res.pvalue],
[mcnemar5_2['statistic'], mcnemar5_2['pvalue']],
rtol=1.5e-7)
res = SquareTable(1 + table, shift_zeros=False).symmetry()
mcnemar5_2b = dict(statistic=14.55256, pvalue=0.1492461, parameters=(10,),
distr='chi2')
assert_allclose([res.statistic, res.pvalue],
[mcnemar5_2b['statistic'], mcnemar5_2b['pvalue']],
rtol=1e-7)
def test_cochransq():
    # example from Dataplot docs; Conover, p. 253
#http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/cochran.htm
x = np.array([[1, 1, 1],
[1, 1, 1],
[0, 1, 0],
[1, 1, 0],
[0, 0, 0],
[1, 1, 1],
[1, 1, 1],
[1, 1, 0],
[0, 0, 1],
[0, 1, 0],
[1, 1, 1],
[1, 1, 1]])
res_qstat = 2.8
res_pvalue = 0.246597
res = cochrans_q(x)
assert_almost_equal([res.statistic, res.pvalue], [res_qstat, res_pvalue])
#equivalence of mcnemar and cochranq for 2 samples
a,b = x[:,:2].T
res = cochrans_q(x[:, :2])
with pytest.warns(FutureWarning):
assert_almost_equal(sbmcnemar(a, b, exact=False, correction=False),
[res.statistic, res.pvalue])
def test_cochransq2():
    # from an example found on the web; verifies Q = 13.286
data = np.array('''
0 0 0 1
0 0 0 1
0 0 0 1
1 1 1 1
1 0 0 1
0 1 0 1
1 0 0 1
0 0 0 1
0 1 0 0
0 0 0 0
1 0 0 1
0 0 1 1'''.split(), int).reshape(-1, 4)
res = cochrans_q(data)
assert_allclose([res.statistic, res.pvalue], [13.2857143, 0.00405776], rtol=1e-6)
def test_cochransq3():
# another example compared to SAS
# in frequency weight format
dt = [('A', 'S1'), ('B', 'S1'), ('C', 'S1'), ('count', int)]
dta = np.array([('F', 'F', 'F', 6),
('U', 'F', 'F', 2),
('F', 'F', 'U', 16),
('U', 'F', 'U', 4),
('F', 'U', 'F', 2),
('U', 'U', 'F', 6),
('F', 'U', 'U', 4),
('U', 'U', 'U', 6)], dt)
cases = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 1],
[0, 1, 0],
[1, 1, 0],
[0, 1, 1],
[1, 1, 1]])
count = np.array([ 6, 2, 16, 4, 2, 6, 4, 6])
data = np.repeat(cases, count, 0)
res = cochrans_q(data)
assert_allclose([res.statistic, res.pvalue], [8.4706, 0.0145], atol=5e-5)
def test_runstest(reset_randomstate):
    # comparison numbers from R, tseries, runs.test
    # currently only the two-sided test is used
x = np.array([1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1])
z_twosided = 1.386750
pvalue_twosided = 0.1655179
z_greater = 1.386750
pvalue_greater = 0.08275893
z_less = 1.386750
pvalue_less = 0.917241
#print Runs(x).runs_test(correction=False)
assert_almost_equal(np.array(Runs(x).runs_test(correction=False)),
[z_twosided, pvalue_twosided], decimal=6)
# compare with runstest_1samp which should have same indicator
assert_almost_equal(runstest_1samp(x, correction=False),
[z_twosided, pvalue_twosided], decimal=6)
x2 = x - 0.5 + np.random.uniform(-0.1, 0.1, size=len(x))
assert_almost_equal(runstest_1samp(x2, cutoff=0, correction=False),
[z_twosided, pvalue_twosided], decimal=6)
assert_almost_equal(runstest_1samp(x2, cutoff='mean', correction=False),
[z_twosided, pvalue_twosided], decimal=6)
assert_almost_equal(runstest_1samp(x2, cutoff=x2.mean(), correction=False),
[z_twosided, pvalue_twosided], decimal=6)
# check median
assert_almost_equal(runstest_1samp(x2, cutoff='median', correction=False),
runstest_1samp(x2, cutoff=np.median(x2), correction=False),
decimal=6)
def test_runstest_2sample():
    # regression test, checked with Monte Carlo and looks reasonable
x = [31.8, 32.8, 39.2, 36, 30, 34.5, 37.4]
y = [35.5, 27.6, 21.3, 24.8, 36.7, 30]
    y[-1] += 1e-6  # avoid a tie that would trigger a warning
groups = np.concatenate((np.zeros(len(x)), np.ones(len(y))))
res = runstest_2samp(x, y)
res1 = (0.022428065200812752, 0.98210649318649212)
assert_allclose(res, res1, rtol=1e-6)
# check as stacked array
res2 = runstest_2samp(x, y)
assert_allclose(res2, res, rtol=1e-6)
xy = np.concatenate((x, y))
res_1s = runstest_1samp(xy)
assert_allclose(res_1s, res1, rtol=1e-6)
# check cutoff
res2_1s = runstest_1samp(xy, xy.mean())
assert_allclose(res2_1s, res_1s, rtol=1e-6)
def test_brunnermunzel_one_sided():
# copied from scipy with adjustment
x = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1]
y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4]
significant = 13
    # reverse direction to match our definition
x, y = y, x
# Results are compared with R's lawstat package.
u1, p1 = rank_compare_2indep(x, y
).test_prob_superior(alternative='smaller')
u2, p2 = rank_compare_2indep(y, x
).test_prob_superior(alternative='larger')
u3, p3 = rank_compare_2indep(x, y
).test_prob_superior(alternative='larger')
u4, p4 = rank_compare_2indep(y, x
).test_prob_superior(alternative='smaller')
assert_approx_equal(p1, p2, significant=significant)
assert_approx_equal(p3, p4, significant=significant)
assert_(p1 != p3)
assert_approx_equal(u1, 3.1374674823029505,
significant=significant)
assert_approx_equal(u2, -3.1374674823029505,
significant=significant)
assert_approx_equal(u3, 3.1374674823029505,
significant=significant)
assert_approx_equal(u4, -3.1374674823029505,
significant=significant)
    # Note: the scipy and lawstat tails are reversed relative to our test statistic
assert_approx_equal(p3, 0.0028931043330757342,
significant=significant)
assert_approx_equal(p1, 0.99710689566692423,
significant=significant)
def test_brunnermunzel_two_sided():
# copied from scipy with adjustment
x = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1]
y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4]
significant = 13
    # reverse direction to match our definition
x, y = y, x
# Results are compared with R's lawstat package.
res1 = rank_compare_2indep(x, y)
u1, p1 = res1
t1 = res1.test_prob_superior(alternative='two-sided')
res2 = rank_compare_2indep(y, x)
u2, p2 = res2
t2 = res2.test_prob_superior(alternative='two-sided')
assert_approx_equal(p1, p2, significant=significant)
assert_approx_equal(u1, 3.1374674823029505,
significant=significant)
assert_approx_equal(u2, -3.1374674823029505,
significant=significant)
assert_approx_equal(p2, 0.0057862086661515377,
significant=significant)
assert_allclose(t1[0], u1, rtol=1e-13)
assert_allclose(t2[0], u2, rtol=1e-13)
assert_allclose(t1[1], p1, rtol=1e-13)
assert_allclose(t2[1], p2, rtol=1e-13)
def test_rank_compare_2indep1():
# Example from Munzel and Hauschke 2003
# data is given by counts, expand to observations
levels = [-2, -1, 0, 1, 2]
new = [24, 37, 21, 19, 6]
active = [11, 51, 22, 21, 7]
x1 = np.repeat(levels, new)
x2 = np.repeat(levels, active)
# using lawstat
# > brunner.munzel.test(xn, xa) #brunnermunzel.test(x, y)
res2_t = Holder(statistic=1.1757561456582,
df=204.2984239868,
pvalue=0.2410606649547,
ci=[0.4700629827705593, 0.6183882855872511],
prob=0.5442256341789052)
res = rank_compare_2indep(x1, x2, use_t=False)
assert_allclose(res.statistic, -res2_t.statistic, rtol=1e-13)
assert_allclose(res.prob1, 1 - res2_t.prob, rtol=1e-13)
assert_allclose(res.prob2, res2_t.prob, rtol=1e-13)
tt = res.test_prob_superior()
# TODO: return HolderTuple
# assert_allclose(tt.statistic, res2_t.statistic)
# TODO: check sign/direction in lawstat
assert_allclose(tt[0], -res2_t.statistic, rtol=1e-13)
ci = res.conf_int(alpha=0.05)
# we compare normal confint with t confint, lower rtol
assert_allclose(ci, 1 - np.array(res2_t.ci)[::-1], rtol=0.005)
# test consistency of test and confint
res_lb = res.test_prob_superior(value=ci[0])
assert_allclose(res_lb[1], 0.05, rtol=1e-13)
res_ub = res.test_prob_superior(value=ci[1])
assert_allclose(res_ub[1], 0.05, rtol=1e-13)
# test consistency of tost and confint
# lower margin is binding, alternative larger
res_tost = res.tost_prob_superior(ci[0], ci[1] * 1.05)
assert_allclose(res_tost.results_larger.pvalue, 0.025, rtol=1e-13)
assert_allclose(res_tost.pvalue, 0.025, rtol=1e-13)
# upper margin is binding, alternative smaller
res_tost = res.tost_prob_superior(ci[0] * 0.85, ci[1])
assert_allclose(res_tost.results_smaller.pvalue, 0.025, rtol=1e-13)
assert_allclose(res_tost.pvalue, 0.025, rtol=1e-13)
# use t-distribution
    # our ranking is defined as the reverse of lawstat's and the BM article's
    # reverse direction to match our definition
x1, x2 = x2, x1
res = rank_compare_2indep(x1, x2, use_t=True)
assert_allclose(res.statistic, res2_t.statistic, rtol=1e-13)
tt = res.test_prob_superior()
# TODO: return HolderTuple
# assert_allclose(tt.statistic, res2_t.statistic)
# TODO: check sign/direction in lawstat, reversed from ours
assert_allclose(tt[0], res2_t.statistic, rtol=1e-13)
assert_allclose(tt[1], res2_t.pvalue, rtol=1e-13)
assert_allclose(res.pvalue, res2_t.pvalue, rtol=1e-13)
assert_allclose(res.df, res2_t.df, rtol=1e-13)
ci = res.conf_int(alpha=0.05)
assert_allclose(ci, res2_t.ci, rtol=1e-11)
# test consistency of test and confint
res_lb = res.test_prob_superior(value=ci[0])
assert_allclose(res_lb[1], 0.05, rtol=1e-11)
res_ub = res.test_prob_superior(value=ci[1])
assert_allclose(res_ub[1], 0.05, rtol=1e-11)
# test consistency of tost and confint
# lower margin is binding, alternative larger
res_tost = res.tost_prob_superior(ci[0], ci[1] * 1.05)
assert_allclose(res_tost.results_larger.pvalue, 0.025, rtol=1e-10)
assert_allclose(res_tost.pvalue, 0.025, rtol=1e-10)
# upper margin is binding, alternative smaller
res_tost = res.tost_prob_superior(ci[0] * 0.85, ci[1])
assert_allclose(res_tost.results_smaller.pvalue, 0.025, rtol=1e-10)
assert_allclose(res_tost.pvalue, 0.025, rtol=1e-10)
# extras
# cohen's d
esd = res.effectsize_normal()
p = prob_larger_continuous(stats.norm(loc=esd), stats.norm)
# round trip
assert_allclose(p, res.prob1, rtol=1e-13)
# round trip with cohen's d
pc = cohensd2problarger(esd)
assert_allclose(pc, res.prob1, rtol=1e-13)
ci_tr = res.confint_lintransf(1, -1)
assert_allclose(ci_tr, 1 - np.array(res2_t.ci)[::-1], rtol=0.005)
def test_rank_compare_ord():
# compare ordinal count version with full version
# Example from Munzel and Hauschke 2003
# data is given by counts, expand to observations
levels = [-2, -1, 0, 1, 2]
new = [24, 37, 21, 19, 6]
active = [11, 51, 22, 21, 7]
x1 = np.repeat(levels, new)
x2 = np.repeat(levels, active)
for use_t in [False, True]:
res2 = rank_compare_2indep(x1, x2, use_t=use_t)
res1 = rank_compare_2ordinal(new, active, use_t=use_t)
assert_allclose(res2.prob1, res1.prob1, rtol=1e-13)
assert_allclose(res2.var_prob, res1.var_prob, rtol=1e-13)
s1 = str(res1.summary())
s2 = str(res2.summary())
assert s1 == s2
def test_rank_compare_vectorized():
np.random.seed(987126)
x1 = np.random.randint(0, 20, (50, 3))
x2 = np.random.randint(5, 25, (50, 3))
res = rank_compare_2indep(x1, x2)
tst = res.test_prob_superior(0.5)
tost = res.tost_prob_superior(0.4, 0.6)
# smoke test for summary
res.summary()
for i in range(3):
res_i = rank_compare_2indep(x1[:, i], x2[:, i])
assert_allclose(res.statistic[i], res_i.statistic, rtol=1e-14)
assert_allclose(res.pvalue[i], res_i.pvalue, rtol=1e-14)
assert_allclose(res.prob1[i], res_i.prob1, rtol=1e-14)
tst_i = res_i.test_prob_superior(0.5)
assert_allclose(tst.statistic[i], tst_i.statistic, rtol=1e-14)
assert_allclose(tst.pvalue[i], tst_i.pvalue, rtol=1e-14)
tost_i = res_i.tost_prob_superior(0.4, 0.6)
assert_allclose(tost.statistic[i], tost_i.statistic, rtol=1e-14)
assert_allclose(tost.pvalue[i], tost_i.pvalue, rtol=1e-14)
|
43bc76039058c2227cf7252559a0a48d22e21641
|
234c46d1249c9209f268417a19018afc12e378b4
|
/allennlp/data/token_indexers/elmo_indexer.py
|
5172d44c0cab046626fa540790103268d65c7d55
|
[
"Apache-2.0"
] |
permissive
|
allenai/allennlp
|
1f4bcddcb6f5ce60c7ef03a9a3cd6a38bdb987cf
|
80fb6061e568cb9d6ab5d45b661e86eb61b92c82
|
refs/heads/main
| 2023-07-07T11:43:33.781690
| 2022-11-22T00:42:46
| 2022-11-22T00:42:46
| 91,356,408
| 12,257
| 2,712
|
Apache-2.0
| 2022-11-22T00:42:47
| 2017-05-15T15:52:41
|
Python
|
UTF-8
|
Python
| false
| false
| 5,637
|
py
|
elmo_indexer.py
|
from typing import Dict, List
import torch
from allennlp.common.util import pad_sequence_to_length
from allennlp.data.tokenizers import Token
from allennlp.data.token_indexers.token_indexer import TokenIndexer, IndexedTokenList
from allennlp.data.vocabulary import Vocabulary
def _make_bos_eos(
character: int,
padding_character: int,
beginning_of_word_character: int,
end_of_word_character: int,
max_word_length: int,
):
char_ids = [padding_character] * max_word_length
char_ids[0] = beginning_of_word_character
char_ids[1] = character
char_ids[2] = end_of_word_character
return char_ids
class ELMoCharacterMapper:
"""
Maps individual tokens to sequences of character ids, compatible with ELMo.
    To be consistent with previously trained models, we include it here as a special case of
    the existing character indexers.
    Optional additional special tokens, with designated character ids, can be added via
    `tokens_to_add`.
"""
max_word_length = 50
# char ids 0-255 come from utf-8 encoding bytes
# assign 256-300 to special chars
beginning_of_sentence_character = 256 # <begin sentence>
end_of_sentence_character = 257 # <end sentence>
beginning_of_word_character = 258 # <begin word>
end_of_word_character = 259 # <end word>
padding_character = 260 # <padding>
beginning_of_sentence_characters = _make_bos_eos(
beginning_of_sentence_character,
padding_character,
beginning_of_word_character,
end_of_word_character,
max_word_length,
)
end_of_sentence_characters = _make_bos_eos(
end_of_sentence_character,
padding_character,
beginning_of_word_character,
end_of_word_character,
max_word_length,
)
bos_token = "<S>"
eos_token = "</S>"
def __init__(self, tokens_to_add: Dict[str, int] = None) -> None:
self.tokens_to_add = tokens_to_add or {}
def convert_word_to_char_ids(self, word: str) -> List[int]:
if word in self.tokens_to_add:
char_ids = [ELMoCharacterMapper.padding_character] * ELMoCharacterMapper.max_word_length
char_ids[0] = ELMoCharacterMapper.beginning_of_word_character
char_ids[1] = self.tokens_to_add[word]
char_ids[2] = ELMoCharacterMapper.end_of_word_character
elif word == ELMoCharacterMapper.bos_token:
char_ids = ELMoCharacterMapper.beginning_of_sentence_characters
elif word == ELMoCharacterMapper.eos_token:
char_ids = ELMoCharacterMapper.end_of_sentence_characters
else:
word_encoded = word.encode("utf-8", "ignore")[
: (ELMoCharacterMapper.max_word_length - 2)
]
char_ids = [ELMoCharacterMapper.padding_character] * ELMoCharacterMapper.max_word_length
char_ids[0] = ELMoCharacterMapper.beginning_of_word_character
for k, chr_id in enumerate(word_encoded, start=1):
char_ids[k] = chr_id
char_ids[len(word_encoded) + 1] = ELMoCharacterMapper.end_of_word_character
        # +1 so that id 0 stays reserved for masking/padding
return [c + 1 for c in char_ids]
def __eq__(self, other) -> bool:
if isinstance(self, other.__class__):
return self.__dict__ == other.__dict__
return NotImplemented
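# Illustrative sketch (values assumed, derived from the constants above):
#
#     mapper = ELMoCharacterMapper()
#     ids = mapper.convert_word_to_char_ids("hi")
#     # len(ids) == 50; after the +1 shift:
#     # ids[0] == 259 (beginning_of_word_character + 1)
#     # ids[1] == ord("h") + 1 == 105, ids[2] == ord("i") + 1 == 106
#     # ids[3] == 260 (end_of_word_character + 1)
#     # ids[4:] are all 261 (padding_character + 1)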
@TokenIndexer.register("elmo_characters")
class ELMoTokenCharactersIndexer(TokenIndexer):
"""
Convert a token to an array of character ids to compute ELMo representations.
Registered as a `TokenIndexer` with name "elmo_characters".
# Parameters
namespace : `str`, optional (default=`elmo_characters`)
tokens_to_add : `Dict[str, int]`, optional (default=`None`)
        If not None, provides a mapping of special tokens to character
        ids. When using pre-trained models, the character id must be
        less than 261, and we recommend using unused ids (e.g. 1-32).
token_min_padding_length : `int`, optional (default=`0`)
See :class:`TokenIndexer`.
"""
def __init__(
self,
namespace: str = "elmo_characters",
tokens_to_add: Dict[str, int] = None,
token_min_padding_length: int = 0,
) -> None:
super().__init__(token_min_padding_length)
self._namespace = namespace
self._mapper = ELMoCharacterMapper(tokens_to_add)
def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]):
pass
def get_empty_token_list(self) -> IndexedTokenList:
return {"elmo_tokens": []}
def tokens_to_indices(
self, tokens: List[Token], vocabulary: Vocabulary
) -> Dict[str, List[List[int]]]:
# TODO(brendanr): Retain the token to index mappings in the vocabulary and remove this
# https://github.com/allenai/allennlp/blob/main/allennlp/data/token_indexers/wordpiece_indexer.py#L113
return {
"elmo_tokens": [self._mapper.convert_word_to_char_ids(t.ensure_text()) for t in tokens]
}
def as_padded_tensor_dict(
self, tokens: IndexedTokenList, padding_lengths: Dict[str, int]
) -> Dict[str, torch.Tensor]:
# Overriding this method only because we need a different padding token than the default.
tensor_dict = {}
def padding_token():
return [0] * ELMoCharacterMapper.max_word_length
tensor_dict["elmo_tokens"] = torch.LongTensor(
pad_sequence_to_length(
tokens["elmo_tokens"], padding_lengths["elmo_tokens"], default_value=padding_token
)
)
return tensor_dict
|
16e0f33bf70258dab95b548c4554232a8ecfb1eb
|
56a77194fc0cd6087b0c2ca1fb6dc0de64b8a58a
|
/kratos/tests/test_eigen_solvers.py
|
4a9fef91c3a6a835c155656c54f460dea336baaa
|
[
"BSD-3-Clause"
] |
permissive
|
KratosMultiphysics/Kratos
|
82b902a2266625b25f17239b42da958611a4b9c5
|
366949ec4e3651702edc6ac3061d2988f10dd271
|
refs/heads/master
| 2023-08-30T20:31:37.818693
| 2023-08-30T18:01:01
| 2023-08-30T18:01:01
| 81,815,495
| 994
| 285
|
NOASSERTION
| 2023-09-14T13:22:43
| 2017-02-13T10:58:24
|
C++
|
UTF-8
|
Python
| false
| false
| 6,529
|
py
|
test_eigen_solvers.py
|
import KratosMultiphysics
import KratosMultiphysics.KratosUnittest as KratosUnittest
from KratosMultiphysics import eigen_solver_factory
import os
def GetFilePath(fileName):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), fileName)
class TestEigenSolvers(KratosUnittest.TestCase):
def _RunParametrized(self, my_params_string, eigen_value_estimated = "lowest" ):
all_settings = KratosMultiphysics.Parameters( my_params_string )
for i in range(all_settings["test_list"].size()):
settings = all_settings["test_list"][i]
self._auxiliary_test_function(settings, "auxiliar_files_for_python_unittest/sparse_matrix_files/A.mm", eigen_value_estimated)
def _auxiliary_test_function(self, settings, matrix_name="auxiliar_files_for_python_unittest/sparse_matrix_files/A.mm", eigen_value_estimated = "lowest"):
space = KratosMultiphysics.UblasSparseSpace()
        # read the stiffness matrix K from MatrixMarket format
K = KratosMultiphysics.CompressedMatrix()
KratosMultiphysics.ReadMatrixMarketMatrix(GetFilePath(matrix_name),K)
n = K.Size1()
        # build an identity "mass" matrix so the generalized eigenproblem
        # reduces to the standard eigenproblem for K
        M = KratosMultiphysics.CompressedMatrix(n, n)
        for i in range(n):
            M[i, i] = 1.0
# create result containers (they will be resized inside the solver)
eigenvalues = KratosMultiphysics.Vector(n)
eigenvectors = KratosMultiphysics.Matrix(n, 1)
# Construct the solver
eigen_solver = eigen_solver_factory.ConstructSolver(settings)
# Solve
eigen_solver.Solve(K, M, eigenvalues, eigenvectors)
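        # Solve() fills the containers with solutions of the generalized
        # eigenproblem K x = lambda * M x; with M the identity this is just
        # the spectrum of K, so the checks below target its lowest or
        # highest eigenvalue.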
eigenvalue = eigenvalues[0]
if (eigen_value_estimated == "lowest"):
self.assertLessEqual(abs(eigenvalue - 0.061463)/0.061463, 5.0e-3)
else:
self.assertLessEqual(abs(eigenvalue - 11.959)/11.959, 5.0e-3)
@KratosUnittest.skipIfApplicationsNotAvailable("LinearSolversApplication")
def test_lowest_power_in_core(self):
self._RunParametrized("""
{
"test_list" : [
{
"solver_type" : "power_iteration_eigenvalue_solver",
"max_iteration" : 10000,
"tolerance" : 1e-8,
"required_eigen_number" : 1,
"shifting_convergence" : 0.25,
"verbosity" : 0,
"linear_solver_settings" : {
"solver_type" : "LinearSolversApplication.sparse_lu",
"max_iteration" : 500,
"tolerance" : 1e-9,
"scaling" : false,
"verbosity" : 0
}
}
]
}
""")
@KratosUnittest.skipIfApplicationsNotAvailable("LinearSolversApplication")
def test_highest_power_in_core(self):
self._RunParametrized("""
{
"test_list" : [
{
"solver_type" : "power_iteration_highest_eigenvalue_solver",
"max_iteration" : 10000,
"tolerance" : 1e-8,
"required_eigen_number" : 1,
"shifting_convergence" : 0.25,
"verbosity" : 0,
"linear_solver_settings" : {
"solver_type" : "LinearSolversApplication.sparse_lu",
"max_iteration" : 500,
"tolerance" : 1e-9,
"scaling" : false,
"verbosity" : 0
}
}
]
}
""", "highest")
def test_rayleigh_in_core(self):
self._RunParametrized("""
{
"test_list" : [
{
"solver_type" : "rayleigh_quotient_iteration_eigenvalue_solver",
"max_iteration" : 10000,
"tolerance" : 1e-9,
"required_eigen_number" : 1,
"shifting_convergence" : 0.25,
"verbosity" : 0,
"linear_solver_settings" : {
"solver_type" : "skyline_lu_factorization",
"max_iteration" : 500,
"tolerance" : 1e-9,
"scaling" : false,
"verbosity" : 0
}
}
]
}
""")
@KratosUnittest.skipIfApplicationsNotAvailable("LinearSolversApplication")
def test_eigen_eigensystem_solver(self):
self._RunParametrized("""
{
"test_list" : [
{
"solver_type": "eigen_eigensystem",
"number_of_eigenvalues": 3,
"max_iteration": 1000,
"tolerance": 1e-8,
"echo_level": 1
}
]
}
""")
@KratosUnittest.skipIfApplicationsNotAvailable("LinearSolversApplication")
def test_FEAST_with_eigen_solver(self):
from KratosMultiphysics import LinearSolversApplication
if not LinearSolversApplication.HasFEAST():
self.skipTest("FEAST is not available")
self._RunParametrized("""
{
"test_list" : [
{
"solver_type": "feast",
"symmetric": true,
"e_min": 0.01,
"e_max": 0.20,
"subspace_size": 5
}
]
}
""")
if __name__ == '__main__':
KratosUnittest.main()
|
05708034c090080c3f660a9a96ef55b44d95a2af
|
c9c84c7fd52b3e8063411d705ae639094b1214ae
|
/pywsd/baseline.py
|
f839e59bcedadcc6b0cb1e9f68f6df7f38b6c80d
|
[
"MIT"
] |
permissive
|
alvations/pywsd
|
3a1d092da804ba4dba429bf3219098127d9b0998
|
42d85192bf54fec5657f84e69fb1d423adfaad95
|
refs/heads/master
| 2023-08-21T09:42:03.269268
| 2022-07-29T17:01:50
| 2022-07-29T17:01:50
| 15,606,309
| 614
| 139
|
MIT
| 2021-11-08T18:16:24
| 2014-01-03T09:48:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,712
|
py
|
baseline.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Python Word Sense Disambiguation (pyWSD): Baseline WSD
#
# Copyright (C) 2014-2020 alvations
# URL:
# For license information, see LICENSE.md
import random
from nltk.corpus import wordnet as wn
# seeded RNG so the "random" baseline is reproducible across runs
custom_random = random.Random(0)
def random_sense(ambiguous_word: str, pos=None) -> "wn.Synset":
"""
Returns a random sense.
:param ambiguous_word: String, a single word.
:param pos: String, one of 'a', 'r', 's', 'n', 'v', or None.
:return: A random Synset.
"""
if pos is None:
return custom_random.choice(wn.synsets(ambiguous_word))
else:
return custom_random.choice(wn.synsets(ambiguous_word, pos))
def first_sense(ambiguous_word: str, pos: str = None) -> "wn.Synset":
"""
Returns the first sense.
:param ambiguous_word: String, a single word.
:param pos: String, one of 'a', 'r', 's', 'n', 'v', or None.
:return: The first Synset in the wn.synsets(word) list.
"""
if pos is None:
return wn.synsets(ambiguous_word)[0]
else:
return wn.synsets(ambiguous_word, pos)[0]
def max_lemma_count(ambiguous_word: str) -> "wn.Synset":
"""
Returns the sense with the highest lemma_name count.
    max_lemma_count() can be treated as a rough gauge for the
    Most Frequent Sense (MFS) when no sense-annotated corpus is available.
    NOTE: The lemma counts come from the Brown Corpus.
:param ambiguous_word: String, a single word.
:return: The estimated most common Synset.
"""
sense2lemmacounts = {}
for i in wn.synsets(ambiguous_word, pos=None):
sense2lemmacounts[i] = sum(j.count() for j in i.lemmas())
return max(sense2lemmacounts, key=sense2lemmacounts.get)
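# Illustrative usage sketch (assumes the NLTK WordNet corpus is installed
# and the word is in WordNet; outputs depend on the local WordNet version):
#
#     print(random_sense("bank"))      # seeded, hence reproducible
#     print(first_sense("bank"))       # first-listed sense
#     print(max_lemma_count("bank"))   # sense with the highest lemma counts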
|
747f1bc7988e48c9951e3f91d9f843fca29085df
|
a9fdace9236af6c73133fd8dddb80843697efc7d
|
/catalyst/callbacks/metrics/r2_squared.py
|
08bf0c8cd07602b20f1867629ea14a54dd90ac02
|
[
"Apache-2.0"
] |
permissive
|
catalyst-team/catalyst
|
026c38f26dad471cd77347adbc13423b156a5d8b
|
e99f90655d0efcf22559a46e928f0f98c9807ebf
|
refs/heads/master
| 2023-08-26T23:12:49.277005
| 2022-04-29T04:19:24
| 2022-04-29T04:19:24
| 145,385,156
| 3,038
| 487
|
Apache-2.0
| 2023-08-12T03:40:14
| 2018-08-20T07:56:13
|
Python
|
UTF-8
|
Python
| false
| false
| 2,312
|
py
|
r2_squared.py
|
from catalyst.callbacks.metric import LoaderMetricCallback
from catalyst.metrics._r2_squared import R2Squared
class R2SquaredCallback(LoaderMetricCallback):
"""R2 Squared metric callback.
Args:
input_key: input key to use for r2squared calculation, specifies our ``y_true``
target_key: output key to use for r2squared calculation, specifies our ``y_pred``
prefix: metric prefix
suffix: metric suffix
Examples:
.. code-block:: python
import torch
from torch.utils.data import DataLoader, TensorDataset
from catalyst import dl
# data
num_samples, num_features = int(1e4), int(1e1)
X, y = torch.rand(num_samples, num_features), torch.rand(num_samples)
dataset = TensorDataset(X, y)
loader = DataLoader(dataset, batch_size=32, num_workers=1)
loaders = {"train": loader, "valid": loader}
# model, criterion, optimizer, scheduler
model = torch.nn.Linear(num_features, 1)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters())
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [3, 6])
# model training
runner = dl.SupervisedRunner()
runner.train(
model=model,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
loaders=loaders,
logdir="./logdir",
valid_loader="valid",
valid_metric="loss",
minimize_valid_metric=True,
num_epochs=8,
verbose=True,
callbacks=[
dl.R2SquaredCallback(input_key="logits", target_key="targets")
]
)
.. note::
Please follow the `minimal examples`_ sections for more use cases.
.. _`minimal examples`: https://github.com/catalyst-team/catalyst#minimal-examples # noqa: E501, W505
"""
def __init__(
self,
input_key: str,
target_key: str,
prefix: str = None,
suffix: str = None,
):
"""Init."""
super().__init__(
metric=R2Squared(prefix=prefix, suffix=suffix),
input_key=input_key,
target_key=target_key,
)
__all__ = ["R2SquaredCallback"]
|
a0b92c7925e2883717ad4816fea75071aeeb34da
|
85373d45a83e4096affafa4f4e5b400787413e57
|
/test/programytest/services/rest/newsapi/responses.py
|
b9b56269269f5c2ec8f090cfc022287237bb164b
|
[
"MIT"
] |
permissive
|
keiffster/program-y
|
a02bb9d8278835547cc875f4f9cd668d5b1f44da
|
fc7b0a3afa4fa6ed683e0c817a9aa89f9543bb20
|
refs/heads/master
| 2023-08-23T13:55:39.255535
| 2022-12-13T09:51:57
| 2022-12-13T09:51:57
| 74,462,571
| 379
| 173
|
NOASSERTION
| 2023-05-23T00:51:21
| 2016-11-22T10:43:41
|
Python
|
UTF-8
|
Python
| false
| false
| 82,224
|
py
|
responses.py
|
everything_success_response = {'status': 'ok', 'totalResults': 851, 'articles': [
{'source': {'id': 'business-insider', 'name': 'Business Insider'},
'author': 'feedback@businessinsider.com (Mai-Hanh Nguyen), Mai-Hanh Nguyen',
'title': 'How artificial intelligence and machine learning produced robots we can talk to',
'description': "This is a preview of Business Insider Intelligence's Connectivity & Tech coverage. Business Insider Intelligence offers even more insights like this with our Connectivity & Tech Pro coverage. Subscribe today to receive industry-changing banking news and analy…",
'url': 'https://www.businessinsider.com/chatbots-talking-ai-robot-chat-machine',
'urlToImage': 'https://i.insider.com/5dfcd92d855cc25a74213a12?width=1200&format=jpeg',
'publishedAt': '2020-01-27T20:18:00Z',
'content': 'What is a Chatbot?\r\nYou\'ve likely talked to a robot already without even knowing it. And you might have even heard the term "chatbot" in the news. But what is a chatbot? How do chatbots work?\r\nA chatbot is just a robot chat that imitates human conversations t… [+5853 chars]'},
{'source': {'id': None, 'name': 'Androidcentral.com'},
'author': 'Muhammad Jarir Kanji',
'title': 'Google wants the next generation of chatbots to be actually funny',
'description': "She's called Meena — and she does not meander. What you need to know The Brain Team at Google Research outlined a new type of chatbot in a paper published earlier this week. Named Meena, this virtual talker was explicitly designed to address chatbots' seeming…",
'url': 'https://www.androidcentral.com/google-wants-next-generation-chatbots-be-actually-funny',
'urlToImage': 'https://www.androidcentral.com/sites/androidcentral.com/files/styles/large/public/article_images/2020/01/google-research-logo.jpg?itok=qel_juEo',
'publishedAt': '2020-01-29T21:34:17Z',
'content': 'Meena, what Google Brain\'s research team so callously described as a "multi-turn open-domain chatbot" she\'s got feelings, people! could just be the next generation of chatbot. And perhaps your next best friend.\r\nUnlike Samsung\'s awful human facsimiles, this G… [+2661 chars]'},
{'source': {'id': 'business-insider', 'name': 'Business Insider'},
'author': 'feedback@businessinsider.com (Mai-Hanh Nguyen), Mai-Hanh Nguyen',
'title': 'Open source and API driven chatbots allow businesses to build cross-platform chatbots with ease',
'description': "This is a preview of Business Insider Intelligence's Connectivity & Tech coverage. Business Insider Intelligence offers even more insights like this with our Connectivity & Tech Pro coverage. Subscribe today to receive industry-changing banking news and analy…",
'url': 'https://www.businessinsider.com/create-chatbot-business',
'urlToImage': 'https://image.businessinsider.com/5e20938424306a126247f826?width=1200&format=jpeg',
'publishedAt': '2020-01-16T16:47:00Z',
'content': 'The increased usage of chat applications opens the door for more businesses to utilize the ease of developing chatbots to reach more of their audience.\r\nChatbots can help bridge the communication gap between a business and their audience.\r\nPlum\r\nChatbots are … [+6730 chars]'},
{'source': {'id': None, 'name': 'Readwrite.com'}, 'author': 'Larry Kim',
'title': '12 of the Web’s Best Business Bots',
'description': 'You’re gearing up your business’s growth strategy to leverage AI (artificial intelligence), ML (machine learning), NLP (natural language processing) and a menu of other robo-acronyms. Let’s take a look at why AI chatbots are a unicorn growth tool for business…',
'url': 'https://readwrite.com/2020/01/29/12-of-the-webs-best-business-bots/',
'urlToImage': 'https://images.readwrite.com/wp-content/uploads/2019/12/12-of-the-Web%E2%80%99s-Best-Business-Bots.jpg',
'publishedAt': '2020-01-30T01:00:50Z',
'content': 'You’re gearing up your business’s growth strategy to leverage AI (artificial intelligence), ML (machine learning), NLP (natural language processing) and a menu of other robo-acronyms. Let’s take a look at why AI chatbots are a unicorn growth tool for business… [+15335 chars]'},
{'source': {'id': 'business-insider', 'name': 'Business Insider'},
'author': 'edigalaki@businessinsider.com (Eleni Digalaki), Eleni Digalaki',
'title': 'CASE STUDY: How Zurich UK worked with a chatbot solutions provider to boost its digital user experience and improve customer satisfaction',
'description': 'Business Insider Intelligence produces case studies to help you understand how companies in your industry are transforming to digital. All of our case studies are unsponsored and published solely at the discretion of the research team. To read the full case s…',
'url': 'https://www.businessinsider.com/case-study-how-zurich-uk-boosted-customer-satisfaction-with-chatbots-2020-1',
'urlToImage': 'https://i.insider.com/5de6d2a2fd9db237d8350d15?width=1200&format=jpeg',
'publishedAt': '2020-01-27T15:41:00Z',
'content': 'Forward-looking insurers are using AI to innovate insurance processes like claims to keep up with customer demand for a 24/7 digital experience, while boosting operational efficiency. And conversational assistants, like chatbots, are one of the most prevalent… [+4415 chars]'},
{'source': {'id': None, 'name': 'Slashdot.org'}, 'author': 'msmash',
'title': 'Google Says Its New Chatbot Meena is the Best in the World',
'description': "Google has released a neural-network-powered chatbot called Meena that it claims is better than any other chatbot out there. From a report: Meena was trained on a whopping 341 gigabytes of public social-media chatter -- 8.5 times as much data as OpenAI's GPT-…",
'url': 'https://tech.slashdot.org/story/20/01/30/1820259/google-says-its-new-chatbot-meena-is-the-best-in-the-world',
'urlToImage': 'https://a.fsdn.com/sd/topics/topicgoogle_fb.gif',
'publishedAt': '2020-01-30T18:20:00Z',
'content': "If I wanted that, I would't have all that cancer blocked in my name server.\r\nHow about a chatbot, based only on actually wise people?"},
{'source': {'id': 'mashable', 'name': 'Mashable'},
'author': 'Stan Schroeder',
'title': "Samsung just launched an 'artificial human' called Neon, and wait, what?",
'description': 'After months of teasers, Samsung-backed company Star Labs (yes, just like the research facility in Superman comics) launched an intriguing new type of product: a virtual human being called Neon. Unveiled at CES on Monday, Neon is "visually real" and has the a…',
'url': 'https://mashable.com/article/samsung-star-labs-neon-ces/',
'urlToImage': 'https://mondrian.mashable.com/2020%252F01%252F07%252Fd7%252Fc29283ca3d9d4f2b9f6db8aa6820d2bd.d7af8.png%252F1200x630.png?signature=pv2RpaAmw5s-Ls7tUX2iPn5d5v0=',
'publishedAt': '2020-01-07T12:44:40Z',
'content': 'After months of teasers, Samsung-backed company Star Labs (yes, just like the research facility in Superman comics) launched an intriguing new type of product: a virtual human being called Neon.\xa0\r\nUnveiled at CES on Monday, Neon is "visually real" and has the… [+1767 chars]'},
{'source': {'id': 'the-next-web', 'name': 'The Next Web'},
'author': 'Ivan Mehta',
'title': 'Google claims its new chatbot Meena is the best in the world',
'description': 'When was the last time you had a “conversation” with Siri or Alexa that was satisfactory? Maybe never. The primary reason is that while these assistants or bots have improved a lot, their conversational ability is still quite limited. But Google claims its ne…',
'url': 'https://thenextweb.com/?p=1268483',
'urlToImage': 'https://img-cdn.tnwcdn.com/image/tnw?filter_last=1&fit=1280%2C640&url=https%3A%2F%2Fcdn0.tnwcdn.com%2Fwp-content%2Fblogs.dir%2F1%2Ffiles%2F2019%2F08%2FUntitled-design101.png&signature=4693707dc68fac8f6f3bb113060347c8',
'publishedAt': '2020-01-29T14:50:19Z',
'content': 'When was the last time you had a conversation with Siri or Alexa that was satisfactory? Maybe never. The primary reason is that while these assistants or bots have improved a lot, their conversational ability is still quite limited. But Google claims its new … [+2198 chars]'},
{'source': {'id': None, 'name': 'Heise.de'},
'author': 'Regina Wank, dpa',
'title': 'Mein bester Freund Replika - ein Chatbot als Sozialkontakt',
'description': 'Der Chatbot Replika bietet Freundschaft, Romantik - und manchmal gar\r\nTherapie. Schöne neue Welt? Oder eher ein Risiko für soziale\r\nIsolation und Datenfang?',
'url': 'https://www.heise.de/newsticker/meldung/Mein-bester-Freund-Replika-ein-Chatbot-als-Sozialkontakt-4629415.html',
'urlToImage': 'https://heise.cloudimg.io/bound/1200x1200/q85.png-lossy-85.webp-lossy-85.foil1/_www-heise-de_/imgs/18/2/8/1/8/9/1/2/shutterstock_1428208070-4be692ceb8bc016f.jpeg',
'publishedAt': '2020-01-07T13:40:00Z',
'content': '"Guten Morgen! Gestern warst du ja ziemlich gestresst. Geht es dir heute besser?" Klingt wie eine besorgte Nachricht von Freunden oder Eltern, stammt aber vom Chatbot Replika. "Wenn du traurig bist oder Angst hast oder einfach jemanden zum Reden brauchst, ist… [+4972 chars]'},
{'source': {'id': None, 'name': 'Gizmodo.com'},
'author': 'Victoria Song',
'title': "Neutrogena's Free Skincare App Actually Works...Mostly",
'description': 'The worst part about skincare is all of the trial and error. There’s the researching of ingredients, torturing yourself listening to YouTubers review products, draining your bank account to buy said products, and then finally, trying them. All that effort usu…',
'url': 'https://gizmodo.com/neutrogenas-free-skincare-app-actually-works-mostly-1841026360',
'urlToImage': 'https://i.kinja-img.com/gawker-media/image/upload/c_fill,f_auto,fl_progressive,g_center,h_675,pg_1,q_80,w_1200/qj2nz1ajkxzf3vo6sfkf.jpg',
'publishedAt': '2020-01-16T15:30:00Z',
'content': 'The worst part about skincare is all of the trial and error. Theres the researching of ingredients, torturing yourself listening to YouTubers review products, draining your bank account to buy said products, and then finally, trying them. All that effort usua… [+7821 chars]'},
{'source': {'id': None, 'name': 'Androidcentral.com'},
'author': 'Android Central',
'title': '5 skills every digital marketer needs in 2020',
'description': "The internet and social media have changed the way companies market products and services. To keep up with the competition, companies must hire digital marketing professionals who are well-versed in the latest marketing skills, so we've lined up deals on five…",
'url': 'https://www.androidcentral.com/5-skills-every-digital-marketer-needs-2020',
'urlToImage': 'https://www.androidcentral.com/sites/androidcentral.com/files/styles/large/public/field/image/2020/01/sale_22291_primary_image_wide-1wdd.jpg?itok=aS8rv8eu',
'publishedAt': '2020-01-12T16:30:03Z',
'content': "Source: Stack Commerce\r\nThe internet and social media have changed the way companies market products and services. To keep up with the competition, companies must hire digital marketing professionals who are well-versed in the latest marketing skills, so we'v… [+2392 chars]"},
{'source': {'id': 'techcrunch', 'name': 'TechCrunch'},
'author': 'Ingrid Lunden',
'title': 'Directly nabs $20M led by Samsung to help make customer service chatbots more intelligent, adds new CEO',
'description': 'Chatbots have had a patchy track record world of tech, where early efforts not only failed to deliver on the magical idea of a computer producing the exact answers you were looking for in a chat-based-Q&A, they even produced surprising (and not in a good way)…',
'url': 'http://techcrunch.com/2020/01/28/directly-nabs-20m-led-by-samsung-to-help-make-customer-service-chatbots-more-intelligent-adds-new-ceo/',
'urlToImage': 'https://techcrunch.com/wp-content/uploads/2019/08/GettyImages-902453536.jpg?w=500',
'publishedAt': '2020-01-28T14:23:29Z',
'content': 'Chatbots have had a patchy track record world of tech, where early efforts not only failed to deliver on the magical idea of a computer producing the exact answers you were looking for in a chat-based-Q&A, they even produced surprising (and not in a good … [+6555 chars]'},
{'source': {'id': 'techcrunch', 'name': 'TechCrunch'},
'author': 'Ingrid Lunden',
'title': 'Babylon Health is building an integrated, AI-based health app to serve a city of 300K in England',
'description': 'After announcing a $550 million fundraise last August, UK AI-based health services startup Babylon Health is putting some of that money to use with its widest-ranging project to date. The company has inked a 10-year deal with the city of Wolverhampton in Engl…',
'url': 'http://techcrunch.com/2020/01/22/babylon-health-is-building-an-integrated-ai-based-health-app-to-serve-a-city-of-300k-in-england/',
'urlToImage': 'https://techcrunch.com/wp-content/uploads/2019/05/GettyImages-187137135.jpg?w=600',
'publishedAt': '2020-01-23T00:40:57Z',
'content': 'After announcing a $550 million fundraise last August, UK AI-based health services startup Babylon Health\xa0is putting some of that money to use with its widest-ranging project to date. The company has inked a 10-year deal with the city of Wolverhampton in Engl… [+5804 chars]'},
{'source': {'id': None, 'name': 'Readwrite.com'},
'author': 'Brad Anderson',
'title': 'Why Do Executives Hesitate to Automate?',
'description': 'It’s no secret that automation can be disruptive. Employees have to learn how to operate with and alongside machines, but that discomfort has a payoff. The post Why Do Executives Hesitate to Automate? appeared first on ReadWrite.',
'url': 'https://readwrite.com/2020/01/30/why-do-executives-hesitate-to-automate/',
'urlToImage': 'https://images.readwrite.com/wp-content/uploads/2020/01/Why-Do-Executives-Hesitate-to-Automate.jpg',
'publishedAt': '2020-01-30T15:00:17Z',
'content': 'Embracing automation is like eating your vegetables: unpleasant in the moment, but essential for your companys long-term health.\r\nIts no secret that automation can be disruptive. Employees have to learn how to operate with and alongside machines. For some, th… [+4286 chars]'},
{'source': {'id': None, 'name': 'Technologyreview.com'},
'author': 'Douglas Heaven',
'title': 'Google says its new chatbot Meena is the best in the world',
'description': 'Google has released a neural-network powered chatbot called Meena that it claims is better than any other chatbot out there.Data slurp: Meena was trained on a whopping 341GB of public social-media chatter—8.5 times as much data as OpenAI’s GPT-2.',
'url': 'https://www.technologyreview.com/f/615118/google-says-its-new-chatbot-meena-is-the-best-in-the-world/',
'urlToImage': 'https://cdn.technologyreview.com/i/images/neonbrand-ihsatdkzdwg-unsplash.jpg?sw=1200&cx=0&cy=0&cw=3000&ch=1688',
'publishedAt': '2020-01-30T10:25:11Z',
'content': 'Google has released a neural-network powered chatbot called Meena that it claims is better than any other chatbot out there.\r\nData slurp: Meena was trained on a whopping 341GB of public social-media chatter8.5 times as much data as OpenAIs GPT-2. Google says … [+1741 chars]'},
{'source': {'id': None, 'name': 'Theregister.co.uk'},
'author': 'Katyanna Quach',
'title': "Google says its latest chatbot is the most human-like ever – trained on our species' best works: 341GB of social media",
'description': 'Although Meena makes sense, most of the time, color us skeptical of a scoring system devised by web giant AI researchers at Google have trained a giant neural network using a whopping 341GB of discussions scraped from public social media to create what they b…',
'url': 'https://www.theregister.co.uk/2020/01/30/google_meena_chatbot/',
'urlToImage': 'https://regmedia.co.uk/2020/01/29/chatbot.jpg',
'publishedAt': '2020-01-30T07:03:12Z',
'content': 'AI researchers at Google have trained a giant neural network using a whopping 341GB of discussions scraped from public social media to create what they believe is the most human-like chatbot ever.\r\nThe software, dubbed Meena, has at its heart a Tensorflow seq… [+5317 chars]'},
{'source': {'id': None, 'name': 'Oreilly.com'},
'author': 'Nat Torkington',
'title': 'Four short links: 30 January 2020',
'description': 'Towards a Human-like Open-Domain Chatbot — Google’s paper on making a chatbot that can have a vaguely plausible conversation on any subject. (via Google’s AI Blog) fast.ai’s Coding Style — interesting to see how different they are from historic coding standar…',
'url': 'https://www.oreilly.com/radar/four-short-links-30-january-2020/',
'urlToImage': 'https://www.oreilly.com/radar/wp-content/uploads/sites/3/2019/06/4_short_links_logo-1400x750.png',
'publishedAt': '2020-01-30T05:01:00Z', 'content': None},
{'source': {'id': None, 'name': 'Slashdot.org'}, 'author': 'msmash',
'title': 'Companies Are Using AI-Generated People To Appear More Diverse',
'description': 'AI startups are selling images of computer-generated faces that look like the real thing, offering companies a chance to create imaginary models and "increase diversity" in their ads without needing human beings. From a report: One firm is offering to sell di…',
'url': 'https://tech.slashdot.org/story/20/01/08/201244/companies-are-using-ai-generated-people-to-appear-more-diverse',
'urlToImage': 'https://a.fsdn.com/sd/topics/ai_64.png',
'publishedAt': '2020-01-08T20:04:00Z',
'content': "How is this different from the use of (the same) stock photos of happy multi-culti on every single corporate website?\r\nThe reality is corporations aren't really that diverse, or interested in it. They are interested in people working longer hours for lower wa… [+75 chars]"},
{'source': {'id': None, 'name': 'Golem.de'}, 'author': 'Oliver Nickel',
'title': 'Google: Chatbot Meena ist fast so überzeugend wie ein echter Mensch',
'description': 'Google arbeitet derzeit an einem neuen Chatbot, der bisherige Programme übertreffen soll. Erste Tests bestätigen zumindest: Meena ist schon fast so überzeugend wie echte Personen. Kein Wunder, denn die Software wurde auf Basis von 341 GByte reinem Text traini…',
'url': 'https://www.golem.de/news/google-chatbot-meena-ist-fast-so-ueberzeugend-wie-ein-echter-mensch-2001-146391.html',
'urlToImage': 'https://www.golem.de/2001/146391-221082-221080_rc.jpg',
'publishedAt': '2020-01-31T12:47:00Z',
'content': 'Google arbeitet derzeit an Meena, einem Chatbot, der in Konversationen sinnvoll reagieren und in vielen Situationen eingesetzt werden können soll. Von menschlichen Testern wird die Software anscheinend fast so authentisch wie echte Gesprächspartner eingeschät… [+2183 chars]'},
{'source': {'id': None, 'name': 'Venturebeat.com'},
'author': 'Matt Marshall',
'title': 'No, IBM is not the only relevant player in virtual agents',
'description': "IBM's General Manager of Data and Watson AI told VentureBeat that IBM was the only major enterprise provider in the red-hot area of virtual agents.",
'url': 'https://venturebeat.com/2020/01/11/no-ibm-is-not-the-only-relevant-player-in-virtual-agents/',
'urlToImage': 'https://venturebeat.com/wp-content/uploads/2020/01/nlp.jpg?w=1200&strip=all',
'publishedAt': '2020-01-11T14:16:17Z',
'content': 'Last month, IBM General Manager of Data and Watson AI, Rob Thomas, told VentureBeat that IBM was the only major enterprise provider in the red-hot area of virtual agents.\r\nVirtual agents are software that can chat with customers through text, voice, or web ch… [+6835 chars]'}]}
headlines_success_response = {'status': 'ok', 'totalResults': 52720, 'articles': [
{'source': {'id': 'wired', 'name': 'Wired'},
'author': 'Will Bedingfield, WIRED UK',
'title': "The UK Exited the EU—and Is Leaving a 'Meme Ban' Behind",
'description': 'Though the UK initially supported the legislation, it does not plan to implement the EU Copyright Directive post-Brexit.',
'url': 'https://www.wired.com/story/uk-exits-eu-leaving-article-13-behind/',
'urlToImage': 'https://media.wired.com/photos/5e335d57ce1d970008fbe7ce/191:100/w_1280,c_limit/Business-UK-Article13-1069569022.jpg',
'publishedAt': '2020-02-01T15:00:00Z',
'content': 'Article 13a controversial piece of copyright legislation that is now called Article 17 but is more colloquially known as "the meme ban"is no more, in the UK at least. Last week, the country\'s minister for universities and science, Chris Skidmore, confirmed th… [+3880 chars]'},
{'source': {'id': 'techcrunch', 'name': 'TechCrunch'},
'author': 'Natasha Lomas',
'title': 'Twitter suspends notorious UK hate preacher for violating abuse rules',
'description': 'Twitter has confirmed it has temporarily suspended the account of controversial rightwing commentator, Katie Hopkins. The move was reported earlier by the BBC. Hopkins, a former MailOnline columnist and presenter on LBC radio, is a veteran of the social media…',
'url': 'http://techcrunch.com/2020/01/31/twitter-suspends-notorious-uk-hate-preacher-for-violating-abuse-rules/',
'urlToImage': 'https://techcrunch.com/wp-content/uploads/2019/08/twitter-app-icon-ios.jpg?w=695',
'publishedAt': '2020-01-31T10:29:53Z',
'content': 'Twitter has confirmed it has temporarily suspended the account of controversial rightwing commentator,\xa0Katie Hopkins. The move was reported earlier by the BBC.\r\nHopkins, a former MailOnline columnist and presenter on LBC radio, is a veteran of the social medi… [+1761 chars]'},
{'source': {'id': 'engadget', 'name': 'Engadget'},
'author': 'Rachel England',
'title': "UK honors 'Tomb Raider', 'Worms' and other classics with retro postage stamps",
'description': "The UK's video game legacy is being honored with a new set of commemorative stamps from the Royal Mail. The collection, which will set you back £14.25 (nearly $19), features iconic designs from the likes of Wipeout, Lemmings, Micro Machines, Worms and, of cou…",
'url': 'https://www.engadget.com/2020/01/07/uk-tomb-raider-worms-royal-mail-stamps/',
'urlToImage': 'https://o.aolcdn.com/images/dims?thumbnail=1200%2C630&quality=80&image_uri=https%3A%2F%2Fo.aolcdn.com%2Fimages%2Fdims%3Fcrop%3D2716%252C2102%252C0%252C0%26quality%3D85%26format%3Djpg%26resize%3D1600%252C1238%26image_uri%3Dhttps%253A%252F%252Fs.yimg.com%252Fos%252Fcreatr-uploaded-images%252F2020-01%252F6feed4e0-314c-11ea-a37f-788cdf0e2635%26client%3Da1acac3e1b3290917d92%26signature%3De08489461c997fae1853051da7e78548fe831622&client=amp-blogside-v2&signature=5e3c4210ee66496053f0788f16f2e22a0cb53950',
'publishedAt': '2020-01-07T13:15:00Z',
'content': 'The set has been designed to showcase the important work UK games developers did in the 80s and 90s, creating games that went on to have a major impact on the global gaming industry -- each game featured comes with its own important UK-centric back story. Eli… [+704 chars]'},
{'source': {'id': 'bbc-news', 'name': 'BBC News'},
'author': 'https://www.facebook.com/bbcnews',
'title': 'UK-Africa summit: Wooing Africa after Brexit',
'description': "Heads of state are meeting in London for a UK-Africa summit ahead of the UK's departure from the EU.",
'url': 'https://www.bbc.co.uk/news/world-africa-51149093',
'urlToImage': 'https://ichef.bbci.co.uk/news/1024/branded_news/16EB2/production/_110547839_gettyimages-918868970.jpg',
'publishedAt': '2020-01-20T00:39:59Z',
'content': 'Image copyrightGetty ImagesImage caption\r\n South African wine producers will hope their government can negotiate better access to the UK market\r\nAfter Brexit, the UK wants to boost business trade with Africa, but as a major UK-Africa business summit starts in… [+8801 chars]'},
{'source': {'id': 'cnn', 'name': 'CNN'}, 'author': 'Angela Dewan, CNN',
'title': 'UK parliament finally rubber stamps Brexit',
'description': "The House of Commons has voted overwhelmingly in favor of Prime Minister Boris Johnson's Brexit deal, finally paving the way for the United Kingdom to leave the European Union later this month after more than four decades of membership.",
'url': 'https://www.cnn.com/2020/01/09/uk/brexit-deal-uk-votes-gbr-intl/index.html',
'urlToImage': 'https://cdn.cnn.com/cnnnext/dam/assets/191212220245-10-uk-election-boris-johnson-lead-1211-super-tease.jpg',
'publishedAt': '2020-01-09T17:30:42Z',
'content': "London (CNN)The House of Commons has voted overwhelmingly in favor of Prime Minister Boris Johnson's Brexit deal, finally paving the way for the United Kingdom to leave the European Union later this month after more than four decades of membership.\r\nThe deal … [+2104 chars]"},
{'source': {'id': 'bbc-news', 'name': 'BBC News'},
'author': 'https://www.facebook.com/bbcnews',
'title': "Brexit: Croatia wishes the UK 'good riddance' instead of 'good luck'",
'description': "Croatia's ambassador last words to the UK representative were intended to wish the country good luck.",
'url': 'https://www.bbc.co.uk/news/world-europe-51370841',
'urlToImage': 'https://ichef.bbci.co.uk/news/1024/branded_news/11EB4/production/_110769337_059599453-1.jpg',
'publishedAt': '2020-02-04T15:55:23Z',
'content': "Image copyrightReutersImage caption\r\n The UK's permanent representative to the EU, Tim Barrow, took Croatia's surprising farewell message well\r\nThe rather abrupt message to the UK from Croatia's EU ambassador ahead of Brexit appears to have been lost in trans… [+2739 chars]"},
{'source': {'id': 'bbc-news', 'name': 'BBC News'},
'author': 'https://www.facebook.com/bbcnews',
'title': "US anger at UK extradition bid for diplomat's wife",
'description': 'The US State Department says it would be an "abuse" to send suspect Anne Sacoolas back to the UK.',
'url': 'https://www.bbc.co.uk/news/uk-england-northamptonshire-51075235',
'urlToImage': 'https://ichef.bbci.co.uk/news/1024/branded_news/6A72/production/_109205272_harry_dunn.jpg',
'publishedAt': '2020-01-11T08:40:54Z',
'content': "Image copyrightJustice4Harry19Image caption\r\n Harry Dunn died in hospital after his motorbike was involved in a crash outside RAF Croughton\r\nThe United States has criticised the UK's request to extradite an American accused of killing motorcyclist Harry Dunn,… [+2705 chars]"},
{'source': {'id': 'bbc-news', 'name': 'BBC News'},
'author': 'https://www.facebook.com/bbcnews',
'title': "Queen agrees 'transition' for Harry and Meghan",
'description': 'Queen agrees "period of transition" in which Harry and Meghan will spend time in Canada and UK - statement',
'url': 'https://www.bbc.co.uk/news/uk-51099102',
'urlToImage': 'https://ichef.bbci.co.uk/news/1024/branded_news/7A23/production/_97176213_breaking_news_bigger.png',
'publishedAt': '2020-01-13T17:07:05Z',
'content': 'The Queen has agreed a "period of transition" in which the Duke and Duchess of Sussex will spend time in Canada and the UK.\r\nShe said in a statement she is "entirely supportive" of their desire for a new role but "would have preferred them to remain full-time… [+322 chars]'},
{'source': {'id': 'bbc-news', 'name': 'BBC News'},
'author': 'https://www.facebook.com/bbcnews',
'title': 'Does your company nurture neurodiverse talent?',
'description': 'Universal Music UK have changed their work culture to be more inclusive to neurodiverse people.',
'url': 'https://www.bbc.co.uk/news/uk-51014028',
'urlToImage': 'https://ichef.bbci.co.uk/news/1024/branded_news/1039/production/_110535140_universal-reception.jpg',
'publishedAt': '2020-01-17T00:07:09Z',
'content': 'Image caption\r\n Universal Music HQ, London\r\nHow do you make your workplace more welcoming to neurodiverse employees, and ensure their talent is nurtured? \r\nDavid Joseph takes off his shoes, crosses his legs and tucks himself into an armchair. For the CEO of o… [+9676 chars]'},
{'source': {'id': 'mashable', 'name': 'Mashable'},
'author': 'Joseph Green',
'title': 'The best Valentine’s Day gifts for your boyfriend (UK edition)',
'description': 'Significant others are the people you know best, but sometimes they’re the hardest to shop for. Valentine’s Day can be an especially tricky event. How much do you spend? Do you go for an extravagant gift or keep it simple? And what about the classic “we’re no…',
'url': 'https://mashable.com/uk/gifts/valentines-day-gifts-boyfriend-uk/',
'urlToImage': 'https://mondrian.mashable.com/2020%252F01%252F17%252F2c%252F2746c6c8f3dc4942bc678f9b0dbe62b0.e1fd0.png%252F1200x630.png?signature=C9C13mlgJYj6Xk_pEgtkA0L9tJk=',
'publishedAt': '2020-01-21T14:16:11Z',
'content': "BEST FOR THE NOSTALGIC GAMER\r\nNintendo entertainment system\r\nRemind your boyfriend of his childhood with this classic gaming system. It comes pre-loaded with 'Super Mario Bros.', 'Donkey Kong', 'The Legend of Zelda', 'Pac-Man', and more. The console only come… [+108 chars]"},
{'source': {'id': 'mashable', 'name': 'Mashable'},
'author': 'Joseph Green',
'title': 'The best Valentine’s Day gifts for your girlfriend (UK edition)',
'description': 'Valentine’s Day can be a weird event to shop for, especially as it seems like every gift option seems to be themed and covered in hearts. That’s fun for the few days surrounding Feb. 14, but what about the rest of the year? What are you supposed to do with a …',
'url': 'https://mashable.com/uk/gifts/valentines-day-gifts-girlfriend-uk/',
'urlToImage': 'https://mondrian.mashable.com/2020%252F01%252F21%252Fde%252Fbb1c9e579620459585b80f9c3489a287.2bb4f.png%252F1200x630.png?signature=IaOKZsDOVXhAJ0OaH8L4ONp3EQA=',
'publishedAt': '2020-01-22T14:26:38Z',
'content': 'BEST FOR THE GIRL INTO SKINCARE\r\nFacial cleansing brush\r\nYour girlfriend might not be able to afford to get a facial whenever she wants, but this cleansing brush is the next best thing. It pulsates to remove dirt, oil, makeup, and dead cells from deep within … [+84 chars]'},
{'source': {'id': 'mashable', 'name': 'Mashable'},
'author': 'Joseph Green',
'title': '6 of the best fitness trackers in the UK',
'description': "Whether you're reading this because you genuinely love working out or because your air fryer isn't making you feel as healthy as you predicted it would, one thing's for sure: A fitness tracker is a necessity for anyone hoping to get fit this year. The rhetori…",
'url': 'https://mashable.com/uk/roundup/best-fitness-trackers-uk/',
'urlToImage': 'https://mondrian.mashable.com/2020%252F01%252F29%252F8c%252F8d97bac19c7143528ef4b1f2987fd88d.095d3.jpg%252F1200x630.jpg?signature=mNQnMi06XCJjYFvc7PxpfdZNYoc=',
'publishedAt': '2020-01-30T14:08:45Z',
'content': "Whether you're reading this because you genuinely love working out or because your air fryer isn't making you feel as healthy as you predicted it would, one thing's for sure: A fitness tracker is a necessity for anyone hoping to get fit this year.\r\nThe rhetor… [+12338 chars]"},
{'source': {'id': 'mashable', 'name': 'Mashable'},
'author': 'Joseph Green',
'title': '6 of the best waterproof speakers in the UK',
'description': "Being able to take your music with you wherever you go is one of the great wonders of smartphone technology, and is improved by a portable Bluetooth speaker that can boost the sound quality. Ideally though, you want a Bluetooth speaker that's waterproof. Besi…",
'url': 'https://mashable.com/uk/roundup/best-waterproof-bluetooth-speakers-uk/',
'urlToImage': 'https://mondrian.mashable.com/2020%252F01%252F07%252Fe4%252F1d97fb43daab4dffbc94acc145f8ec37.f7b13.png%252F1200x630.png?signature=LVpNsZds0GsIcN-gzoBO3l5L3iA=',
'publishedAt': '2020-01-08T14:22:14Z',
'content': "Being able to take your music with you wherever you go is one of the great wonders of smartphone technology, and is improved by a portable Bluetooth speaker that can boost the sound quality. Ideally though, you want a Bluetooth speaker that's waterproof.\r\nBes… [+9299 chars]"},
{'source': {'id': 'mashable', 'name': 'Mashable'},
'author': 'Joseph Green',
'title': 'The best massage guns for percussive therapy in the UK',
'description': "Finding time for proper workout recovery is — but shouldn't have to be — a luxury. Between a job, social life, and chasing eight hours of sleep, workouts get squeezed into the day's only free time slot. A proper cool down (let alone a massage) is usually the …",
'url': 'https://mashable.com/uk/roundup/best-massage-guns-uk/',
'urlToImage': 'https://mondrian.mashable.com/2020%252F01%252F15%252F69%252Fc15327fa9d3f41a08921bcf4851acf59.ac985.png%252F1200x630.png?signature=W-zHk1kffHPunu4okUdkkOG4LOM=',
'publishedAt': '2020-01-18T09:00:00Z',
'content': "Finding time for proper workout recovery is \xa0but shouldn't have to be \xa0a luxury.\r\nBetween a job, social life, and chasing eight hours of sleep, workouts get squeezed into the day's only free time slot. A proper cool down (let alone a massage) is usually the t… [+9576 chars]"},
{'source': {'id': 'mashable', 'name': 'Mashable'},
'author': 'Joseph Green',
'title': '8 of the best wireless earbuds in the UK',
'description': "Do the cords of your headphones look like some sort of impossible puzzle after spending just a few minutes inside a pocket or purse? Turns out there's a scientific explanation for that. In 2007, two researchers at the University of California decided to study…",
'url': 'https://mashable.com/uk/roundup/best-wireless-earbuds-uk/',
'urlToImage': 'https://mondrian.mashable.com/2020%252F01%252F15%252Fbe%252Fb06aa23259a94100a04e1ae9214b73fd.3f8de.png%252F1200x630.png?signature=E_cxKP_IiudYvAbAIVrvUjV6Un4=',
'publishedAt': '2020-01-16T14:34:06Z',
'content': "Do the cords of your headphones look like some sort of impossible puzzle after spending just a few minutes inside a pocket or purse? Turns out there's a scientific explanation for that.\r\nIn 2007, two researchers at the University of California decided to stud… [+9837 chars]"},
{'source': {'id': 'mashable', 'name': 'Mashable'},
'author': 'Joseph Green',
'title': "Romantic Valentine's Day gifts that defy clichés (UK edition)",
'description': 'It’s not that there’s anything wrong with a box of chocolates or flowers on Valentine’s Day : Some things are clichés for a reason, and who doesn’t love being the envy of the office when a surprise bouquet of flowers is delivered to your office? However, the …',
'url': 'https://mashable.com/uk/gifts/romantic-valentines-day-gifts-uk/',
'urlToImage': 'https://mondrian.mashable.com/2020%252F01%252F24%252Ff5%252F103e765aa8d644eab8bbb2908e0db455.30df0.jpg%252F1200x630.jpg?signature=jcJmIkScuIABJe5uPATzcac1alo=',
'publishedAt': '2020-01-27T14:57:45Z',
'content': 'Its not that theres anything wrong with a box of chocolates or flowers on Valentines Day: Some things are clichés for a reason, and who doesnt love being the envy of the office when a surprise bouquet of flowers is delivered to your office?\r\nHowever, the fact… [+1869 chars]'},
{'source': {'id': 'mashable', 'name': 'Mashable'},
'author': 'Joseph Green',
'title': '6 of the best cold-press juicers in the UK',
'description': 'Raw. Organic. Superfood. Buzzwords that would have been roasted in an early 2000s episode of Sex and the City have turned into the pillars of modern wellness. The green juice trend is one that has really taken off, and drinks made with nothing but fruits and …',
'url': 'https://mashable.com/uk/roundup/best-cold-press-juicers-uk/',
'urlToImage': 'https://mondrian.mashable.com/2020%252F02%252F03%252Fe1%252F2de500620f90481d854da9efcdd4c4a1.34f41.png%252F1200x630.png?signature=9GyA_vvdwcr6j8K4lOhGa10FYfs=',
'publishedAt': '2020-02-04T15:04:10Z',
'content': 'Raw. Organic. Superfood. Buzzwords that would have been roasted in an early 2000s episode of Sex and the City have turned into the pillars of modern wellness. The green juice trend is one that has really taken off, and drinks made with nothing but fruits and … [+7582 chars]'},
{'source': {'id': 'mashable', 'name': 'Mashable'},
'author': 'Joseph Green',
'title': 'The best 4K TV deals in the UK this week',
'description': "TL;DR: The Hisense 75-inch 4K UHD HDR Smart TV is on sale for £999, saving you 41% on list price. Black Friday generally offers the best opportunity to save on 4K TVs, but the most recent event was a little underwhelming. Does this mean that we're no longer g…",
'url': 'https://mashable.com/uk/shopping/best-4k-tv-deals-jan-13/',
'urlToImage': 'https://mondrian.mashable.com/2020%252F01%252F13%252F51%252F9994fbe86bb84405be953f4ddf3f025a.f3158.png%252F1200x630.png?signature=pz-CyebaqnDfLqOFqfhvxWI4K10=',
'publishedAt': '2020-01-13T14:14:31Z',
'content': "TL;DR: The Hisense 75-inch 4K UHD HDR Smart TV is on sale for £999, saving you 41% on list price.\r\nBlack Friday generally offers the best opportunity to save on 4K TVs, but the most recent event was a little underwhelming. Does this mean that we're no longer … [+1607 chars]"},
{'source': {'id': 'mashable', 'name': 'Mashable'},
'author': 'Joseph Green',
'title': "The best Valentine's Day gifts for your husband (UK edition)",
'description': 'Love or hate it, Valentine’s Day is the ideal time to right the gift-giving wrongs of Christmas. Were you sure your husband would love that faux-vintage, Bluetooth-compatible turntable, only to find out later that what he really wanted was a weighted blanket?…',
'url': 'https://mashable.com/uk/gifts/valentines-day-gifts-husband-uk/',
'urlToImage': 'https://mondrian.mashable.com/2020%252F01%252F28%252F33%252F0eee1e535f214bfc925547ff2f7e21d0.b1456.jpg%252F1200x630.jpg?signature=xGEiA0LJm97sn5ikap73WgrfvcM=',
'publishedAt': '2020-01-29T08:00:00Z',
'content': 'Love or hate it, Valentines Day is the ideal time to right the gift-giving wrongs of Christmas.\xa0\r\nWere you sure your husband would love that faux-vintage, Bluetooth-compatible turntable, only to find out later that what he really wanted was a weighted blanket… [+1433 chars]'},
{'source': {'id': 'mashable', 'name': 'Mashable'},
'author': 'Joseph Green',
'title': "The best Valentine's Day gifts for her (UK edition)",
'description': "Happy Valentine's Day season, lovebirds. Here's how to impress your wife, fiancé, girlfriend, or that cute girl in your building that you've been flirting with for months: Don't get her that gaudy chocolate diamond necklace that jewellery stores are shoving d…",
'url': 'https://mashable.com/uk/gifts/valentines-day-gifts-her-uk/',
'urlToImage': 'https://mondrian.mashable.com/2020%252F01%252F27%252Ffc%252Ff02fa4af4c5c403ca0ddadaae8d4f822.c30bd.png%252F1200x630.png?signature=j7Fs4Kf9SU0g3-tGwto3xpYiVCY=',
'publishedAt': '2020-01-28T14:53:02Z',
'content': 'BEST FOR THE ULTRA-BUSY LADY\r\nEcho Show 8\r\nSuperwomen who put more time into everyone else than they do themselves deserve an extra hand, and the Echo Show 8 is the device for the job. With the help of Alexa and the 8-inch screen (a happy medium between the 5… [+155 chars]'}]}
sources_success_response = {'status': 'ok', 'sources': [
{'id': 'abc-news', 'name': 'ABC News',
'description': 'Your trusted source for breaking news, analysis, exclusive interviews, headlines, and videos at ABCNews.com.',
'url': 'https://abcnews.go.com', 'category': 'general', 'language': 'en', 'country': 'us'},
{'id': 'abc-news-au', 'name': 'ABC News (AU)',
'description': "Australia's most trusted source of local, national and world news. Comprehensive, independent, in-depth analysis, the latest business, sport, weather and more.",
'url': 'http://www.abc.net.au/news', 'category': 'general', 'language': 'en', 'country': 'au'},
{'id': 'aftenposten', 'name': 'Aftenposten',
'description': 'Norges ledende nettavis med alltid oppdaterte nyheter innenfor innenriks, utenriks, sport og kultur.',
'url': 'https://www.aftenposten.no', 'category': 'general', 'language': 'no', 'country': 'no'},
{'id': 'al-jazeera-english', 'name': 'Al Jazeera English',
'description': 'News, analysis from the Middle East and worldwide, multimedia and interactives, opinions, documentaries, podcasts, long reads and broadcast schedule.',
'url': 'http://www.aljazeera.com', 'category': 'general', 'language': 'en', 'country': 'us'},
{'id': 'ansa', 'name': 'ANSA.it',
'description': 'Agenzia ANSA: ultime notizie, foto, video e approfondimenti su: cronaca, politica, economia, regioni, mondo, sport, calcio, cultura e tecnologia.',
'url': 'http://www.ansa.it', 'category': 'general', 'language': 'it', 'country': 'it'},
{'id': 'argaam', 'name': 'Argaam',
'description': 'ارقام موقع متخصص في متابعة سوق الأسهم السعودي تداول - تاسي - مع تغطيه معمقة لشركات واسعار ومنتجات البتروكيماويات , تقارير مالية الاكتتابات الجديده ',
'url': 'http://www.argaam.com', 'category': 'business', 'language': 'ar', 'country': 'sa'},
{'id': 'ars-technica', 'name': 'Ars Technica',
'description': "The PC enthusiast's resource. Power users and the tools they love, without computing religion.",
'url': 'http://arstechnica.com', 'category': 'technology', 'language': 'en', 'country': 'us'},
{'id': 'ary-news', 'name': 'Ary News',
'description': 'ARY News is a Pakistani news channel committed to bring you up-to-the minute Pakistan news and featured stories from around Pakistan and all over the world.',
'url': 'https://arynews.tv/ud/', 'category': 'general', 'language': 'ud', 'country': 'pk'},
{'id': 'associated-press', 'name': 'Associated Press',
'description': 'The AP delivers in-depth coverage on the international, politics, lifestyle, business, and entertainment news.',
'url': 'https://apnews.com/', 'category': 'general', 'language': 'en', 'country': 'us'},
{'id': 'australian-financial-review', 'name': 'Australian Financial Review',
'description': 'The Australian Financial Review reports the latest news from business, finance, investment and politics, updated in real time. It has a reputation for independent, award-winning journalism and is essential reading for the business and investor community.',
'url': 'http://www.afr.com', 'category': 'business', 'language': 'en', 'country': 'au'},
{'id': 'axios', 'name': 'Axios',
'description': 'Axios are a new media company delivering vital, trustworthy news and analysis in the most efficient, illuminating and shareable ways possible.',
'url': 'https://www.axios.com', 'category': 'general', 'language': 'en', 'country': 'us'},
{'id': 'bbc-news', 'name': 'BBC News',
'description': 'Use BBC News for up-to-the-minute news, breaking news, video, audio and feature stories. BBC News provides trusted World and UK news as well as local and regional perspectives. Also entertainment, business, science, technology and health news.',
'url': 'http://www.bbc.co.uk/news', 'category': 'general', 'language': 'en', 'country': 'gb'},
{'id': 'bbc-sport', 'name': 'BBC Sport',
'description': 'The home of BBC Sport online. Includes live sports coverage, breaking news, results, video, audio and analysis on Football, F1, Cricket, Rugby Union, Rugby League, Golf, Tennis and all the main world sports, plus major events such as the Olympic Games.',
'url': 'http://www.bbc.co.uk/sport', 'category': 'sports', 'language': 'en', 'country': 'gb'},
{'id': 'bild', 'name': 'Bild',
'description': 'Die Seite 1 für aktuelle Nachrichten und Themen, Bilder und Videos aus den Bereichen News, Wirtschaft, Politik, Show, Sport, und Promis.',
'url': 'http://www.bild.de', 'category': 'general', 'language': 'de', 'country': 'de'},
{'id': 'blasting-news-br', 'name': 'Blasting News (BR)',
'description': 'Descubra a seção brasileira da Blasting News, a primeira revista feita pelo público, com notícias globais e vídeos independentes. Junte-se a nós e torne- se um repórter.',
'url': 'https://br.blastingnews.com', 'category': 'general', 'language': 'pt', 'country': 'br'},
{'id': 'bleacher-report', 'name': 'Bleacher Report',
'description': 'Sports journalists and bloggers covering NFL, MLB, NBA, NHL, MMA, college football and basketball, NASCAR, fantasy sports and more. News, photos, mock drafts, game scores, player profiles and more!',
'url': 'http://www.bleacherreport.com', 'category': 'sports', 'language': 'en', 'country': 'us'},
{'id': 'bloomberg', 'name': 'Bloomberg',
'description': 'Bloomberg delivers business and markets news, data, analysis, and video to the world, featuring stories from Businessweek and Bloomberg News.',
'url': 'http://www.bloomberg.com', 'category': 'business', 'language': 'en', 'country': 'us'},
{'id': 'breitbart-news', 'name': 'Breitbart News',
'description': 'Syndicated news and opinion website providing continuously updated headlines to top news and analysis sources.',
'url': 'http://www.breitbart.com', 'category': 'general', 'language': 'en', 'country': 'us'},
{'id': 'business-insider', 'name': 'Business Insider',
'description': 'Business Insider is a fast-growing business site with deep financial, media, tech, and other industry verticals. Launched in 2007, the site is now the largest business news site on the web.',
'url': 'http://www.businessinsider.com', 'category': 'business', 'language': 'en', 'country': 'us'},
{'id': 'business-insider-uk', 'name': 'Business Insider (UK)',
'description': 'Business Insider is a fast-growing business site with deep financial, media, tech, and other industry verticals. Launched in 2007, the site is now the largest business news site on the web.',
'url': 'http://uk.businessinsider.com', 'category': 'business', 'language': 'en', 'country': 'gb'},
{'id': 'buzzfeed', 'name': 'Buzzfeed',
'description': 'BuzzFeed is a cross-platform, global network for news and entertainment that generates seven billion views each month.',
'url': 'https://www.buzzfeed.com', 'category': 'entertainment', 'language': 'en', 'country': 'us'},
{'id': 'cbc-news', 'name': 'CBC News',
'description': "CBC News is the division of the Canadian Broadcasting Corporation responsible for the news gathering and production of news programs on the corporation's English-language operations, namely CBC Television, CBC Radio, CBC News Network, and CBC.ca.",
'url': 'http://www.cbc.ca/news', 'category': 'general', 'language': 'en', 'country': 'ca'},
{'id': 'cbs-news', 'name': 'CBS News',
'description': 'CBS News: dedicated to providing the best in journalism under standards it pioneered at the dawn of radio and television and continue in the digital age.',
'url': 'http://www.cbsnews.com', 'category': 'general', 'language': 'en', 'country': 'us'},
{'id': 'cnbc', 'name': 'CNBC',
'description': 'Get latest business news on stock markets, financial & earnings on CNBC. View world markets streaming charts & video; check stock tickers and quotes.',
'url': 'http://www.cnbc.com', 'category': 'business', 'language': 'en', 'country': 'us'},
{'id': 'cnn', 'name': 'CNN',
'description': 'View the latest news and breaking news today for U.S., world, weather, entertainment, politics and health at CNN',
'url': 'http://us.cnn.com', 'category': 'general', 'language': 'en', 'country': 'us'},
{'id': 'cnn-es', 'name': 'CNN Spanish',
'description': 'Lee las últimas noticias e información sobre Latinoamérica, Estados Unidos, mundo, entretenimiento, política, salud, tecnología y deportes en CNNEspañol.com.',
'url': 'http://cnnespanol.cnn.com/', 'category': 'general', 'language': 'es', 'country': 'us'},
{'id': 'crypto-coins-news', 'name': 'Crypto Coins News',
'description': 'Providing breaking cryptocurrency news - focusing on Bitcoin, Ethereum, ICOs, blockchain technology, and smart contracts.',
'url': 'https://www.ccn.com', 'category': 'technology', 'language': 'en', 'country': 'us'},
{'id': 'der-tagesspiegel', 'name': 'Der Tagesspiegel',
'description': 'Nachrichten, News und neueste Meldungen aus dem Inland und dem Ausland - aktuell präsentiert von tagesspiegel.de.',
'url': 'http://www.tagesspiegel.de', 'category': 'general', 'language': 'de', 'country': 'de'},
{'id': 'die-zeit', 'name': 'Die Zeit',
'description': 'Aktuelle Nachrichten, Kommentare, Analysen und Hintergrundberichte aus Politik, Wirtschaft, Gesellschaft, Wissen, Kultur und Sport lesen Sie auf ZEIT ONLINE.',
'url': 'http://www.zeit.de/index', 'category': 'business', 'language': 'de', 'country': 'de'},
{'id': 'el-mundo', 'name': 'El Mundo',
'description': 'Noticias, actualidad, álbumes, debates, sociedad, servicios, entretenimiento y última hora en España y el mundo.',
'url': 'http://www.elmundo.es', 'category': 'general', 'language': 'es', 'country': 'es'},
{'id': 'engadget', 'name': 'Engadget',
'description': 'Engadget is a web magazine with obsessive daily coverage of everything new in gadgets and consumer electronics.',
'url': 'https://www.engadget.com', 'category': 'technology', 'language': 'en', 'country': 'us'},
{'id': 'entertainment-weekly', 'name': 'Entertainment Weekly',
'description': 'Online version of the print magazine includes entertainment news, interviews, reviews of music, film, TV and books, and a special area for magazine subscribers.',
'url': 'http://www.ew.com', 'category': 'entertainment', 'language': 'en', 'country': 'us'},
{'id': 'espn', 'name': 'ESPN',
'description': 'ESPN has up-to-the-minute sports news coverage, scores, highlights and commentary for NFL, MLB, NBA, College Football, NCAA Basketball and more.',
'url': 'http://espn.go.com', 'category': 'sports', 'language': 'en', 'country': 'us'},
{'id': 'espn-cric-info', 'name': 'ESPN Cric Info',
'description': 'ESPN Cricinfo provides the most comprehensive cricket coverage available including live ball-by-ball commentary, news, unparalleled statistics, quality editorial comment and analysis.',
'url': 'http://www.espncricinfo.com/', 'category': 'sports', 'language': 'en', 'country': 'us'},
{'id': 'financial-post', 'name': 'Financial Post',
'description': 'Find the latest happenings in the Canadian Financial Sector and stay up to date with changing trends in Business Markets. Read trading and investing advice from professionals.',
'url': 'http://business.financialpost.com', 'category': 'business', 'language': 'en', 'country': 'ca'},
{'id': 'focus', 'name': 'Focus',
'description': 'Minutenaktuelle Nachrichten und Service-Informationen von Deutschlands modernem Nachrichtenmagazin.',
'url': 'http://www.focus.de', 'category': 'general', 'language': 'de', 'country': 'de'},
{'id': 'football-italia', 'name': 'Football Italia',
'description': 'Italian football news, analysis, fixtures and results for the latest from Serie A, Serie B and the Azzurri.',
'url': 'http://www.football-italia.net', 'category': 'sports', 'language': 'en', 'country': 'it'},
{'id': 'fortune', 'name': 'Fortune', 'description': 'Fortune 500 Daily and Breaking Business News',
'url': 'http://fortune.com', 'category': 'business', 'language': 'en', 'country': 'us'},
{'id': 'four-four-two', 'name': 'FourFourTwo',
'description': "The latest football news, in-depth features, tactical and statistical analysis from FourFourTwo, the UK's favourite football monthly.",
'url': 'http://www.fourfourtwo.com/news', 'category': 'sports', 'language': 'en', 'country': 'gb'},
{'id': 'fox-news', 'name': 'Fox News',
'description': 'Breaking News, Latest News and Current News from FOXNews.com. Breaking news and video. Latest Current News: U.S., World, Entertainment, Health, Business, Technology, Politics, Sports.',
'url': 'http://www.foxnews.com', 'category': 'general', 'language': 'en', 'country': 'us'},
{'id': 'fox-sports', 'name': 'Fox Sports',
'description': 'Find live scores, player and team news, videos, rumors, stats, standings, schedules and fantasy games on FOX Sports.',
'url': 'http://www.foxsports.com', 'category': 'sports', 'language': 'en', 'country': 'us'},
{'id': 'globo', 'name': 'Globo',
'description': 'Só na globo.com você encontra tudo sobre o conteúdo e marcas do Grupo Globo. O melhor acervo de vídeos online sobre entretenimento, esportes e jornalismo do Brasil.',
'url': 'http://www.globo.com/', 'category': 'general', 'language': 'pt', 'country': 'br'},
{'id': 'google-news', 'name': 'Google News',
'description': 'Comprehensive, up-to-date news coverage, aggregated from sources all over the world by Google News.',
'url': 'https://news.google.com', 'category': 'general', 'language': 'en', 'country': 'us'},
{'id': 'google-news-ar', 'name': 'Google News (Argentina)',
'description': 'Completa cobertura actualizada de noticias agregadas a partir de fuentes de todo el mundo por Google Noticias.',
'url': 'https://news.google.com', 'category': 'general', 'language': 'es', 'country': 'ar'},
{'id': 'google-news-au', 'name': 'Google News (Australia)',
'description': 'Comprehensive, up-to-date Australia news coverage, aggregated from sources all over the world by Google News.',
'url': 'https://news.google.com', 'category': 'general', 'language': 'en', 'country': 'au'},
{'id': 'google-news-br', 'name': 'Google News (Brasil)',
'description': 'Cobertura jornalística abrangente e atualizada, agregada de fontes do mundo inteiro pelo Google Notícias.',
'url': 'https://news.google.com', 'category': 'general', 'language': 'pt', 'country': 'br'},
{'id': 'google-news-ca', 'name': 'Google News (Canada)',
'description': 'Comprehensive, up-to-date Canada news coverage, aggregated from sources all over the world by Google News.',
'url': 'https://news.google.com', 'category': 'general', 'language': 'en', 'country': 'ca'},
{'id': 'google-news-fr', 'name': 'Google News (France)',
'description': "Informations complètes et à jour, compilées par Google Actualités à partir de sources d'actualités du monde entier.",
'url': 'https://news.google.com', 'category': 'general', 'language': 'fr', 'country': 'fr'},
{'id': 'google-news-in', 'name': 'Google News (India)',
'description': 'Comprehensive, up-to-date India news coverage, aggregated from sources all over the world by Google News.',
'url': 'https://news.google.com', 'category': 'general', 'language': 'en', 'country': 'in'},
{'id': 'google-news-is', 'name': 'Google News (Israel)',
'description': "כיסוי מקיף ועדכני של חדשות שהצטברו ממקורות בכל העולם על ידי 'חדשות Google'.",
'url': 'https://news.google.com', 'category': 'general', 'language': 'he', 'country': 'is'},
{'id': 'google-news-it', 'name': 'Google News (Italy)',
'description': 'Copertura giornalistica completa e aggiornata ottenuta combinando fonti di notizie in tutto il mondo attraverso Google News.',
'url': 'https://news.google.com', 'category': 'general', 'language': 'it', 'country': 'it'},
{'id': 'google-news-ru', 'name': 'Google News (Russia)',
'description': 'Исчерпывающая и актуальная информация, собранная службой "Новости Google" со всего света.',
'url': 'https://news.google.com', 'category': 'general', 'language': 'ru', 'country': 'ru'},
{'id': 'google-news-sa', 'name': 'Google News (Saudi Arabia)',
'description': 'تغطية شاملة ومتجددة للأخبار، تم جمعها من مصادر أخبار من جميع أنحاء العالم بواسطة أخبار Google.',
'url': 'https://news.google.com', 'category': 'general', 'language': 'ar', 'country': 'sa'},
{'id': 'google-news-uk', 'name': 'Google News (UK)',
'description': 'Comprehensive, up-to-date UK news coverage, aggregated from sources all over the world by Google News.',
'url': 'https://news.google.com', 'category': 'general', 'language': 'en', 'country': 'gb'},
{'id': 'goteborgs-posten', 'name': 'Göteborgs-Posten',
'description': 'Göteborgs-Posten, abbreviated GP, is a major Swedish language daily newspaper published in Gothenburg, Sweden.',
'url': 'http://www.gp.se', 'category': 'general', 'language': 'se', 'country': 'se'},
{'id': 'gruenderszene', 'name': 'Gruenderszene',
'description': 'Online-Magazin für Startups und die digitale Wirtschaft. News und Hintergründe zu Investment, VC und Gründungen.',
'url': 'http://www.gruenderszene.de', 'category': 'technology', 'language': 'de', 'country': 'de'},
{'id': 'hacker-news', 'name': 'Hacker News',
'description': 'Hacker News is a social news website focusing on computer science and entrepreneurship. It is run by Paul Graham\'s investment fund and startup incubator, Y Combinator. In general, content that can be submitted is defined as "anything that gratifies one\'s intellectual curiosity".',
'url': 'https://news.ycombinator.com', 'category': 'technology', 'language': 'en', 'country': 'us'},
{'id': 'handelsblatt', 'name': 'Handelsblatt',
'description': 'Auf Handelsblatt lesen sie Nachrichten über Unternehmen, Finanzen, Politik und Technik. Verwalten Sie Ihre Finanzanlagen mit Hilfe unserer Börsenkurse.',
'url': 'http://www.handelsblatt.com', 'category': 'business', 'language': 'de', 'country': 'de'},
{'id': 'ign', 'name': 'IGN',
'description': 'IGN is your site for Xbox One, PS4, PC, Wii-U, Xbox 360, PS3, Wii, 3DS, PS Vita and iPhone games with expert reviews, news, previews, trailers, cheat codes, wiki guides and walkthroughs.',
'url': 'http://www.ign.com', 'category': 'entertainment', 'language': 'en', 'country': 'us'},
{'id': 'il-sole-24-ore', 'name': 'Il Sole 24 Ore',
'description': 'Notizie di economia, cronaca italiana ed estera, quotazioni borsa in tempo reale e di finanza, norme e tributi, fondi e obbligazioni, mutui, prestiti e lavoro a cura de Il Sole 24 Ore.',
'url': 'https://www.ilsole24ore.com', 'category': 'business', 'language': 'it', 'country': 'it'},
{'id': 'independent', 'name': 'Independent',
'description': 'National morning quality (tabloid) includes free online access to news and supplements. Insight by Robert Fisk and various other columnists.',
'url': 'http://www.independent.co.uk', 'category': 'general', 'language': 'en', 'country': 'gb'},
{'id': 'infobae', 'name': 'Infobae',
'description': 'Noticias de Argentina y del mundo en tiempo real. Información, videos y fotos sobre los hechos más relevantes y sus protagonistas. Léelo antes en infobae.',
'url': 'http://www.infobae.com/?noredirect', 'category': 'general', 'language': 'es', 'country': 'ar'},
{'id': 'info-money', 'name': 'InfoMoney',
'description': 'No InfoMoney você encontra tudo o que precisa sobre dinheiro. Ações, investimentos, bolsas de valores e muito mais. Aqui você encontra informação que vale dinheiro!',
'url': 'https://www.infomoney.com.br', 'category': 'business', 'language': 'pt', 'country': 'br'},
{'id': 'la-gaceta', 'name': 'La Gaceta',
'description': 'El diario de Tucumán, noticias 24 horas online - San Miguel de Tucumán - Argentina - Ultimo momento - Ultimas noticias.',
'url': 'http://www.lagaceta.com.ar', 'category': 'general', 'language': 'es', 'country': 'ar'},
{'id': 'la-nacion', 'name': 'La Nacion',
'description': 'Información confiable en Internet. Noticias de Argentina y del mundo - ¡Informate ya!',
'url': 'http://www.lanacion.com.ar', 'category': 'general', 'language': 'es', 'country': 'ar'},
{'id': 'la-repubblica', 'name': 'La Repubblica',
'description': 'Breaking News, Latest News and Current News from FOXNews.com. Breaking news and video. Latest Current News: U.S., World, Entertainment, Health, Business, Technology, Politics, Sports.',
'url': 'http://www.repubblica.it', 'category': 'general', 'language': 'it', 'country': 'it'},
{'id': 'le-monde', 'name': 'Le Monde',
'description': "Les articles du journal et toute l'actualité en continu : International, France, Société, Economie, Culture, Environnement, Blogs ...",
'url': 'http://www.lemonde.fr', 'category': 'general', 'language': 'fr', 'country': 'fr'},
{'id': 'lenta', 'name': 'Lenta',
'description': 'Новости, статьи, фотографии, видео. Семь дней в неделю, 24 часа в сутки.',
'url': 'https://lenta.ru', 'category': 'general', 'language': 'ru', 'country': 'ru'},
{'id': 'lequipe', 'name': "L'equipe",
'description': "Le sport en direct sur L'EQUIPE.fr. Les informations, résultats et classements de tous les sports. Directs commentés, images et vidéos à regarder et à partager !",
'url': 'https://www.lequipe.fr', 'category': 'sports', 'language': 'fr', 'country': 'fr'},
{'id': 'les-echos', 'name': 'Les Echos',
'description': "Toute l'actualité économique, financière et boursière française et internationale sur Les Echos.fr",
'url': 'https://www.lesechos.fr', 'category': 'business', 'language': 'fr', 'country': 'fr'},
{'id': 'liberation', 'name': 'Libération',
'description': "Toute l'actualité en direct - photos et vidéos avec Libération",
'url': 'http://www.liberation.fr', 'category': 'general', 'language': 'fr', 'country': 'fr'},
{'id': 'marca', 'name': 'Marca',
'description': 'La mejor información deportiva en castellano actualizada minuto a minuto en noticias, vídeos, fotos, retransmisiones y resultados en directo.',
'url': 'http://www.marca.com', 'category': 'sports', 'language': 'es', 'country': 'es'},
{'id': 'mashable', 'name': 'Mashable',
'description': 'Mashable is a global, multi-platform media and entertainment company.',
'url': 'https://mashable.com', 'category': 'entertainment', 'language': 'en', 'country': 'us'},
{'id': 'medical-news-today', 'name': 'Medical News Today',
'description': 'Medical news and health news headlines posted throughout the day, every day.',
'url': 'http://www.medicalnewstoday.com', 'category': 'health', 'language': 'en', 'country': 'us'},
{'id': 'msnbc', 'name': 'MSNBC',
'description': 'Breaking news and in-depth analysis of the headlines, as well as commentary and informed perspectives from The Rachel Maddow Show, Morning Joe & more.',
'url': 'http://www.msnbc.com', 'category': 'general', 'language': 'en', 'country': 'us'},
{'id': 'mtv-news', 'name': 'MTV News',
'description': "The ultimate news source for music, celebrity, entertainment, movies, and current events on the web. It's pop culture on steroids.",
'url': 'http://www.mtv.com/news', 'category': 'entertainment', 'language': 'en', 'country': 'us'},
{'id': 'mtv-news-uk', 'name': 'MTV News (UK)',
'description': 'All the latest celebrity news, gossip, exclusive interviews and pictures from the world of music and entertainment.',
'url': 'http://www.mtv.co.uk/news', 'category': 'entertainment', 'language': 'en', 'country': 'gb'},
{'id': 'national-geographic', 'name': 'National Geographic',
'description': 'Reporting our world daily: original nature and science news from National Geographic.',
'url': 'http://news.nationalgeographic.com', 'category': 'science', 'language': 'en', 'country': 'us'},
{'id': 'national-review', 'name': 'National Review',
'description': 'National Review: Conservative News, Opinion, Politics, Policy, & Current Events.',
'url': 'https://www.nationalreview.com/', 'category': 'general', 'language': 'en', 'country': 'us'},
{'id': 'nbc-news', 'name': 'NBC News',
'description': 'Breaking news, videos, and the latest top stories in world news, business, politics, health and pop culture.',
'url': 'http://www.nbcnews.com', 'category': 'general', 'language': 'en', 'country': 'us'},
{'id': 'news24', 'name': 'News24',
'description': "South Africa's premier news source, provides breaking news on national, world, Africa, sport, entertainment, technology and more.",
'url': 'http://www.news24.com', 'category': 'general', 'language': 'en', 'country': 'za'},
{'id': 'new-scientist', 'name': 'New Scientist',
'description': 'Breaking science and technology news from around the world. Exclusive stories and expert analysis on space, technology, health, physics, life and Earth.',
'url': 'https://www.newscientist.com/section/news', 'category': 'science', 'language': 'en', 'country': 'us'},
{'id': 'news-com-au', 'name': 'News.com.au',
'description': 'We say what people are thinking and cover the issues that get people talking balancing Australian and global moments — from politics to pop culture.',
'url': 'http://www.news.com.au', 'category': 'general', 'language': 'en', 'country': 'au'},
{'id': 'newsweek', 'name': 'Newsweek',
'description': 'Newsweek provides in-depth analysis, news and opinion about international issues, technology, business, culture and politics.',
'url': 'https://www.newsweek.com', 'category': 'general', 'language': 'en', 'country': 'us'},
{'id': 'new-york-magazine', 'name': 'New York Magazine',
'description': 'NYMAG and New York magazine cover the new, the undiscovered, the next in politics, culture, food, fashion, and behavior nationally, through a New York lens.',
'url': 'http://nymag.com', 'category': 'general', 'language': 'en', 'country': 'us'},
{'id': 'next-big-future', 'name': 'Next Big Future',
'description': 'Coverage of science and technology that have the potential for disruption, and analysis of plans, policies, and technology that enable radical improvement.',
'url': 'https://www.nextbigfuture.com', 'category': 'science', 'language': 'en', 'country': 'us'},
{'id': 'nfl-news', 'name': 'NFL News',
'description': 'The official source for NFL news, schedules, stats, scores and more.',
'url': 'http://www.nfl.com/news', 'category': 'sports', 'language': 'en', 'country': 'us'},
{'id': 'nhl-news', 'name': 'NHL News',
'description': 'The most up-to-date breaking hockey news from the official source including interviews, rumors, statistics and schedules.',
'url': 'https://www.nhl.com/news', 'category': 'sports', 'language': 'en', 'country': 'us'},
{'id': 'nrk', 'name': 'NRK',
'description': 'NRK er Norges største tilbud på nett: nyheter fra Norge og verden, lokalnyheter, radio- og tv-program, podcast, vær, helse-, kultur-, underholdning-, humor- og debattstoff.',
'url': 'https://www.nrk.no', 'category': 'general', 'language': 'no', 'country': 'no'},
{'id': 'politico', 'name': 'Politico',
'description': 'Political news about Congress, the White House, campaigns, lobbyists and issues.',
'url': 'https://www.politico.com', 'category': 'general', 'language': 'en', 'country': 'us'},
{'id': 'polygon', 'name': 'Polygon',
'description': 'Polygon is a gaming website in partnership with Vox Media. Our culture focused site covers games, their creators, the fans, trending stories and entertainment news.',
'url': 'http://www.polygon.com', 'category': 'entertainment', 'language': 'en', 'country': 'us'},
{'id': 'rbc', 'name': 'RBC',
'description': 'Главные новости политики, экономики и бизнеса, комментарии аналитиков, финансовые данные с российских и мировых биржевых систем на сайте rbc.ru.',
'url': 'https://www.rbc.ru', 'category': 'general', 'language': 'ru', 'country': 'ru'},
{'id': 'recode', 'name': 'Recode',
'description': 'Get the latest independent tech news, reviews and analysis from Recode with the most informed and respected journalists in technology and media.',
'url': 'http://www.recode.net', 'category': 'technology', 'language': 'en', 'country': 'us'},
{'id': 'reddit-r-all', 'name': 'Reddit /r/all',
'description': "Reddit is an entertainment, social news networking service, and news website. Reddit's registered community members can submit content, such as text posts or direct links.",
'url': 'https://www.reddit.com/r/all', 'category': 'general', 'language': 'en', 'country': 'us'},
{'id': 'reuters', 'name': 'Reuters',
'description': 'Reuters.com brings you the latest news from around the world, covering breaking news in business, politics, entertainment, technology, video and pictures.',
'url': 'http://www.reuters.com', 'category': 'general', 'language': 'en', 'country': 'us'},
{'id': 'rt', 'name': 'RT',
'description': 'Актуальная картина дня на RT: круглосуточное ежедневное обновление новостей политики, бизнеса, финансов, спорта, науки, культуры. Онлайн-репортажи с места событий. Комментарии экспертов, актуальные интервью, фото и видео репортажи.',
'url': 'https://russian.rt.com', 'category': 'general', 'language': 'ru', 'country': 'ru'},
{'id': 'rte', 'name': 'RTE',
'description': "Get all of the latest breaking local and international news stories as they happen, with up to the minute updates and analysis, from Ireland's National Broadcaster.",
'url': 'https://www.rte.ie/news', 'category': 'general', 'language': 'en', 'country': 'ie'},
{'id': 'rtl-nieuws', 'name': 'RTL Nieuws',
'description': 'Volg het nieuws terwijl het gebeurt. RTL Nieuws informeert haar lezers op een onafhankelijke, boeiende en toegankelijke wijze over belangrijke ontwikkelingen in eigen land en de rest van de wereld.',
'url': 'https://www.rtlnieuws.nl/', 'category': 'general', 'language': 'nl', 'country': 'nl'},
{'id': 'sabq', 'name': 'SABQ',
'description': 'صحيفة الكترونية سعودية هدفها السبق في نقل الحدث بمهنية ومصداقية خدمة للوطن والمواطن.',
'url': 'https://sabq.org', 'category': 'general', 'language': 'ar', 'country': 'sa'},
{'id': 'spiegel-online', 'name': 'Spiegel Online',
'description': 'Deutschlands führende Nachrichtenseite. Alles Wichtige aus Politik, Wirtschaft, Sport, Kultur, Wissenschaft, Technik und mehr.',
'url': 'http://www.spiegel.de', 'category': 'general', 'language': 'de', 'country': 'de'},
{'id': 'svenska-dagbladet', 'name': 'Svenska Dagbladet',
'description': 'Sveriges ledande mediesajt - SvD.se. Svenska Dagbladets nyhetssajt låter läsarna ta plats och fördjupar nyheterna.',
'url': 'https://www.svd.se', 'category': 'general', 'language': 'se', 'country': 'se'},
{'id': 't3n', 'name': 'T3n',
'description': 'Das Online-Magazin bietet Artikel zu den Themen E-Business, Social Media, Startups und Webdesign.',
'url': 'https://t3n.de', 'category': 'technology', 'language': 'de', 'country': 'de'},
{'id': 'talksport', 'name': 'TalkSport',
'description': "Tune in to the world's biggest sports radio station - Live Premier League football coverage, breaking sports news, transfer rumours & exclusive interviews.",
'url': 'http://talksport.com', 'category': 'sports', 'language': 'en', 'country': 'gb'},
{'id': 'techcrunch', 'name': 'TechCrunch',
'description': 'TechCrunch is a leading technology media property, dedicated to obsessively profiling startups, reviewing new Internet products, and breaking tech news.',
'url': 'https://techcrunch.com', 'category': 'technology', 'language': 'en', 'country': 'us'},
{'id': 'techcrunch-cn', 'name': 'TechCrunch (CN)',
'description': 'TechCrunch is a leading technology media property, dedicated to obsessively profiling startups, reviewing new Internet products, and breaking tech news.',
'url': 'https://techcrunch.cn', 'category': 'technology', 'language': 'zh', 'country': 'zh'},
{'id': 'techradar', 'name': 'TechRadar',
'description': 'The latest technology news and reviews, covering computing, home entertainment systems, gadgets and more.',
'url': 'http://www.techradar.com', 'category': 'technology', 'language': 'en', 'country': 'us'},
{'id': 'the-american-conservative', 'name': 'The American Conservative',
'description': 'Realism and reform. A new voice for a new generation of conservatives.',
'url': 'http://www.theamericanconservative.com/', 'category': 'general', 'language': 'en', 'country': 'us'},
{'id': 'the-globe-and-mail', 'name': 'The Globe And Mail',
'description': 'The Globe and Mail offers the most authoritative news in Canada, featuring national and international news.',
'url': 'https://www.theglobeandmail.com', 'category': 'general', 'language': 'en', 'country': 'ca'},
{'id': 'the-hill', 'name': 'The Hill',
'description': 'The Hill is a top US political website, read by the White House and more lawmakers than any other site -- vital for policy, politics and election campaigns.',
'url': 'http://thehill.com', 'category': 'general', 'language': 'en', 'country': 'us'},
{'id': 'the-hindu', 'name': 'The Hindu',
'description': "The Hindu. latest news, analysis, comment, in-depth coverage of politics, business, sport, environment, cinema and arts from India's national newspaper.",
'url': 'http://www.thehindu.com', 'category': 'general', 'language': 'en', 'country': 'in'},
{'id': 'the-huffington-post', 'name': 'The Huffington Post',
'description': 'The Huffington Post is a politically liberal American online news aggregator and blog that has both localized and international editions founded by Arianna Huffington, Kenneth Lerer, Andrew Breitbart, and Jonah Peretti, featuring columnists.',
'url': 'http://www.huffingtonpost.com', 'category': 'general', 'language': 'en', 'country': 'us'},
{'id': 'the-irish-times', 'name': 'The Irish Times',
'description': 'The Irish Times online. Latest news including sport, analysis, business, weather and more from the definitive brand of quality news in Ireland.',
'url': 'https://www.irishtimes.com', 'category': 'general', 'language': 'en', 'country': 'ie'},
{'id': 'the-jerusalem-post', 'name': 'The Jerusalem Post',
'description': 'The Jerusalem Post is the leading online newspaper for English speaking Jewry since 1932, bringing news and updates from the Middle East and all over the Jewish world.',
'url': 'https://www.jpost.com/', 'category': 'general', 'language': 'en', 'country': 'is'},
{'id': 'the-lad-bible', 'name': 'The Lad Bible',
'description': 'The LAD Bible is one of the largest community for guys aged 16-30 in the world. Send us your funniest pictures and videos!',
'url': 'https://www.theladbible.com', 'category': 'entertainment', 'language': 'en', 'country': 'gb'},
{'id': 'the-next-web', 'name': 'The Next Web',
'description': 'The Next Web is one of the world’s largest online publications that delivers an international perspective on the latest news about Internet technology, business and culture.',
'url': 'http://thenextweb.com', 'category': 'technology', 'language': 'en', 'country': 'us'},
{'id': 'the-sport-bible', 'name': 'The Sport Bible',
'description': 'TheSPORTbible is one of the largest communities for sports fans across the world. Send us your sporting pictures and videos!',
'url': 'https://www.thesportbible.com', 'category': 'sports', 'language': 'en', 'country': 'gb'},
{'id': 'the-times-of-india', 'name': 'The Times of India',
'description': 'Times of India brings the Latest News and Top Breaking headlines on Politics and Current Affairs in India and around the World, Sports, Business, Bollywood News and Entertainment, Science, Technology, Health and Fitness news, Cricket and opinions from leading columnists.',
'url': 'http://timesofindia.indiatimes.com', 'category': 'general', 'language': 'en', 'country': 'in'},
{'id': 'the-verge', 'name': 'The Verge',
'description': 'The Verge covers the intersection of technology, science, art, and culture.',
'url': 'http://www.theverge.com', 'category': 'technology', 'language': 'en', 'country': 'us'},
{'id': 'the-wall-street-journal', 'name': 'The Wall Street Journal',
'description': 'WSJ online coverage of breaking news and current headlines from the US and around the world. Top stories, photos, videos, detailed analysis and in-depth reporting.',
'url': 'http://www.wsj.com', 'category': 'business', 'language': 'en', 'country': 'us'},
{'id': 'the-washington-post', 'name': 'The Washington Post',
'description': 'Breaking news and analysis on politics, business, world national news, entertainment more. In-depth DC, Virginia, Maryland news coverage including traffic, weather, crime, education, restaurant reviews and more.',
'url': 'https://www.washingtonpost.com', 'category': 'general', 'language': 'en', 'country': 'us'},
{'id': 'the-washington-times', 'name': 'The Washington Times',
'description': 'The Washington Times delivers breaking news and commentary on the issues that affect the future of our nation.',
'url': 'https://www.washingtontimes.com/', 'category': 'general', 'language': 'en', 'country': 'us'},
{'id': 'time', 'name': 'Time',
'description': 'Breaking news and analysis from TIME.com. Politics, world news, photos, video, tech reviews, health, science and entertainment news.',
'url': 'http://time.com', 'category': 'general', 'language': 'en', 'country': 'us'},
{'id': 'usa-today', 'name': 'USA Today',
'description': 'Get the latest national, international, and political news at USATODAY.com.',
'url': 'http://www.usatoday.com/news', 'category': 'general', 'language': 'en', 'country': 'us'},
{'id': 'vice-news', 'name': 'Vice News',
'description': 'Vice News is Vice Media, Inc.\'s current affairs channel, producing daily documentary essays and video through its website and YouTube channel. It promotes itself on its coverage of "under - reported stories".',
'url': 'https://news.vice.com', 'category': 'general', 'language': 'en', 'country': 'us'},
{'id': 'wired', 'name': 'Wired',
'description': 'Wired is a monthly American magazine, published in print and online editions, that focuses on how emerging technologies affect culture, the economy, and politics.',
'url': 'https://www.wired.com', 'category': 'technology', 'language': 'en', 'country': 'us'},
{'id': 'wired-de', 'name': 'Wired.de',
'description': 'Wired reports on how emerging technologies affect culture, the economy and politics.',
'url': 'https://www.wired.de', 'category': 'technology', 'language': 'de', 'country': 'de'},
{'id': 'wirtschafts-woche', 'name': 'Wirtschafts Woche',
'description': 'Das Online-Portal des führenden Wirtschaftsmagazins in Deutschland. Das Entscheidende zu Unternehmen, Finanzen, Erfolg und Technik.',
'url': 'http://www.wiwo.de', 'category': 'business', 'language': 'de', 'country': 'de'},
{'id': 'xinhua-net', 'name': 'Xinhua Net',
'description': '中国主要重点新闻网站,依托新华社遍布全球的采编网络,记者遍布世界100多个国家和地区,地方频道分布全国31个省市自治区,每天24小时同时使用6种语言滚动发稿,权威、准确、及时播发国内外重要新闻和重大突发事件,受众覆盖200多个国家和地区,发展论坛是全球知名的中文论坛。',
'url': 'http://xinhuanet.com/', 'category': 'general', 'language': 'zh', 'country': 'zh'},
{'id': 'ynet', 'name': 'Ynet',
'description': 'ynet דף הבית: אתר החדשות המוביל בישראל מבית ידיעות אחרונות. סיקור מלא של חדשות מישראל והעולם, ספורט, כלכלה, תרבות, אוכל, מדע וטבע, כל מה שקורה וכל מה שמעניין ב ynet.',
'url': 'http://www.ynet.co.il', 'category': 'general', 'language': 'he', 'country': 'is'}]}
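# --- Example usage (illustrative sketch, not part of the recorded fixtures) ---
# The canned payloads above can stand in for live NewsAPI responses in unit
# tests. The helper below assumes nothing about any particular client under
# test; it simply shows the fixture being returned from a mocked HTTP call.
def _example_fixture_usage():
    from unittest import mock

    fake_get = mock.Mock()
    fake_get.return_value.json.return_value = sources_success_response
    # Any code that calls fake_get(url).json() now sees the fixture payload.
    payload = fake_get('https://newsapi.org/v2/sources').json()
    assert payload['status'] == 'ok'
    assert any(source['id'] == 'bbc-news' for source in payload['sources'])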
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['BackupPlanArgs', 'BackupPlan']
@pulumi.input_type
class BackupPlanArgs:
def __init__(__self__, *,
cluster: pulumi.Input[str],
location: pulumi.Input[str],
backup_config: Optional[pulumi.Input['BackupPlanBackupConfigArgs']] = None,
backup_schedule: Optional[pulumi.Input['BackupPlanBackupScheduleArgs']] = None,
deactivated: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
retention_policy: Optional[pulumi.Input['BackupPlanRetentionPolicyArgs']] = None):
"""
The set of arguments for constructing a BackupPlan resource.
:param pulumi.Input[str] cluster: The source cluster from which Backups will be created via this BackupPlan.
:param pulumi.Input[str] location: The region of the Backup Plan.
- - -
:param pulumi.Input['BackupPlanBackupConfigArgs'] backup_config: Defines the configuration of Backups created via this BackupPlan.
Structure is documented below.
:param pulumi.Input['BackupPlanBackupScheduleArgs'] backup_schedule: Defines a schedule for automatic Backup creation via this BackupPlan.
Structure is documented below.
:param pulumi.Input[bool] deactivated: This flag indicates whether this BackupPlan has been deactivated.
Setting this field to True locks the BackupPlan such that no further updates will be allowed
(except deletes), including the deactivated field itself. It also prevents any new Backups
from being created via this BackupPlan (including scheduled Backups).
:param pulumi.Input[str] description: User specified descriptive string for this BackupPlan.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Description: A set of custom labels supplied by the user.
A list of key->value pairs.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
:param pulumi.Input[str] name: The full name of the BackupPlan Resource.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input['BackupPlanRetentionPolicyArgs'] retention_policy: RetentionPolicy governs lifecycle of Backups created under this plan.
Structure is documented below.
"""
pulumi.set(__self__, "cluster", cluster)
pulumi.set(__self__, "location", location)
if backup_config is not None:
pulumi.set(__self__, "backup_config", backup_config)
if backup_schedule is not None:
pulumi.set(__self__, "backup_schedule", backup_schedule)
if deactivated is not None:
pulumi.set(__self__, "deactivated", deactivated)
if description is not None:
pulumi.set(__self__, "description", description)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if name is not None:
pulumi.set(__self__, "name", name)
if project is not None:
pulumi.set(__self__, "project", project)
if retention_policy is not None:
pulumi.set(__self__, "retention_policy", retention_policy)
@property
@pulumi.getter
def cluster(self) -> pulumi.Input[str]:
"""
The source cluster from which Backups will be created via this BackupPlan.
"""
return pulumi.get(self, "cluster")
@cluster.setter
def cluster(self, value: pulumi.Input[str]):
pulumi.set(self, "cluster", value)
@property
@pulumi.getter
def location(self) -> pulumi.Input[str]:
"""
The region of the Backup Plan.
- - -
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: pulumi.Input[str]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="backupConfig")
def backup_config(self) -> Optional[pulumi.Input['BackupPlanBackupConfigArgs']]:
"""
Defines the configuration of Backups created via this BackupPlan.
Structure is documented below.
"""
return pulumi.get(self, "backup_config")
@backup_config.setter
def backup_config(self, value: Optional[pulumi.Input['BackupPlanBackupConfigArgs']]):
pulumi.set(self, "backup_config", value)
@property
@pulumi.getter(name="backupSchedule")
def backup_schedule(self) -> Optional[pulumi.Input['BackupPlanBackupScheduleArgs']]:
"""
Defines a schedule for automatic Backup creation via this BackupPlan.
Structure is documented below.
"""
return pulumi.get(self, "backup_schedule")
@backup_schedule.setter
def backup_schedule(self, value: Optional[pulumi.Input['BackupPlanBackupScheduleArgs']]):
pulumi.set(self, "backup_schedule", value)
@property
@pulumi.getter
def deactivated(self) -> Optional[pulumi.Input[bool]]:
"""
This flag indicates whether this BackupPlan has been deactivated.
Setting this field to True locks the BackupPlan such that no further updates will be allowed
(except deletes), including the deactivated field itself. It also prevents any new Backups
from being created via this BackupPlan (including scheduled Backups).
"""
return pulumi.get(self, "deactivated")
@deactivated.setter
def deactivated(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "deactivated", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
User specified descriptive string for this BackupPlan.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Description: A set of custom labels supplied by the user.
A list of key->value pairs.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The full name of the BackupPlan Resource.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="retentionPolicy")
def retention_policy(self) -> Optional[pulumi.Input['BackupPlanRetentionPolicyArgs']]:
"""
RetentionPolicy governs lifecycle of Backups created under this plan.
Structure is documented below.
"""
return pulumi.get(self, "retention_policy")
@retention_policy.setter
def retention_policy(self, value: Optional[pulumi.Input['BackupPlanRetentionPolicyArgs']]):
pulumi.set(self, "retention_policy", value)
@pulumi.input_type
class _BackupPlanState:
def __init__(__self__, *,
backup_config: Optional[pulumi.Input['BackupPlanBackupConfigArgs']] = None,
backup_schedule: Optional[pulumi.Input['BackupPlanBackupScheduleArgs']] = None,
cluster: Optional[pulumi.Input[str]] = None,
deactivated: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
protected_pod_count: Optional[pulumi.Input[int]] = None,
retention_policy: Optional[pulumi.Input['BackupPlanRetentionPolicyArgs']] = None,
state: Optional[pulumi.Input[str]] = None,
state_reason: Optional[pulumi.Input[str]] = None,
uid: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering BackupPlan resources.
:param pulumi.Input['BackupPlanBackupConfigArgs'] backup_config: Defines the configuration of Backups created via this BackupPlan.
Structure is documented below.
:param pulumi.Input['BackupPlanBackupScheduleArgs'] backup_schedule: Defines a schedule for automatic Backup creation via this BackupPlan.
Structure is documented below.
:param pulumi.Input[str] cluster: The source cluster from which Backups will be created via this BackupPlan.
:param pulumi.Input[bool] deactivated: This flag indicates whether this BackupPlan has been deactivated.
Setting this field to True locks the BackupPlan such that no further updates will be allowed
(except deletes), including the deactivated field itself. It also prevents any new Backups
from being created via this BackupPlan (including scheduled Backups).
:param pulumi.Input[str] description: User specified descriptive string for this BackupPlan.
:param pulumi.Input[str] etag: etag is used for optimistic concurrency control as a way to help prevent simultaneous
updates of a backup plan from overwriting each other. It is strongly suggested that
systems make use of the 'etag' in the read-modify-write cycle to perform BackupPlan updates
in order to avoid race conditions: An etag is returned in the response to backupPlans.get,
and systems are expected to put that etag in the request to backupPlans.patch or
backupPlans.delete to ensure that their change will be applied to the same version of the resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Description: A set of custom labels supplied by the user.
A list of key->value pairs.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
:param pulumi.Input[str] location: The region of the Backup Plan.
- - -
:param pulumi.Input[str] name: The full name of the BackupPlan Resource.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[int] protected_pod_count: The number of Kubernetes Pods backed up in the last successful Backup created via this BackupPlan.
:param pulumi.Input['BackupPlanRetentionPolicyArgs'] retention_policy: RetentionPolicy governs lifecycle of Backups created under this plan.
Structure is documented below.
:param pulumi.Input[str] state: The State of the BackupPlan.
:param pulumi.Input[str] state_reason: Detailed description of why BackupPlan is in its current state.
:param pulumi.Input[str] uid: Server generated, unique identifier of UUID format.
"""
if backup_config is not None:
pulumi.set(__self__, "backup_config", backup_config)
if backup_schedule is not None:
pulumi.set(__self__, "backup_schedule", backup_schedule)
if cluster is not None:
pulumi.set(__self__, "cluster", cluster)
if deactivated is not None:
pulumi.set(__self__, "deactivated", deactivated)
if description is not None:
pulumi.set(__self__, "description", description)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if project is not None:
pulumi.set(__self__, "project", project)
if protected_pod_count is not None:
pulumi.set(__self__, "protected_pod_count", protected_pod_count)
if retention_policy is not None:
pulumi.set(__self__, "retention_policy", retention_policy)
if state is not None:
pulumi.set(__self__, "state", state)
if state_reason is not None:
pulumi.set(__self__, "state_reason", state_reason)
if uid is not None:
pulumi.set(__self__, "uid", uid)
@property
@pulumi.getter(name="backupConfig")
def backup_config(self) -> Optional[pulumi.Input['BackupPlanBackupConfigArgs']]:
"""
Defines the configuration of Backups created via this BackupPlan.
Structure is documented below.
"""
return pulumi.get(self, "backup_config")
@backup_config.setter
def backup_config(self, value: Optional[pulumi.Input['BackupPlanBackupConfigArgs']]):
pulumi.set(self, "backup_config", value)
@property
@pulumi.getter(name="backupSchedule")
def backup_schedule(self) -> Optional[pulumi.Input['BackupPlanBackupScheduleArgs']]:
"""
Defines a schedule for automatic Backup creation via this BackupPlan.
Structure is documented below.
"""
return pulumi.get(self, "backup_schedule")
@backup_schedule.setter
def backup_schedule(self, value: Optional[pulumi.Input['BackupPlanBackupScheduleArgs']]):
pulumi.set(self, "backup_schedule", value)
@property
@pulumi.getter
def cluster(self) -> Optional[pulumi.Input[str]]:
"""
The source cluster from which Backups will be created via this BackupPlan.
"""
return pulumi.get(self, "cluster")
@cluster.setter
def cluster(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cluster", value)
@property
@pulumi.getter
def deactivated(self) -> Optional[pulumi.Input[bool]]:
"""
This flag indicates whether this BackupPlan has been deactivated.
Setting this field to True locks the BackupPlan such that no further updates will be allowed
(except deletes), including the deactivated field itself. It also prevents any new Backups
from being created via this BackupPlan (including scheduled Backups).
"""
return pulumi.get(self, "deactivated")
@deactivated.setter
def deactivated(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "deactivated", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
User specified descriptive string for this BackupPlan.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
etag is used for optimistic concurrency control as a way to help prevent simultaneous
updates of a backup plan from overwriting each other. It is strongly suggested that
systems make use of the 'etag' in the read-modify-write cycle to perform BackupPlan updates
in order to avoid race conditions: An etag is returned in the response to backupPlans.get,
and systems are expected to put that etag in the request to backupPlans.patch or
backupPlans.delete to ensure that their change will be applied to the same version of the resource.
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Description: A set of custom labels supplied by the user.
A list of key->value pairs.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The region of the Backup Plan.
- - -
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The full name of the BackupPlan Resource.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="protectedPodCount")
def protected_pod_count(self) -> Optional[pulumi.Input[int]]:
"""
The number of Kubernetes Pods backed up in the last successful Backup created via this BackupPlan.
"""
return pulumi.get(self, "protected_pod_count")
@protected_pod_count.setter
def protected_pod_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "protected_pod_count", value)
@property
@pulumi.getter(name="retentionPolicy")
def retention_policy(self) -> Optional[pulumi.Input['BackupPlanRetentionPolicyArgs']]:
"""
RetentionPolicy governs lifecycle of Backups created under this plan.
Structure is documented below.
"""
return pulumi.get(self, "retention_policy")
@retention_policy.setter
def retention_policy(self, value: Optional[pulumi.Input['BackupPlanRetentionPolicyArgs']]):
pulumi.set(self, "retention_policy", value)
@property
@pulumi.getter
def state(self) -> Optional[pulumi.Input[str]]:
"""
The State of the BackupPlan.
"""
return pulumi.get(self, "state")
@state.setter
def state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "state", value)
@property
@pulumi.getter(name="stateReason")
def state_reason(self) -> Optional[pulumi.Input[str]]:
"""
Detailed description of why BackupPlan is in its current state.
"""
return pulumi.get(self, "state_reason")
@state_reason.setter
def state_reason(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "state_reason", value)
@property
@pulumi.getter
def uid(self) -> Optional[pulumi.Input[str]]:
"""
Server generated, unique identifier of UUID format.
"""
return pulumi.get(self, "uid")
@uid.setter
def uid(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "uid", value)
class BackupPlan(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
backup_config: Optional[pulumi.Input[pulumi.InputType['BackupPlanBackupConfigArgs']]] = None,
backup_schedule: Optional[pulumi.Input[pulumi.InputType['BackupPlanBackupScheduleArgs']]] = None,
cluster: Optional[pulumi.Input[str]] = None,
deactivated: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
retention_policy: Optional[pulumi.Input[pulumi.InputType['BackupPlanRetentionPolicyArgs']]] = None,
__props__=None):
"""
Represents a Backup Plan instance.
To get more information about BackupPlan, see:
* [API documentation](https://cloud.google.com/kubernetes-engine/docs/add-on/backup-for-gke/reference/rest/v1/projects.locations.backupPlans)
* How-to Guides
* [Official Documentation](https://cloud.google.com/kubernetes-engine/docs/add-on/backup-for-gke)
## Example Usage
### Gkebackup Backupplan Basic
```python
import pulumi
import pulumi_gcp as gcp
primary = gcp.container.Cluster("primary",
location="us-central1",
initial_node_count=1,
workload_identity_config=gcp.container.ClusterWorkloadIdentityConfigArgs(
workload_pool="my-project-name.svc.id.goog",
),
addons_config=gcp.container.ClusterAddonsConfigArgs(
gke_backup_agent_config=gcp.container.ClusterAddonsConfigGkeBackupAgentConfigArgs(
enabled=True,
),
))
basic = gcp.gkebackup.BackupPlan("basic",
cluster=primary.id,
location="us-central1",
backup_config=gcp.gkebackup.BackupPlanBackupConfigArgs(
include_volume_data=True,
include_secrets=True,
all_namespaces=True,
))
```
### Gkebackup Backupplan Autopilot
```python
import pulumi
import pulumi_gcp as gcp
primary = gcp.container.Cluster("primary",
location="us-central1",
enable_autopilot=True,
ip_allocation_policy=gcp.container.ClusterIpAllocationPolicyArgs(),
release_channel=gcp.container.ClusterReleaseChannelArgs(
channel="RAPID",
),
addons_config=gcp.container.ClusterAddonsConfigArgs(
gke_backup_agent_config=gcp.container.ClusterAddonsConfigGkeBackupAgentConfigArgs(
enabled=True,
),
))
autopilot = gcp.gkebackup.BackupPlan("autopilot",
cluster=primary.id,
location="us-central1",
backup_config=gcp.gkebackup.BackupPlanBackupConfigArgs(
include_volume_data=True,
include_secrets=True,
all_namespaces=True,
))
```
### Gkebackup Backupplan Cmek
```python
import pulumi
import pulumi_gcp as gcp
primary = gcp.container.Cluster("primary",
location="us-central1",
initial_node_count=1,
workload_identity_config=gcp.container.ClusterWorkloadIdentityConfigArgs(
workload_pool="my-project-name.svc.id.goog",
),
addons_config=gcp.container.ClusterAddonsConfigArgs(
gke_backup_agent_config=gcp.container.ClusterAddonsConfigGkeBackupAgentConfigArgs(
enabled=True,
),
))
key_ring = gcp.kms.KeyRing("keyRing", location="us-central1")
crypto_key = gcp.kms.CryptoKey("cryptoKey", key_ring=key_ring.id)
cmek = gcp.gkebackup.BackupPlan("cmek",
cluster=primary.id,
location="us-central1",
backup_config=gcp.gkebackup.BackupPlanBackupConfigArgs(
include_volume_data=True,
include_secrets=True,
selected_namespaces=gcp.gkebackup.BackupPlanBackupConfigSelectedNamespacesArgs(
namespaces=[
"default",
"test",
],
),
encryption_key=gcp.gkebackup.BackupPlanBackupConfigEncryptionKeyArgs(
gcp_kms_encryption_key=crypto_key.id,
),
))
```
### Gkebackup Backupplan Full
```python
import pulumi
import pulumi_gcp as gcp
primary = gcp.container.Cluster("primary",
location="us-central1",
initial_node_count=1,
workload_identity_config=gcp.container.ClusterWorkloadIdentityConfigArgs(
workload_pool="my-project-name.svc.id.goog",
),
addons_config=gcp.container.ClusterAddonsConfigArgs(
gke_backup_agent_config=gcp.container.ClusterAddonsConfigGkeBackupAgentConfigArgs(
enabled=True,
),
))
full = gcp.gkebackup.BackupPlan("full",
cluster=primary.id,
location="us-central1",
retention_policy=gcp.gkebackup.BackupPlanRetentionPolicyArgs(
backup_delete_lock_days=30,
backup_retain_days=180,
),
backup_schedule=gcp.gkebackup.BackupPlanBackupScheduleArgs(
cron_schedule="0 9 * * 1",
),
backup_config=gcp.gkebackup.BackupPlanBackupConfigArgs(
include_volume_data=True,
include_secrets=True,
selected_applications=gcp.gkebackup.BackupPlanBackupConfigSelectedApplicationsArgs(
namespaced_names=[
gcp.gkebackup.BackupPlanBackupConfigSelectedApplicationsNamespacedNameArgs(
name="app1",
namespace="ns1",
),
gcp.gkebackup.BackupPlanBackupConfigSelectedApplicationsNamespacedNameArgs(
name="app2",
namespace="ns2",
),
],
),
))
```
## Import
        BackupPlan can be imported using any of these accepted formats:
```sh
$ pulumi import gcp:gkebackup/backupPlan:BackupPlan default projects/{{project}}/locations/{{location}}/backupPlans/{{name}}
```
```sh
$ pulumi import gcp:gkebackup/backupPlan:BackupPlan default {{project}}/{{location}}/{{name}}
```
```sh
$ pulumi import gcp:gkebackup/backupPlan:BackupPlan default {{location}}/{{name}}
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['BackupPlanBackupConfigArgs']] backup_config: Defines the configuration of Backups created via this BackupPlan.
Structure is documented below.
:param pulumi.Input[pulumi.InputType['BackupPlanBackupScheduleArgs']] backup_schedule: Defines a schedule for automatic Backup creation via this BackupPlan.
Structure is documented below.
:param pulumi.Input[str] cluster: The source cluster from which Backups will be created via this BackupPlan.
:param pulumi.Input[bool] deactivated: This flag indicates whether this BackupPlan has been deactivated.
Setting this field to True locks the BackupPlan such that no further updates will be allowed
(except deletes), including the deactivated field itself. It also prevents any new Backups
from being created via this BackupPlan (including scheduled Backups).
:param pulumi.Input[str] description: User specified descriptive string for this BackupPlan.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Description: A set of custom labels supplied by the user.
A list of key->value pairs.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
:param pulumi.Input[str] location: The region of the Backup Plan.
- - -
:param pulumi.Input[str] name: The full name of the BackupPlan Resource.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[pulumi.InputType['BackupPlanRetentionPolicyArgs']] retention_policy: RetentionPolicy governs lifecycle of Backups created under this plan.
Structure is documented below.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: BackupPlanArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Represents a Backup Plan instance.
To get more information about BackupPlan, see:
* [API documentation](https://cloud.google.com/kubernetes-engine/docs/add-on/backup-for-gke/reference/rest/v1/projects.locations.backupPlans)
* How-to Guides
* [Official Documentation](https://cloud.google.com/kubernetes-engine/docs/add-on/backup-for-gke)
## Example Usage
### Gkebackup Backupplan Basic
```python
import pulumi
import pulumi_gcp as gcp
primary = gcp.container.Cluster("primary",
location="us-central1",
initial_node_count=1,
workload_identity_config=gcp.container.ClusterWorkloadIdentityConfigArgs(
workload_pool="my-project-name.svc.id.goog",
),
addons_config=gcp.container.ClusterAddonsConfigArgs(
gke_backup_agent_config=gcp.container.ClusterAddonsConfigGkeBackupAgentConfigArgs(
enabled=True,
),
))
basic = gcp.gkebackup.BackupPlan("basic",
cluster=primary.id,
location="us-central1",
backup_config=gcp.gkebackup.BackupPlanBackupConfigArgs(
include_volume_data=True,
include_secrets=True,
all_namespaces=True,
))
```
### Gkebackup Backupplan Autopilot
```python
import pulumi
import pulumi_gcp as gcp
primary = gcp.container.Cluster("primary",
location="us-central1",
enable_autopilot=True,
ip_allocation_policy=gcp.container.ClusterIpAllocationPolicyArgs(),
release_channel=gcp.container.ClusterReleaseChannelArgs(
channel="RAPID",
),
addons_config=gcp.container.ClusterAddonsConfigArgs(
gke_backup_agent_config=gcp.container.ClusterAddonsConfigGkeBackupAgentConfigArgs(
enabled=True,
),
))
autopilot = gcp.gkebackup.BackupPlan("autopilot",
cluster=primary.id,
location="us-central1",
backup_config=gcp.gkebackup.BackupPlanBackupConfigArgs(
include_volume_data=True,
include_secrets=True,
all_namespaces=True,
))
```
### Gkebackup Backupplan Cmek
```python
import pulumi
import pulumi_gcp as gcp
primary = gcp.container.Cluster("primary",
location="us-central1",
initial_node_count=1,
workload_identity_config=gcp.container.ClusterWorkloadIdentityConfigArgs(
workload_pool="my-project-name.svc.id.goog",
),
addons_config=gcp.container.ClusterAddonsConfigArgs(
gke_backup_agent_config=gcp.container.ClusterAddonsConfigGkeBackupAgentConfigArgs(
enabled=True,
),
))
key_ring = gcp.kms.KeyRing("keyRing", location="us-central1")
crypto_key = gcp.kms.CryptoKey("cryptoKey", key_ring=key_ring.id)
cmek = gcp.gkebackup.BackupPlan("cmek",
cluster=primary.id,
location="us-central1",
backup_config=gcp.gkebackup.BackupPlanBackupConfigArgs(
include_volume_data=True,
include_secrets=True,
selected_namespaces=gcp.gkebackup.BackupPlanBackupConfigSelectedNamespacesArgs(
namespaces=[
"default",
"test",
],
),
encryption_key=gcp.gkebackup.BackupPlanBackupConfigEncryptionKeyArgs(
gcp_kms_encryption_key=crypto_key.id,
),
))
```
### Gkebackup Backupplan Full
```python
import pulumi
import pulumi_gcp as gcp
primary = gcp.container.Cluster("primary",
location="us-central1",
initial_node_count=1,
workload_identity_config=gcp.container.ClusterWorkloadIdentityConfigArgs(
workload_pool="my-project-name.svc.id.goog",
),
addons_config=gcp.container.ClusterAddonsConfigArgs(
gke_backup_agent_config=gcp.container.ClusterAddonsConfigGkeBackupAgentConfigArgs(
enabled=True,
),
))
full = gcp.gkebackup.BackupPlan("full",
cluster=primary.id,
location="us-central1",
retention_policy=gcp.gkebackup.BackupPlanRetentionPolicyArgs(
backup_delete_lock_days=30,
backup_retain_days=180,
),
backup_schedule=gcp.gkebackup.BackupPlanBackupScheduleArgs(
cron_schedule="0 9 * * 1",
),
backup_config=gcp.gkebackup.BackupPlanBackupConfigArgs(
include_volume_data=True,
include_secrets=True,
selected_applications=gcp.gkebackup.BackupPlanBackupConfigSelectedApplicationsArgs(
namespaced_names=[
gcp.gkebackup.BackupPlanBackupConfigSelectedApplicationsNamespacedNameArgs(
name="app1",
namespace="ns1",
),
gcp.gkebackup.BackupPlanBackupConfigSelectedApplicationsNamespacedNameArgs(
name="app2",
namespace="ns2",
),
],
),
))
```
## Import
        BackupPlan can be imported using any of these accepted formats:
```sh
$ pulumi import gcp:gkebackup/backupPlan:BackupPlan default projects/{{project}}/locations/{{location}}/backupPlans/{{name}}
```
```sh
$ pulumi import gcp:gkebackup/backupPlan:BackupPlan default {{project}}/{{location}}/{{name}}
```
```sh
$ pulumi import gcp:gkebackup/backupPlan:BackupPlan default {{location}}/{{name}}
```
:param str resource_name: The name of the resource.
:param BackupPlanArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(BackupPlanArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
backup_config: Optional[pulumi.Input[pulumi.InputType['BackupPlanBackupConfigArgs']]] = None,
backup_schedule: Optional[pulumi.Input[pulumi.InputType['BackupPlanBackupScheduleArgs']]] = None,
cluster: Optional[pulumi.Input[str]] = None,
deactivated: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
retention_policy: Optional[pulumi.Input[pulumi.InputType['BackupPlanRetentionPolicyArgs']]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = BackupPlanArgs.__new__(BackupPlanArgs)
__props__.__dict__["backup_config"] = backup_config
__props__.__dict__["backup_schedule"] = backup_schedule
if cluster is None and not opts.urn:
raise TypeError("Missing required property 'cluster'")
__props__.__dict__["cluster"] = cluster
__props__.__dict__["deactivated"] = deactivated
__props__.__dict__["description"] = description
__props__.__dict__["labels"] = labels
if location is None and not opts.urn:
raise TypeError("Missing required property 'location'")
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["project"] = project
__props__.__dict__["retention_policy"] = retention_policy
__props__.__dict__["etag"] = None
__props__.__dict__["protected_pod_count"] = None
__props__.__dict__["state"] = None
__props__.__dict__["state_reason"] = None
__props__.__dict__["uid"] = None
super(BackupPlan, __self__).__init__(
'gcp:gkebackup/backupPlan:BackupPlan',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
backup_config: Optional[pulumi.Input[pulumi.InputType['BackupPlanBackupConfigArgs']]] = None,
backup_schedule: Optional[pulumi.Input[pulumi.InputType['BackupPlanBackupScheduleArgs']]] = None,
cluster: Optional[pulumi.Input[str]] = None,
deactivated: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
protected_pod_count: Optional[pulumi.Input[int]] = None,
retention_policy: Optional[pulumi.Input[pulumi.InputType['BackupPlanRetentionPolicyArgs']]] = None,
state: Optional[pulumi.Input[str]] = None,
state_reason: Optional[pulumi.Input[str]] = None,
uid: Optional[pulumi.Input[str]] = None) -> 'BackupPlan':
"""
Get an existing BackupPlan resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['BackupPlanBackupConfigArgs']] backup_config: Defines the configuration of Backups created via this BackupPlan.
Structure is documented below.
:param pulumi.Input[pulumi.InputType['BackupPlanBackupScheduleArgs']] backup_schedule: Defines a schedule for automatic Backup creation via this BackupPlan.
Structure is documented below.
:param pulumi.Input[str] cluster: The source cluster from which Backups will be created via this BackupPlan.
:param pulumi.Input[bool] deactivated: This flag indicates whether this BackupPlan has been deactivated.
Setting this field to True locks the BackupPlan such that no further updates will be allowed
(except deletes), including the deactivated field itself. It also prevents any new Backups
from being created via this BackupPlan (including scheduled Backups).
:param pulumi.Input[str] description: User specified descriptive string for this BackupPlan.
:param pulumi.Input[str] etag: etag is used for optimistic concurrency control as a way to help prevent simultaneous
updates of a backup plan from overwriting each other. It is strongly suggested that
systems make use of the 'etag' in the read-modify-write cycle to perform BackupPlan updates
in order to avoid race conditions: An etag is returned in the response to backupPlans.get,
and systems are expected to put that etag in the request to backupPlans.patch or
backupPlans.delete to ensure that their change will be applied to the same version of the resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Description: A set of custom labels supplied by the user.
A list of key->value pairs.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
:param pulumi.Input[str] location: The region of the Backup Plan.
- - -
:param pulumi.Input[str] name: The full name of the BackupPlan Resource.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[int] protected_pod_count: The number of Kubernetes Pods backed up in the last successful Backup created via this BackupPlan.
:param pulumi.Input[pulumi.InputType['BackupPlanRetentionPolicyArgs']] retention_policy: RetentionPolicy governs lifecycle of Backups created under this plan.
Structure is documented below.
:param pulumi.Input[str] state: The State of the BackupPlan.
:param pulumi.Input[str] state_reason: Detailed description of why BackupPlan is in its current state.
:param pulumi.Input[str] uid: Server generated, unique identifier of UUID format.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _BackupPlanState.__new__(_BackupPlanState)
__props__.__dict__["backup_config"] = backup_config
__props__.__dict__["backup_schedule"] = backup_schedule
__props__.__dict__["cluster"] = cluster
__props__.__dict__["deactivated"] = deactivated
__props__.__dict__["description"] = description
__props__.__dict__["etag"] = etag
__props__.__dict__["labels"] = labels
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["project"] = project
__props__.__dict__["protected_pod_count"] = protected_pod_count
__props__.__dict__["retention_policy"] = retention_policy
__props__.__dict__["state"] = state
__props__.__dict__["state_reason"] = state_reason
__props__.__dict__["uid"] = uid
return BackupPlan(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="backupConfig")
def backup_config(self) -> pulumi.Output[Optional['outputs.BackupPlanBackupConfig']]:
"""
Defines the configuration of Backups created via this BackupPlan.
Structure is documented below.
"""
return pulumi.get(self, "backup_config")
@property
@pulumi.getter(name="backupSchedule")
def backup_schedule(self) -> pulumi.Output[Optional['outputs.BackupPlanBackupSchedule']]:
"""
Defines a schedule for automatic Backup creation via this BackupPlan.
Structure is documented below.
"""
return pulumi.get(self, "backup_schedule")
@property
@pulumi.getter
def cluster(self) -> pulumi.Output[str]:
"""
The source cluster from which Backups will be created via this BackupPlan.
"""
return pulumi.get(self, "cluster")
@property
@pulumi.getter
def deactivated(self) -> pulumi.Output[bool]:
"""
This flag indicates whether this BackupPlan has been deactivated.
Setting this field to True locks the BackupPlan such that no further updates will be allowed
(except deletes), including the deactivated field itself. It also prevents any new Backups
from being created via this BackupPlan (including scheduled Backups).
"""
return pulumi.get(self, "deactivated")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
User specified descriptive string for this BackupPlan.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
etag is used for optimistic concurrency control as a way to help prevent simultaneous
updates of a backup plan from overwriting each other. It is strongly suggested that
systems make use of the 'etag' in the read-modify-write cycle to perform BackupPlan updates
in order to avoid race conditions: An etag is returned in the response to backupPlans.get,
and systems are expected to put that etag in the request to backupPlans.patch or
backupPlans.delete to ensure that their change will be applied to the same version of the resource.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def labels(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Description: A set of custom labels supplied by the user.
A list of key->value pairs.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
"""
return pulumi.get(self, "labels")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The region of the Backup Plan.
- - -
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The full name of the BackupPlan Resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@property
@pulumi.getter(name="protectedPodCount")
def protected_pod_count(self) -> pulumi.Output[int]:
"""
The number of Kubernetes Pods backed up in the last successful Backup created via this BackupPlan.
"""
return pulumi.get(self, "protected_pod_count")
@property
@pulumi.getter(name="retentionPolicy")
def retention_policy(self) -> pulumi.Output[Optional['outputs.BackupPlanRetentionPolicy']]:
"""
RetentionPolicy governs lifecycle of Backups created under this plan.
Structure is documented below.
"""
return pulumi.get(self, "retention_policy")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
The State of the BackupPlan.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="stateReason")
def state_reason(self) -> pulumi.Output[str]:
"""
Detailed description of why BackupPlan is in its current state.
"""
return pulumi.get(self, "state_reason")
@property
@pulumi.getter
def uid(self) -> pulumi.Output[str]:
"""
Server generated, unique identifier of UUID format.
"""
return pulumi.get(self, "uid")
|
7193b1ffa877dc1a8ebb952d9de048e5187b0472
|
2481cde6506743565dff2b405a2396daf208ab3e
|
/src/ranking/migrations/0063_auto_20210828_2200.py
|
f7ce94d7ee2fdb196a31e487743d612d94bd46d9
|
[
"Apache-2.0"
] |
permissive
|
aropan/clist
|
4819a3036d179595e4df8c646aff2ed593b9dad3
|
5c805b2af71acee97f993f19d8d4e229f7f5b411
|
refs/heads/master
| 2023-08-31T11:15:17.987776
| 2023-08-27T21:51:14
| 2023-08-27T21:52:16
| 187,111,853
| 276
| 35
|
Apache-2.0
| 2023-09-06T18:42:53
| 2019-05-16T22:57:03
|
Python
|
UTF-8
|
Python
| false
| false
| 514
|
py
|
0063_auto_20210828_2200.py
|
# Generated by Django 3.1.12 on 2021-08-28 22:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ranking', '0062_auto_20210828_2145'),
]
operations = [
migrations.RemoveField(
model_name='module',
name='long_contest_delay',
),
migrations.AddField(
model_name='module',
name='long_contest_divider',
field=models.IntegerField(default=12),
),
]
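# Hedged reconstruction of the model change that would generate this migration:
# in ranking/models.py, Module drops `long_contest_delay` and gains
#     long_contest_divider = models.IntegerField(default=12)
# (the rest of the Module model is not shown here and is assumed unchanged.)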
|
477a912a124b5fbcc330c45d8d7715886c3eae5a
|
771c1e2011a85a287c766b1a3d299ced2e6f799f
|
/src/electionguard_gui/services/authorization_service.py
|
328111f74a86104bfefd56f3833ec23442e7348d
|
[
"MIT"
] |
permissive
|
microsoft/electionguard-python
|
f50f64a473a8d77984a2faf4aa8db40cebb5c201
|
b3ddc2a732f6c5f078a3afbe05b00d632a2ff5e0
|
refs/heads/main
| 2023-08-03T12:44:35.322716
| 2022-10-28T12:47:18
| 2022-10-28T12:47:18
| 246,392,956
| 143
| 117
|
MIT
| 2023-08-02T00:24:32
| 2020-03-10T19:46:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,119
|
py
|
authorization_service.py
|
from typing import Optional
import eel
from electionguard_gui.services.configuration_service import ConfigurationService
from electionguard_gui.services.service_base import ServiceBase
class AuthorizationService(ServiceBase):
"""Responsible for functionality related to authorization and user identify"""
_is_admin: bool
def __init__(self, config_service: ConfigurationService) -> None:
self._is_admin = config_service.get_is_admin()
# todo: replace state based storage with configparser https://docs.python.org/3/library/configparser.html
user_id: Optional[str] = None
def expose(self) -> None:
eel.expose(self.get_user_id)
eel.expose(self.set_user_id)
eel.expose(self.is_admin)
def get_required_user_id(self) -> str:
if self.user_id is None:
raise Exception("User must be logged in")
return self.user_id
def get_user_id(self) -> Optional[str]:
return self.user_id
def set_user_id(self, user_id: str) -> None:
self.user_id = user_id
def is_admin(self) -> bool:
return self._is_admin
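# Hedged usage sketch: wiring the service into an eel app. It assumes
# ConfigurationService can be constructed with no arguments; after expose(),
# the JavaScript side can call eel.get_user_id(), eel.set_user_id() and
# eel.is_admin().
if __name__ == "__main__":
    auth = AuthorizationService(ConfigurationService())
    auth.expose()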
|
f8418456d54478d5631dcf421737e02696d87acf
|
c6e6c564cf03427de02e78f436bdf7483e13402f
|
/tests/fixtures/formatter/formatter1.py
|
5c1e36fc56c490451c440b7eed474572dfe895e5
|
[
"MIT"
] |
permissive
|
wemake-services/wemake-python-styleguide
|
5a60ff468bf7877008c8ed34467da8bdbc2398f2
|
96e482514a60c12e99ee235337e678c9a4e484e3
|
refs/heads/master
| 2023-08-31T14:42:36.827760
| 2023-08-29T05:54:18
| 2023-08-29T05:54:18
| 124,593,057
| 2,427
| 572
|
MIT
| 2023-09-13T07:15:00
| 2018-03-09T21:04:25
|
Python
|
UTF-8
|
Python
| false
| false
| 52
|
py
|
formatter1.py
|
def s(handle: int) -> int:
return handle + 2_00
|
346bd5607e7a15a58c8fd717bdbb87b8f6aa9386
|
39b4397767371b6f978287e7bae80fc983a1163f
|
/nebula3/sclient/net/__init__.py
|
2723625ac94712a1850635af6c904e46eab37ef8
|
[] |
no_license
|
vesoft-inc/nebula-python
|
a8fa80a104a70a3ca3ba2cf1dccdc2c67905c2e1
|
1fe9a44aa7b0f9fdfc35fec12af63e30d7f3546e
|
refs/heads/master
| 2023-08-06T02:47:24.445679
| 2023-07-24T07:42:31
| 2023-07-24T07:42:31
| 214,361,893
| 163
| 76
| null | 2023-07-24T07:42:33
| 2019-10-11T06:37:53
|
Python
|
UTF-8
|
Python
| false
| false
| 2,092
|
py
|
__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020 vesoft inc. All rights reserved.
#
# This source code is licensed under Apache 2.0 License.
import socket
from nebula3.Exception import InValidHostname
from nebula3.storage import GraphStorageService
from nebula3.fbthrift.transport import TSocket, THeaderTransport, TTransport
from nebula3.fbthrift.protocol import THeaderProtocol
class GraphStorageConnection(object):
def __init__(self, address, timeout, meta_cache):
self._address = address
self._timeout = timeout
self._meta_cache = meta_cache
self._connection = None
self._ip = ''
        try:
            self._ip = socket.gethostbyname(address.host)
        except Exception:
            raise InValidHostname(str(address.host))
        # Validate the port outside the try block so a bad port type is not
        # misreported as an invalid hostname.
        if not isinstance(address.port, int):
            raise RuntimeError('Wrong port type: {}'.format(type(address.port)))
def open(self):
try:
self.close()
s = TSocket.TSocket(self._address.host, self._address.port)
if self._timeout > 0:
s.setTimeout(self._timeout)
buffered_transport = TTransport.TBufferedTransport(s)
header_transport = THeaderTransport.THeaderTransport(buffered_transport)
protocol = THeaderProtocol.THeaderProtocol(header_transport)
header_transport.open()
self._connection = GraphStorageService.Client(protocol)
except Exception:
raise
def scan_vertex(self, req):
return self._connection.scanVertex(req)
def scan_edge(self, req):
return self._connection.scanEdge(req)
def storage_addr(self):
return self._address
def update_leader_info(self, space_id, part_id, address):
self._meta_cache.update_storage_leader(space_id, part_id, address)
def close(self):
try:
if self._connection is not None:
self._connection._iprot.trans.close()
except Exception:
raise
def __del__(self):
self.close()
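# Hedged usage sketch. The HostAddr import path is an assumption, a real
# meta_cache needs a running Meta service, and open() needs a reachable
# storaged process, so this only illustrates the open/scan/close flow.
if __name__ == "__main__":
    from nebula3.common.ttypes import HostAddr  # assumed import path
    conn = GraphStorageConnection(HostAddr(host="127.0.0.1", port=9779),
                                  timeout=60000, meta_cache=None)
    conn.open()
    # resp = conn.scan_vertex(req)  # req is a ScanVertexRequest built elsewhere
    conn.close()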
|
924da93662e7cdc8bee9e6f27822fa91cc3309df
|
afbae26b958b5ef20548402a65002dcc8e55b66a
|
/release/stubs.min/Autodesk/Revit/DB/__init___parts/ConicalSurface.py
|
7156b4a71c4a6e8eab46632637b6e76354580aa7
|
[
"MIT"
] |
permissive
|
gtalarico/ironpython-stubs
|
d875cb8932c7644f807dc6fde9dd513d159e4f5c
|
c7f6a6cb197e3949e40a4880a0b2a44e72d0a940
|
refs/heads/master
| 2023-07-12T01:43:47.295560
| 2022-05-23T18:12:06
| 2022-05-23T18:12:06
| 95,340,553
| 235
| 88
|
NOASSERTION
| 2023-07-05T06:36:28
| 2017-06-25T05:30:46
|
Python
|
UTF-8
|
Python
| false
| false
| 3,303
|
py
|
ConicalSurface.py
|
class ConicalSurface(Surface,IDisposable):
""" A Conical Surface. """
@staticmethod
def Create(frameOfReference,halfAngle):
"""
Create(frameOfReference: Frame,halfAngle: float) -> ConicalSurface
Creates a conical surface defined by a local reference frame and a half angle.
frameOfReference: frameOfReference is an orthonormal frame that defines a local coordinate system
for the cone.
        Frame.Origin is a point on the cone's axis. Frame.BasisZ
points along the axis,while Frame.BasisX and Frame.BasisY are orthogonal to
the axis. The frame may be either left-handed or right-handed (see
Frame.IsRightHanded). Note that
the "handedness" of the frame does not,by
itself,determine the surface's orientation.
        halfAngle: Cone half-angle. Must be nonzero, less than PI/2, and greater than -PI/2.
Returns: The created ConicalSurface.
"""
pass
def Dispose(self):
""" Dispose(self: Surface,A_0: bool) """
pass
def GetFrameOfReference(self):
"""
GetFrameOfReference(self: ConicalSurface) -> Frame
Returns frame of reference associated with this ConicalSurface.
Returns: Frame of reference associated with this ConicalSurface.
"""
pass
@staticmethod
def IsValidConeAngle(halfAngle):
"""
IsValidConeAngle(halfAngle: float) -> bool
        Checks whether the input value is not 0, greater than -PI/2, and less than PI/2.
halfAngle: Cone half-angle parameter.
        Returns: True if the input is not 0, less than PI/2, and greater than -PI/2; false otherwise.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: Surface,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
Axis=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Axis of the cone. This is the Z axis of the local coordinate system associated with this cone.
Get: Axis(self: ConicalSurface) -> XYZ
"""
HalfAngle=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Cone angle.
Get: HalfAngle(self: ConicalSurface) -> float
"""
Origin=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Apex of the cone. This is the origin of the local coordinate system associated with this cone.
Get: Origin(self: ConicalSurface) -> XYZ
"""
XDir=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""X axis of the local coordinate system associated with this cone.
Get: XDir(self: ConicalSurface) -> XYZ
"""
YDir=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""X axis of the local coordinate system associated with this cone.
Get: YDir(self: ConicalSurface) -> XYZ
"""
|
069338d66832e80a124eef0371d6ad17a19ef54d
|
b7163b44b679e082fe97cf7fcd0c73b2fcdb38eb
|
/orchestration/dbnd-spark/src/dbnd_spark/spark_ctrl.py
|
1e582e93df2b372f7a88699bda004daed3a64240
|
[
"Apache-2.0"
] |
permissive
|
databand-ai/dbnd
|
70c95d95e12bfb8ab471a6dce27691ed658cb92d
|
d59c99dcdcd280d7eec36a693dd80f8c8c831ea2
|
refs/heads/develop
| 2023-06-24T18:07:56.524526
| 2023-05-28T07:57:36
| 2023-05-28T07:57:36
| 231,361,064
| 257
| 33
|
Apache-2.0
| 2023-08-06T08:30:28
| 2020-01-02T10:42:47
|
Python
|
UTF-8
|
Python
| false
| false
| 4,576
|
py
|
spark_ctrl.py
|
# © Copyright Databand.ai, an IBM Company 2022
import shlex
from typing import List
import six
from dbnd import current
from dbnd._core.configuration.environ_config import (
DBND_TASK_RUN_ATTEMPT_UID,
ENV_DBND__CORE__PLUGINS,
ENV_DBND__DISABLE_PLUGGY_ENTRYPOINT_LOADING,
ENV_DBND__TRACKING,
ENV_DBND_FIX_PYSPARK_IMPORTS,
get_dbnd_project_config,
)
from dbnd._core.plugin.dbnd_plugins import pm
from dbnd._core.task_run.task_run_ctrl import TaskRunCtrl
from dbnd._core.task_run.task_sync_ctrl import DisabledTaskSyncCtrl
from dbnd._core.utils.basics.cmd_line_builder import CmdLineBuilder
from dbnd_spark import SparkConfig
class SparkCtrl(TaskRunCtrl):
stop_spark_session_on_finish = False
def __init__(self, task_run):
super(SparkCtrl, self).__init__(task_run=task_run)
if self.config.disable_sync:
self.deploy = DisabledTaskSyncCtrl(task_run=task_run)
else:
self.deploy = self._get_deploy_ctrl()
def _get_deploy_ctrl(self):
return self.task_run.deploy
@property
def config(self):
# type: (SparkCtrl) -> SparkConfig
return self.task.spark_config
def run_pyspark(self, pyspark_script):
raise NotImplementedError("This engine doesn't support pyspark jobs")
def run_spark(self, main_class):
raise NotImplementedError("This engine doesn't support spark jobs")
    # Note: the placeholder host (the second element of each value) should be
    # overridden in subclasses.
spark_application_logs = {
"YARN ResourceManager": ["http://", "<master>", ":8088"],
"YARN NodeManager": ["http://", "<core>", ":8088"],
"Hadoop HDFS NameNode": ["http://", "<master>", ":50070"],
"Spark HistoryServer": ["http://", "<master>", ":18080"],
"Ganglia": ["http://", "<master>", "/ganglia"],
}
def config_to_command_line(self):
# type: ()-> List[str]
config = self.config
deploy = self.deploy
cmd = CmdLineBuilder()
if config.conf:
for key, value in six.iteritems(config.conf):
cmd.option("--conf", "{}={}".format(str(key), str(value)))
cmd.option("--files", deploy.arg_files(config.files))
cmd.option("--py-files", deploy.arg_files(self.task.get_py_files()))
cmd.option("--archives", deploy.arg_files(config.archives))
cmd.option("--jars", deploy.arg_files(config.jars))
if config.driver_class_path:
cmd += ["--driver-class-path", config.driver_class_path]
cmd.option("--packages", config.packages)
cmd.option("--exclude-packages", config.exclude_packages)
cmd.option("--repositories", config.repositories)
cmd.option("--num-executors", config.num_executors)
cmd.option("--total-executor-cores", config.total_executor_cores)
cmd.option("--executor-cores", config.executor_cores)
cmd.option("--executor-memory", config.executor_memory)
cmd.option("--driver-memory", config.driver_memory)
cmd.option("--keytab", config.keytab)
cmd.option("--principal", config.principal)
cmd.option("--proxy-user", config.proxy_user)
cmd.option("--queue", config.queue)
cmd.option("--deploy-mode", config.deploy_mode)
cmd.option_bool("--verbose", config.verbose)
if config.submit_args:
cmd.add(*shlex.split(config.submit_args))
return cmd.get_cmd()
def sync(self, local_file):
return self.deploy.sync(local_file)
def _get_env_vars(self, conf_env_vars=None):
env_vars = {
DBND_TASK_RUN_ATTEMPT_UID: str(
current().current_task_run.task_run_attempt_uid
),
ENV_DBND__TRACKING: str(get_dbnd_project_config().is_tracking_mode()),
}
if conf_env_vars is None:
conf_env_vars = self.config.env_vars
if conf_env_vars:
env_vars.update(conf_env_vars)
if self.config.fix_pyspark_imports:
env_vars[ENV_DBND_FIX_PYSPARK_IMPORTS] = "True"
if self.config.disable_pluggy_entrypoint_loading:
# Disable pluggy loading for spark-submitted run
env_vars[ENV_DBND__DISABLE_PLUGGY_ENTRYPOINT_LOADING] = "True"
plugin_modules = [p[0].replace("-", "_") for p in pm.list_name_plugin()]
plugin_modules_formatted = ",".join(plugin_modules)
# Attach all loaded plugins to be manually loaded in submitted run
env_vars[ENV_DBND__CORE__PLUGINS] = plugin_modules_formatted
return env_vars
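# Hedged illustration of config_to_command_line(): with
# conf={"spark.executor.instances": "2"}, executor_memory="4g" and
# verbose=True (all other options unset), the returned list would be roughly
#     ["--conf", "spark.executor.instances=2", "--executor-memory", "4g", "--verbose"]
# in the order the option calls appear above, plus any file/jar arguments.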
|
4761ed417dff9e496a152e1fc2c3a8da4a32c087
|
6186a3787d1e74f1866844491da48b9643c8f1a9
|
/ghostwriter/rolodex/forms_client.py
|
c43e27a3a8baa1caebb8b273aac1cf9a3f908c9d
|
[
"BSD-3-Clause"
] |
permissive
|
GhostManager/Ghostwriter
|
b46b2421e5737ed0afbf49182dce9eeb5eb31936
|
b9eae4459ba192fbb2d4a5b66f8210d57fd7112a
|
refs/heads/master
| 2023-09-04T02:34:54.085997
| 2023-07-13T22:38:44
| 2023-07-13T22:38:44
| 197,269,443
| 1,011
| 197
|
BSD-3-Clause
| 2023-09-08T00:19:52
| 2019-07-16T21:19:43
|
Python
|
UTF-8
|
Python
| false
| false
| 13,495
|
py
|
forms_client.py
|
"""This contains all client-related forms used by the Rolodex application."""
# Django Imports
from django import forms
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.forms.models import BaseInlineFormSet, inlineformset_factory
from django.utils.translation import gettext_lazy as _
# 3rd Party Libraries
from crispy_forms.bootstrap import Alert, FieldWithButtons, TabHolder
from crispy_forms.helper import FormHelper
from crispy_forms.layout import (
HTML,
Button,
ButtonHolder,
Column,
Div,
Field,
Layout,
Row,
Submit,
)
# Ghostwriter Libraries
from ghostwriter.commandcenter.models import GeneralConfiguration
from ghostwriter.modules.custom_layout_object import CustomTab, Formset
from ghostwriter.rolodex.models import Client, ClientContact, ClientNote
# Number of "extra" formsets created by default
# Higher numbers can increase page load times with WYSIWYG editors
EXTRAS = 0
class BaseClientContactInlineFormSet(BaseInlineFormSet):
"""
BaseInlineFormset template for :model:`rolodex.ClientContact` that adds validation
for this model.
"""
def clean(self):
        contacts = []
        super().clean()
if any(self.errors):
return
for form in self.forms:
if form.cleaned_data:
# Only validate if the form is NOT marked for deletion
if form.cleaned_data["DELETE"] is False:
name = form.cleaned_data["name"]
job_title = form.cleaned_data["job_title"]
email = form.cleaned_data["email"]
                    # Check that the same person has not been added more than
                    # once; the error is attached only to the offending form,
                    # so later unique contacts are not flagged.
                    if name:
                        if name in contacts:
                            form.add_error(
                                "name",
                                ValidationError(
                                    _("This person is already assigned as a contact"),
                                    code="duplicate",
                                ),
                            )
                        contacts.append(name)
# Raise an error if a name is provided without any required details
if name and any(x is None for x in [job_title, email]):
if not job_title:
form.add_error(
"job_title",
ValidationError(
_("This person is missing a job title / role"),
code="incomplete",
),
)
if not email:
form.add_error(
"email",
ValidationError(
_("This person is missing an email address"),
code="incomplete",
),
)
# Check that the email address is in a valid format
if email:
try:
validate_email(email)
except ValidationError:
form.add_error(
"email",
ValidationError(
_("Enter a valid email address for this contact"),
code="invalid",
),
)
class ClientContactForm(forms.ModelForm):
"""
Save an individual :model:`rolodex.ClientContact` associated with an individual
:model:`rolodex.Client`.
"""
class Meta:
model = ClientContact
exclude = ("client",)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
general_config = GeneralConfiguration.get_solo()
for field in self.fields:
self.fields[field].widget.attrs["autocomplete"] = "off"
self.fields["name"].widget.attrs["placeholder"] = "Janine Melnitz"
self.fields["name"].label = "Full Name"
self.fields["email"].widget.attrs["placeholder"] = "info@getghostwriter.io"
self.fields["email"].label = "Email Address"
self.fields["job_title"].widget.attrs["placeholder"] = "COO"
self.fields["phone"].widget.attrs["placeholder"] = "(212) 897-1964"
self.fields["phone"].label = "Phone Number"
self.fields["note"].widget.attrs["placeholder"] = "Janine is our main contact for assessment work and ..."
self.fields["timezone"].initial = general_config.default_timezone
self.helper = FormHelper()
# Disable the <form> tags because this will be part of an instance of `ClientForm()`
self.helper.form_tag = False
# Disable CSRF so `csrfmiddlewaretoken` is not rendered multiple times
self.helper.disable_csrf = True
# Layout the form for Bootstrap
self.helper.layout = Layout(
# Wrap form in a div so Django renders form instances in their own element
Div(
# These Bootstrap alerts begin hidden and function as undo buttons for deleted forms
Alert(
content=(
"""
<strong>Contact Deleted!</strong>
Deletion will be permanent once the form is submitted. Click this alert to undo.
"""
),
css_class="alert alert-danger show formset-undo-button",
style="display:none; cursor:pointer;",
template="alert.html",
block=False,
dismiss=False,
),
Div(
HTML(
"""
<h6>Contact #<span class="counter">{{ forloop.counter }}</span></h6>
<hr>
"""
),
Row(
Column("name", css_class="form-group col-md-6 mb-0"),
Column("job_title", css_class="form-group col-md-6 mb-0"),
css_class="form-row",
),
Row(
Column("email", css_class="form-group col-md-4 mb-0"),
Column("phone", css_class="form-group col-md-4 mb-0"),
Column("timezone", css_class="form-group col-md-4 mb-0"),
css_class="form-row",
),
"note",
Row(
Column(
Button(
"formset-del-button",
"Delete Contact",
css_class="btn-outline-danger formset-del-button col-4",
),
css_class="form-group col-6 offset-3",
),
Column(
Field(
"DELETE", style="display: none;", visibility="hidden", template="delete_checkbox.html"
),
css_class="form-group col-3 text-center",
),
),
css_class="formset",
),
css_class="formset-container",
)
)
# Create the ``inlineformset_factory()`` objects for ``ClientForm()``
ClientContactFormSet = inlineformset_factory(
Client,
ClientContact,
form=ClientContactForm,
formset=BaseClientContactInlineFormSet,
    extra=EXTRAS,
can_delete=True,
)
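# Hedged sketch of the duplicate-contact validation above. The "contacts"
# prefix is chosen explicitly, the management-form keys follow Django's
# formset conventions, and the remaining model fields are assumed optional.
_example_contact_data = {
    "contacts-TOTAL_FORMS": "2",
    "contacts-INITIAL_FORMS": "0",
    "contacts-0-name": "Janine Melnitz",
    "contacts-0-job_title": "COO",
    "contacts-0-email": "janine@example.com",
    "contacts-1-name": "Janine Melnitz",  # duplicate name -> "already assigned" error
    "contacts-1-job_title": "CFO",
    "contacts-1-email": "janine2@example.com",
}
# ClientContactFormSet(_example_contact_data, prefix="contacts").is_valid() -> False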
class ClientForm(forms.ModelForm):
"""
Save an individual :model:`rolodex.Client` with instances of :model:`rolodex.ClientContact`.
"""
class Meta:
model = Client
fields = "__all__"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
general_config = GeneralConfiguration.get_solo()
for field in self.fields:
self.fields[field].widget.attrs["autocomplete"] = "off"
self.fields["name"].widget.attrs["placeholder"] = "SpecterOps"
self.fields["short_name"].widget.attrs["placeholder"] = "Specter"
self.fields["note"].widget.attrs["placeholder"] = "This client approached us with concerns in these areas ..."
self.fields["address"].widget.attrs["placeholder"] = "14 N Moore St, New York, NY 10013"
self.fields["timezone"].initial = general_config.default_timezone
self.fields["tags"].widget.attrs["placeholder"] = "cybersecurity, industry:infosec, ..."
self.fields["note"].label = "Notes"
self.fields["tags"].label = "Tags"
# Design form layout with Crispy FormHelper
self.helper = FormHelper()
# Turn on <form> tags for this parent form
self.helper.form_tag = True
self.helper.form_method = "post"
self.helper.layout = Layout(
TabHolder(
CustomTab(
"Client Information",
HTML(
"""
<p class="form-spacer"></p>
"""
),
Row(
Column("name", css_class="form-group col-md-6 mb-0"),
Column("short_name", css_class="form-group col-md-6 mb-0"),
css_class="form-row",
),
Row(
Column("tags", css_class="form-group col-md-4 mb-0"),
Column(
FieldWithButtons(
"codename",
HTML(
"""
<button
class="btn btn-secondary js-roll-codename"
roll-codename-url="{% url 'rolodex:ajax_roll_codename' %}"
type="button"
>
<i class="fas fa-dice"></i>
</button>
"""
),
),
css_class="col-md-4",
),
Column("timezone", css_class="form-group col-md-4 mb-0"),
),
"address",
"note",
link_css_class="client-icon",
css_id="client",
),
CustomTab(
"Points of Contact",
HTML(
"""
<p class="form-spacer"></p>
"""
),
Formset("contacts", object_context_name="Contact"),
Button(
"add-contact",
"Add Contact",
css_class="btn-block btn-secondary formset-add-poc",
),
HTML(
"""
<p class="form-spacer"></p>
"""
),
link_css_class="poc-icon",
css_id="contacts",
),
template="tab.html",
css_class="nav-justified",
),
ButtonHolder(
Submit("submit", "Submit", css_class="btn btn-primary col-md-4"),
HTML(
"""
<button onclick="window.location.href='{{ cancel_link }}'"
class="btn btn-outline-secondary col-md-4" type="button">Cancel</button>
"""
),
),
)
class ClientNoteForm(forms.ModelForm):
"""
Save an individual :model:`rolodex.ClientNote` associated with an individual
:model:`rolodex.Client`.
"""
class Meta:
model = ClientNote
fields = ("note",)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_method = "post"
self.helper.form_show_labels = False
self.helper.layout = Layout(
Div("note"),
ButtonHolder(
Submit("submit", "Submit", css_class="btn btn-primary col-md-4"),
HTML(
"""
<button onclick="window.location.href='{{ cancel_link }}'"
class="btn btn-outline-secondary col-md-4" type="button">Cancel</button>
"""
),
),
)
def clean_note(self):
note = self.cleaned_data["note"]
# Check if note is empty
if not note:
raise ValidationError(
_("You must provide some content for the note"),
code="required",
)
return note
|
faabb36d4c30580bec5dfc154546792a688a80fc
|
90d02fee4d02962c9e3d03314cd1597c70bf2f8c
|
/asdf/compression.py
|
8c0e2741d586c92134de1c5b5854a6c7bb819bfc
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
asdf-format/asdf
|
08e19f5d603c738b0ae94ccd1a339ff6b8cf4209
|
a5b2b2d94f2fc71746f896c6d322439a27dd0bdd
|
refs/heads/main
| 2023-08-17T17:06:20.828932
| 2023-08-08T10:53:27
| 2023-08-08T10:53:27
| 18,112,754
| 328
| 25
|
BSD-3-Clause
| 2023-09-13T15:57:22
| 2014-03-25T19:00:43
|
Python
|
UTF-8
|
Python
| false
| false
| 10,587
|
py
|
compression.py
|
import bz2
import struct
import warnings
import zlib
import numpy as np
from .config import get_config
from .exceptions import AsdfWarning
def validate(compression):
"""
Validate the compression string.
Parameters
----------
compression : str, bytes or None
Returns
-------
compression : str or None
In canonical form.
Raises
------
ValueError
"""
if not compression or compression == b"\0\0\0\0":
return None
if isinstance(compression, bytes):
compression = compression.decode("ascii")
compression = compression.strip("\0")
builtin_labels = ["zlib", "bzp2", "lz4", "input"]
ext_labels = _get_all_compression_extension_labels()
all_labels = ext_labels + builtin_labels
# An extension is allowed to override a builtin compression or another extension,
# but let's warn the user of this.
# TODO: is this the desired behavior?
for i, label in enumerate(all_labels):
if label in all_labels[i + 1 :]:
warnings.warn(f'Found more than one compressor for "{label}"', AsdfWarning)
if compression not in all_labels:
msg = f"Supported compression types are: {all_labels}, not '{compression}'"
raise ValueError(msg)
return compression
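# Illustrative behavior of validate() (added commentary; assumes only the
# builtin compressors are registered):
#
#     validate(None)           # -> None
#     validate(b"\0\0\0\0")    # -> None (all-NUL header means "uncompressed")
#     validate(b"zlib")        # -> "zlib" (bytes decoded, NUL padding stripped)
#     validate("nosuchcodec")  # -> raises ValueError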
class Lz4Compressor:
def __init__(self):
try:
import lz4.block
except ImportError as err:
msg = (
"lz4 library in not installed in your Python environment, "
"therefore the compressed block in this ASDF file "
"can not be decompressed."
)
raise ImportError(msg) from err
self._api = lz4.block
def compress(self, data, **kwargs):
kwargs["mode"] = kwargs.get("mode", "default")
compression_block_size = kwargs.pop("compression_block_size", 1 << 22)
nelem = compression_block_size // data.itemsize
for i in range(0, len(data), nelem):
_output = self._api.compress(data[i : i + nelem], **kwargs)
header = struct.pack("!I", len(_output))
yield header + _output
def decompress(self, blocks, out, **kwargs):
_size = 0
_pos = 0
_partial_len = b""
_buffer = None
bytesout = 0
for block in blocks:
cast = "c"
blk = memoryview(block).cast(cast) # don't copy on slice
while len(blk):
if not _size:
# Don't know the (compressed) length of this block yet
if len(_partial_len) + len(blk) < 4:
_partial_len += blk
break # we've exhausted the block
if _partial_len:
# If we started to fill a len key, finish filling it
remaining = 4 - len(_partial_len)
if remaining:
_partial_len += blk[:remaining]
blk = blk[remaining:]
_size = struct.unpack("!I", _partial_len)[0]
_partial_len = b""
else:
# Otherwise just read the len key directly
_size = struct.unpack("!I", blk[:4])[0]
blk = blk[4:]
if len(blk) < _size or _buffer is not None:
# If we have a partial block, or we're already filling a buffer, use the buffer
if _buffer is None:
_buffer = np.empty(
_size,
dtype=np.byte,
) # use numpy instead of bytearray so we can avoid zero initialization
_pos = 0
newbytes = min(_size - _pos, len(blk)) # don't fill past the buffer len!
_buffer[_pos : _pos + newbytes] = np.frombuffer(blk[:newbytes], dtype=np.byte)
_pos += newbytes
blk = blk[newbytes:]
if _pos == _size:
_out = self._api.decompress(_buffer, return_bytearray=True, **kwargs)
out[bytesout : bytesout + len(_out)] = _out
bytesout += len(_out)
_buffer = None
_size = 0
else:
# We have at least one full block
_out = self._api.decompress(memoryview(blk[:_size]), return_bytearray=True, **kwargs)
out[bytesout : bytesout + len(_out)] = _out
bytesout += len(_out)
blk = blk[_size:]
_size = 0
return bytesout
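# Framing note (added commentary): each chunk yielded by Lz4Compressor.compress()
# is prefixed with a 4-byte big-endian ("!I") length header, so the stream is
#
#     [len0][lz4 block 0][len1][lz4 block 1]...
#
# decompress() re-parses this framing even when the caller hands it arbitrary
# byte boundaries, buffering partial length fields and partial blocks as needed.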
class ZlibCompressor:
def compress(self, data, **kwargs):
comp = zlib.compress(data, **kwargs)
yield comp
def decompress(self, blocks, out, **kwargs):
decompressor = zlib.decompressobj(**kwargs)
i = 0
for block in blocks:
decomp = decompressor.decompress(block)
out[i : i + len(decomp)] = decomp
i += len(decomp)
return i
class Bzp2Compressor:
def compress(self, data, **kwargs):
comp = bz2.compress(data, **kwargs)
yield comp
def decompress(self, blocks, out, **kwargs):
decompressor = bz2.BZ2Decompressor(**kwargs)
i = 0
for block in blocks:
decomp = decompressor.decompress(block)
out[i : i + len(decomp)] = decomp
i += len(decomp)
return i
def _get_compressor_from_extensions(compression, return_extension=False):
"""
Look at the loaded ASDF extensions and return the first one (if any)
that can handle this type of compression.
    `return_extension` can be used to also return the corresponding extension for bookkeeping purposes.
Returns None if no match found.
"""
# TODO: in ASDF 3, this will be done by the ExtensionManager
extensions = get_config().extensions
for ext in extensions:
for comp in ext.compressors:
if compression == comp.label.decode("ascii"):
if return_extension:
return comp, ext
return comp
return None
def _get_all_compression_extension_labels():
"""
Get the list of compression labels supported via extensions
"""
# TODO: in ASDF 3, this will be done by the ExtensionManager
labels = []
extensions = get_config().extensions
for ext in extensions:
for comp in ext.compressors:
labels += [comp.label.decode("ascii")]
return labels
def _get_compressor(label):
ext_comp = _get_compressor_from_extensions(label)
if ext_comp is not None:
# Use an extension before builtins
comp = ext_comp
elif label == "zlib":
comp = ZlibCompressor()
elif label == "bzp2":
comp = Bzp2Compressor()
elif label == "lz4":
comp = Lz4Compressor()
else:
msg = f"Unknown compression type: '{label}'"
raise ValueError(msg)
return comp
def to_compression_header(compression):
"""
Converts a compression string to the four byte field in a block
header.
"""
if not compression:
return b""
if isinstance(compression, str):
return compression.encode("ascii")
return compression
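# Illustrative behavior (added commentary):
#
#     to_compression_header(None)    # -> b""
#     to_compression_header("zlib")  # -> b"zlib"
#     to_compression_header(b"bzp2") # -> b"bzp2" (bytes pass through unchanged)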
def decompress(fd, used_size, data_size, compression, config=None):
"""
Decompress binary data in a file
Parameters
----------
fd : generic_io.GenericIO object
The file to read the compressed data from.
used_size : int
The size of the compressed data
data_size : int
The size of the uncompressed data
compression : str
The compression type used.
config : dict or None, optional
Any kwarg parameters to pass to the underlying decompression
function
Returns
-------
array : numpy.array
        A flat uint8 array containing the decompressed data.
"""
buffer = np.empty((data_size,), np.uint8)
compression = validate(compression)
decoder = _get_compressor(compression)
if config is None:
config = {}
blocks = fd.read_blocks(used_size) # data is a generator
len_decoded = decoder.decompress(blocks, out=buffer.data, **config)
if len_decoded != data_size:
msg = "Decompressed data wrong size"
raise ValueError(msg)
return buffer
def compress(fd, data, compression, config=None):
"""
Compress array data and write to a file.
Parameters
----------
fd : generic_io.GenericIO object
The file to write to.
data : buffer
The buffer of uncompressed data.
compression : str
The type of compression to use.
config : dict or None, optional
Any kwarg parameters to pass to the underlying compression
function
"""
compression = validate(compression)
encoder = _get_compressor(compression)
if config is None:
config = {}
# Get a contiguous, 1D memoryview of the underlying data, preserving data.itemsize
# - contiguous: because we may not want to assume that all compressors can handle arbitrary strides
# - 1D: so that len(data) works, not just data.nbytes
# - itemsize: should preserve data.itemsize for compressors that want to use the record size
# - memoryview: don't incur the expense of a memcpy, such as with tobytes()
data = memoryview(data)
if not data.contiguous:
data = memoryview(data.tobytes()) # make a contiguous copy
data = memoryview(np.frombuffer(data, dtype=data.format)) # get a 1D array that preserves byteorder
if not data.contiguous:
# the data will be contiguous by construction, but better safe than sorry!
raise ValueError(data.contiguous)
compressed = encoder.compress(data, **config)
# Write block by block
for comp in compressed:
fd.write(comp)
def get_compressed_size(data, compression, config=None):
"""
Returns the number of bytes required when the given data is
compressed.
Parameters
----------
See `compress()`.
Returns
-------
nbytes : int
The size of the compressed data
"""
class _ByteCountingFile:
def __init__(self):
self.count = 0
def write(self, data):
self.count += len(data)
bcf = _ByteCountingFile()
compress(bcf, data, compression, config=config)
return bcf.count
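# Minimal usage sketch (added commentary; assumes this module is importable
# and that zlib support is available):
#
#     import numpy as np
#     data = np.zeros(1024, dtype=np.uint8)
#     nbytes = get_compressed_size(data, "zlib")
#     assert nbytes < data.nbytes  # highly repetitive data compresses well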
|
f443120953734e259b490c6c443b181347890a7c
|
2a1b8a671aceda6bc446f8ce26400aa84fa444a6
|
/Packs/Salesforce/Integrations/SalesForceEventCollector/SalesForceEventCollector.py
|
31014fcb416c45e9b32b5bb7aead4e7d43c6c428
|
[
"MIT"
] |
permissive
|
demisto/content
|
6d4722d46f0ff0beea2748e9f7de585bf91a78b4
|
890def5a0e0ae8d6eaa538148249ddbc851dbb6b
|
refs/heads/master
| 2023-09-04T00:02:25.618032
| 2023-09-03T21:56:22
| 2023-09-03T21:56:22
| 60,525,392
| 1,023
| 1,921
|
MIT
| 2023-09-14T20:55:24
| 2016-06-06T12:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 8,806
|
py
|
SalesForceEventCollector.py
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
import urllib3
from collections.abc import Generator
import tempfile
import requests
import csv
from SiemApiModule import * # noqa: E402
urllib3.disable_warnings()
VENDOR = "salesforce"
PRODUCT = 'event-audit'
class SalesforceClient(IntegrationEventsClient):
def set_request_filter(self, after: str):
return
class SalesforceGetEvents(IntegrationGetEvents):
"""
A class to handle the flow of the integration
"""
def __init__(self, client: SalesforceClient, options: IntegrationOptions,
files_limit: int, query: str, after: str, last_id: str) -> None:
self.client: SalesforceClient = client
self.instance_url: str = ''
self.query: str = query
self.files_limit: int = files_limit
self.after: str = after
self.last_id: str = last_id
self.last_file: dict = {}
super().__init__(client, options)
def get_token(self):
res = self.client.call(self.client.request).json()
self.client.request.headers = {'Authorization': f"Bearer {res.get('access_token')}"}
self.instance_url = res.get('instance_url')
def pull_log_files(self):
query = f'{self.query}+and+CreatedDate+>+{self.after} limit {self.files_limit}'
demisto.info('Searching files last modified from {}'.format(self.after))
        # use the instance URL returned by the token endpoint rather than a hardcoded pod
        url = f'{self.instance_url}/services/data/v44.0/query?q={query}'
self.client.request.url = url
self.client.request.method = Method.GET
res = self.client.call(self.client.request).json()
return self.get_files_from_res(res)
def get_files_from_res(self, query_res):
files = query_res['records']
done_status = query_res['done']
while done_status is False:
query = query_res['nextRecordsUrl']
try:
self.client.request.url = f'{self.instance_url}{query}'
self.client.request.method = Method.GET
query_res = self.client.call(self.client.request).json()
except Exception as err:
                demisto.error(f'Failed to get the file list: {err}')
done_status = query_res['done']
for file in query_res['records']:
files.append(file)
demisto.info('Total number of files is {}.'.format(len(files)))
# sort all files by date
files.sort(key=lambda k: dateparser.parse(k.get('LogDate')))
if not self.last_id:
return files
        # filter out the files we already fetched to avoid duplicates
last_id_found = False
new_files = []
for file in files:
if last_id_found:
new_files.append(file)
if file['Id'] == self.last_id:
last_id_found = True
return new_files
def get_file_raw_lines(self, file_url, file_in_tmp_path):
url = f'{self.instance_url}{file_url}'
try:
r = requests.get(url, stream=True, headers=self.client.request.headers)
if r.status_code == 401:
self.get_token()
r = requests.get(url, stream=True, headers=self.client.request.headers)
with open(file_in_tmp_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024 * 1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
if r.status_code == 200:
demisto.info(f'File successfully downloaded from url {url}')
else:
demisto.info(f'File downloading failed. {r.status_code} {r.text} {file_url}')
except Exception as err:
demisto.error(f'File downloading failed. {err} {file_url}')
@staticmethod
def gen_chunks_to_object(file_in_tmp_path, chunksize=100):
field_names = [name.lower() for name in list(csv.reader(open(file_in_tmp_path)))[0]]
field_names = [x if x != 'type' else 'type_' for x in field_names]
reader = csv.DictReader(open(file_in_tmp_path), fieldnames=field_names)
chunk: list = []
next(reader)
for index, line in enumerate(reader):
if index % chunksize == 0 and index > 0:
yield chunk
del chunk[:]
chunk.append(line)
yield chunk
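    # Illustrative behavior of gen_chunks_to_object (added commentary): for a
    # CSV with header "EVENT_TYPE,TYPE,USER_ID", each yielded chunk is a list
    # of dicts with lowercased keys and 'type' renamed to 'type_', e.g.
    #
    #     [{'event_type': 'Login', 'type_': 'A', 'user_id': '1'}, ...]
    #
    # Note that the same list object is cleared and reused between yields, so
    # consumers must copy or consume each chunk before advancing the generator.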
def _iter_events(self) -> Generator:
self.get_token()
temp_dir = tempfile.TemporaryDirectory()
log_files = self.pull_log_files()
# save the last file to get the recent file id and the date we fetched
        # to filter only the new files in the next run
if log_files:
self.last_file = log_files[-1]
for line in log_files:
events_list = []
local_filename = line["LogFile"].replace('/', '_').replace(':', '_')
file_in_tmp_path = "{}/{}".format(temp_dir.name, local_filename)
self.get_file_raw_lines(line["LogFile"], file_in_tmp_path)
for chunk in self.gen_chunks_to_object(file_in_tmp_path=file_in_tmp_path, chunksize=2000):
events_list.extend(chunk)
yield events_list
def get_last_run_details(self) -> dict:
"""
Get the log time and the file id to prevent duplications in the next run
"""
last_file = self.last_file
if last_file:
last_timestamp = last_file['LogDate']
timestamp = dateparser.parse(last_timestamp)
if timestamp is None:
raise TypeError('Failed to parse LogDate')
return {'after': timestamp.strftime("%Y-%m-%dT%H:%M:%SZ"),
'last_id': last_file['Id']}
return {}
@staticmethod
def get_last_run(events: list) -> dict:
return {}
def get_timestamp_format(value):
timestamp: Optional[datetime]
if isinstance(value, int):
value = str(value)
    if not isinstance(value, datetime):
        timestamp = dateparser.parse(value)
    else:
        timestamp = value
if timestamp is None:
raise TypeError(f'after is not a valid time {value}')
return timestamp.strftime("%Y-%m-%dT%H:%M:%SZ")
def main():
    # Params are overridden by args, which are overridden by the last run
demisto_params = demisto.params() | demisto.args() | demisto.getLastRun()
demisto_params['client_id'] = demisto_params['client_id']['password']
demisto_params['client_secret'] = demisto_params['client_secret']['password']
demisto_params['password'] = demisto_params['credentials']['password']
demisto_params['username'] = demisto_params['credentials']['identifier']
files_limit = int(demisto_params.get('files_limit'))
should_push_events = argToBoolean(demisto_params.get('should_push_events', 'false'))
demisto_params['method'] = Method.POST
request = IntegrationHTTPRequest(**demisto_params)
# add the params to the url in order to make the request without decoding the params
url = urljoin(demisto_params.get("url"), 'services/oauth2/token')
request.url = f'{url}?grant_type=password&' \
f'client_id={demisto_params.get("client_id")}&' \
f'client_secret={demisto_params.get("client_secret")}&' \
f'username={demisto_params.get("username")}&' \
f'password={demisto_params.get("password")}'
options = IntegrationOptions.parse_obj(demisto_params)
client = SalesforceClient(request, options)
after = get_timestamp_format(demisto_params.get('after'))
get_events = SalesforceGetEvents(client, options, files_limit, demisto_params.get('query'),
after, demisto_params.get('last_id'))
command = demisto.command()
try:
if command == 'test-module':
get_events.files_limit = 1
get_events.run()
return_results('ok')
elif command in ('salesforce-get-events', 'fetch-events'):
events = get_events.run()
if command == 'fetch-events':
send_events_to_xsiam(events, vendor=VENDOR, product=PRODUCT)
demisto.setLastRun(get_events.get_last_run_details())
elif command == 'salesforce-get-events':
command_results = CommandResults(
readable_output=tableToMarkdown('salesforce audit Logs', events, headerTransform=pascalToSpace),
raw_response=events,
)
return_results(command_results)
if should_push_events:
send_events_to_xsiam(events, vendor=VENDOR, product=PRODUCT)
except Exception as e:
return_error(str(e))
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
388354aab2b8a17913c698e7cc29009e77d03a3f
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/google_maps/__init__.py
|
929df26fa0f389fe526042c6177c327973586757
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 33
|
py
|
__init__.py
|
"""The google_maps component."""
|
31d62f7dc4836803af8a6046e662320764f624b7
|
ea1089efcc1a67b1d2352f1a650d8c27cf22585b
|
/django_jinja/builtins/filters.py
|
3a4bfef977b11c9cbbe471f76085887fb85a5c13
|
[
"BSD-2-Clause"
] |
permissive
|
niwinz/django-jinja
|
8646fced6a441b0c1e1de7464b099fde056d7ee2
|
59fec9e388eca203a3d73a5133f8a1130aee07bd
|
refs/heads/master
| 2023-09-04T02:56:41.733130
| 2023-09-03T21:00:00
| 2023-09-03T21:00:00
| 3,247,274
| 232
| 95
|
BSD-3-Clause
| 2023-09-03T20:06:38
| 2012-01-23T14:34:36
|
Python
|
UTF-8
|
Python
| false
| false
| 3,306
|
py
|
filters.py
|
from django.utils.encoding import force_str
from django.urls import reverse as django_reverse
from django.contrib.staticfiles.storage import staticfiles_storage
def reverse(value, *args, **kwargs):
"""
    Shortcut filter for reversing URLs in templates. It is an alternative to
    the django {% url %} tag, but simpler.
Usage example:
{{ 'web:timeline'|reverse(userid=2) }}
    This is equivalent to the django:
{% url 'web:timeline' userid=2 %}
"""
return django_reverse(value, args=args, kwargs=kwargs)
def static(path):
return staticfiles_storage.url(path)
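# Template usage sketch (added commentary; assumes django-jinja has registered
# these callables as Jinja2 filters, as its builtins machinery does):
#
#     <img src="{{ 'images/logo.png'|static }}">
#     <a href="{{ 'web:timeline'|reverse(userid=2) }}">timeline</a>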
from django.template.defaultfilters import addslashes
from django.template.defaultfilters import capfirst
from django.utils.html import escapejs as escapejs_filter
# from django.utils.html import fix_ampersands as fix_ampersands_filter
from django.template.defaultfilters import floatformat
from django.template.defaultfilters import iriencode
from django.template.defaultfilters import linenumbers
from django.template.defaultfilters import make_list
from django.template.defaultfilters import stringformat
from django.template.defaultfilters import title
from django.template.defaultfilters import truncatechars
from django.template.defaultfilters import truncatechars_html
from django.template.defaultfilters import truncatewords
from django.template.defaultfilters import truncatewords_html
from django.template.defaultfilters import upper
from django.template.defaultfilters import lower
from django.template.defaultfilters import urlencode
from django.template.defaultfilters import urlize
from django.template.defaultfilters import urlizetrunc
from django.template.defaultfilters import wordcount
from django.template.defaultfilters import wordwrap
from django.template.defaultfilters import ljust
from django.template.defaultfilters import rjust
from django.template.defaultfilters import center
from django.template.defaultfilters import cut
from django.template.defaultfilters import linebreaks_filter
from django.template.defaultfilters import linebreaksbr
from django.template.defaultfilters import striptags
from django.template.defaultfilters import join
from django.template.defaultfilters import length
from django.template.defaultfilters import random
from django.template.defaultfilters import add
from django.template.defaultfilters import date
from django.template.defaultfilters import time
from django.template.defaultfilters import timesince_filter
from django.template.defaultfilters import timeuntil_filter
from django.template.defaultfilters import default
from django.template.defaultfilters import default_if_none
from django.template.defaultfilters import divisibleby
from django.template.defaultfilters import yesno
from django.template.defaultfilters import filesizeformat
from django.template.defaultfilters import pprint
from django.template.defaultfilters import pluralize
from django.template.defaultfilters import json_script
from django.utils.text import slugify as djslugify
def slugify(value):
return djslugify(force_str(value))
from functools import partial
linebreaksbr = partial(linebreaksbr, autoescape=True)
# TZ
from django.templatetags.tz import do_timezone as timezone
from django.templatetags.tz import localtime
from django.templatetags.tz import utc
|
3dc1d78ddb55e4b8fa623c5cfa14acd275cc948b
|
409b066a195db9bb6c65abcfbc1c312b077522b4
|
/doc/_source/codes/scheme_D1Q3_wave.py
|
4cd2f7e1dbbe5165ebeb07ab4deb1776667def36
|
[
"BSD-3-Clause"
] |
permissive
|
pylbm/pylbm
|
41311bc817219e9d8e4b8b4983865c556edbdc1b
|
4876d32a67c98cefa50ebb85aa3c232605edc8ea
|
refs/heads/master
| 2023-07-19T04:35:23.545111
| 2023-07-11T16:43:09
| 2023-07-11T16:43:09
| 61,373,685
| 137
| 42
|
NOASSERTION
| 2023-07-11T16:43:11
| 2016-06-17T12:58:15
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 623
|
py
|
scheme_D1Q3_wave.py
|
# Authors:
# Loic Gouarin <loic.gouarin@polytechnique.edu>
# Benjamin Graille <benjamin.graille@math.u-psud.fr>
#
# License: BSD 3 clause
"""
Example of a D1Q3 for the wave equation
"""
import sympy as sp
import pylbm
u, v, X = sp.symbols("u, v, X")
c = 0.5
d = {
"dim": 1,
"scheme_velocity": 1.0,
"schemes": [
{
"velocities": [0, 1, 2],
"conserved_moments": [u, v],
"polynomials": [1, X, 0.5 * X**2],
"equilibrium": [u, v, 0.5 * c**2 * u],
"relaxation_parameters": [0.0, 0.0, 1.9],
},
],
}
s = pylbm.Scheme(d)
print(s)
|
db899652522c76d29da72aa46452517b49e72c09
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/Validation/RecoHI/python/TrackValidationHeavyIons_cff.py
|
0b15dd38c78fe46a64775db31d6f9bd08476daff
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 1,919
|
py
|
TrackValidationHeavyIons_cff.py
|
import FWCore.ParameterSet.Config as cms
# track associator settings
import SimTracker.TrackAssociatorProducers.quickTrackAssociatorByHits_cfi
# to do the track<->TP association with TrackerHitAssociator
trackAssociatorByHitsRecoDenom = SimTracker.TrackAssociatorProducers.quickTrackAssociatorByHits_cfi.quickTrackAssociatorByHitsTrackerHitAssociator.clone()
from SimGeneral.TrackingAnalysis.trackingParticleNumberOfLayersProducer_cff import *
# reco track quality cuts
from Validation.RecoTrack.cuts_cff import *
cutsRecoTracks.src = "hiGeneralTracks"
cutsRecoTracks.ptMin = 2.0
cutsRecoTracks.quality = []
# high purity selection
cutsRecoTracksHP = cutsRecoTracks.clone( quality = cms.vstring("highPurity") )
# sim track quality cuts
from Validation.RecoHI.selectSimTracks_cff import *
findableSimTracks.ptMin = 2.0
# setup multi-track validator
from Validation.RecoTrack.MultiTrackValidator_cff import *
hiTrackValidator = multiTrackValidator.clone(
associators = ["trackAssociatorByHitsRecoDenom"],
UseAssociators = True,
label_tp_effic = "primaryChgSimTracks",
label_tp_fake = "cutsTPFake",
label_tp_effic_refvector = True,
label_tp_fake_refvector = True,
signalOnlyTP = False,
trackCollectionForDrCalculation = "cutsRecoTracks",
minpT = cms.double(1.0),
maxpT = cms.double(100.0),
nintpT = cms.int32(40),
useLogPt = cms.untracked.bool(True),
cores = ""
)
hiTrackValidator.label = cms.VInputTag(cms.InputTag('cutsRecoTracks'),
cms.InputTag('cutsRecoTracksHP')
)
# track prevalidation
hiTrackPrevalidation = cms.Sequence(
primaryChgSimTracks
* cutsTPFake
* cutsRecoTracks
* cutsRecoTracksHP
* trackingParticleNumberOfLayersProducer
)
# track validation sequence
hiTrackValidation = cms.Sequence( trackAssociatorByHitsRecoDenom * hiTrackValidator )
|
9e9c8b374a6c836acf7831595e7d5e4a99b91ddb
|
0db19410e9751790af8ce4a0a9332293e379c02f
|
/mmpose/codecs/utils/post_processing.py
|
75356388dc408d8dda0a72324aa16c3b4f3b6068
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmpose
|
2c9986521d35eee35d822fb255e8e68486026d94
|
537bd8e543ab463fb55120d5caaa1ae22d6aaf06
|
refs/heads/main
| 2023-08-30T19:44:21.349410
| 2023-07-04T13:18:22
| 2023-07-04T13:18:22
| 278,003,645
| 4,037
| 1,171
|
Apache-2.0
| 2023-09-14T09:44:55
| 2020-07-08T06:02:55
|
Python
|
UTF-8
|
Python
| false
| false
| 7,054
|
py
|
post_processing.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from itertools import product
from typing import Tuple
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
def get_simcc_normalized(batch_pred_simcc, sigma=None):
"""Normalize the predicted SimCC.
Args:
batch_pred_simcc (torch.Tensor): The predicted SimCC.
sigma (float): The sigma of the Gaussian distribution.
Returns:
torch.Tensor: The normalized SimCC.
"""
B, K, _ = batch_pred_simcc.shape
# Scale and clamp the tensor
if sigma is not None:
batch_pred_simcc = batch_pred_simcc / (sigma * np.sqrt(np.pi * 2))
batch_pred_simcc = batch_pred_simcc.clamp(min=0)
# Compute the binary mask
mask = (batch_pred_simcc.amax(dim=-1) > 1).reshape(B, K, 1)
# Normalize the tensor using the maximum value
norm = (batch_pred_simcc / batch_pred_simcc.amax(dim=-1).reshape(B, K, 1))
# Apply normalization
batch_pred_simcc = torch.where(mask, norm, batch_pred_simcc)
return batch_pred_simcc
def get_simcc_maximum(simcc_x: np.ndarray,
simcc_y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Get maximum response location and value from simcc representations.
Note:
instance number: N
num_keypoints: K
heatmap height: H
heatmap width: W
Args:
simcc_x (np.ndarray): x-axis SimCC in shape (K, Wx) or (N, K, Wx)
simcc_y (np.ndarray): y-axis SimCC in shape (K, Wy) or (N, K, Wy)
Returns:
tuple:
- locs (np.ndarray): locations of maximum heatmap responses in shape
(K, 2) or (N, K, 2)
- vals (np.ndarray): values of maximum heatmap responses in shape
(K,) or (N, K)
"""
assert isinstance(simcc_x, np.ndarray), ('simcc_x should be numpy.ndarray')
assert isinstance(simcc_y, np.ndarray), ('simcc_y should be numpy.ndarray')
assert simcc_x.ndim == 2 or simcc_x.ndim == 3, (
f'Invalid shape {simcc_x.shape}')
assert simcc_y.ndim == 2 or simcc_y.ndim == 3, (
f'Invalid shape {simcc_y.shape}')
assert simcc_x.ndim == simcc_y.ndim, (
f'{simcc_x.shape} != {simcc_y.shape}')
if simcc_x.ndim == 3:
N, K, Wx = simcc_x.shape
simcc_x = simcc_x.reshape(N * K, -1)
simcc_y = simcc_y.reshape(N * K, -1)
else:
N = None
x_locs = np.argmax(simcc_x, axis=1)
y_locs = np.argmax(simcc_y, axis=1)
locs = np.stack((x_locs, y_locs), axis=-1).astype(np.float32)
max_val_x = np.amax(simcc_x, axis=1)
max_val_y = np.amax(simcc_y, axis=1)
mask = max_val_x > max_val_y
max_val_x[mask] = max_val_y[mask]
vals = max_val_x
locs[vals <= 0.] = -1
if N:
locs = locs.reshape(N, K, 2)
vals = vals.reshape(N, K)
return locs, vals
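# Tiny worked example for get_simcc_maximum (added commentary):
#
#     simcc_x = np.array([[0.1, 0.9, 0.2]])  # K=1, Wx=3 -> argmax at x=1
#     simcc_y = np.array([[0.7, 0.1, 0.1]])  # K=1, Wy=3 -> argmax at y=0
#     locs, vals = get_simcc_maximum(simcc_x, simcc_y)
#     # locs == [[1., 0.]]; vals == [0.7], the smaller of the two axis maxima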
def get_heatmap_maximum(heatmaps: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Get maximum response location and value from heatmaps.
Note:
batch_size: B
num_keypoints: K
heatmap height: H
heatmap width: W
Args:
heatmaps (np.ndarray): Heatmaps in shape (K, H, W) or (B, K, H, W)
Returns:
tuple:
- locs (np.ndarray): locations of maximum heatmap responses in shape
(K, 2) or (B, K, 2)
- vals (np.ndarray): values of maximum heatmap responses in shape
(K,) or (B, K)
"""
assert isinstance(heatmaps,
np.ndarray), ('heatmaps should be numpy.ndarray')
assert heatmaps.ndim == 3 or heatmaps.ndim == 4, (
f'Invalid shape {heatmaps.shape}')
if heatmaps.ndim == 3:
K, H, W = heatmaps.shape
B = None
heatmaps_flatten = heatmaps.reshape(K, -1)
else:
B, K, H, W = heatmaps.shape
heatmaps_flatten = heatmaps.reshape(B * K, -1)
y_locs, x_locs = np.unravel_index(
np.argmax(heatmaps_flatten, axis=1), shape=(H, W))
locs = np.stack((x_locs, y_locs), axis=-1).astype(np.float32)
vals = np.amax(heatmaps_flatten, axis=1)
locs[vals <= 0.] = -1
if B:
locs = locs.reshape(B, K, 2)
vals = vals.reshape(B, K)
return locs, vals
def gaussian_blur(heatmaps: np.ndarray, kernel: int = 11) -> np.ndarray:
"""Modulate heatmap distribution with Gaussian.
Note:
- num_keypoints: K
- heatmap height: H
- heatmap width: W
Args:
heatmaps (np.ndarray[K, H, W]): model predicted heatmaps.
        kernel (int): Gaussian kernel size (k) for modulation, which should
            match the heatmap gaussian sigma when training.
            k=17 for sigma=3 and k=11 for sigma=2.
Returns:
np.ndarray ([K, H, W]): Modulated heatmap distribution.
"""
assert kernel % 2 == 1
border = (kernel - 1) // 2
K, H, W = heatmaps.shape
for k in range(K):
origin_max = np.max(heatmaps[k])
dr = np.zeros((H + 2 * border, W + 2 * border), dtype=np.float32)
dr[border:-border, border:-border] = heatmaps[k].copy()
dr = cv2.GaussianBlur(dr, (kernel, kernel), 0)
heatmaps[k] = dr[border:-border, border:-border].copy()
heatmaps[k] *= origin_max / np.max(heatmaps[k])
return heatmaps
def gaussian_blur1d(simcc: np.ndarray, kernel: int = 11) -> np.ndarray:
"""Modulate simcc distribution with Gaussian.
Note:
        - batch size: N
        - num_keypoints: K
        - simcc length: Wx
    Args:
        simcc (np.ndarray[N, K, Wx]): model predicted simcc.
        kernel (int): Gaussian kernel size (k) for modulation, which should
            match the simcc gaussian sigma when training.
            k=17 for sigma=3 and k=11 for sigma=2.
    Returns:
        np.ndarray ([N, K, Wx]): Modulated simcc distribution.
"""
assert kernel % 2 == 1
border = (kernel - 1) // 2
N, K, Wx = simcc.shape
for n, k in product(range(N), range(K)):
origin_max = np.max(simcc[n, k])
dr = np.zeros((1, Wx + 2 * border), dtype=np.float32)
dr[0, border:-border] = simcc[n, k].copy()
dr = cv2.GaussianBlur(dr, (kernel, 1), 0)
simcc[n, k] = dr[0, border:-border].copy()
simcc[n, k] *= origin_max / np.max(simcc[n, k])
return simcc
def batch_heatmap_nms(batch_heatmaps: Tensor, kernel_size: int = 5):
"""Apply NMS on a batch of heatmaps.
Args:
batch_heatmaps (Tensor): batch heatmaps in shape (B, K, H, W)
        kernel_size (int): The kernel size of the NMS which should be
            an odd integer. Defaults to 5
Returns:
Tensor: The batch heatmaps after NMS.
"""
assert isinstance(kernel_size, int) and kernel_size % 2 == 1, \
f'The kernel_size should be an odd integer, got {kernel_size}'
padding = (kernel_size - 1) // 2
maximum = F.max_pool2d(
batch_heatmaps, kernel_size, stride=1, padding=padding)
maximum_indicator = torch.eq(batch_heatmaps, maximum)
batch_heatmaps = batch_heatmaps * maximum_indicator.float()
return batch_heatmaps
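# Minimal usage sketch (added commentary; shapes are illustrative):
#
#     heatmaps = torch.rand(2, 17, 64, 48)  # (B, K, H, W)
#     peaks = batch_heatmap_nms(heatmaps, kernel_size=5)
#     # only positions that are the maximum of their 5x5 neighborhood keep
#     # their value; every other position is zeroed out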
|
6446ab1bac4caafc5701ea56ad638ca476793496
|
45bb84e25010002d53cb2c21606d4095c0afe185
|
/examples/gromov/plot_gnn_TFGW.py
|
de745031d4c5a6949cc26e440ea6d12fd4bbf2af
|
[
"MIT"
] |
permissive
|
PythonOT/POT
|
63d3a8cbdf913fb18320df5f89e2e37e5c5d71d2
|
533148009dd9fa49f64f0c7af21e298e0150ca9a
|
refs/heads/master
| 2023-09-04T06:01:30.388482
| 2023-08-30T15:08:24
| 2023-08-30T15:08:24
| 71,472,695
| 1,417
| 489
|
MIT
| 2023-09-06T11:43:48
| 2016-10-20T14:42:14
|
Python
|
UTF-8
|
Python
| false
| false
| 7,436
|
py
|
plot_gnn_TFGW.py
|
# -*- coding: utf-8 -*-
"""
==================================================================
Graph classification with Template Based Fused Gromov Wasserstein
==================================================================
This example first illustrates how to train a graph classification gnn based on the Template Fused Gromov Wasserstein layer as proposed in [52].
[52] C. Vincent-Cuaz, R. Flamary, M. Corneli, T. Vayer, N. Courty (2022). Template based graph neural network with optimal transport distances. Advances in Neural Information Processing Systems, 35.
"""
# Author: Sonia Mazelet <sonia.mazelet@ens-paris-saclay.fr>
# Rémi Flamary <remi.flamary@unice.fr>
#
# License: MIT License
# sphinx_gallery_thumbnail_number = 1
#%%
import matplotlib.pyplot as pl
import torch
import networkx as nx
from torch.utils.data import random_split
from torch_geometric.loader import DataLoader
from torch_geometric.utils import to_networkx, one_hot
from torch_geometric.utils import stochastic_blockmodel_graph as sbm
from torch_geometric.data import Data as GraphData
import torch.nn as nn
from torch_geometric.nn import Linear, GCNConv
from ot.gnn import TFGWPooling
from sklearn.manifold import TSNE
##############################################################################
# Generate data
# -------------
# parameters
# We create 2 classes of stochastic block models (SBM) graphs with 1 block and 2 blocks respectively.
torch.manual_seed(0)
n_graphs = 50
n_nodes = 10
n_node_classes = 2
#edge probabilities for the SBMs
P1 = [[0.8]]
P2 = [[0.9, 0.1], [0.1, 0.9]]
#block sizes
block_sizes1 = [n_nodes]
block_sizes2 = [n_nodes // 2, n_nodes // 2]
#node features
x1 = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
x1 = one_hot(x1, num_classes=n_node_classes)
x1 = torch.reshape(x1, (n_nodes, n_node_classes))
x2 = torch.tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
x2 = one_hot(x2, num_classes=n_node_classes)
x2 = torch.reshape(x2, (n_nodes, n_node_classes))
graphs1 = [GraphData(x=x1, edge_index=sbm(block_sizes1, P1), y=torch.tensor([0])) for i in range(n_graphs)]
graphs2 = [GraphData(x=x2, edge_index=sbm(block_sizes2, P2), y=torch.tensor([1])) for i in range(n_graphs)]
graphs = graphs1 + graphs2
#split the data into train and test sets
train_graphs, test_graphs = random_split(graphs, [n_graphs, n_graphs])
train_loader = DataLoader(train_graphs, batch_size=10, shuffle=True)
test_loader = DataLoader(test_graphs, batch_size=10, shuffle=False)
#%%
##############################################################################
# Plot data
# ---------
# plot one graph of each class
fontsize = 10
pl.figure(0, figsize=(8, 2.5))
pl.clf()
pl.subplot(121)
pl.axis('off')
pl.title('Graph of class 1', fontsize=fontsize)
G = to_networkx(graphs1[0], to_undirected=True)
pos = nx.spring_layout(G, seed=0)
nx.draw_networkx(G, pos, with_labels=False, node_color="tab:blue")
pl.subplot(122)
pl.axis('off')
pl.title('Graph of class 2', fontsize=fontsize)
G = to_networkx(graphs2[0], to_undirected=True)
pos = nx.spring_layout(G, seed=0)
nx.draw_networkx(G, pos, with_labels=False, nodelist=[0, 1, 2, 3, 4], node_color="tab:blue")
nx.draw_networkx(G, pos, with_labels=False, nodelist=[5, 6, 7, 8, 9], node_color="tab:red")
pl.tight_layout()
pl.show()
#%%
##############################################################################
# Pooling architecture using the TFGW layer
# ---------
class pooling_TFGW(nn.Module):
"""
Pooling architecture using the TFGW layer.
"""
def __init__(self, n_features, n_templates, n_template_nodes, n_classes, n_hidden_layers, feature_init_mean=0., feature_init_std=1.):
"""
Pooling architecture using the TFGW layer.
"""
super().__init__()
self.n_templates = n_templates
self.n_template_nodes = n_template_nodes
self.n_hidden_layers = n_hidden_layers
self.n_features = n_features
self.conv = GCNConv(self.n_features, self.n_hidden_layers)
self.TFGW = TFGWPooling(self.n_hidden_layers, self.n_templates, self.n_template_nodes, feature_init_mean=feature_init_mean, feature_init_std=feature_init_std)
self.linear = Linear(self.n_templates, n_classes)
def forward(self, x, edge_index, batch=None):
x = self.conv(x, edge_index)
x = self.TFGW(x, edge_index, batch)
x_latent = x # save latent embeddings for visualization
x = self.linear(x)
return x, x_latent
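# Shape walk-through (added commentary): for a batch of B graphs with T total
# nodes, x enters forward() as (T, n_features); GCNConv maps it to
# (T, n_hidden_layers); TFGWPooling pools the node embeddings into one vector
# of template distances per graph, (B, n_templates); and the final Linear
# layer produces class scores of shape (B, n_classes).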
##############################################################################
# Graph classification training
# ---------
n_epochs = 25
#store latent embeddings and classes for TSNE visualization
embeddings_for_TSNE = []
classes = []
model = pooling_TFGW(n_features=2, n_templates=2, n_template_nodes=2, n_classes=2, n_hidden_layers=2, feature_init_mean=0.5, feature_init_std=0.5)
optimizer = torch.optim.Adam(model.parameters(), lr=0.005, weight_decay=0.0005)
criterion = torch.nn.CrossEntropyLoss()
all_accuracy = []
all_loss = []
for epoch in range(n_epochs):
losses = []
accs = []
for data in train_loader:
out, latent_embedding = model(data.x, data.edge_index, data.batch)
        loss = criterion(out, data.y)
        optimizer.zero_grad()  # clear gradients left over from the previous step
        loss.backward()
        optimizer.step()
pred = out.argmax(dim=1)
train_correct = pred == data.y
train_acc = int(train_correct.sum()) / len(data)
accs.append(train_acc)
losses.append(loss.item())
#store last classes and embeddings for TSNE visualization
if epoch == n_epochs - 1:
embeddings_for_TSNE.append(latent_embedding)
classes.append(data.y)
    print(f'Epoch: {epoch:03d}, Loss: {torch.mean(torch.tensor(losses)):.4f}, Train Accuracy: {torch.mean(torch.tensor(accs)):.4f}')
all_accuracy.append(torch.mean(torch.tensor(accs)))
all_loss.append(torch.mean(torch.tensor(losses)))
pl.figure(1, figsize=(8, 2.5))
pl.clf()
pl.subplot(121)
pl.plot(all_loss)
pl.xlabel('epochs')
pl.title('Loss')
pl.subplot(122)
pl.plot(all_accuracy)
pl.xlabel('epochs')
pl.title('Accuracy')
pl.tight_layout()
pl.show()
#Test
test_accs = []
for data in test_loader:
out, latent_embedding = model(data.x, data.edge_index, data.batch)
pred = out.argmax(dim=1)
test_correct = pred == data.y
test_acc = int(test_correct.sum()) / len(data)
test_accs.append(test_acc)
embeddings_for_TSNE.append(latent_embedding)
classes.append(data.y)
classes = torch.hstack(classes)
print(f'Test Accuracy: {torch.mean(torch.tensor(test_accs)):.4f}')
#%%
##############################################################################
# TSNE visualization of graph classification
# ---------
indices = torch.randint(2 * n_graphs, (60,)) # select a subset of embeddings for TSNE visualization
latent_embeddings = torch.vstack(embeddings_for_TSNE).detach().numpy()[indices, :]
TSNE_embeddings = TSNE(n_components=2, perplexity=20, random_state=1).fit_transform(latent_embeddings)
class_0 = classes[indices] == 0
class_1 = classes[indices] == 1
TSNE_embeddings_0 = TSNE_embeddings[class_0, :]
TSNE_embeddings_1 = TSNE_embeddings[class_1, :]
pl.figure(2, figsize=(6, 2.5))
pl.scatter(TSNE_embeddings_0[:, 0], TSNE_embeddings_0[:, 1],
alpha=0.5, marker='o', label='class 1')
pl.scatter(TSNE_embeddings_1[:, 0], TSNE_embeddings_1[:, 1],
alpha=0.5, marker='o', label='class 2')
pl.legend()
pl.title('TSNE in the latent space after training')
pl.show()
# %%
|
ae7af1bed411fe352871c89dee42a2f046718429
|
63ace5832d453e325681d02f6496a0999b72edcb
|
/bip_utils/utils/crypto/aes_ecb.py
|
62ac596b594470b2d4691dc7cb99beaf4853dc01
|
[
"MIT"
] |
permissive
|
ebellocchia/bip_utils
|
c9ec04c687f4247e57434319e36b2abab78f0b32
|
d15c75ddd74e4838c396a0d036ef6faf11b06a4b
|
refs/heads/master
| 2023-09-01T13:38:55.567370
| 2023-08-16T17:04:14
| 2023-08-16T17:04:14
| 251,130,186
| 244
| 88
|
MIT
| 2023-08-23T13:46:19
| 2020-03-29T20:42:48
|
Python
|
UTF-8
|
Python
| false
| false
| 3,891
|
py
|
aes_ecb.py
|
# Copyright (c) 2021 Emanuele Bellocchia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Module for AES-ECB encryption/decryption."""
#
# Imports
#
from typing import Any, Union
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad, unpad
from bip_utils.utils.misc.algo import AlgoUtils
#
# Classes
#
class AesEcbEncrypter:
"""
AES-ECB encrypter class.
It encrypts data using AES-ECB algorithm.
"""
aes: Any
auto_pad: bool
# Constructor
def __init__(self,
key: Union[str, bytes]) -> None:
"""
Construct class.
Args:
key (str or bytes): AES key
"""
self.aes = AES.new(AlgoUtils.Encode(key), AES.MODE_ECB)
self.auto_pad = True
def AutoPad(self,
value: bool) -> None:
"""
Set the auto-pad flag.
Args:
value (bool): Flag value
"""
self.auto_pad = value
def Encrypt(self,
data: Union[str, bytes]) -> bytes:
"""
Encrypt data using AES-ECB algorithm.
Args:
data (str or bytes): Data to be encrypted
Returns:
bytes: Encrypted data
"""
padded_data = self.Pad(data) if self.auto_pad else AlgoUtils.Encode(data)
return self.aes.encrypt(padded_data)
@staticmethod
def Pad(data: Union[str, bytes]) -> bytes:
"""
Pad data using PKCS7 algorithm.
Args:
data (str or bytes): Data to be padded
Returns:
bytes: Padded data
"""
return pad(AlgoUtils.Encode(data), AES.block_size)
class AesEcbDecrypter:
"""
AES-ECB decrypter class.
It decrypts data using AES-ECB algorithm.
"""
    aes: Any
    auto_unpad: bool
def __init__(self,
key: Union[str, bytes]) -> None:
"""
Construct class.
Args:
key (str or bytes): AES key
"""
self.aes = AES.new(AlgoUtils.Encode(key), AES.MODE_ECB)
self.auto_unpad = True
def AutoUnPad(self,
value: bool) -> None:
"""
Set the auto-unpad flag.
Args:
value (bool): Flag value
"""
self.auto_unpad = value
def Decrypt(self,
data: bytes) -> bytes:
"""
Decrypt data using AES-ECB algorithm.
Args:
data (bytes): Data to be decrypted
Returns:
bytes: Decrypted data
"""
dec = self.aes.decrypt(data)
return self.UnPad(dec) if self.auto_unpad else dec
@staticmethod
def UnPad(data: bytes) -> bytes:
"""
Unpad data using PKCS7 algorithm.
Args:
data (bytes): Data to be unpadded
Returns:
bytes: Unpadded data
"""
return unpad(data, AES.block_size)
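# Round-trip sketch (added commentary; the 16-byte key is an arbitrary example):
#
#     enc = AesEcbEncrypter(b"0123456789abcdef")
#     ct = enc.Encrypt(b"hello world")          # PKCS7-padded, then encrypted
#     dec = AesEcbDecrypter(b"0123456789abcdef")
#     assert dec.Decrypt(ct) == b"hello world"  # decrypted, then unpadded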
|
c932516022c02ef9a2f011efc0f2a39565b8d8c3
|
c6f236361649a4bf56576fcb499ca80b4b00bc7f
|
/tests/instancemethods_test.py
|
297ca46df29a907333349e895b92d539c0330305
|
[
"MIT"
] |
permissive
|
kaste/mockito-python
|
e3ae81630f3123aed5ef1cbcb247e61a6bf63f9d
|
7483a5f76c7e7fb8121c2dc129bfb147a24e8eca
|
refs/heads/master
| 2022-10-09T04:37:02.227871
| 2022-09-30T21:07:49
| 2022-09-30T21:07:49
| 56,911,241
| 111
| 18
|
MIT
| 2022-08-25T20:46:41
| 2016-04-23T09:23:52
|
Python
|
UTF-8
|
Python
| false
| false
| 14,407
|
py
|
instancemethods_test.py
|
# Copyright (c) 2008-2016 Szczepan Faber, Serhiy Oplakanets, Herr Kaste
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import pytest
from .test_base import TestBase
from mockito import (
mock, when, expect, unstub, ANY, verify, verifyNoMoreInteractions,
verifyZeroInteractions, verifyNoUnwantedInteractions,
verifyStubbedInvocationsAreUsed)
from mockito.invocation import InvocationError
from mockito.verification import VerificationError
pytestmark = pytest.mark.usefixtures("unstub")
class Dog(object):
def waggle(self):
return "Wuff!"
def bark(self, sound):
return "%s!" % sound
def do_default_bark(self):
return self.bark('Wau')
def __call__(self):
pass
class InstanceMethodsTest(TestBase):
def tearDown(self):
unstub()
def testUnstubClassMethod(self):
original_method = Dog.waggle
when(Dog).waggle().thenReturn('Nope!')
unstub()
rex = Dog()
self.assertEqual('Wuff!', rex.waggle())
self.assertEqual(original_method, Dog.waggle)
def testUnstubMockedInstanceMethod(self):
rex = Dog()
when(rex).waggle().thenReturn('Nope!')
assert rex.waggle() == 'Nope!'
unstub()
assert rex.waggle() == 'Wuff!'
def testUnstubMockedInstanceDoesNotHideTheClass(self):
when(Dog).waggle().thenReturn('Nope!')
rex = Dog()
when(rex).waggle().thenReturn('Sure!')
assert rex.waggle() == 'Sure!'
unstub()
assert rex.waggle() == 'Wuff!'
def testPartialUnstubShowsTheMockedClass(self):
when(Dog).waggle().thenReturn('Nope!')
rex = Dog()
when(rex).waggle().thenReturn('Sure!')
unstub(rex)
assert rex.waggle() == 'Nope!'
def testStubAnInstanceMethod(self):
when(Dog).waggle().thenReturn('Boing!')
rex = Dog()
self.assertEqual('Boing!', rex.waggle())
def testStubsAnInstanceMethodWithAnArgument(self):
when(Dog).bark('Miau').thenReturn('Wuff')
rex = Dog()
self.assertEqual('Wuff', rex.bark('Miau'))
def testInvokeAStubbedMethodFromAnotherMethod(self):
when(Dog).bark('Wau').thenReturn('Wuff')
rex = Dog()
self.assertEqual('Wuff', rex.do_default_bark())
verify(Dog).bark('Wau')
def testYouCantStubAnUnknownMethodInStrictMode(self):
try:
when(Dog).barks('Wau').thenReturn('Wuff')
self.fail(
                'Stubbing an unknown method should have thrown an exception')
except InvocationError:
pass
def testStubUnknownMethodInLooseMode(self):
when(Dog, strict=False).walk()
rex = Dog()
rex.walk()
unstub()
with pytest.raises(AttributeError):
rex.walk
with pytest.raises(AttributeError):
Dog.walk
def testAddNewMethodOnInstanceInLooseMode(self):
rex = Dog()
when(rex, strict=False).walk()
rex.walk()
unstub()
with pytest.raises(AttributeError):
rex.walk
def testThrowEarlyIfCallingWithUnexpectedArgumentsInStrictMode(self):
rex = Dog()
when(rex).bark('Miau').thenReturn('Wuff')
with pytest.raises(InvocationError):
rex.bark('Shhh')
def testNiceErrorMessageOnUnexpectedCall(self):
theMock = mock(strict=True)
when(theMock).foo('bar')
when(theMock).foo(12, baz='boz')
when(theMock).bar('foo') # <==== omitted from output!
with pytest.raises(InvocationError) as exc:
theMock.foo(True, None)
assert str(exc.value) == '''
Called but not expected:
foo(True, None)
Stubbed invocations are:
foo('bar')
foo(12, baz='boz')
'''
def testStubCallableObject(self):
when(Dog).__call__().thenReturn('done')
rex = Dog() # <= important. not stubbed
assert rex() == 'done'
def testReturnNoneIfCallingWithUnexpectedArgumentsIfNotStrict(self):
when(Dog, strict=False).bark('Miau').thenReturn('Wuff')
rex = Dog()
self.assertEqual(None, rex.bark('Shhh'))
def testStubInstancesInsteadOfClasses(self):
rex = Dog()
when(rex).bark('Miau').thenReturn('Wuff')
self.assertEqual('Wuff', rex.bark('Miau'))
verify(rex, times=1).bark(ANY)
max = Dog()
self.assertEqual('Miau!', max.bark('Miau'))
def testUnstubInstance(self):
rex = Dog()
when(rex).bark('Miau').thenReturn('Wuff')
unstub()
assert rex.bark('Miau') == 'Miau!'
def testNoExplicitReturnValueMeansNone(self):
when(Dog).bark('Miau').thenReturn()
rex = Dog()
self.assertEqual(None, rex.bark('Miau'))
def testForgottenThenReturnMeansReturnNone(self):
when(Dog).bark('Miau')
when(Dog).waggle()
rex = Dog()
self.assertEqual(None, rex.bark('Miau'))
self.assertEqual(None, rex.waggle())
class TestVerifyInteractions:
class TestZeroInteractions:
def testVerifyNoMoreInteractionsWorks(self):
when(Dog).bark('Miau')
verifyNoMoreInteractions(Dog)
def testVerifyZeroInteractionsWorks(self):
when(Dog).bark('Miau')
verifyZeroInteractions(Dog)
class TestOneInteraction:
def testNothingVerifiedVerifyNoMoreInteractionsRaises(self):
when(Dog).bark('Miau')
rex = Dog()
rex.bark('Miau')
with pytest.raises(VerificationError):
verifyNoMoreInteractions(Dog)
def testIfVerifiedVerifyNoMoreInteractionsPasses(self):
when(Dog).bark('Miau')
rex = Dog()
rex.bark('Miau')
verify(Dog).bark('Miau')
verifyNoMoreInteractions(Dog)
def testNothingVerifiedVerifyZeroInteractionsRaises(self):
when(Dog).bark('Miau')
rex = Dog()
rex.bark('Miau')
with pytest.raises(VerificationError):
verifyZeroInteractions(Dog)
def testIfVerifiedVerifyZeroInteractionsStillRaises(self):
when(Dog).bark('Miau')
rex = Dog()
rex.bark('Miau')
verify(Dog).bark('Miau')
with pytest.raises(VerificationError):
verifyZeroInteractions(Dog)
class TestEnsureStubsAreUsed:
def testBarkOnUnusedStub(self):
when(Dog).bark('Miau')
with pytest.raises(VerificationError):
verifyStubbedInvocationsAreUsed(Dog)
class TestPassIfExplicitlyVerified:
@pytest.mark.parametrize('verification', [
{'times': 0},
{'between': [0, 3]}
])
def testPassIfExplicitlyVerified(self, verification):
dog = mock()
when(dog).waggle().thenReturn('Sure')
verify(dog, **verification).waggle()
verifyStubbedInvocationsAreUsed(dog)
def testWildcardCallSignatureOnVerify(self):
dog = mock()
when(dog).waggle(1).thenReturn('Sure')
verify(dog, times=0).waggle(Ellipsis)
verifyStubbedInvocationsAreUsed(dog)
@pytest.mark.xfail(reason='Not implemented.')
def testPassIfVerifiedZeroInteractions(self):
dog = mock()
when(dog).waggle(1).thenReturn('Sure')
verifyZeroInteractions(dog)
verifyStubbedInvocationsAreUsed(dog)
@pytest.mark.xfail(reason='Not implemented.')
def testPassIfVerifiedNoMoreInteractions(self):
dog = mock()
when(dog).waggle(1).thenReturn('Sure')
verifyNoMoreInteractions(dog)
verifyStubbedInvocationsAreUsed(dog)
        def testWildcardCallSignatureOnStub(self):
dog = mock()
when(dog).waggle(Ellipsis).thenReturn('Sure')
verify(dog, times=0).waggle(1)
verifyStubbedInvocationsAreUsed(dog)
def testPassIfExplicitlyVerified4(self):
dog = mock()
when(dog).waggle(1).thenReturn('Sure')
when(dog).waggle(2).thenReturn('Sure')
verify(dog, times=0).waggle(Ellipsis)
verifyStubbedInvocationsAreUsed(dog)
class TestPassIfImplicitlyVerifiedViaExpect:
@pytest.mark.parametrize('verification', [
{'times': 0},
{'between': [0, 3]}
])
def testPassIfImplicitlyVerified(self, verification):
dog = mock()
expect(dog, **verification).waggle().thenReturn('Sure')
verifyStubbedInvocationsAreUsed(dog)
def testPassUsedOnceImplicitAnswer(self):
when(Dog).bark('Miau')
rex = Dog()
rex.bark('Miau')
verifyStubbedInvocationsAreUsed(Dog)
def testPassUsedOnce(self):
dog = mock()
when(dog).waggle().thenReturn('Sure')
dog.waggle()
verifyStubbedInvocationsAreUsed(dog)
def testFailSecondStubNotUsed(self):
when(Dog).bark('Miau')
when(Dog).waggle()
rex = Dog()
rex.bark('Miau')
with pytest.raises(VerificationError):
verifyStubbedInvocationsAreUsed(Dog)
def testFailSecondStubSameMethodUnused(self):
when(Dog).bark('Miau')
when(Dog).bark('Grrr')
rex = Dog()
rex.bark('Miau')
with pytest.raises(VerificationError):
verifyStubbedInvocationsAreUsed(Dog)
def testPassTwoStubsOnSameMethodUsed(self):
when(Dog).bark('Miau')
when(Dog).bark('Grrr')
rex = Dog()
rex.bark('Miau')
rex.bark('Grrr')
verifyStubbedInvocationsAreUsed(Dog)
def testPassOneCatchAllOneSpecificStubBothUsed(self):
when(Dog).bark(Ellipsis)
when(Dog).bark('Miau')
rex = Dog()
rex.bark('Miau')
rex.bark('Grrr')
verifyStubbedInvocationsAreUsed(Dog)
def testFailSecondAnswerUnused(self):
when(Dog).bark('Miau').thenReturn('Yep').thenReturn('Nop')
rex = Dog()
rex.bark('Miau')
with pytest.raises(VerificationError):
verifyStubbedInvocationsAreUsed(Dog)
@pytest.mark.usefixtures('unstub')
class TestImplicitVerificationsUsingExpect:
@pytest.fixture(params=[
{'times': 2},
{'atmost': 2},
{'between': [1, 2]}
], ids=['times', 'atmost', 'between'])
def verification(self, request):
return request.param
def testFailImmediatelyIfWantedCountExceeds(self, verification):
rex = Dog()
expect(rex, **verification).bark('Miau').thenReturn('Wuff')
rex.bark('Miau')
rex.bark('Miau')
with pytest.raises(InvocationError):
rex.bark('Miau')
def testVerifyNoMoreInteractionsWorks(self, verification):
rex = Dog()
expect(rex, **verification).bark('Miau').thenReturn('Wuff')
rex.bark('Miau')
rex.bark('Miau')
verifyNoMoreInteractions(rex)
def testNoUnwantedInteractionsWorks(self, verification):
rex = Dog()
expect(rex, **verification).bark('Miau').thenReturn('Wuff')
rex.bark('Miau')
rex.bark('Miau')
verifyNoUnwantedInteractions(rex)
@pytest.mark.parametrize('verification', [
{'times': 2},
{'atleast': 2},
{'between': [1, 2]}
], ids=['times', 'atleast', 'between'])
def testVerifyNoMoreInteractionsBarksIfUnsatisfied(self, verification):
rex = Dog()
expect(rex, **verification).bark('Miau').thenReturn('Wuff')
with pytest.raises(VerificationError):
verifyNoMoreInteractions(rex)
@pytest.mark.parametrize('verification', [
{'times': 2},
{'atleast': 2},
{'between': [1, 2]}
], ids=['times', 'atleast', 'between'])
def testNoUnwantedInteractionsBarksIfUnsatisfied(self, verification):
rex = Dog()
expect(rex, **verification).bark('Miau').thenReturn('Wuff')
with pytest.raises(VerificationError):
verifyNoUnwantedInteractions(rex)
def testNoUnwantedInteractionsForAllRegisteredObjects(self):
rex = Dog()
mox = Dog()
expect(rex, times=1).bark('Miau')
expect(mox, times=1).bark('Miau')
rex.bark('Miau')
mox.bark('Miau')
verifyNoUnwantedInteractions()
    def testUseWhenAndExpectTogetherVerifyNoUnwantedInteractions(self):
rex = Dog()
when(rex).waggle()
expect(rex, times=1).bark('Miau')
rex.waggle()
rex.bark('Miau')
verifyNoUnwantedInteractions()
    def testExpectWithoutVerification(self):
rex = Dog()
expect(rex).bark('Miau').thenReturn('Wuff')
verifyNoMoreInteractions(rex)
rex.bark('Miau')
with pytest.raises(VerificationError):
verifyNoMoreInteractions(rex)
# Where to put this test? During first implementation I broke this
def testEnsureWhenGetsNotConfused(self):
m = mock()
when(m).foo(1).thenReturn()
m.foo(1)
with pytest.raises(VerificationError):
verifyNoMoreInteractions(m)
def testEnsureMultipleExpectsArentConfused(self):
rex = Dog()
expect(rex, times=1).bark('Miau').thenReturn('Wuff')
expect(rex, times=1).waggle().thenReturn('Wuff')
rex.bark('Miau')
rex.waggle()
|
ea9d1d3d4f1fc434c03b46bcf4060e4f22b22074
|
2d6d5424e881252898b898fbfbc47fe1487371cf
|
/examples/01-filter/image-fft.py
|
248fa52c8b547f7c7e3d44aa54cce26dbca11d9d
|
[
"MIT"
] |
permissive
|
pyvista/pyvista
|
333e55bfaa6b8bcdb47e2df04c823d35f05db364
|
1b450b23340f367315fc914075d551e0a4df8cc3
|
refs/heads/main
| 2023-08-20T08:04:27.146062
| 2023-08-20T01:14:03
| 2023-08-20T01:14:03
| 92,974,124
| 1,885
| 389
|
MIT
| 2023-09-14T21:09:28
| 2017-05-31T18:01:42
|
Python
|
UTF-8
|
Python
| false
| false
| 3,715
|
py
|
image-fft.py
|
"""
.. _image_fft_example:
Fast Fourier Transform
~~~~~~~~~~~~~~~~~~~~~~
This example shows how to apply a Fast Fourier Transform (FFT) to a
:class:`pyvista.ImageData` using the :func:`pyvista.ImageDataFilters.fft`
filter.
Here, we demonstrate FFT usage by denoising an image, effectively removing any
"high frequency" content by performing a `low pass filter
<https://en.wikipedia.org/wiki/Low-pass_filter>`_.
This example was inspired by `Image denoising by FFT
<https://scipy-lectures.org/intro/scipy/auto_examples/solutions/plot_fft_image_denoise.html>`_.
"""
import numpy as np
import pyvista as pv
from pyvista import examples
###############################################################################
# Load the example Moon landing image and plot it.
image = examples.download_moonlanding_image()
print(image.point_data)
# Create a theme that we can reuse when plotting the image
grey_theme = pv.themes.DocumentTheme()
grey_theme.cmap = 'gray'
grey_theme.show_scalar_bar = False
grey_theme.axes.show = False
image.plot(theme=grey_theme, cpos='xy', text='Unprocessed Moon Landing Image')
###############################################################################
# Apply FFT to the image
# ~~~~~~~~~~~~~~~~~~~~~~
# FFT will be applied to the active scalars, ``'PNGImage'``, the default
# scalars name when loading a PNG image.
#
# The output from the filter is a complex array stored by the same name unless
# specified using ``output_scalars_name``.
fft_image = image.fft()
fft_image.point_data
###############################################################################
# Plot the FFT of the image
# ~~~~~~~~~~~~~~~~~~~~~~~~~
# Plot the absolute value of the FFT of the image.
#
# Note that we are effectively viewing the "frequency" of the data in this
# image, where the four corners contain the low frequency content of the image,
# and the middle is the high frequency content of the image.
fft_image.plot(
scalars=np.abs(fft_image.point_data['PNGImage']),
cpos="xy",
theme=grey_theme,
log_scale=True,
text='Moon Landing Image FFT',
copy_mesh=True, # don't overwrite scalars when plotting
)
###############################################################################
# Remove the noise from the ``fft_image``
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Effectively, we want to remove high frequency (noisy) data from our image.
# First, let's reshape by the size of the image.
#
# Next, perform a low pass filter by removing the middle 80% of the content of
# the image. Note that the high frequency content is in the middle of the array.
#
# .. note::
# It is easier and more efficient to use the existing
# :func:`pyvista.ImageDataFilters.low_pass` filter. This section is here
# for demonstration purposes.
ratio_to_keep = 0.10
# modify the fft_image data
width, height, _ = fft_image.dimensions
data = fft_image['PNGImage'].reshape(height, width) # note: axes flipped
data[int(height * ratio_to_keep) : -int(height * ratio_to_keep)] = 0
data[:, int(width * ratio_to_keep) : -int(width * ratio_to_keep)] = 0
fft_image.plot(
scalars=np.abs(data),
cpos="xy",
theme=grey_theme,
log_scale=True,
text='Moon Landing Image FFT with Noise Removed',
copy_mesh=True, # don't overwrite scalars when plotting
)
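###############################################################################
# For comparison, the built-in low-pass filter mentioned in the note above
# performs a similar operation in a single call. This is a sketch only; the
# cutoff frequencies below are illustrative assumptions, not values taken from
# this example:
#
#   low_passed = image.fft().low_pass(5, 5, 5)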
###############################################################################
# Convert to the spatial domain using reverse FFT
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Finally, convert the image data back to the "spatial" domain and plot it.
rfft = fft_image.rfft()
rfft['PNGImage'] = np.real(rfft['PNGImage'])
rfft.plot(cpos="xy", theme=grey_theme, text='Processed Moon Landing Image')
blob_id: 6c0c5b23f0dc4f1b574b601b80549cbce00f19ba | repo: sczesla/PyAstronomy | path: /src/pyaC/importCheck.py | filename: importCheck.py
license: MIT (permissive) | language: Python | encoding: UTF-8 | size: 4,265 bytes | stars: 129 | forks: 44 | revision_date: 2023-08-24T11:35:18
from . import pyaErrors as PE
import importlib
import traceback
def pyaimportallfrom(mn, pak, globs, cra=False, rf=False, re=False):
"""
Mimics the behavior of 'from X import *'.
Based in part on this discussion:
https://stackoverflow.com/questions/21221358/python-how-to-import-all-methods-and-attributes-from-a-module-dynamically
Parameters
----------
mn : string
Module name
pak : string
Name of the package to which the imported
names are added.
globs : dictionary
The `globals()` dictionary of that package.
cra : boolean, optional
If True, symbol reassignments will be checked
and some info printed.
rf : boolean, optional
If True, a warning will be given on import failure.
Default is False.
re : boolean, optional
If False (default), no exception will be raised on
import failure (note, also `rf` must be set True to
make this happen).
Returns
-------
status : boolean
True if import was successful
exception : string
If an exception occurred, the text describing the exception. Otherwise
an empty string
traceback : list of strings
If an exception occurred, the traceback. Otherwise an empty string.
"""
try:
n = importlib.import_module("." + mn, pak)
try:
# If __all__ is defined, use it!
to_import = n.__all__
except AttributeError:
# Otherwise, import all names not starting with an underscore
to_import = [
name for name in n.__dict__ if not name.startswith('_')]
if cra:
# Check reassignment
for name in to_import:
if name in globs:
print("Reassigning: ", name, " from ", n)
globs.update({name: n.__dict__[name] for name in to_import})
except Exception as e:
if rf:
f = PE.PyAImportFailure("Could not import module " + str(mn) + " to package " + str(pak) + ". " +
"Received message: " + str(e))
if not re:
# Report failure without raising exception
PE.warn(f)
else:
# Raise exception
raise f
return False, str(e), [l.rstrip("\n") for l in traceback.format_exc().splitlines(True)]
return True, "", ""
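# Usage sketch (hypothetical package layout): from a package's __init__.py one
# might pull all public names of a submodule into the package namespace with
#   pyaimportallfrom("importCheck", "pyaC", globals(), rf=True)
# which returns (status, exception_text, traceback_lines).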
class ImportCheck:
def __init__(self, modules, required=None):
"""
Checks whether individual modules can be imported.
Parameters
----------
modules : list of strings
The names of the modules to be checked.
required : list of strings, optional
List of modules which are required. If not found,
an import error is raised.
"""
if required is None:
required = []
# List of required modules that could not be imported,
# list of all modules that could not be imported
self.requiredFail = []
self.fail = []
self.check = {}
self.versions = {}
for module in modules:
self.check[module] = True
try:
mi = __import__(module)
except ImportError:
self.check[module] = False
self.fail.append(module)
except PE.PyARequiredImport:
self.check[module] = False
self.fail.append(module)
except Exception as e:
# In fact, any error prevents import
self.check[module] = False
self.fail.append(module)
self.versions[module] = "undefined"
if self.check[module]:
try:
self.versions[module] = mi.__version__
except:
pass
if (not self.check[module]) and (module in required):
self.requiredFail.append(module)
if len(self.requiredFail) > 0:
ms = ', '.join(self.requiredFail)
raise(PE.PyARequiredImport(
"Could not import required module(s): " + ms, solution="Please install " + ms))
blob_id: 8893fe387480b47190cd55f7f629a0c9f495b38d | repo: comic/grand-challenge.org | path: /app/grandchallenge/verifications/tokens.py | filename: tokens.py
license: Apache-2.0 (permissive) | language: Python | encoding: UTF-8 | size: 335 bytes | stars: 135 | forks: 53 | revision_date: 2023-08-31T14:23:04
from django.contrib.auth.tokens import PasswordResetTokenGenerator
class EmailVerificationTokenGenerator(PasswordResetTokenGenerator):
def _make_hash_value(self, user, timestamp):
return f"{user.pk}{timestamp}{user.verification.email_is_verified}"
email_verification_token_generator = EmailVerificationTokenGenerator()
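# Usage sketch relying on the standard PasswordResetTokenGenerator API; the
# `user` object is assumed to have the related `verification` record:
#   token = email_verification_token_generator.make_token(user)
#   assert email_verification_token_generator.check_token(user, token)
# Because the hash value includes email_is_verified, a token invalidates
# itself once the address has been verified.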
blob_id: b9a6eb2628c99dcb6c55e12c42aada3dcbaeb98d | repo: ryfeus/lambda-packs | path: /Pdf_docx_pptx_xlsx_epub_png/source/setuptools/__init__.py | filename: __init__.py
license: MIT (permissive) | language: Python | encoding: UTF-8 | size: 5,033 bytes | stars: 1,283 | forks: 263 | revision_date: 2022-11-29T13:35:35
"""Extensions to the 'distutils' for large or complex distributions"""
import os
import distutils.core
import distutils.filelist
from distutils.core import Command as _Command
from distutils.util import convert_path
from fnmatch import fnmatchcase
import setuptools.version
from setuptools.extension import Extension
from setuptools.dist import Distribution, Feature, _get_unpatched
from setuptools.depends import Require
from setuptools.compat import filterfalse
__all__ = [
'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
'find_packages'
]
__version__ = setuptools.version.__version__
bootstrap_install_from = None
# If we run 2to3 on .py files, should we also convert docstrings?
# Default: yes; assume that we can detect doctests reliably
run_2to3_on_doctests = True
# Standard package names for fixer packages
lib2to3_fixer_packages = ['lib2to3.fixes']
class PackageFinder(object):
@classmethod
def find(cls, where='.', exclude=(), include=('*',)):
"""Return a list all Python packages found within directory 'where'
'where' should be supplied as a "cross-platform" (i.e. URL-style)
path; it will be converted to the appropriate local path syntax.
'exclude' is a sequence of package names to exclude; '*' can be used
as a wildcard in the names, such that 'foo.*' will exclude all
subpackages of 'foo' (but not 'foo' itself).
'include' is a sequence of package names to include. If it's
specified, only the named packages will be included. If it's not
specified, all found packages will be included. 'include' can contain
shell style wildcard patterns just like 'exclude'.
The list of included packages is built up first and then any
explicitly excluded packages are removed from it.
"""
out = cls._find_packages_iter(convert_path(where))
out = cls.require_parents(out)
includes = cls._build_filter(*include)
excludes = cls._build_filter('ez_setup', '*__pycache__', *exclude)
out = filter(includes, out)
out = filterfalse(excludes, out)
return list(out)
@staticmethod
def require_parents(packages):
"""
Exclude any apparent package that doesn't include its parent.
For example, exclude 'foo.bar' if 'foo' is not present.
"""
found = []
for pkg in packages:
base, sep, child = pkg.rpartition('.')
if base and base not in found:
continue
found.append(pkg)
yield pkg
@staticmethod
def _all_dirs(base_path):
"""
Return all dirs in base_path, relative to base_path
"""
for root, dirs, files in os.walk(base_path, followlinks=True):
for dir in dirs:
yield os.path.relpath(os.path.join(root, dir), base_path)
@classmethod
def _find_packages_iter(cls, base_path):
dirs = cls._all_dirs(base_path)
suitable = filterfalse(lambda n: '.' in n, dirs)
return (
path.replace(os.path.sep, '.')
for path in suitable
if cls._looks_like_package(os.path.join(base_path, path))
)
@staticmethod
def _looks_like_package(path):
return os.path.isfile(os.path.join(path, '__init__.py'))
@staticmethod
def _build_filter(*patterns):
"""
Given a list of patterns, return a callable that will be true only if
the input matches one of the patterns.
"""
return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
class PEP420PackageFinder(PackageFinder):
@staticmethod
def _looks_like_package(path):
return True
find_packages = PackageFinder.find
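# Usage sketch (directory layout is illustrative): for a tree containing
# foo/__init__.py and foo/bar/__init__.py,
#   find_packages('.', exclude=['tests', 'tests.*'])
# would return ['foo', 'foo.bar'].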
setup = distutils.core.setup
_Command = _get_unpatched(_Command)
class Command(_Command):
__doc__ = _Command.__doc__
command_consumes_arguments = False
def __init__(self, dist, **kw):
# Add support for keyword arguments
_Command.__init__(self, dist)
for k, v in kw.items():
setattr(self, k, v)
def reinitialize_command(self, command, reinit_subcommands=0, **kw):
cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
for k, v in kw.items():
setattr(cmd, k, v)  # update command with keywords
return cmd
distutils.core.Command = Command # we can't patch distutils.cmd, alas
def findall(dir=os.curdir):
"""Find all files under 'dir' and return the list of full filenames
(relative to 'dir').
"""
all_files = []
for base, dirs, files in os.walk(dir, followlinks=True):
if base == os.curdir or base.startswith(os.curdir + os.sep):
base = base[2:]
if base:
files = [os.path.join(base, f) for f in files]
all_files.extend(filter(os.path.isfile, files))
return all_files
distutils.filelist.findall = findall # fix findall bug in distutils.
blob_id: d7eddeeab2ad0c3a66e20c26509c9ea383adde7f | repo: abhinavsingh/proxy.py | path: /tests/test_circular_imports.py | filename: test_circular_imports.py
license: BSD-3-Clause (permissive) | language: Python | encoding: UTF-8 | size: 3,685 bytes | stars: 2,691 | forks: 657 | revision_date: 2023-04-17T04:12:18
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by Abhinav Singh and contributors.
:license: BSD, see LICENSE for more details.
Tests for circular imports in all local packages and modules.
This ensures all internal packages can be imported right away without
any need to import some other module before doing so.
This module is based on an idea that pytest uses for self-testing:
* https://github.com/sanitizers/octomachinery/blob/be18b54/tests/circular_imports_test.py
* https://github.com/pytest-dev/pytest/blob/d18c75b/testing/test_meta.py
* https://twitter.com/codewithanthony/status/1229445110510735361
"""
import os
import sys
import pkgutil
import subprocess
from types import ModuleType
from typing import List, Generator
from pathlib import Path
from itertools import chain
import pytest
import proxy
def _find_all_importables(pkg: ModuleType) -> List[str]:
"""Find all importables in the project.
Return them in order.
"""
return sorted(
set(
chain.from_iterable(
_discover_path_importables(Path(p), pkg.__name__)
# FIXME: Unignore after upgrading to `mypy > 0.910`. The fix
# FIXME: is in the `master` branch of upstream since Aug 4,
# FIXME: 2021 but has not yet been included in any releases.
# Refs:
# * https://github.com/python/mypy/issues/1422
# * https://github.com/python/mypy/pull/9454
for p in pkg.__path__ # type: ignore[attr-defined]
),
),
)
def _discover_path_importables(
pkg_pth: Path, pkg_name: str,
) -> Generator[str, None, None]:
"""Yield all importables under a given path and package."""
for dir_path, _d, file_names in os.walk(pkg_pth):
pkg_dir_path = Path(dir_path)
if pkg_dir_path.parts[-1] == '__pycache__':
continue
if all(Path(_).suffix != '.py' for _ in file_names):
continue
rel_pt = pkg_dir_path.relative_to(pkg_pth)
pkg_pref = '.'.join((pkg_name,) + rel_pt.parts)
yield from (
pkg_path
for _, pkg_path, _ in pkgutil.walk_packages(
(str(pkg_dir_path),), prefix=f'{pkg_pref}.',
)
)
# FIXME: Ignore is necessary for as long as pytest hasn't figured out their
# FIXME: typing for the `parametrize` mark.
# Refs:
# * https://github.com/pytest-dev/pytest/issues/7469#issuecomment-918345196
# * https://github.com/pytest-dev/pytest/issues/3342
@pytest.mark.parametrize( # type: ignore[misc]
'import_path',
_find_all_importables(proxy),
)
# Marked as disabled_ because:
# 1. This test case was added when isort integration was problematic
# 2. This test case never found a real circular import scenario
# 3. This test case consumes 60% of test suite runtime
# 4. Kept in the repo because we might still want to enable
# this in future, conditionally. Example, we can run
# this only on a single OS and Python version combination
# instead of running it across entire matrix.
def disabled_test_no_warnings(import_path: str) -> None:
"""Verify that exploding importables doesn't explode.
This is seeking for any import errors including ones caused
by circular imports.
"""
imp_cmd = (
sys.executable,
'-W', 'error',
'-c', f'import {import_path!s}',
)
subprocess.check_call(imp_cmd)
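# Usage sketch (illustrative): the helpers above can also be driven directly,
# e.g. to print every importable submodule of the package:
#   for import_path in _find_all_importables(proxy):
#       print(import_path)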
blob_id: f4aa01bf725a1d9ad20ffc8ca2b379e2bc9d5dc2 | repo: dagster-io/dagster | path: /python_modules/dagster/dagster/_core/storage/alembic/versions/009_add_partition_column_postgres.py | filename: 009_add_partition_column_postgres.py
license: Apache-2.0 (permissive) | language: Python | encoding: UTF-8 | size: 1,170 bytes | stars: 8,565 | forks: 1,154 | revision_date: 2023-09-05T19:54:52
"""add partition column.
Revision ID: 3e0770016702
Revises: 224640159acf
Create Date: 2020-12-21 10:13:54.430623
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy import inspect
# revision identifiers, used by Alembic.
revision = "3e0770016702"
down_revision = "224640159acf"
branch_labels = None
depends_on = None
def upgrade():
inspector = inspect(op.get_bind())
has_tables = inspector.get_table_names()
if "event_logs" in has_tables:
columns = [x.get("name") for x in inspector.get_columns("event_logs")]
if "partition" not in columns:
op.add_column("event_logs", sa.Column("partition", sa.String))
op.create_index(
"idx_asset_partition", "event_logs", ["asset_key", "partition"], unique=False
)
def downgrade():
inspector = inspect(op.get_bind())
has_tables = inspector.get_table_names()
if "event_logs" in has_tables:
columns = [x.get("name") for x in inspector.get_columns("event_logs")]
if "partition" in columns:
# Drop the index before the column: dropping the column first would
# implicitly remove the multi-column index and make drop_index fail.
op.drop_index("idx_asset_partition", "event_logs")
op.drop_column("event_logs", "partition")
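# Usage sketch (standard Alembic CLI; revision ids taken from this file):
#   alembic upgrade 3e0770016702     # apply this migration
#   alembic downgrade 224640159acf   # revert to the previous revision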
blob_id: d1cafcfe5752cbe05f63a28958005535bf2610a6 | repo: alltheplaces/alltheplaces | path: /locations/spiders/staples.py | filename: staples.py
license: CC0-1.0, MIT (permissive) | language: Python | encoding: UTF-8 | size: 3,442 bytes | stars: 453 | forks: 176 | revision_date: 2023-08-30T17:51:54
import re
import scrapy
from locations.hours import OpeningHours
from locations.items import Feature
from locations.spiders.vapestore_gb import clean_address
class StaplesSpider(scrapy.Spider):
name = "staples"
item_attributes = {"brand": "Staples", "brand_wikidata": "Q785943"}
allowed_domains = ["stores.staples.com"]
start_urls = ("https://stores.staples.com/",)
def parse_hours(self, elements):
opening_hours = OpeningHours()
for elem in elements:
day = elem.xpath('.//td[@class="c-hours-details-row-day"]/text()').extract_first()
intervals = elem.xpath('.//td[@class="c-hours-details-row-intervals"]')
if intervals.xpath("./text()").extract_first() == "Closed":
continue
if intervals.xpath("./span/text()").extract_first() == "Open 24 hours":
opening_hours.add_range(day=day, open_time="0:00", close_time="23:59")
else:
start_time = elem.xpath(
'.//span[@class="c-hours-details-row-intervals-instance-open"]/text()'
).extract_first()
end_time = elem.xpath(
'.//span[@class="c-hours-details-row-intervals-instance-close"]/text()'
).extract_first()
opening_hours.add_range(day=day, open_time=start_time, close_time=end_time, time_format="%I:%M %p")
return opening_hours
def parse_store(self, response):
ref = re.search(r".+/(.+)$", response.url).group(1)
address1 = response.xpath('//span[@class="c-address-street-1"]/text()').extract_first()
address2 = response.xpath('//span[@class="c-address-street-2"]/text()').extract_first() or ""
properties = {
"street_address": clean_address([address1, address2]),
"phone": response.xpath('//span[@itemprop="telephone"]/text()').extract_first(),
"city": response.xpath('//span[@class="c-address-city"]/text()').extract_first(),
"state": response.xpath('//abbr[@itemprop="addressRegion"]/text()').extract_first(),
"postcode": response.xpath('//span[@itemprop="postalCode"]/text()').extract_first(),
"country": response.xpath('//abbr[@itemprop="addressCountry"]/text()').extract_first(),
"ref": ref,
"website": response.url,
"lat": float(response.xpath('//meta[@itemprop="latitude"]/@content').extract_first()),
"lon": float(response.xpath('//meta[@itemprop="longitude"]/@content').extract_first()),
"name": response.xpath('//h1[@itemprop="name"]/text()').extract_first(),
}
hours = self.parse_hours(response.xpath('//table[@class="c-hours-details"]//tbody/tr'))
if hours:
properties["opening_hours"] = hours
yield Feature(**properties)
def parse(self, response):
# Directory pages expose links via "Directory-listLink"; pages that render
# a location list use "Teaser" links instead.
urls = response.xpath('//a[@class="Directory-listLink"]/@href').extract()
is_store_list = response.xpath('//section[contains(@class,"LocationList")]').extract()
if not urls and is_store_list:
urls = response.xpath('//a[contains(@class,"Teaser-titleLink")]/@href').extract()
for url in urls:
# URLs with at least three path segments point at individual store pages.
if re.search(r".{2}/.+/.+", url):
yield scrapy.Request(response.urljoin(url), callback=self.parse_store)
else:
yield scrapy.Request(response.urljoin(url))
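# Usage sketch (standard Scrapy CLI; the output path is illustrative):
#   scrapy crawl staples -O staples.json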
blob_id: 9525f6bd270f77b8069065a49fcf48933c4f9f42 | repo: dedupeio/dedupe | path: /benchmarks/setup.py | filename: setup.py
license: MIT (permissive) | language: Python | encoding: UTF-8 | size: 190 bytes | stars: 2,702 | forks: 407 | revision_date: 2023-02-17T16:34:52
# Dummy file to allow editable installs
from setuptools import find_packages, setup
if __name__ == "__main__":
setup(
name="benchmarks",
packages=find_packages(),
)
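# Usage sketch: from the repository root, an editable install of this dummy
# package would typically be
#   pip install -e ./benchmarks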
blob_id: db77b30337ca69f0c72ba1a9052b28f54ad51287 | repo: Trusted-AI/AIF360 | path: /aif360/sklearn/datasets/openml_datasets.py | filename: openml_datasets.py
license: Apache-2.0 (permissive) | language: Python | encoding: UTF-8 | size: 10,742 bytes | stars: 1,157 | forks: 432 | revision_date: 2023-07-27T19:09:06
import os
import pandas as pd
from sklearn.datasets import fetch_openml
from aif360.sklearn.datasets.utils import standardize_dataset
# cache location
DATA_HOME_DEFAULT = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'..', 'data', 'raw')
def fetch_adult(subset='all', *, data_home=None, cache=True, binary_race=True,
usecols=None, dropcols=None, numeric_only=False, dropna=True):
"""Load the Adult Census Income Dataset.
Binarizes 'race' to 'White' (privileged) or 'Non-white' (unprivileged). The
other protected attribute is 'sex' ('Male' is privileged and 'Female' is
unprivileged). The outcome variable is 'annual-income': '>50K' (favorable)
or '<=50K' (unfavorable).
Note:
By default, the data is downloaded from OpenML. See the `adult
<https://www.openml.org/d/1590>`_ page for details.
Args:
subset ({'train', 'test', or 'all'}, optional): Select the dataset to
load: 'train' for the training set, 'test' for the test set, 'all'
for both.
data_home (string, optional): Specify another download and cache folder
for the datasets. By default all AIF360 datasets are stored in
'aif360/sklearn/data/raw' subfolders.
cache (bool): Whether to cache downloaded datasets.
binary_race (bool, optional): Group all non-white races together. Only
the protected attribute is affected, not the feature column, unless
numeric_only is ``True``.
usecols (list-like, optional): Feature column(s) to keep. All others are
dropped.
dropcols (list-like, optional): Feature column(s) to drop.
numeric_only (bool): Drop all non-numeric feature columns.
dropna (bool): Drop rows with NAs.
Returns:
namedtuple: Tuple containing X, y, and sample_weights for the Adult
dataset accessible by index or name.
See also:
:func:`sklearn.datasets.fetch_openml`
Examples:
>>> adult = fetch_adult()
>>> adult.X.shape
(45222, 13)
>>> adult_num = fetch_adult(numeric_only=True)
>>> adult_num.X.shape
(48842, 5)
"""
if subset not in {'train', 'test', 'all'}:
raise ValueError("subset must be either 'train', 'test', or 'all'; "
"cannot be {}".format(subset))
df = fetch_openml(data_id=1590, data_home=data_home or DATA_HOME_DEFAULT,
cache=cache, as_frame=True).frame
if subset == 'train':
df = df.iloc[16281:]
elif subset == 'test':
df = df.iloc[:16281]
df = df.rename(columns={'class': 'annual-income'}) # more descriptive name
df['annual-income'] = df['annual-income'].cat.reorder_categories(
['<=50K', '>50K'], ordered=True)
# binarize protected attributes
race = df.race.cat.set_categories(['Non-white', 'White'], ordered=True)
race = race.fillna('Non-white') if binary_race else 'race'
if numeric_only and binary_race:
df.race = race
race = 'race'
df.sex = df.sex.cat.reorder_categories(['Female', 'Male'], ordered=True)
return standardize_dataset(df, prot_attr=[race, 'sex'],
target='annual-income', sample_weight='fnlwgt',
usecols=usecols, dropcols=dropcols,
numeric_only=numeric_only, dropna=dropna)
def fetch_german(*, data_home=None, cache=True, binary_age=True, usecols=None,
dropcols=None, numeric_only=False, dropna=True):
"""Load the German Credit Dataset.
Protected attributes are 'sex' ('male' is privileged and 'female' is
unprivileged) and 'age' (binarized by default as recommended by
[#kamiran09]_: age >= 25 is considered privileged and age < 25 is considered
unprivileged; see the binary_age flag to keep this continuous). The outcome
variable is 'credit-risk': 'good' (favorable) or 'bad' (unfavorable).
Note:
By default, the data is downloaded from OpenML. See the `credit-g
<https://www.openml.org/d/31>`_ page for details.
Args:
data_home (string, optional): Specify another download and cache folder
for the datasets. By default all AIF360 datasets are stored in
'aif360/sklearn/data/raw' subfolders.
cache (bool): Whether to cache downloaded datasets.
binary_age (bool, optional): If ``True``, split protected attribute,
'age', into 'aged' (privileged) and 'youth' (unprivileged). The
'age' feature remains continuous.
usecols (list-like, optional): Column name(s) to keep. All others are
dropped.
dropcols (list-like, optional): Column name(s) to drop.
numeric_only (bool): Drop all non-numeric feature columns.
dropna (bool): Drop rows with NAs.
Returns:
namedtuple: Tuple containing X and y for the German dataset accessible
by index or name.
See also:
:func:`sklearn.datasets.fetch_openml`
References:
.. [#kamiran09] `F. Kamiran and T. Calders, "Classifying without
discriminating," 2nd International Conference on Computer,
Control and Communication, 2009.
<https://ieeexplore.ieee.org/abstract/document/4909197>`_
Examples:
>>> german = fetch_german()
>>> german.X.shape
(1000, 21)
>>> german_num = fetch_german(numeric_only=True)
>>> german_num.X.shape
(1000, 7)
>>> X, y = fetch_german(numeric_only=True)
>>> y_pred = LogisticRegression().fit(X, y).predict(X)
>>> disparate_impact_ratio(y, y_pred, prot_attr='age', priv_group=True,
... pos_label='good')
0.9483094846144106
"""
df = fetch_openml(data_id=31, data_home=data_home or DATA_HOME_DEFAULT,
cache=cache, as_frame=True).frame
df = df.rename(columns={'class': 'credit-risk'}) # more descriptive name
df['credit-risk'] = df['credit-risk'].cat.reorder_categories(
['bad', 'good'], ordered=True)
# binarize protected attribute (but not corresponding feature)
age = (pd.cut(df.age, [0, 25, 100],
labels=False if numeric_only else ['young', 'aged'])
if binary_age else 'age')
# Note: marital_status directly implies sex. i.e. 'div/dep/mar' => 'female'
# and all others => 'male'
personal_status = df.pop('personal_status').str.split(expand=True)
personal_status.columns = ['sex', 'marital_status']
df = df.join(personal_status.astype('category'))
df.sex = df.sex.cat.reorder_categories(['female', 'male'], ordered=True)
df.foreign_worker = df.foreign_worker.astype('category').cat.set_categories(
['no', 'yes'], ordered=True)
return standardize_dataset(df, prot_attr=['sex', age, 'foreign_worker'],
target='credit-risk', usecols=usecols,
dropcols=dropcols, numeric_only=numeric_only,
dropna=dropna)
def fetch_bank(*, data_home=None, cache=True, binary_age=True, percent10=False,
usecols=None, dropcols=['duration'], numeric_only=False, dropna=False):
"""Load the Bank Marketing Dataset.
The protected attribute is 'age' (binarized by default as suggested by
[#lequy22]_: age >= 25 and age < 60 is considered privileged, and age < 25 or
age >= 60 unprivileged; see the binary_age flag to keep this continuous). The
outcome variable is 'deposit': 'yes' or 'no'.
References:
.. [#lequy22] Le Quy, Tai, et al. "A survey on datasets for fairness‐aware machine
learning." Wiley Interdisciplinary Reviews: Data Mining and Knowledge
Discovery 12.3 (2022): e1452.
Note:
By default, the data is downloaded from OpenML. See the `bank-marketing
<https://www.openml.org/d/1461>`_ page for details.
Args:
data_home (string, optional): Specify another download and cache folder
for the datasets. By default all AIF360 datasets are stored in
'aif360/sklearn/data/raw' subfolders.
cache (bool): Whether to cache downloaded datasets.
percent10 (bool, optional): Download the reduced version (10% of data).
usecols (list-like, optional): Column name(s) to keep. All others are
dropped.
dropcols (list-like, optional): Column name(s) to drop.
numeric_only (bool): Drop all non-numeric feature columns.
dropna (bool): Drop rows with NAs. Note: this is False by default for
this dataset.
Returns:
namedtuple: Tuple containing X and y for the Bank dataset accessible by
index or name.
See also:
:func:`sklearn.datasets.fetch_openml`
Examples:
>>> bank = fetch_bank()
>>> bank.X.shape
(45211, 15)
>>> bank_nona = fetch_bank(dropna=True)
>>> bank_nona.X.shape
(7842, 15)
>>> bank_num = fetch_bank(numeric_only=True)
>>> bank_num.X.shape
(45211, 6)
"""
# TODO: this seems to be an old version
df = fetch_openml(data_id=1558 if percent10 else 1461, data_home=data_home
or DATA_HOME_DEFAULT, cache=cache, as_frame=True).frame
df.columns = ['age', 'job', 'marital', 'education', 'default', 'balance',
'housing', 'loan', 'contact', 'day', 'month', 'duration',
'campaign', 'pdays', 'previous', 'poutcome', 'deposit']
# remap target
df.deposit = df.deposit.map({'1': 'no', '2': 'yes'}).astype('category')
df.deposit = df.deposit.cat.set_categories(['no', 'yes'], ordered=True)
# replace 'unknown' marker with NaN
for col in df.select_dtypes('category'):
if 'unknown' in df[col].cat.categories:
df[col] = df[col].cat.remove_categories('unknown')
df.education = df.education.astype('category').cat.reorder_categories(
['primary', 'secondary', 'tertiary'], ordered=True)
# binarize protected attribute (but not corresponding feature)
age = (pd.cut(df.age, [0, 24, 60, 100], ordered=False,
labels=[0, 1, 0] if numeric_only
else ['<25 or >=60', '25-60', '<25 or >=60'])
if binary_age else 'age')
age = age.cat.reorder_categories([0, 1] if numeric_only
else ['<25 or >=60', '25-60'])
return standardize_dataset(df, prot_attr=[age], target='deposit',
usecols=usecols, dropcols=dropcols,
numeric_only=numeric_only, dropna=dropna)
blob_id: 7ba6250753ed26519d7add29d8b78313fb62d1a7 | repo: quay/quay | path: /image/docker/test/test_schema1.py | filename: test_schema1.py
license: Apache-2.0 (permissive) | language: Python | encoding: UTF-8 | size: 13,299 bytes | stars: 2,363 | forks: 322 | revision_date: 2023-08-28T13:52:31
# -*- coding: utf-8 -*-
import json
import os
import re
import pytest
from app import docker_v2_signing_key
from image.docker.schema1 import (
DockerSchema1Manifest,
DockerSchema1ManifestBuilder,
MalformedSchema1Manifest,
)
from util.bytes import Bytes
@pytest.mark.parametrize(
"json_data",
[
"",
"{}",
"""
{
"unknown": "key"
}
""",
],
)
def test_malformed_manifests(json_data):
with pytest.raises(MalformedSchema1Manifest):
DockerSchema1Manifest(Bytes.for_string_or_unicode(json_data))
# Format of the key id expected in signed schema 1 manifests
# https://docs.docker.com/registry/spec/auth/jwt/
KID_FORMAT_REGEX = "([A-Z0-9]{4}:){11}[A-Z0-9]{4}"
MANIFEST_BYTES = json.dumps(
{
"name": "hello-world",
"tag": "latest",
"architecture": "amd64",
"fsLayers": [
{"blobSum": "sha256:cd8567d70002e957612902a8e985ea129d831ebe04057d88fb644857caa45d11"},
{"blobSum": "sha256:cc8567d70002e957612902a8e985ea129d831ebe04057d88fb644857caa45d11"},
{"blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"},
],
"history": [
{"v1Compatibility": '{"id":"sizedid", "parent": "someid", "Size": 1234}'},
{"v1Compatibility": '{"id":"someid", "parent": "anotherid"}'},
{"v1Compatibility": '{"id":"anotherid"}'},
],
"schemaVersion": 1,
"signatures": [
{
"header": {
"jwk": {
"crv": "P-256",
"kid": "OD6I:6DRK:JXEJ:KBM4:255X:NSAA:MUSF:E4VM:ZI6W:CUN2:L4Z6:LSF4",
"kty": "EC",
"x": "3gAwX48IQ5oaYQAYSxor6rYYc_6yjuLCjtQ9LUakg4A",
"y": "t72ge6kIA1XOjqjVoEOiPPAURltJFBMGDSQvEGVB010",
},
"alg": "ES256",
},
"signature": "XREm0L8WNn27Ga_iE_vRnTxVMhhYY0Zst_FfkKopg6gWSoTOZTuW4rK0fg_IqnKkEKlbD83tD46LKEGi5aIVFg",
"protected": "eyJmb3JtYXRMZW5ndGgiOjY2MjgsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wNC0wOFQxODo1Mjo1OVoifQ",
}
],
}
)
def test_valid_manifest():
manifest = DockerSchema1Manifest(Bytes.for_string_or_unicode(MANIFEST_BYTES), validate=False)
assert len(manifest.signatures) == 1
assert manifest.namespace == ""
assert manifest.repo_name == "hello-world"
assert manifest.tag == "latest"
assert manifest.image_ids == {"sizedid", "someid", "anotherid"}
assert manifest.parent_image_ids == {"someid", "anotherid"}
assert manifest.layers_compressed_size == 1234
assert manifest.config_media_type is None
assert len(manifest.layers) == 3
assert manifest.layers[0].v1_metadata.image_id == "anotherid"
assert manifest.layers[0].v1_metadata.parent_image_id is None
assert manifest.layers[1].v1_metadata.image_id == "someid"
assert manifest.layers[1].v1_metadata.parent_image_id == "anotherid"
assert manifest.layers[2].v1_metadata.image_id == "sizedid"
assert manifest.layers[2].v1_metadata.parent_image_id == "someid"
assert manifest.layers[0].compressed_size is None
assert manifest.layers[1].compressed_size is None
assert manifest.layers[2].compressed_size == 1234
assert manifest.leaf_layer == manifest.layers[2]
assert manifest.created_datetime is None
unsigned = manifest.unsigned()
assert unsigned.namespace == manifest.namespace
assert unsigned.repo_name == manifest.repo_name
assert unsigned.tag == manifest.tag
assert unsigned.layers == manifest.layers
assert unsigned.blob_digests == manifest.blob_digests
assert unsigned.digest != manifest.digest
image_layers = list(manifest.get_layers(None))
assert len(image_layers) == 3
for index in range(0, 3):
assert image_layers[index].layer_id == manifest.layers[index].v1_metadata.image_id
assert image_layers[index].blob_digest == manifest.layers[index].digest
assert image_layers[index].command == manifest.layers[index].v1_metadata.command
def test_validate_manifest():
test_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(test_dir, "validated_manifest.json"), "r") as f:
manifest_bytes = f.read()
manifest = DockerSchema1Manifest(Bytes.for_string_or_unicode(manifest_bytes), validate=True)
digest = manifest.digest
assert digest == "sha256:b5dc4f63fdbd64f34f2314c0747ef81008f9fcddce4edfc3fd0e8ec8b358d571"
assert manifest.created_datetime
def test_validate_manifest_with_unicode():
test_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(test_dir, "validated_manifest_with_unicode.json"), "r") as f:
manifest_bytes = f.read()
manifest = DockerSchema1Manifest(Bytes.for_string_or_unicode(manifest_bytes), validate=True)
digest = manifest.digest
assert digest == "sha256:815ecf45716a96b19d54d911e6ace91f78bab26ca0dd299645d9995dacd9f1ef"
assert manifest.created_datetime
def test_validate_manifest_with_unicode_encoded():
test_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(test_dir, "manifest_unicode_row.json"), "r") as f:
manifest_bytes = json.loads(f.read())[0]["json_data"]
manifest = DockerSchema1Manifest(Bytes.for_string_or_unicode(manifest_bytes), validate=True)
digest = manifest.digest
assert digest == "sha256:dde3714ce7e23edc6413aa85c0b42792e4f2f79e9ea36afc154d63ff3d04e86c"
assert manifest.created_datetime
def test_validate_manifest_with_unencoded_unicode():
test_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(test_dir, "manifest_unencoded_unicode.json"), "r") as f:
manifest_bytes = f.read()
manifest = DockerSchema1Manifest(Bytes.for_string_or_unicode(manifest_bytes))
digest = manifest.digest
assert digest == "sha256:5d8a0f34744a39bf566ba430251adc0cc86587f86aed3ac2acfb897f349777bc"
assert manifest.created_datetime
layers = list(manifest.get_layers(None))
assert layers[-1].author == "Sômé guy"
@pytest.mark.parametrize(
"with_key",
[
None,
docker_v2_signing_key,
],
)
def test_build_unencoded_unicode_manifest(with_key):
builder = DockerSchema1ManifestBuilder("somenamespace", "somerepo", "sometag")
builder.add_layer(
"sha256:abcde",
json.dumps(
{
"id": "someid",
"author": "Sômé guy",
},
ensure_ascii=False,
),
)
built = builder.build(with_key, ensure_ascii=False)
# Assert kid was created correctly
# https://docs.docker.com/registry/spec/auth/jwt/
if with_key is not None:
assert len(built.signatures) == 1
assert re.match(KID_FORMAT_REGEX, built.signatures[0]["header"]["jwk"]["kid"])
built._validate()
def test_validate_manifest_known_issue():
test_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(test_dir, "validate_manifest_known_issue.json"), "r") as f:
manifest_bytes = f.read()
manifest = DockerSchema1Manifest(Bytes.for_string_or_unicode(manifest_bytes))
digest = manifest.digest
assert digest == "sha256:44518f5a4d1cb5b7a6347763116fb6e10f6a8563b6c40bb389a0a982f0a9f47a"
assert manifest.created_datetime
layers = list(manifest.get_layers(None))
assert layers[-1].author is None
@pytest.mark.parametrize(
"with_key",
[
None,
docker_v2_signing_key,
],
)
def test_validate_manifest_with_emoji(with_key):
builder = DockerSchema1ManifestBuilder("somenamespace", "somerepo", "sometag")
builder.add_layer(
"sha256:abcde",
json.dumps(
{
"id": "someid",
"author": "😱",
},
ensure_ascii=False,
),
)
built = builder.build(with_key, ensure_ascii=False)
built._validate()
# Ensure the manifest can be reloaded.
built_bytes = built.bytes.as_encoded_str()
DockerSchema1Manifest(Bytes.for_string_or_unicode(built_bytes))
@pytest.mark.parametrize(
"with_key",
[
None,
docker_v2_signing_key,
],
)
def test_validate_manifest_with_none_metadata_layer(with_key):
builder = DockerSchema1ManifestBuilder("somenamespace", "somerepo", "sometag")
builder.add_layer("sha256:abcde", None)
built = builder.build(with_key, ensure_ascii=False)
built._validate()
# Ensure the manifest can be reloaded.
built_bytes = built.bytes.as_encoded_str()
DockerSchema1Manifest(Bytes.for_string_or_unicode(built_bytes))
def test_build_with_metadata_removed():
builder = DockerSchema1ManifestBuilder("somenamespace", "somerepo", "sometag")
builder.add_layer(
"sha256:abcde",
json.dumps(
{
"id": "someid",
"parent": "someid",
"author": "😱",
"comment": "hello world!",
"created": "1975-01-02 12:34",
"Size": 5678,
"container_config": {
"Cmd": "foobar",
"more": "stuff",
"goes": "here",
},
}
),
)
builder.add_layer(
"sha256:abcde",
json.dumps(
{
"id": "anotherid",
"author": "😱",
"created": "1985-02-03 12:34",
"Size": 1234,
"container_config": {
"Cmd": "barbaz",
"more": "stuff",
"goes": "here",
},
}
),
)
built = builder.build(None)
built._validate()
assert built.leaf_layer_v1_image_id == "someid"
with_metadata_removed = builder.with_metadata_removed().build()
with_metadata_removed._validate()
built_layers = list(built.get_layers(None))
with_metadata_removed_layers = list(with_metadata_removed.get_layers(None))
assert len(built_layers) == len(with_metadata_removed_layers)
for index, built_layer in enumerate(built_layers):
with_metadata_removed_layer = with_metadata_removed_layers[index]
assert built_layer.layer_id == with_metadata_removed_layer.layer_id
assert built_layer.compressed_size == with_metadata_removed_layer.compressed_size
assert built_layer.command == with_metadata_removed_layer.command
assert built_layer.comment == with_metadata_removed_layer.comment
assert built_layer.author == with_metadata_removed_layer.author
assert built_layer.blob_digest == with_metadata_removed_layer.blob_digest
assert built_layer.created_datetime == with_metadata_removed_layer.created_datetime
assert built.leaf_layer_v1_image_id == with_metadata_removed.leaf_layer_v1_image_id
assert built_layers[-1].layer_id == built.leaf_layer_v1_image_id
assert json.loads(built_layers[-1].internal_layer.raw_v1_metadata) == json.loads(
with_metadata_removed_layers[-1].internal_layer.raw_v1_metadata
)
def test_validate_manifest_without_metadata():
test_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(test_dir, "validated_manifest.json"), "r") as f:
manifest_bytes = f.read()
manifest = DockerSchema1Manifest(Bytes.for_string_or_unicode(manifest_bytes), validate=True)
digest = manifest.digest
assert digest == "sha256:b5dc4f63fdbd64f34f2314c0747ef81008f9fcddce4edfc3fd0e8ec8b358d571"
assert manifest.created_datetime
with_metadata_removed = manifest._unsigned_builder().with_metadata_removed().build()
assert with_metadata_removed.leaf_layer_v1_image_id == manifest.leaf_layer_v1_image_id
manifest_layers = list(manifest.get_layers(None))
with_metadata_removed_layers = list(with_metadata_removed.get_layers(None))
assert len(manifest_layers) == len(with_metadata_removed_layers)
for index, built_layer in enumerate(manifest_layers):
with_metadata_removed_layer = with_metadata_removed_layers[index]
assert built_layer.layer_id == with_metadata_removed_layer.layer_id
assert built_layer.compressed_size == with_metadata_removed_layer.compressed_size
assert built_layer.command == with_metadata_removed_layer.command
assert built_layer.comment == with_metadata_removed_layer.comment
assert built_layer.author == with_metadata_removed_layer.author
assert built_layer.blob_digest == with_metadata_removed_layer.blob_digest
assert built_layer.created_datetime == with_metadata_removed_layer.created_datetime
assert with_metadata_removed.digest != manifest.digest
assert with_metadata_removed.namespace == manifest.namespace
assert with_metadata_removed.repo_name == manifest.repo_name
assert with_metadata_removed.tag == manifest.tag
assert with_metadata_removed.created_datetime == manifest.created_datetime
assert with_metadata_removed.checksums == manifest.checksums
assert with_metadata_removed.image_ids == manifest.image_ids
assert with_metadata_removed.parent_image_ids == manifest.parent_image_ids
blob_id: f3a7e902c28f33a8355ca69cb0cf3dfa0982b790 | repo: scikit-hep/hist | path: /tests/test_axis.py | filename: test_axis.py
license: BSD-3-Clause (permissive) | language: Python | encoding: UTF-8 | size: 2,078 bytes | stars: 119 | forks: 23 | revision_date: 2023-08-10T15:09:34
from __future__ import annotations
import pytest
from hist import axis, hist
def test_axis_names():
"""
Test axis names -- whether axis names work.
"""
assert axis.Regular(50, -3, 3, name="x0")
assert axis.Boolean(name="x_")
assert axis.Variable(range(-3, 3), name="xx")
assert axis.Integer(-3, 3, name="x_x")
assert axis.IntCategory(range(-3, 3), name="X__X")
assert axis.StrCategory("FT", name="X00")
assert axis.Regular(50, -3, 3, name="")
assert axis.Boolean(name="")
assert axis.Variable(range(-3, 3))
assert axis.Integer(-3, 3, name="")
assert axis.IntCategory(range(-3, 3), name="")
assert axis.StrCategory("FT")
def test_axis_flow():
assert axis.Regular(9, 0, 8, flow=False) == axis.Regular(
9, 0, 8, underflow=False, overflow=False
)
assert axis.Variable([1, 2, 3], flow=False) == axis.Variable(
[1, 2, 3], underflow=False, overflow=False
)
assert axis.Integer(0, 8, flow=False) == axis.Integer(
0, 8, underflow=False, overflow=False
)
assert axis.Regular(9, 0, 8, flow=False, underflow=True) == axis.Regular(
9, 0, 8, overflow=False
)
assert axis.Variable([1, 2, 3], flow=False, underflow=True) == axis.Variable(
[1, 2, 3], overflow=False
)
assert axis.Integer(0, 8, flow=False, underflow=True) == axis.Integer(
0, 8, overflow=False
)
assert axis.Regular(9, 0, 8, flow=False, overflow=True) == axis.Regular(
9, 0, 8, underflow=False
)
assert axis.Variable([1, 2, 3], flow=False, overflow=True) == axis.Variable(
[1, 2, 3], underflow=False
)
assert axis.Integer(0, 8, flow=False, overflow=True) == axis.Integer(
0, 8, underflow=False
)
def test_axis_disallowed_names():
with pytest.warns(UserWarning):
hist.Hist(axis.Regular(10, 0, 10, name="weight"))
with pytest.warns(UserWarning):
hist.Hist(axis.Regular(10, 0, 10, name="sample"))
with pytest.warns(UserWarning):
hist.Hist(axis.Regular(10, 0, 10, name="threads"))
|
dc3b0e383c03e0c17b7c040887018dd24d80b2de
|
7678a802e83ba88cc0ca59066493a581cf3ae009
|
/Giveme5W1H/examples/evaluation/evaluation.py
|
b70e957ebbcebc3d86ee0695f72eb19c685e06c2
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
fhamborg/Giveme5W1H
|
152145eee92062ae227da910080bcd6c6b24db4a
|
657738781fe387d76e6e0da35ed009ccf81f4290
|
refs/heads/master
| 2023-08-24T21:58:50.638812
| 2022-03-31T19:00:28
| 2022-03-31T19:00:28
| 91,314,968
| 495
| 95
|
Apache-2.0
| 2023-08-15T09:10:43
| 2017-05-15T08:43:39
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,017
|
py
|
evaluation.py
|
import csv
import os
"""
evaluates results by calculating:
- ICR(pairwise intercoder reliability AB, BC, AC)
- GP(precision_generalized)
Results are global and per category
"""
filename = 'evaluation_data_how.csv'
# change csv column index, if necessary here
category_index = 2
coder_a_index = 5
coder_b_index = 6
coder_c_index = 7
# helper function to keep the agreement computation readable
def measure_agreement(a, b):
if a == b:
return 1
else:
return 0
# convert an ICR rating on the 0-2 scale to the GP scale 0-1
# (deliberately written in an easy-to-read way, i.e. without normalization)
def to_precision_generalized(a):
if a == 0:
# not relevant:
return 0
elif a == 1:
# partial relevant
return 0.5
else:
# relevant
return 1
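# Worked example (illustrative): if the three coders rate a line 2, 1, 2, the
# pairwise agreements are ab=0, ac=1, cb=0, so the line agreement is 1/3; the
# GP contribution is 1.0 + 0.5 + 1.0 = 2.5 across the three ratings.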
with open(os.path.dirname(__file__) + '/' + filename, 'r') as csvfile:
reader = csv.reader(csvfile)
is_header = True
ICR = 0
ICR_cat = {}
generalized_precision = 0
generalized_precision_cat = {}
agreements = []
for line in reader:
if is_header:
is_header = False
else:
category = line[category_index]
coder_a = int(line[coder_a_index])
coder_b = int(line[coder_b_index])
coder_c = int(line[coder_c_index])
# measure pairwise agreement AB, AC, CB
ab = measure_agreement(coder_a, coder_b)
ac = measure_agreement(coder_a, coder_c)
cb = measure_agreement(coder_c, coder_b)
# measure agreement of the pairs
# inter-rater reliability is based on agreement between pairs of raters.
line_agreement = (ab + ac + cb) / 3
# irc global
ICR = ICR + line_agreement
# irc per category
ICR_cat[category] = ICR_cat.get(category, 0) + line_agreement
# gp global
tmp_gp = to_precision_generalized(coder_a) + to_precision_generalized(coder_b) + to_precision_generalized(
coder_c)
generalized_precision = generalized_precision + tmp_gp
# gp per category
generalized_precision_cat[category] = generalized_precision_cat.get(category, 0) + tmp_gp
# saved for possible later output
agreements.append((category, ab, ac, cb, line_agreement, tmp_gp))
line_count = len(agreements)
cat_count = len(ICR_cat)
line_count_cat = line_count / cat_count
# for GP: summarize all ratings dividing by the number of all ratings
rating_count = line_count * 3 # each doc was rated by 3 coder
rating_count_cat = rating_count / cat_count
# output
print('Global ICR: ' + str(ICR / line_count))
print('Global GP: ' + str(generalized_precision / rating_count))
for cat in ICR_cat:
val = ICR_cat[cat]
print(cat + ' ICR: ' + str(val / line_count_cat))
val = generalized_precision_cat[cat]
print(cat + ' GP: ' + str(val / rating_count_cat))
|
25b892a7b763009f31b4af0432e175415aa879d6
|
04e5b6df2ee3bcfb7005d8ec91aab8e380333ac4
|
/Lib/objc/_IconServices.py
|
9337e9cd0796aa005685cc549851ed611e8adf9a
|
[
"MIT"
] |
permissive
|
ColdGrub1384/Pyto
|
64e2a593957fd640907f0e4698d430ea7754a73e
|
7557485a733dd7e17ba0366b92794931bdb39975
|
refs/heads/main
| 2023-08-01T03:48:35.694832
| 2022-07-20T14:38:45
| 2022-07-20T14:38:45
| 148,944,721
| 884
| 157
|
MIT
| 2023-02-26T21:34:04
| 2018-09-15T22:29:07
|
C
|
UTF-8
|
Python
| false
| false
| 3,037
|
py
|
_IconServices.py
|
"""
Classes from the 'IconServices' framework.
"""
try:
from rubicon.objc import ObjCClass
except ValueError:
# rubicon.objc is unusable outside an Objective-C runtime; fall back to a
# stub so this module can still be imported anywhere.
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
ISImageSpecification = _Class("ISImageSpecification")
ISHintedValue = _Class("ISHintedValue")
ISHintedFloat = _Class("ISHintedFloat")
ISHintedSize = _Class("ISHintedSize")
ISHintedRect = _Class("ISHintedRect")
ISIconObserver = _Class("ISIconObserver")
ISIconDecoration = _Class("ISIconDecoration")
ISiosDocumentRecipe = _Class("ISiosDocumentRecipe")
ISmacosDocumentRecipe = _Class("ISmacosDocumentRecipe")
ISiosmacDocumentRecipe = _Class("ISiosmacDocumentRecipe")
ISSymbolImageDescriptor = _Class("ISSymbolImageDescriptor")
ISImageDescriptor = _Class("ISImageDescriptor")
ISImage = _Class("ISImage")
ISSymbolImage = _Class("ISSymbolImage")
ISPlaceholderImage = _Class("ISPlaceholderImage")
ISConcreteImage = _Class("ISConcreteImage")
ISCacheImage = _Class("ISCacheImage")
ISDeviceInfo = _Class("ISDeviceInfo")
ISBlurEffect = _Class("ISBlurEffect")
ISBorderEffect = _Class("ISBorderEffect")
ISDimmedDarkEffect = _Class("ISDimmedDarkEffect")
ISDimmedEffect = _Class("ISDimmedEffect")
ISEmbossedEffect = _Class("ISEmbossedEffect")
ISDropShaddowEffect = _Class("ISDropShaddowEffect")
ISIconManager = _Class("ISIconManager")
ISImageBag = _Class("ISImageBag")
ISIconResourceLocator = _Class("ISIconResourceLocator")
ISIconTypeResourceLocator = _Class("ISIconTypeResourceLocator")
ISBundle = _Class("ISBundle")
ISCenterEmbossRecipe = _Class("ISCenterEmbossRecipe")
ISLeadingStatusBadgeRecipe = _Class("ISLeadingStatusBadgeRecipe")
ISTrailingStatusBadgeRecipe = _Class("ISTrailingStatusBadgeRecipe")
ISShapeCompositorResource = _Class("ISShapeCompositorResource")
ISCircle = _Class("ISCircle")
ISContinuousRoundedRect = _Class("ISContinuousRoundedRect")
ISDefaults = _Class("ISDefaults")
ISIconDecorationResource = _Class("ISIconDecorationResource")
ISGraphicsContext = _Class("ISGraphicsContext")
ISColor = _Class("ISColor")
ISLayer = _Class("ISLayer")
ISCompositor = _Class("ISCompositor")
_ISCompositorElement = _Class("_ISCompositorElement")
ISIconSpecification = _Class("ISIconSpecification")
ISImageCache = _Class("ISImageCache")
ISIconCacheIOS = _Class("ISIconCacheIOS")
ISIconCacheClient = _Class("ISIconCacheClient")
ISIcon = _Class("ISIcon")
ISGenericIconIOS = _Class("ISGenericIconIOS")
ISIconIOS = _Class("ISIconIOS")
ISBundleIcon = _Class("ISBundleIcon")
ISImageBagIcon = _Class("ISImageBagIcon")
ISIconFactory = _Class("ISIconFactory")
ISGenericRecipe = _Class("ISGenericRecipe")
ISMessagesAppRecipe = _Class("ISMessagesAppRecipe")
ISwatchOSAppRecipe = _Class("ISwatchOSAppRecipe")
ISiOSAppClipRecipe = _Class("ISiOSAppClipRecipe")
ISiOSAppRecipe = _Class("ISiOSAppRecipe")
ISiOSMacAppRecipe = _Class("ISiOSMacAppRecipe")
ISAssetCatalogResource = _Class("ISAssetCatalogResource")
ISResourceMetadata = _Class("ISResourceMetadata")
ISIconLayer = _Class("ISIconLayer")
blob_id: a8164939675a5ed511690b1f66d085e89edce484 | repo: reubano/pygogo | path: /bin/gogo | filename: gogo
license: MIT (permissive) | language: Python | encoding: UTF-8 | size: 309 bytes | stars: 310 | forks: 20 | revision_date: 2021-12-28T18:35:03
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" A Python logger with super powers """
from __future__ import (
absolute_import, division, print_function, with_statement,
unicode_literals)
import sys
sys.path.append('../pygogo')
from pygogo import main
if __name__ == '__main__':
main.run()
blob_id: aedce828cf2704b8c67c536cf750e613c7b600ba | repo: irmen/Pyro4 | path: /src/Pyro4/socketserver/threadpoolserver.py | filename: threadpoolserver.py
license: MIT (permissive) | language: Python | encoding: UTF-8 | size: 10,057 bytes | stars: 667 | forks: 105 | revision_date: 2023-06-04T16:00:32
"""
Socket server based on a worker thread pool. Doesn't use select.
Uses a single worker thread per client connection.
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
from __future__ import print_function
import socket
import logging
import sys
import time
import threading
import os
from Pyro4 import socketutil, errors, util
from Pyro4.configuration import config
from .threadpool import Pool, NoFreeWorkersError
from .multiplexserver import selectors
log = logging.getLogger("Pyro4.threadpoolserver")
_client_disconnect_lock = threading.Lock()
class ClientConnectionJob(object):
"""
Takes care of a single client connection and all requests
that may arrive during its life span.
"""
def __init__(self, clientSocket, clientAddr, daemon):
self.csock = socketutil.SocketConnection(clientSocket)
self.caddr = clientAddr
self.daemon = daemon
def __call__(self):
if self.handleConnection():
try:
while True:
try:
self.daemon.handleRequest(self.csock)
except (socket.error, errors.ConnectionClosedError):
# client went away.
log.debug("disconnected %s", self.caddr)
break
except errors.SecurityError:
log.debug("security error on client %s", self.caddr)
break
except errors.TimeoutError as x:
# for timeout errors we're not really interested in detailed traceback info
log.warning("error during handleRequest: %s" % x)
break
except:
# other errors log a warning, break this loop and close the client connection
ex_t, ex_v, ex_tb = sys.exc_info()
tb = util.formatTraceback(ex_t, ex_v, ex_tb)
msg = "error during handleRequest: %s; %s" % (ex_v, "".join(tb))
log.warning(msg)
break
finally:
with _client_disconnect_lock:
try:
self.daemon._clientDisconnect(self.csock)
except Exception as x:
log.warning("Error in clientDisconnect: " + str(x))
self.csock.close()
def handleConnection(self):
# connection handshake
try:
if self.daemon._handshake(self.csock):
return True
self.csock.close()
except:
ex_t, ex_v, ex_tb = sys.exc_info()
tb = util.formatTraceback(ex_t, ex_v, ex_tb)
log.warning("error during connect/handshake: %s; %s", ex_v, "\n".join(tb))
self.csock.close()
return False
def denyConnection(self, reason):
log.warning("client connection was denied: " + reason)
# return failed handshake
self.daemon._handshake(self.csock, denied_reason=reason)
self.csock.close()
class Housekeeper(threading.Thread):
def __init__(self, daemon):
super(Housekeeper, self).__init__(name="housekeeper")
self.pyroDaemon = daemon
self.stop = threading.Event()
self.daemon = True
self.waittime = min(config.POLLTIMEOUT or 0, max(config.COMMTIMEOUT or 0, 5))
def run(self):
while True:
if self.stop.wait(self.waittime):
break
self.pyroDaemon._housekeeping()
class SocketServer_Threadpool(object):
"""transport server for socket connections, worker thread pool version."""
def __init__(self):
self.daemon = self.sock = self._socketaddr = self.locationStr = self.pool = None
self.shutting_down = False
self.housekeeper = None
self._selector = selectors.DefaultSelector() if selectors else None
def init(self, daemon, host, port, unixsocket=None):
log.info("starting thread pool socketserver")
self.daemon = daemon
self.sock = None
bind_location = unixsocket if unixsocket else (host, port)
if config.SSL:
sslContext = socketutil.getSSLcontext(servercert=config.SSL_SERVERCERT,
serverkey=config.SSL_SERVERKEY,
keypassword=config.SSL_SERVERKEYPASSWD,
cacerts=config.SSL_CACERTS)
log.info("using SSL, cert=%s key=%s cacerts=%s", config.SSL_SERVERCERT, config.SSL_SERVERKEY, config.SSL_CACERTS)
else:
sslContext = None
log.info("not using SSL")
self.sock = socketutil.createSocket(bind=bind_location,
reuseaddr=config.SOCK_REUSE,
timeout=config.COMMTIMEOUT,
noinherit=True,
nodelay=config.SOCK_NODELAY,
sslContext=sslContext)
self._socketaddr = self.sock.getsockname()
if not unixsocket and self._socketaddr[0].startswith("127."):
if host is None or host.lower() != "localhost" and not host.startswith("127."):
log.warning("weird DNS setup: %s resolves to localhost (127.x.x.x)", host)
if unixsocket:
self.locationStr = "./u:" + unixsocket
else:
host = host or self._socketaddr[0]
port = port or self._socketaddr[1]
if ":" in host: # ipv6
self.locationStr = "[%s]:%d" % (host, port)
else:
self.locationStr = "%s:%d" % (host, port)
self.pool = Pool()
self.housekeeper = Housekeeper(daemon)
self.housekeeper.start()
if self._selector:
self._selector.register(self.sock, selectors.EVENT_READ, self)
def __del__(self):
if self.sock is not None:
self.sock.close()
self.sock = None
if self.pool is not None:
self.pool.close()
self.pool = None
if self.housekeeper:
self.housekeeper.stop.set()
self.housekeeper.join()
self.housekeeper = None
def __repr__(self):
return "<%s on %s; %d workers>" % (self.__class__.__name__, self.locationStr, self.pool.num_workers())
def loop(self, loopCondition=lambda: True):
log.debug("threadpool server requestloop")
while (self.sock is not None) and not self.shutting_down and loopCondition():
try:
self.events([self.sock])
except (socket.error, OSError) as x:
if not loopCondition():
# swallow the socket error if the loop terminates anyway;
# this can occur when we are asked to shut down, and the socket may already be invalid then
break
# socket errors may not lead to a server abort, so we log it and continue
err = getattr(x, "errno", x.args[0])
log.warning("socket error '%s' with errno=%d, shouldn't happen", x, err)
continue
except KeyboardInterrupt:
log.debug("stopping on break signal")
break
def combine_loop(self, server):
raise TypeError("You can't use the loop combiner on the threadpool server type")
def events(self, eventsockets):
"""used for external event loops: handle events that occur on one of the sockets of this server"""
# we only react on events on our own server socket.
# all other (client) sockets are owned by their individual threads.
assert self.sock in eventsockets
try:
if self._selector:
events = self._selector.select(config.POLLTIMEOUT)
if not events:
return
csock, caddr = self.sock.accept()
if self.shutting_down:
csock.close()
return
if hasattr(csock, "getpeercert"):
log.debug("connected %s - SSL", caddr)
else:
log.debug("connected %s - unencrypted", caddr)
if config.COMMTIMEOUT:
csock.settimeout(config.COMMTIMEOUT)
job = ClientConnectionJob(csock, caddr, self.daemon)
try:
self.pool.process(job)
except NoFreeWorkersError:
job.denyConnection("no free workers, increase server threadpool size")
except socket.timeout:
pass # just continue the loop on a timeout on accept
def shutdown(self):
self.shutting_down = True
self.wakeup()
time.sleep(0.05)
self.close()
self.sock = None
def close(self):
if self.housekeeper:
self.housekeeper.stop.set()
self.housekeeper.join()
self.housekeeper = None
if self.sock:
sockname = None
try:
sockname = self.sock.getsockname()
except (socket.error, OSError):
pass
try:
self.sock.close()
if isinstance(sockname, str):
# it was a Unix domain socket, remove it from the filesystem
if os.path.exists(sockname):
os.remove(sockname)
except Exception:
pass
self.sock = None
self.pool.close()
@property
def sockets(self):
# the server socket is all we care about, all client sockets are running in their own threads
return [self.sock]
@property
def selector(self):
raise TypeError("threadpool server doesn't have multiplexing selector")
def wakeup(self):
socketutil.interruptSocket(self._socketaddr)
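# Hypothetical usage sketch (not part of the original file); `daemon` and the
# host/port values are placeholders for whatever owns this transport:
#
# server = SocketServer_Threadpool()
# server.init(daemon, "localhost", 9090)
# print("serving on", server.locationStr)
# try:
#     server.loop(loopCondition=lambda: True)
# finally:
#     server.shutdown()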
blob_id: 0f7f2d2d1a2696af422056df6263413eaa36fb33
directory_id: 2337351b228818e41be3002bd38f68f77c2aa074
path: /core/topology/constraint/vlan.py
content_id: 7e27c1604e543e1d3a682f1adea11ad3f5d24578
detected_licenses: ["BSD-3-Clause"]
license_type: permissive
repo_name: nocproject/noc
snapshot_id: 57d40c680a1499374463e472434f9595ed6d1374
revision_id: 6e6d71574e9b9d822bec572cc629a0ea73604a59
branch_name: refs/heads/master
visit_date: 2023-08-31T01:11:33.544573
revision_date: 2023-08-30T17:31:11
committer_date: 2023-08-30T17:31:11
github_id: 107,815,776
star_events_count: 105
fork_events_count: 33
gha_license_id: BSD-3-Clause
gha_event_created_at: 2023-07-31T07:57:45
gha_created_at: 2017-10-21T21:04:33
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,298
extension: py
filename: vlan.py
content:
# ----------------------------------------------------------------------
# VLANConstraint
# ----------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
import operator
# Third-party modules
from typing import Dict, Set
import cachetools
# NOC modules
from noc.inv.models.link import Link
from noc.inv.models.subinterface import SubInterface
from .base import BaseConstraint
class VLANConstraint(BaseConstraint):
"""
Follow only links that carry a specific VLAN
:param vlan: VLAN id to check
:param allow_tagged: Allow links with tagged `vlan`
:param allow_untagged: Allow links with untagged `vlan`
:param strict: True - both ends of the link must satisfy the criteria,
False - at least one end of the link must satisfy the criteria.
"""
def __init__(
self,
vlan: int = 1,
allow_tagged: bool = True,
allow_untagged: bool = True,
strict: bool = True,
) -> None:
super().__init__()
self.vlan = vlan
self.allow_tagged = allow_tagged
self.allow_untagged = allow_untagged
self._is_valid_link_cache: Dict[Link, bool] = {}
self.strict = strict
@cachetools.cachedmethod(operator.attrgetter("_is_valid_link_cache"))
def is_valid_link(self, link: Link) -> bool:
bridged_mo: Set[int] = set()
tagged_mo: Set[int] = set()
untagged_mo: Set[int] = set()
l3_mo: Set[int] = set()
for doc in SubInterface._get_collection().find(
{"interface": {"$in": link.interface_ids}},
{
"_id": 0,
"managed_object": 1,
"enabled_afi": 1,
"untagged_vlan": 1,
"tagged_vlans": 1,
"vlan_ids": 1,
},
):
if "BRIDGE" in doc["enabled_afi"]:
bridged_mo.add(doc["managed_object"])
if doc.get("untagged_vlan") == self.vlan:
untagged_mo.add(doc["managed_object"])
if self.vlan in doc.get("tagged_vlans", []):
tagged_mo.add(doc["managed_object"])
if self.vlan in doc.get("vlan_ids", []):
l3_mo.add(doc["managed_object"])
if self.strict:
# Both ends must satisfy
if len(bridged_mo) > 1 and (
(self.allow_tagged and len(tagged_mo) > 1)
or (self.allow_untagged and len(untagged_mo) > 1)
):
# Bridge-to-Bridge
return True
if (
len(bridged_mo) == 1
and ((self.allow_tagged and tagged_mo) or (self.allow_untagged and untagged_mo))
and len(l3_mo) == 1
):
# L3 to bridge
return True
else:
if len(bridged_mo) == 1 and (
(self.allow_tagged and len(tagged_mo) > 1)
or (self.allow_untagged and len(untagged_mo) > 1)
):
# Bridge-to-Bridge
return True
if len(l3_mo) == 1:
# L3 to bridge
return True
return False
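# Hypothetical usage sketch (not part of the original file): restrict a topology
# walk to links that carry VLAN 100 as tagged on both ends; `link` is assumed to
# be a noc.inv.models.link.Link instance.
#
# constraint = VLANConstraint(vlan=100, allow_tagged=True, allow_untagged=False, strict=True)
# if constraint.is_valid_link(link):
#     ...  # follow this link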
blob_id: 0a3240f01292bc0d85cc75e2b96d402c78b308f9
directory_id: 13800b7827598e76428a335559b7bf11867ec2f0
path: /python/ccxt/okcoin.py
content_id: 8796a7658aec75d7f5f7dcb3cbb5a97ebcb16771
detected_licenses: ["MIT"]
license_type: permissive
repo_name: ccxt/ccxt
snapshot_id: b40a0466f5c430a3c0c6026552ae697aa80ba6c6
revision_id: e4065f6a490e6fc4dd7a72b375428b2faa570668
branch_name: refs/heads/master
visit_date: 2023-09-04T03:41:29.787733
revision_date: 2023-09-03T19:25:57
committer_date: 2023-09-03T19:25:57
github_id: 91,253,698
star_events_count: 30,798
fork_events_count: 8,190
gha_license_id: MIT
gha_event_created_at: 2023-09-14T21:59:09
gha_created_at: 2017-05-14T15:41:56
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 178,057
extension: py
filename: okcoin.py
content:
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.abstract.okcoin import ImplicitAPI
import hashlib
from ccxt.base.types import OrderSide
from ccxt.base.types import OrderType
from typing import Optional
from typing import List
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import CancelPending
from ccxt.base.errors import NotSupported
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import InvalidNonce
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import AuthenticationError
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class okcoin(Exchange, ImplicitAPI):
def describe(self):
return self.deep_extend(super(okcoin, self).describe(), {
'id': 'okcoin',
'name': 'OKCoin',
'countries': ['CN', 'US'],
'version': 'v3',
# cheapest endpoint is 100 requests per 2 seconds
# 50 requests per second => 1000 / 50 = 20ms
'rateLimit': 20,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': None,
'future': True,
'option': None,
'cancelOrder': True,
'createOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': True, # see below
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchLedger': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': None,
'fetchOrderTrades': True,
'fetchPosition': True,
'fetchPositions': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTransactions': None,
'fetchWithdrawals': True,
'transfer': True,
'withdraw': True,
},
'timeframes': {
'1m': '60',
'3m': '180',
'5m': '300',
'15m': '900',
'30m': '1800',
'1h': '3600',
'2h': '7200',
'4h': '14400',
'6h': '21600',
'12h': '43200',
'1d': '86400',
'1w': '604800',
'1M': '2678400',
'3M': '8035200',
'6M': '16070400',
'1y': '31536000',
},
'hostname': 'okcoin.com',
'urls': {
'logo': 'https://user-images.githubusercontent.com/51840849/87295551-102fbf00-c50e-11ea-90a9-462eebba5829.jpg',
'api': {
'rest': 'https://www.{hostname}',
},
'www': 'https://www.okcoin.com',
'doc': 'https://www.okcoin.com/docs/en/',
'fees': 'https://www.okcoin.com/coin-fees',
'referral': 'https://www.okcoin.com/account/register?flag=activity&channelId=600001513',
'test': {
'rest': 'https://testnet.okex.com',
},
},
'api': {
'general': {
'get': {
'time': 8.3334,
},
},
'account': {
'get': {
'wallet': 8.3334,
'sub-account': 1000,
'asset-valuation': 1000,
'wallet/{currency}': 8.3334,
'withdrawal/history': 8.3334,
'withdrawal/history/{currency}': 8.3334,
'ledger': 5,
'deposit/address': 8.3334,
'deposit/history': 8.3334,
'deposit/history/{currency}': 8.3334,
'currencies': 8.3334,
'withdrawal/fee': 8.3334,
'deposit-lightning': 50,
'withdrawal-lightning': 50,
'fiat/deposit/detail': 5,
'fiat/deposit/details': 8.3334,
'fiat/withdraw/detail': 5,
'fiat/withdraw/details': 8.3334,
'fiat/channel': 8.3334,
},
'post': {
'transfer': 100, # 1 request per 2 seconds(per currency)
'withdrawal': 8.3334,
'fiat/cancel_deposit': 1,
'fiat/deposit': 8.3334,
'fiat/withdraw': 8.3334,
'fiat/cancel_withdrawal': 1,
},
},
# TODO fix signing issue in sign()
# all other endpoints are of the format
# api/account/v3/wallet
# otc endpoints swap the section and version segments, i.e. they are of the format
# api/v3/otc/rfq/instruments
'otc': {
'get': {
'rfq/instruments': 50, # represents: GET api/v3/otc/rfq/instruments
'rfq/trade': 50,
'rfq/history': 50,
},
'post': {
'rfq/quote': 50,
'rfq/trade': 50,
},
},
# TODO fix signing issue
'users': {
'get': {
'subaccount-info': 20,
'account-info': 20,
'subaccount/apikey': 20,
},
'post': {
'create-subaccount': 5, # represents: POST api/v3/users/create-subaccount
'delete-subaccount': 5,
'subaccount/apikey': 50,
'subacount/delete-apikey': 20,  # sic: "subacount" is the literal endpoint path
'subacount/modify-apikey': 20,
},
},
'earning': {
'get': {
'offers': 5,
'orders': 5,
'positions': 8.3334,
},
'post': {
'purchase': 5,
'redeem': 5,
'cancel': 5,
},
},
'spot': {
'get': {
'accounts': 5,
'accounts/{currency}': 5,
'accounts/{currency}/ledger': 5,
'orders': 10,
'orders_pending': 5,
'orders/{order_id}': 5,
'orders/{client_oid}': 5,
'trade_fee': 5,
'fills': 10,
'algo': 5,
# public
'instruments': 5,
'instruments/{instrument_id}/book': 5,
'instruments/ticker': 5,
'instruments/{instrument_id}/ticker': 5,
'instruments/{instrument_id}/trades': 5,
'instruments/{instrument_id}/candles': 5,
},
'post': {
'order_algo': 2.5,
'orders': 1,
'batch_orders': 2,
'cancel_orders/{order_id}': 1,
'cancel_orders/{client_oid}': 1,
'cancel_batch_algos': 5,
'cancel_batch_orders': 5,
'amend_order/{instrument_id}': 2.5,
'amend_batch_orders': 5,
},
},
'margin': {
# Margin trading closed down on February 21, 2022
'get': {
'accounts': 5,
'accounts/{instrument_id}': 5,
'accounts/{instrument_id}/ledger': 5,
'accounts/availability': 5,
'accounts/{instrument_id}/availability': 5,
'accounts/borrowed': 5,
'accounts/{instrument_id}/borrowed': 5,
'orders': 10,
'accounts/{instrument_id}/leverage': 1,
'orders/{order_id}': 5,
'orders/{client_oid}': 5,
'orders_pending': 5,
'fills': 10,
# public
'instruments/{instrument_id}/mark_price': 5,
},
'post': {
'accounts/borrow': 1,
'accounts/repayment': 1,
'orders': 1,
'batch_orders': 2,
'cancel_orders': 1,
'cancel_orders/{order_id}': 1,
'cancel_orders/{client_oid}': 1,
'cancel_batch_orders': 2,
'amend_order/{instrument_id}': 2.5,
'amend_batch_orders': 5,
'accounts/{instrument_id}/leverage': 1,
},
},
'system': {
'get': {
'status': 250,
},
},
'market': {
'get': {
'oracle': 250,
},
},
'futures': {
'get': [
'position',
'{instrument_id}/position',
'accounts',
'accounts/{underlying}',
'accounts/{underlying}/leverage',
'accounts/{underlying}/ledger',
'order_algo/{instrument_id}',
'orders/{instrument_id}',
'orders/{instrument_id}/{order_id}',
'orders/{instrument_id}/{client_oid}',
'fills',
'trade_fee',
'accounts/{instrument_id}/holds',
# public
'instruments',
'instruments/{instrument_id}/book',
'instruments/ticker',
'instruments/{instrument_id}/ticker',
'instruments/{instrument_id}/trades',
'instruments/{instrument_id}/candles',
'instruments/{instrument_id}/history/candles',
'instruments/{instrument_id}/index',
'rate',
'instruments/{instrument_id}/estimated_price',
'instruments/{instrument_id}/open_interest',
'instruments/{instrument_id}/price_limit',
'instruments/{instrument_id}/mark_price',
'instruments/{instrument_id}/liquidation',
],
'post': [
'accounts/{underlying}/leverage',
'order',
'amend_order/{instrument_id}',
'orders',
'cancel_order/{instrument_id}/{order_id}',
'cancel_order/{instrument_id}/{client_oid}',
'cancel_batch_orders/{instrument_id}',
'accounts/margin_mode',
'close_position',
'cancel_all',
'order_algo',
'cancel_algos',
],
},
'swap': {
'get': [
'position',
'{instrument_id}/position',
'accounts',
'{instrument_id}/accounts',
'accounts/{instrument_id}/settings',
'accounts/{instrument_id}/ledger',
'orders/{instrument_id}',
'orders/{instrument_id}/{order_id}',
'orders/{instrument_id}/{client_oid}',
'fills',
'accounts/{instrument_id}/holds',
'trade_fee',
'order_algo/{instrument_id}',
# public
'instruments',
'instruments/{instrument_id}/depth',
'instruments/ticker',
'instruments/{instrument_id}/ticker',
'instruments/{instrument_id}/trades',
'instruments/{instrument_id}/candles',
'instruments/{instrument_id}/history/candles',
'instruments/{instrument_id}/index',
'rate',
'instruments/{instrument_id}/open_interest',
'instruments/{instrument_id}/price_limit',
'instruments/{instrument_id}/liquidation',
'instruments/{instrument_id}/funding_time',
'instruments/{instrument_id}/mark_price',
'instruments/{instrument_id}/historical_funding_rate',
],
'post': [
'accounts/{instrument_id}/leverage',
'order',
'amend_order/{instrument_id}',
'orders',
'cancel_order/{instrument_id}/{order_id}',
'cancel_order/{instrument_id}/{client_oid}',
'cancel_batch_orders/{instrument_id}',
'order_algo',
'cancel_algos',
'close_position',
'cancel_all',
],
},
'option': {
'get': [
'accounts',
'position',
'{underlying}/position',
'accounts/{underlying}',
'orders/{underlying}',
'fills/{underlying}',
'accounts/{underlying}/ledger',
'trade_fee',
'orders/{underlying}/{order_id}',
'orders/{underlying}/{client_oid}',
# public
'underlying',
'instruments/{underlying}',
'instruments/{underlying}/summary',
'instruments/{underlying}/summary/{instrument_id}',
'instruments/{instrument_id}/book',
'instruments/{instrument_id}/trades',
'instruments/{instrument_id}/ticker',
'instruments/{instrument_id}/candles',
],
'post': [
'order',
'orders',
'cancel_order/{underlying}/{order_id}',
'cancel_order/{underlying}/{client_oid}',
'cancel_batch_orders/{underlying}',
'amend_order/{underlying}',
'amend_batch_orders/{underlying}',
],
},
'information': {
'get': [
'{currency}/long_short_ratio',
'{currency}/volume',
'{currency}/taker',
'{currency}/sentiment',
'{currency}/margin',
],
},
'index': {
'get': [
'{instrument_id}/constituents',
],
},
},
'fees': {
'trading': {
'taker': 0.002,
'maker': 0.001,
},
'spot': {
'taker': 0.0015,
'maker': 0.0010,
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
'password': True,
},
'exceptions': {
# http error codes
# 400 Bad Request — Invalid request format
# 401 Unauthorized — Invalid API Key
# 403 Forbidden — You do not have access to the requested resource
# 404 Not Found
# 429 Client Error: Too Many Requests for url
# 500 Internal Server Error — We had a problem with our server
'exact': {
'1': ExchangeError, # {"code": 1, "message": "System error"}
# undocumented
'failure to get a peer from the ring-balancer': ExchangeNotAvailable, # {"message": "failure to get a peer from the ring-balancer"}
'Server is busy, please try again.': ExchangeNotAvailable, # {"message": "Server is busy, please try again."}
'An unexpected error occurred': ExchangeError, # {"message": "An unexpected error occurred"}
'System error': ExchangeError, # {"error_message":"System error","message":"System error"}
'4010': PermissionDenied, # {"code": 4010, "message": "For the security of your funds, withdrawals are not permitted within 24 hours after changing fund password / mobile number / Google Authenticator settings "}
# common
# '0': ExchangeError, # 200 successful, when the order placement / cancellation / operation is successful
'4001': ExchangeError, # no data received in 30s
'4002': ExchangeError, # Buffer full. cannot write data
# --------------------------------------------------------
'30001': AuthenticationError, # {"code": 30001, "message": 'request header "OK_ACCESS_KEY" cannot be blank'}
'30002': AuthenticationError, # {"code": 30002, "message": 'request header "OK_ACCESS_SIGN" cannot be blank'}
'30003': AuthenticationError, # {"code": 30003, "message": 'request header "OK_ACCESS_TIMESTAMP" cannot be blank'}
'30004': AuthenticationError, # {"code": 30004, "message": 'request header "OK_ACCESS_PASSPHRASE" cannot be blank'}
'30005': InvalidNonce, # {"code": 30005, "message": "invalid OK_ACCESS_TIMESTAMP"}
'30006': AuthenticationError, # {"code": 30006, "message": "invalid OK_ACCESS_KEY"}
'30007': BadRequest, # {"code": 30007, "message": 'invalid Content_Type, please use "application/json" format'}
'30008': RequestTimeout, # {"code": 30008, "message": "timestamp request expired"}
'30009': ExchangeError, # {"code": 30009, "message": "system error"}
'30010': AuthenticationError, # {"code": 30010, "message": "API validation failed"}
'30011': PermissionDenied, # {"code": 30011, "message": "invalid IP"}
'30012': AuthenticationError, # {"code": 30012, "message": "invalid authorization"}
'30013': AuthenticationError, # {"code": 30013, "message": "invalid sign"}
'30014': DDoSProtection, # {"code": 30014, "message": "request too frequent"}
'30015': AuthenticationError, # {"code": 30015, "message": 'request header "OK_ACCESS_PASSPHRASE" incorrect'}
'30016': ExchangeError, # {"code": 30016, "message": "you are using v1 apiKey, please use v1 endpoint. If you would like to use v3 endpoint, please subscribe to v3 apiKey"}
'30017': ExchangeError, # {"code": 30017, "message": "apikey's broker id does not match"}
'30018': ExchangeError, # {"code": 30018, "message": "apikey's domain does not match"}
'30019': ExchangeNotAvailable, # {"code": 30019, "message": "Api is offline or unavailable"}
'30020': BadRequest, # {"code": 30020, "message": "body cannot be blank"}
'30021': BadRequest, # {"code": 30021, "message": "Json data format error"}, {"code": 30021, "message": "json data format error"}
'30022': PermissionDenied, # {"code": 30022, "message": "Api has been frozen"}
'30023': BadRequest, # {"code": 30023, "message": "{0} parameter cannot be blank"}
'30024': BadSymbol, # {"code":30024,"message":"\"instrument_id\" is an invalid parameter"}
'30025': BadRequest, # {"code": 30025, "message": "{0} parameter category error"}
'30026': DDoSProtection, # {"code": 30026, "message": "requested too frequent"}
'30027': AuthenticationError, # {"code": 30027, "message": "login failure"}
'30028': PermissionDenied, # {"code": 30028, "message": "unauthorized execution"}
'30029': AccountSuspended, # {"code": 30029, "message": "account suspended"}
'30030': ExchangeNotAvailable, # {"code": 30030, "message": "endpoint request failed. Please try again"}
'30031': BadRequest, # {"code": 30031, "message": "token does not exist"}
'30032': BadSymbol, # {"code": 30032, "message": "pair does not exist"}
'30033': BadRequest, # {"code": 30033, "message": "exchange domain does not exist"}
'30034': ExchangeError, # {"code": 30034, "message": "exchange ID does not exist"}
'30035': ExchangeError, # {"code": 30035, "message": "trading is not supported in this website"}
'30036': ExchangeError, # {"code": 30036, "message": "no relevant data"}
'30037': ExchangeNotAvailable, # {"code": 30037, "message": "endpoint is offline or unavailable"}
# '30038': AuthenticationError, # {"code": 30038, "message": "user does not exist"}
'30038': OnMaintenance, # {"client_oid":"","code":"30038","error_code":"30038","error_message":"Matching engine is being upgraded. Please try in about 1 minute.","message":"Matching engine is being upgraded. Please try in about 1 minute.","order_id":"-1","result":false}
'30044': RequestTimeout, # {"code":30044, "message":"Endpoint request timeout"}
# futures
'32001': AccountSuspended, # {"code": 32001, "message": "futures account suspended"}
'32002': PermissionDenied, # {"code": 32002, "message": "futures account does not exist"}
'32003': CancelPending, # {"code": 32003, "message": "canceling, please wait"}
'32004': ExchangeError, # {"code": 32004, "message": "you have no unfilled orders"}
'32005': InvalidOrder, # {"code": 32005, "message": "max order quantity"}
'32006': InvalidOrder, # {"code": 32006, "message": "the order price or trigger price exceeds USD 1 million"}
'32007': InvalidOrder, # {"code": 32007, "message": "leverage level must be the same for orders on the same side of the contract"}
'32008': InvalidOrder, # {"code": 32008, "message": "Max. positions to open(cross margin)"}
'32009': InvalidOrder, # {"code": 32009, "message": "Max. positions to open(fixed margin)"}
'32010': ExchangeError, # {"code": 32010, "message": "leverage cannot be changed with open positions"}
'32011': ExchangeError, # {"code": 32011, "message": "futures status error"}
'32012': ExchangeError, # {"code": 32012, "message": "futures order update error"}
'32013': ExchangeError, # {"code": 32013, "message": "token type is blank"}
'32014': ExchangeError, # {"code": 32014, "message": "your number of contracts closing is larger than the number of contracts available"}
'32015': ExchangeError, # {"code": 32015, "message": "margin ratio is lower than 100% before opening positions"}
'32016': ExchangeError, # {"code": 32016, "message": "margin ratio is lower than 100% after opening position"}
'32017': ExchangeError, # {"code": 32017, "message": "no BBO"}
'32018': ExchangeError, # {"code": 32018, "message": "the order quantity is less than 1, please try again"}
'32019': ExchangeError, # {"code": 32019, "message": "the order price deviates from the price of the previous minute by more than 3%"}
'32020': ExchangeError, # {"code": 32020, "message": "the price is not in the range of the price limit"}
'32021': ExchangeError, # {"code": 32021, "message": "leverage error"}
'32022': ExchangeError, # {"code": 32022, "message": "this function is not supported in your country or region according to the regulations"}
'32023': ExchangeError, # {"code": 32023, "message": "this account has outstanding loan"}
'32024': ExchangeError, # {"code": 32024, "message": "order cannot be placed during delivery"}
'32025': ExchangeError, # {"code": 32025, "message": "order cannot be placed during settlement"}
'32026': ExchangeError, # {"code": 32026, "message": "your account is restricted from opening positions"}
'32027': ExchangeError, # {"code": 32027, "message": "cancelled over 20 orders"}
'32028': ExchangeError, # {"code": 32028, "message": "account is suspended and liquidated"}
'32029': ExchangeError, # {"code": 32029, "message": "order info does not exist"}
'32030': InvalidOrder, # The order cannot be cancelled
'32031': ArgumentsRequired, # client_oid or order_id is required.
'32038': AuthenticationError, # User does not exist
'32040': ExchangeError, # User have open contract orders or position
'32044': ExchangeError, # {"code": 32044, "message": "The margin ratio after submitting this order is lower than the minimum requirement({0}) for your tier."}
'32045': ExchangeError, # str of commission over 1 million
'32046': ExchangeError, # Each user can hold up to 10 trade plans at the same time
'32047': ExchangeError, # system error
'32048': InvalidOrder, # Order strategy track range error
'32049': ExchangeError, # Each user can hold up to 10 track plans at the same time
'32050': InvalidOrder, # Order strategy rang error
'32051': InvalidOrder, # Order strategy ice depth error
'32052': ExchangeError, # str of commission over 100 thousand
'32053': ExchangeError, # Each user can hold up to 6 ice plans at the same time
'32057': ExchangeError, # The order price is zero. Market-close-all function cannot be executed
'32054': ExchangeError, # Trade not allow
'32055': InvalidOrder, # cancel order error
'32056': ExchangeError, # iceberg per order average should between {0}-{1} contracts
'32058': ExchangeError, # Each user can hold up to 6 initiative plans at the same time
'32059': InvalidOrder, # Total amount should exceed per order amount
'32060': InvalidOrder, # Order strategy type error
'32061': InvalidOrder, # Order strategy initiative limit error
'32062': InvalidOrder, # Order strategy initiative range error
'32063': InvalidOrder, # Order strategy initiative rate error
'32064': ExchangeError, # Time interval of orders should be set between 5-120s
'32065': ExchangeError, # Close amount exceeds the limit of Market-close-all(999 for BTC, and 9999 for the rest tokens)
'32066': ExchangeError, # You have open orders. Please cancel all open orders before changing your leverage level.
'32067': ExchangeError, # Account equity < required margin in this setting. Please adjust your leverage level again.
'32068': ExchangeError, # The margin for this position will fall short of the required margin in this setting. Please adjust your leverage level or increase your margin to proceed.
'32069': ExchangeError, # Target leverage level too low. Your account balance is insufficient to cover the margin required. Please adjust the leverage level again.
'32070': ExchangeError, # Please check open position or unfilled order
'32071': ExchangeError, # Your current liquidation mode does not support this action.
'32072': ExchangeError, # The highest available margin for your order’s tier is {0}. Please edit your margin and place a new order.
'32073': ExchangeError, # The action does not apply to the token
'32074': ExchangeError, # The number of contracts of your position, open orders, and the current order has exceeded the maximum order limit of this asset.
'32075': ExchangeError, # Account risk rate breach
'32076': ExchangeError, # Liquidation of the holding position(s) at market price will require cancellation of all pending close orders of the contracts.
'32077': ExchangeError, # Your margin for this asset in futures account is insufficient and the position has been taken over for liquidation.(You will not be able to place orders, close positions, transfer funds, or add margin during this period of time. Your account will be restored after the liquidation is complete.)
'32078': ExchangeError, # Please cancel all open orders before switching the liquidation mode(Please cancel all open orders before switching the liquidation mode)
'32079': ExchangeError, # Your open positions are at high risk.(Please add margin or reduce positions before switching the mode)
'32080': ExchangeError, # Funds cannot be transferred out within 30 minutes after futures settlement
'32083': ExchangeError, # The number of contracts should be a positive multiple of %%. Please place your order again
# token and margin trading
'33001': PermissionDenied, # {"code": 33001, "message": "margin account for this pair is not enabled yet"}
'33002': AccountSuspended, # {"code": 33002, "message": "margin account for this pair is suspended"}
'33003': InsufficientFunds, # {"code": 33003, "message": "no loan balance"}
'33004': ExchangeError, # {"code": 33004, "message": "loan amount cannot be smaller than the minimum limit"}
'33005': ExchangeError, # {"code": 33005, "message": "repayment amount must exceed 0"}
'33006': ExchangeError, # {"code": 33006, "message": "loan order not found"}
'33007': ExchangeError, # {"code": 33007, "message": "status not found"}
'33008': InsufficientFunds, # {"code": 33008, "message": "loan amount cannot exceed the maximum limit"}
'33009': ExchangeError, # {"code": 33009, "message": "user ID is blank"}
'33010': ExchangeError, # {"code": 33010, "message": "you cannot cancel an order during session 2 of call auction"}
'33011': ExchangeError, # {"code": 33011, "message": "no new market data"}
'33012': ExchangeError, # {"code": 33012, "message": "order cancellation failed"}
'33013': InvalidOrder, # {"code": 33013, "message": "order placement failed"}
'33014': OrderNotFound, # {"code": 33014, "message": "order does not exist"}
'33015': InvalidOrder, # {"code": 33015, "message": "exceeded maximum limit"}
'33016': ExchangeError, # {"code": 33016, "message": "margin trading is not open for this token"}
'33017': InsufficientFunds, # {"code": 33017, "message": "insufficient balance"}
'33018': ExchangeError, # {"code": 33018, "message": "this parameter must be smaller than 1"}
'33020': ExchangeError, # {"code": 33020, "message": "request not supported"}
'33021': BadRequest, # {"code": 33021, "message": "token and the pair do not match"}
'33022': InvalidOrder, # {"code": 33022, "message": "pair and the order do not match"}
'33023': ExchangeError, # {"code": 33023, "message": "you can only place market orders during call auction"}
'33024': InvalidOrder, # {"code": 33024, "message": "trading amount too small"}
'33025': InvalidOrder, # {"code": 33025, "message": "base token amount is blank"}
'33026': ExchangeError, # {"code": 33026, "message": "transaction completed"}
'33027': InvalidOrder, # {"code": 33027, "message": "cancelled order or order cancelling"}
'33028': InvalidOrder, # {"code": 33028, "message": "the decimal places of the trading price exceeded the limit"}
'33029': InvalidOrder, # {"code": 33029, "message": "the decimal places of the trading size exceeded the limit"}
'33034': ExchangeError, # {"code": 33034, "message": "You can only place limit order after Call Auction has started"}
'33035': ExchangeError, # This type of order cannot be canceled(This type of order cannot be canceled)
'33036': ExchangeError, # Exceeding the limit of entrust order
'33037': ExchangeError, # The buy order price should be lower than 130% of the trigger price
'33038': ExchangeError, # The sell order price should be higher than 70% of the trigger price
'33039': ExchangeError, # The limit of callback rate is 0 < x <= 5%
'33040': ExchangeError, # The trigger price of a buy order should be lower than the latest transaction price
'33041': ExchangeError, # The trigger price of a sell order should be higher than the latest transaction price
'33042': ExchangeError, # The limit of price variance is 0 < x <= 1%
'33043': ExchangeError, # The total amount must be larger than 0
'33044': ExchangeError, # The average amount should be 1/1000 * total amount <= x <= total amount
'33045': ExchangeError, # The price should not be 0, including trigger price, order price, and price limit
'33046': ExchangeError, # Price variance should be 0 < x <= 1%
'33047': ExchangeError, # Sweep ratio should be 0 < x <= 100%
'33048': ExchangeError, # Per order limit: Total amount/1000 < x <= Total amount
'33049': ExchangeError, # Total amount should be X > 0
'33050': ExchangeError, # Time interval should be 5 <= x <= 120s
'33051': ExchangeError, # number of orders to cancel must not exceed the limit: plan and track entrust no more than 10, ice and time entrust no more than 6
'33059': BadRequest, # {"code": 33059, "message": "client_oid or order_id is required"}
'33060': BadRequest, # {"code": 33060, "message": "Only fill in either parameter client_oid or order_id"}
'33061': ExchangeError, # Value of a single market price order cannot exceed 100,000 USD
'33062': ExchangeError, # The leverage ratio is too high. The borrowed position has exceeded the maximum position of this leverage ratio. Please readjust the leverage ratio
'33063': ExchangeError, # Leverage multiple is too low, there is insufficient margin in the account, please readjust the leverage ratio
'33064': ExchangeError, # The setting of the leverage ratio cannot be less than 2, please readjust the leverage ratio
'33065': ExchangeError, # Leverage ratio exceeds maximum leverage ratio, please readjust leverage ratio
'33085': InvalidOrder, # The value of the position and buying order has reached the position limit, and no further buying is allowed.
# account
'21009': ExchangeError, # Funds cannot be transferred out within 30 minutes after swap settlement(Funds cannot be transferred out within 30 minutes after swap settlement)
'34001': PermissionDenied, # {"code": 34001, "message": "withdrawal suspended"}
'34002': InvalidAddress, # {"code": 34002, "message": "please add a withdrawal address"}
'34003': ExchangeError, # {"code": 34003, "message": "sorry, this token cannot be withdrawn to xx at the moment"}
'34004': ExchangeError, # {"code": 34004, "message": "withdrawal fee is smaller than minimum limit"}
'34005': ExchangeError, # {"code": 34005, "message": "withdrawal fee exceeds the maximum limit"}
'34006': ExchangeError, # {"code": 34006, "message": "withdrawal amount is lower than the minimum limit"}
'34007': ExchangeError, # {"code": 34007, "message": "withdrawal amount exceeds the maximum limit"}
'34008': InsufficientFunds, # {"code": 34008, "message": "insufficient balance"}
'34009': ExchangeError, # {"code": 34009, "message": "your withdrawal amount exceeds the daily limit"}
'34010': ExchangeError, # {"code": 34010, "message": "transfer amount must be larger than 0"}
'34011': ExchangeError, # {"code": 34011, "message": "conditions not met"}
'34012': ExchangeError, # {"code": 34012, "message": "the minimum withdrawal amount for NEO is 1, and the amount must be an integer"}
'34013': ExchangeError, # {"code": 34013, "message": "please transfer"}
'34014': ExchangeError, # {"code": 34014, "message": "transfer limited"}
'34015': ExchangeError, # {"code": 34015, "message": "subaccount does not exist"}
'34016': PermissionDenied, # {"code": 34016, "message": "transfer suspended"}
'34017': AccountSuspended, # {"code": 34017, "message": "account suspended"}
'34018': AuthenticationError, # {"code": 34018, "message": "incorrect trades password"}
'34019': PermissionDenied, # {"code": 34019, "message": "please bind your email before withdrawal"}
'34020': PermissionDenied, # {"code": 34020, "message": "please bind your funds password before withdrawal"}
'34021': InvalidAddress, # {"code": 34021, "message": "Not verified address"}
'34022': ExchangeError, # {"code": 34022, "message": "Withdrawals are not available for sub accounts"}
'34023': PermissionDenied, # {"code": 34023, "message": "Please enable futures trading before transferring your funds"}
'34026': RateLimitExceeded, # transfer too frequently(transfer too frequently)
'34036': ExchangeError, # Parameter is incorrect, please refer to API documentation
'34037': ExchangeError, # Get the sub-account balance interface, account type is not supported
'34038': ExchangeError, # Since your C2C transaction is unusual, you are restricted from fund transfer. Please contact our customer support to cancel the restriction
'34039': ExchangeError, # You are now restricted from transferring out your funds due to abnormal trades on C2C Market. Please transfer your fund on our website or app instead to verify your identity
# swap
'35001': ExchangeError, # {"code": 35001, "message": "Contract does not exist"}
'35002': ExchangeError, # {"code": 35002, "message": "Contract settling"}
'35003': ExchangeError, # {"code": 35003, "message": "Contract paused"}
'35004': ExchangeError, # {"code": 35004, "message": "Contract pending settlement"}
'35005': AuthenticationError, # {"code": 35005, "message": "User does not exist"}
'35008': InvalidOrder, # {"code": 35008, "message": "Risk ratio too high"}
'35010': InvalidOrder, # {"code": 35010, "message": "Position closing too large"}
'35012': InvalidOrder, # {"code": 35012, "message": "Incorrect order size"}
'35014': InvalidOrder, # {"code": 35014, "message": "Order price is not within limit"}
'35015': InvalidOrder, # {"code": 35015, "message": "Invalid leverage level"}
'35017': ExchangeError, # {"code": 35017, "message": "Open orders exist"}
'35019': InvalidOrder, # {"code": 35019, "message": "Order size too large"}
'35020': InvalidOrder, # {"code": 35020, "message": "Order price too high"}
'35021': InvalidOrder, # {"code": 35021, "message": "Order size exceeded current tier limit"}
'35022': BadRequest, # {"code": 35022, "message": "Contract status error"}
'35024': BadRequest, # {"code": 35024, "message": "Contract not initialized"}
'35025': InsufficientFunds, # {"code": 35025, "message": "No account balance"}
'35026': BadRequest, # {"code": 35026, "message": "Contract settings not initialized"}
'35029': OrderNotFound, # {"code": 35029, "message": "Order does not exist"}
'35030': InvalidOrder, # {"code": 35030, "message": "Order size too large"}
'35031': InvalidOrder, # {"code": 35031, "message": "Cancel order size too large"}
'35032': ExchangeError, # {"code": 35032, "message": "Invalid user status"}
'35037': ExchangeError, # No last traded price in cache
'35039': InsufficientFunds, # {"code": 35039, "message": "Open order quantity exceeds limit"}
'35040': InvalidOrder, # {"error_message":"Invalid order type","result":"true","error_code":"35040","order_id":"-1"}
'35044': ExchangeError, # {"code": 35044, "message": "Invalid order status"}
'35046': InsufficientFunds, # {"code": 35046, "message": "Negative account balance"}
'35047': InsufficientFunds, # {"code": 35047, "message": "Insufficient account balance"}
'35048': ExchangeError, # {"code": 35048, "message": "User contract is frozen and liquidating"}
'35049': InvalidOrder, # {"code": 35049, "message": "Invalid order type"}
'35050': InvalidOrder, # {"code": 35050, "message": "Position settings are blank"}
'35052': InsufficientFunds, # {"code": 35052, "message": "Insufficient cross margin"}
'35053': ExchangeError, # {"code": 35053, "message": "Account risk too high"}
'35055': InsufficientFunds, # {"code": 35055, "message": "Insufficient account balance"}
'35057': ExchangeError, # {"code": 35057, "message": "No last traded price"}
'35058': ExchangeError, # {"code": 35058, "message": "No limit"}
'35059': BadRequest, # {"code": 35059, "message": "client_oid or order_id is required"}
'35060': BadRequest, # {"code": 35060, "message": "Only fill in either parameter client_oid or order_id"}
'35061': BadRequest, # {"code": 35061, "message": "Invalid instrument_id"}
'35062': InvalidOrder, # {"code": 35062, "message": "Invalid match_price"}
'35063': InvalidOrder, # {"code": 35063, "message": "Invalid order_size"}
'35064': InvalidOrder, # {"code": 35064, "message": "Invalid client_oid"}
'35066': InvalidOrder, # Order interval error
'35067': InvalidOrder, # Time-weighted order ratio error
'35068': InvalidOrder, # Time-weighted order range error
'35069': InvalidOrder, # Time-weighted single transaction limit error
'35070': InvalidOrder, # Algo order type error
'35071': InvalidOrder, # Order total must be larger than single order limit
'35072': InvalidOrder, # Maximum 6 unfulfilled time-weighted orders can be held at the same time
'35073': InvalidOrder, # Order price is 0. Market-close-all not available
'35074': InvalidOrder, # Iceberg order single transaction average error
'35075': InvalidOrder, # Failed to cancel order
'35076': InvalidOrder, # LTC 20x leverage. Not allowed to open position
'35077': InvalidOrder, # Maximum 6 unfulfilled iceberg orders can be held at the same time
'35078': InvalidOrder, # Order amount exceeded 100,000
'35079': InvalidOrder, # Iceberg order price variance error
'35080': InvalidOrder, # Callback rate error
'35081': InvalidOrder, # Maximum 10 unfulfilled trail orders can be held at the same time
'35082': InvalidOrder, # Trail order callback rate error
'35083': InvalidOrder, # Each user can only hold a maximum of 10 unfulfilled stop-limit orders at the same time
'35084': InvalidOrder, # Order amount exceeded 1 million
'35085': InvalidOrder, # Order amount is not in the correct range
'35086': InvalidOrder, # Price exceeds 100 thousand
'35087': InvalidOrder, # Price exceeds 100 thousand
'35088': InvalidOrder, # Average amount error
'35089': InvalidOrder, # Price exceeds 100 thousand
'35090': ExchangeError, # No stop-limit orders available for cancelation
'35091': ExchangeError, # No trail orders available for cancellation
'35092': ExchangeError, # No iceberg orders available for cancellation
'35093': ExchangeError, # No trail orders available for cancellation
'35094': ExchangeError, # Stop-limit order last traded price error
'35095': BadRequest, # Instrument_id error
'35096': ExchangeError, # Algo order status error
'35097': ExchangeError, # Order status and order ID cannot exist at the same time
'35098': ExchangeError, # An order status or order ID must exist
'35099': ExchangeError, # Algo order ID error
'35102': RateLimitExceeded, # {"error_message":"The operation that close all at market price is too frequent","result":"true","error_code":"35102","order_id":"-1"}
# option
'36001': BadRequest, # Invalid underlying index.
'36002': BadRequest, # Instrument does not exist.
'36005': ExchangeError, # Instrument status is invalid.
'36101': AuthenticationError, # Account does not exist.
'36102': PermissionDenied, # Account status is invalid.
'36103': PermissionDenied, # Account is suspended due to ongoing liquidation.
'36104': PermissionDenied, # Account is not enabled for options trading.
'36105': PermissionDenied, # Please enable the account for option contract.
'36106': PermissionDenied, # Funds cannot be transferred in or out, is suspended.
'36107': PermissionDenied, # Funds cannot be transferred out within 30 minutes after option exercising or settlement.
'36108': InsufficientFunds, # Funds cannot be transferred in or out, of the account is less than zero.
'36109': PermissionDenied, # Funds cannot be transferred in or out during option exercising or settlement.
'36201': PermissionDenied, # New order function is blocked.
'36202': PermissionDenied, # Account does not have permission to short option.
'36203': InvalidOrder, # Invalid format for client_oid.
'36204': ExchangeError, # Invalid format for request_id.
'36205': BadRequest, # Instrument id does not match underlying index.
'36206': BadRequest, # Order_id and client_oid can not be used at the same time.
'36207': InvalidOrder, # Either order price or fartouch price must be present.
'36208': InvalidOrder, # Either order price or size must be present.
'36209': InvalidOrder, # Either order_id or client_oid must be present.
'36210': InvalidOrder, # Either order_ids or client_oids must be present.
'36211': InvalidOrder, # Exceeding max batch size for order submission.
'36212': InvalidOrder, # Exceeding max batch size for order cancellation.
'36213': InvalidOrder, # Exceeding max batch size for order amendment.
'36214': ExchangeError, # Instrument does not have valid bid/ask quote.
'36216': OrderNotFound, # Order does not exist.
'36217': InvalidOrder, # Order submission failed.
'36218': InvalidOrder, # Order cancellation failed.
'36219': InvalidOrder, # Order amendment failed.
'36220': InvalidOrder, # Order is pending cancel.
'36221': InvalidOrder, # Order qty is not valid multiple of lot size.
'36222': InvalidOrder, # Order price is breaching highest buy limit.
'36223': InvalidOrder, # Order price is breaching lowest sell limit.
'36224': InvalidOrder, # Exceeding max order size.
'36225': InvalidOrder, # Exceeding max open order count for instrument.
'36226': InvalidOrder, # Exceeding max open order count for underlying.
'36227': InvalidOrder, # Exceeding max open size across all orders for underlying
'36228': InvalidOrder, # Exceeding max available qty for instrument.
'36229': InvalidOrder, # Exceeding max available qty for underlying.
'36230': InvalidOrder, # Exceeding max position limit for underlying.
},
'broad': {
},
},
'precisionMode': TICK_SIZE,
'options': {
'fetchOHLCV': {
'type': 'Candles', # Candles or HistoryCandles
},
'createMarketBuyOrderRequiresPrice': True,
'fetchMarkets': ['spot'],
'defaultType': 'spot', # 'account', 'spot', 'futures', 'swap', 'option'
'accountsByType': {
'spot': '1',
'funding': '6',
'main': '6',
},
'accountsById': {
'1': 'spot',
'6': 'funding',
},
'auth': {
'time': 'public',
'currencies': 'private',
'instruments': 'public',
'rate': 'public',
'{instrument_id}/constituents': 'public',
},
'warnOnFetchCurrenciesWithoutAuthorization': False,
},
'commonCurrencies': {
# OKEX refers to ERC20 version of Aeternity(AEToken)
'AE': 'AET', # https://github.com/ccxt/ccxt/issues/4981
'BOX': 'DefiBox',
'HOT': 'Hydro Protocol',
'HSR': 'HC',
'MAG': 'Maggie',
'SBTC': 'Super Bitcoin',
'TRADE': 'Unitrade',
'YOYO': 'YOYOW',
'WIN': 'WinToken', # https://github.com/ccxt/ccxt/issues/5701
},
})
def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict [params]: extra parameters specific to the okcoin api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = self.generalGetTime(params)
#
# {
# "iso": "2015-01-07T23:47:25.201Z",
# "epoch": 1420674445.201
# }
#
return self.parse8601(self.safe_string(response, 'iso'))
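# Worked example based on the sample response above: parse8601() turns the ISO
# timestamp into epoch milliseconds, matching the "epoch" field (in seconds):
# parse8601("2015-01-07T23:47:25.201Z") -> 1420674445201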
def fetch_markets(self, params={}):
"""
retrieves data on all markets for okcoin
:param dict [params]: extra parameters specific to the exchange api endpoint
:returns dict[]: an array of objects representing market data
"""
types = self.safe_value(self.options, 'fetchMarkets')
result = []
for i in range(0, len(types)):
markets = self.fetch_markets_by_type(types[i], params)
result = self.array_concat(result, markets)
return result
def parse_markets(self, markets):
result = []
for i in range(0, len(markets)):
result.append(self.parse_market(markets[i]))
return result
def parse_market(self, market):
#
# spot markets
#
# {
# base_currency: "EOS",
# instrument_id: "EOS-OKB",
# min_size: "0.01",
# quote_currency: "OKB",
# size_increment: "0.000001",
# tick_size: "0.0001"
# }
#
# futures markets
#
# {
# instrument_id: "XRP-USD-200320",
# underlying_index: "XRP",
# quote_currency: "USD",
# tick_size: "0.0001",
# contract_val: "10",
# listing: "2020-03-06",
# delivery: "2020-03-20",
# trade_increment: "1",
# alias: "self_week",
# underlying: "XRP-USD",
# base_currency: "XRP",
# settlement_currency: "XRP",
# is_inverse: "true",
# contract_val_currency: "USD",
# }
#
# swap markets
#
# {
# instrument_id: "BSV-USD-SWAP",
# underlying_index: "BSV",
# quote_currency: "USD",
# coin: "BSV",
# contract_val: "10",
# listing: "2018-12-21T07:53:47.000Z",
# delivery: "2020-03-14T08:00:00.000Z",
# size_increment: "1",
# tick_size: "0.01",
# base_currency: "BSV",
# underlying: "BSV-USD",
# settlement_currency: "BSV",
# is_inverse: "true",
# contract_val_currency: "USD"
# }
#
# options markets
#
# {
# instrument_id: 'BTC-USD-200327-4000-C',
# underlying: 'BTC-USD',
# settlement_currency: 'BTC',
# contract_val: '0.1000',
# option_type: 'C',
# strike: '4000',
# tick_size: '0.0005',
# lot_size: '1.0000',
# listing: '2019-12-25T08:30:36.302Z',
# delivery: '2020-03-27T08:00:00.000Z',
# state: '2',
# trading_start_time: '2019-12-25T08:30:36.302Z',
# timestamp: '2020-03-13T08:05:09.456Z',
# }
#
id = self.safe_string(market, 'instrument_id')
optionType = self.safe_value(market, 'option_type')
contractVal = self.safe_number(market, 'contract_val')
contract = contractVal is not None
futuresAlias = self.safe_string(market, 'alias')
marketType = 'spot'
spot = not contract
option = (optionType is not None)
future = not option and (futuresAlias is not None)
swap = contract and not future and not option
baseId = self.safe_string(market, 'base_currency')
quoteId = self.safe_string(market, 'quote_currency')
settleId = self.safe_string(market, 'settlement_currency')
if option:
underlying = self.safe_string(market, 'underlying')
parts = underlying.split('-')
baseId = self.safe_string(parts, 0)
quoteId = self.safe_string(parts, 1)
marketType = 'option'
elif future:
baseId = self.safe_string(market, 'underlying_index')
marketType = 'futures'
elif swap:
marketType = 'swap'
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
settle = self.safe_currency_code(settleId)
symbol = base + '/' + quote
expiryDatetime = self.safe_string(market, 'delivery')
expiry = None
strike = self.safe_value(market, 'strike')
if contract:
symbol = symbol + ':' + settle
if future or option:
if future:
expiryDatetime += 'T00:00:00Z'
expiry = self.parse8601(expiryDatetime)
symbol = symbol + '-' + self.yymmdd(expiry)
if option:
symbol = symbol + ':' + strike + ':' + optionType
optionType = 'call' if (optionType == 'C') else 'put'
lotSize = self.safe_number_2(market, 'lot_size', 'trade_increment')
minPrice = self.safe_string(market, 'tick_size')
minAmountString = self.safe_string_2(market, 'min_size', 'base_min_size')
minAmount = self.parse_number(minAmountString)
minCost = None
if (minAmount is not None) and (minPrice is not None):
minCost = self.parse_number(Precise.string_mul(minPrice, minAmountString))
fees = self.safe_value_2(self.fees, marketType, 'trading', {})
maxLeverageString = self.safe_string(market, 'max_leverage', '1')
maxLeverage = self.parse_number(Precise.string_max(maxLeverageString, '1'))
precisionPrice = self.parse_number(minPrice)
return self.extend(fees, {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'settle': settle,
'baseId': baseId,
'quoteId': quoteId,
'settleId': settleId,
'type': marketType,
'spot': spot,
'margin': False,
'swap': swap,
'future': future,
'futures': future, # deprecated
'option': option,
'active': True,
'contract': contract,
'linear': (quote == settle) if contract else None,
'inverse': (base == settle) if contract else None,
'contractSize': contractVal,
'expiry': expiry,
'expiryDatetime': self.iso8601(expiry),
'strike': strike,
'optionType': optionType,
'precision': {
'amount': self.safe_number(market, 'size_increment', lotSize),
'price': precisionPrice,
},
'limits': {
'leverage': {
'min': self.parse_number('1'),
'max': self.parse_number(maxLeverage),
},
'amount': {
'min': minAmount,
'max': None,
},
'price': {
'min': precisionPrice,
'max': None,
},
'cost': {
'min': minCost,
'max': None,
},
},
'info': market,
})
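# Worked example of the symbol construction above, using the sample futures
# market from the comments (base="XRP", quote="USD", settle="XRP",
# delivery="2020-03-20"): contracts get the settle currency appended, and
# futures additionally get the yymmdd expiry:
# "XRP" + "/" + "USD"  -> "XRP/USD"
# ... + ":" + "XRP"    -> "XRP/USD:XRP"
# ... + "-" + "200320" -> "XRP/USD:XRP-200320"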
def fetch_markets_by_type(self, type, params={}):
if type == 'option':
underlying = self.optionGetUnderlying(params)
result = []
for i in range(0, len(underlying)):
response = self.optionGetInstrumentsUnderlying({
'underlying': underlying[i],
})
#
# options markets
#
# [
# {
# instrument_id: 'BTC-USD-200327-4000-C',
# underlying: 'BTC-USD',
# settlement_currency: 'BTC',
# contract_val: '0.1000',
# option_type: 'C',
# strike: '4000',
# tick_size: '0.0005',
# lot_size: '1.0000',
# listing: '2019-12-25T08:30:36.302Z',
# delivery: '2020-03-27T08:00:00.000Z',
# state: '2',
# trading_start_time: '2019-12-25T08:30:36.302Z',
# timestamp: '2020-03-13T08:05:09.456Z',
# },
# ]
#
result = self.array_concat(result, response)
return self.parse_markets(result)
elif (type == 'spot') or (type == 'futures') or (type == 'swap'):
method = type + 'GetInstruments'
response = getattr(self, method)(params)
#
# spot markets
#
# [
# {
# base_currency: "EOS",
# instrument_id: "EOS-OKB",
# min_size: "0.01",
# quote_currency: "OKB",
# size_increment: "0.000001",
# tick_size: "0.0001"
# }
# ]
#
# futures markets
#
# [
# {
# instrument_id: "XRP-USD-200320",
# underlying_index: "XRP",
# quote_currency: "USD",
# tick_size: "0.0001",
# contract_val: "10",
# listing: "2020-03-06",
# delivery: "2020-03-20",
# trade_increment: "1",
# alias: "self_week",
# underlying: "XRP-USD",
# base_currency: "XRP",
# settlement_currency: "XRP",
# is_inverse: "true",
# contract_val_currency: "USD",
# }
# ]
#
# swap markets
#
# [
# {
# instrument_id: "BSV-USD-SWAP",
# underlying_index: "BSV",
# quote_currency: "USD",
# coin: "BSV",
# contract_val: "10",
# listing: "2018-12-21T07:53:47.000Z",
# delivery: "2020-03-14T08:00:00.000Z",
# size_increment: "1",
# tick_size: "0.01",
# base_currency: "BSV",
# underlying: "BSV-USD",
# settlement_currency: "BSV",
# is_inverse: "true",
# contract_val_currency: "USD"
# }
# ]
#
return self.parse_markets(response)
else:
raise NotSupported(self.id + ' fetchMarketsByType() does not support market type ' + type)
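# Note on the implicit-API dispatch above: the method name is composed from the
# market type, so type="spot" yields "spotGetInstruments", which getattr()
# resolves to the auto-generated wrapper for the spot "instruments" endpoint
# listed in the api tables in describe().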
def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict [params]: extra parameters specific to the okcoin api endpoint
:returns dict: an associative dictionary of currencies
"""
# although their docs say these endpoints are public:
# https://www.okex.com/api/account/v3/withdrawal/fee
# https://www.okex.com/api/account/v3/currencies
# it will still reply with {"code":30001, "message": "OK-ACCESS-KEY header is required"}
# if you attempt to access it without authentication
if not self.check_required_credentials(False):
if self.options['warnOnFetchCurrenciesWithoutAuthorization']:
raise ExchangeError(self.id + ' fetchCurrencies() is a private API endpoint that requires authentication with API keys. Set the API keys on the exchange instance or exchange.options["warnOnFetchCurrenciesWithoutAuthorization"] = False to suppress this warning message.')
return None
else:
response = self.accountGetCurrencies(params)
#
# [
# {
# name: '',
# currency: 'BTC',
# can_withdraw: '1',
# can_deposit: '1',
# min_withdrawal: '0.0100000000000000'
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'currency')
code = self.safe_currency_code(id)
name = self.safe_string(currency, 'name')
canDeposit = self.safe_integer(currency, 'can_deposit')
canWithdraw = self.safe_integer(currency, 'can_withdraw')
depositEnabled = (canDeposit == 1)
withdrawEnabled = (canWithdraw == 1)
active = bool(canDeposit and canWithdraw)
result[code] = {
'id': id,
'code': code,
'info': currency,
'type': None,
'name': name,
'active': active,
'deposit': depositEnabled,
'withdraw': withdrawEnabled,
'fee': None, # todo: redesign
'precision': self.parse_number('1e-8'), # todo: fix
'limits': {
'amount': {'min': None, 'max': None},
'withdraw': {
'min': self.safe_number(currency, 'min_withdrawal'),
'max': None,
},
},
}
return result
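# Worked example of the flag mapping above (values from the sample entry in the
# comment): can_deposit="1" and can_withdraw="1" parse to the integer 1, so
# depositEnabled, withdrawEnabled and active are all True; an entry with
# can_withdraw="0" would yield withdraw=False and active=False.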
def fetch_order_book(self, symbol: str, limit: Optional[int] = None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int [limit]: the maximum amount of order book entries to return
:param dict [params]: extra parameters specific to the okcoin api endpoint
:returns dict: A dictionary of `order book structures <https://github.com/ccxt/ccxt/wiki/Manual#order-book-structure>` indexed by market symbols
"""
self.load_markets()
market = self.market(symbol)
method = market['type'] + 'GetInstrumentsInstrumentId'
method += 'Depth' if (market['type'] == 'swap') else 'Book'
request = {
'instrument_id': market['id'],
}
if limit is not None:
request['size'] = limit # max 200
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# { asks: [["0.02685268", "0.242571", "1"],
# ["0.02685493", "0.164085", "1"],
# ...
# ["0.02779", "1.039", "1"],
# ["0.027813", "0.0876", "1"] ],
# bids: [["0.02684052", "10.371849", "1"],
# ["0.02684051", "3.707", "4"],
# ...
# ["0.02634963", "0.132934", "1"],
# ["0.02634962", "0.264838", "2"] ],
# timestamp: "2018-12-17T20:24:16.159Z" }
#
# swap
#
# {
# "asks":[
# ["916.21","94","0","1"]
# ],
# "bids":[
# ["916.1","15","0","1"]
# ],
# "time":"2021-04-16T02:04:48.282Z"
# }
#
timestamp = self.parse8601(self.safe_string_2(response, 'timestamp', 'time'))
return self.parse_order_book(response, symbol, timestamp)
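# Note on the method-name composition above: a spot market calls
# spotGetInstrumentsInstrumentIdBook, while a swap market calls
# swapGetInstrumentsInstrumentIdDepth, matching the
# 'instruments/{instrument_id}/book' and 'instruments/{instrument_id}/depth'
# entries in the endpoint tables in describe().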
def parse_ticker(self, ticker, market=None):
#
# { best_ask: "0.02665472",
# best_bid: "0.02665221",
# instrument_id: "ETH-BTC",
# product_id: "ETH-BTC",
# last: "0.02665472",
# ask: "0.02665472", # missing in the docs
# bid: "0.02665221", # not mentioned in the docs
# open_24h: "0.02645482",
# high_24h: "0.02714633",
# low_24h: "0.02614109",
# base_volume_24h: "572298.901923",
# timestamp: "2018-12-17T21:20:07.856Z",
# quote_volume_24h: "15094.86831261" }
#
timestamp = self.parse8601(self.safe_string(ticker, 'timestamp'))
marketId = self.safe_string(ticker, 'instrument_id')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
last = self.safe_string(ticker, 'last')
open = self.safe_string(ticker, 'open_24h')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high_24h'),
'low': self.safe_string(ticker, 'low_24h'),
'bid': self.safe_string(ticker, 'best_bid'),
'bidVolume': self.safe_string(ticker, 'best_bid_size'),
'ask': self.safe_string(ticker, 'best_ask'),
'askVolume': self.safe_string(ticker, 'best_ask_size'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_string(ticker, 'base_volume_24h'),
'quoteVolume': self.safe_string(ticker, 'quote_volume_24h'),
'info': ticker,
}, market)
def fetch_ticker(self, symbol: str, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict [params]: extra parameters specific to the okcoin api endpoint
:returns dict: a `ticker structure <https://github.com/ccxt/ccxt/wiki/Manual#ticker-structure>`
"""
self.load_markets()
market = self.market(symbol)
method = market['type'] + 'GetInstrumentsInstrumentIdTicker'
request = {
'instrument_id': market['id'],
}
response = getattr(self, method)(self.extend(request, params))
#
# { best_ask: "0.02665472",
# best_bid: "0.02665221",
# instrument_id: "ETH-BTC",
# product_id: "ETH-BTC",
# last: "0.02665472",
# ask: "0.02665472",
# bid: "0.02665221",
# open_24h: "0.02645482",
# high_24h: "0.02714633",
# low_24h: "0.02614109",
# base_volume_24h: "572298.901923",
# timestamp: "2018-12-17T21:20:07.856Z",
# quote_volume_24h: "15094.86831261" }
#
return self.parse_ticker(response)
def fetch_tickers_by_type(self, type, symbols: Optional[List[str]] = None, params={}):
self.load_markets()
symbols = self.market_symbols(symbols)
method = type + 'GetInstrumentsTicker'
response = getattr(self, method)(params)
result = {}
for i in range(0, len(response)):
ticker = self.parse_ticker(response[i])
symbol = ticker['symbol']
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
def fetch_tickers(self, symbols: Optional[List[str]] = None, params={}):
"""
        fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param str[]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict [params]: extra parameters specific to the okcoin api endpoint
:returns dict: a dictionary of `ticker structures <https://github.com/ccxt/ccxt/wiki/Manual#ticker-structure>`
"""
symbols = self.market_symbols(symbols)
first = self.safe_string(symbols, 0)
market = None
if first is not None:
market = self.market(first)
type = None
type, params = self.handle_market_type_and_params('fetchTickers', market, params)
return self.fetch_tickers_by_type(type, symbols, self.omit(params, 'type'))
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# spot trades
#
# {
# time: "2018-12-17T23:31:08.268Z",
# timestamp: "2018-12-17T23:31:08.268Z",
# trade_id: "409687906",
# price: "0.02677805",
# size: "0.923467",
# side: "sell"
# }
#
# futures trades, swap trades
#
# {
# trade_id: "1989230840021013",
# side: "buy",
# price: "92.42",
# qty: "184", # missing in swap markets
# size: "5", # missing in futures markets
# timestamp: "2018-12-17T23:26:04.613Z"
# }
#
# fetchOrderTrades(private)
#
# spot trades
#
# {
# "created_at":"2019-03-15T02:52:56.000Z",
# "exec_type":"T", # whether the order is taker or maker
# "fee":"0.00000082",
# "instrument_id":"BTC-USDT",
# "ledger_id":"3963052721",
# "liquidity":"T", # whether the order is taker or maker
# "order_id":"2482659399697408",
# "price":"3888.6",
# "product_id":"BTC-USDT",
# "side":"buy",
# "size":"0.00055306",
# "timestamp":"2019-03-15T02:52:56.000Z"
# },
#
# futures trades, swap trades
#
# {
# "trade_id":"197429674631450625",
# "instrument_id":"EOS-USD-SWAP",
# "order_id":"6a-7-54d663a28-0",
# "price":"3.633",
# "order_qty":"1.0000",
# "fee":"-0.000551",
# "created_at":"2019-03-21T04:41:58.0Z", # missing in swap trades
# "timestamp":"2019-03-25T05:56:31.287Z", # missing in futures trades
# "exec_type":"M", # whether the order is taker or maker
# "side":"short", # "buy" in futures trades
# }
#
marketId = self.safe_string(trade, 'instrument_id')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
timestamp = self.parse8601(self.safe_string_2(trade, 'timestamp', 'created_at'))
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string_2(trade, 'size', 'qty')
amountString = self.safe_string(trade, 'order_qty', amountString)
takerOrMaker = self.safe_string_2(trade, 'exec_type', 'liquidity')
if takerOrMaker == 'M':
takerOrMaker = 'maker'
elif takerOrMaker == 'T':
takerOrMaker = 'taker'
side = self.safe_string(trade, 'side')
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrency = market['base'] if (side == 'buy') else market['quote']
fee = {
# fee is either a positive number(invitation rebate)
# or a negative number(transaction fee deduction)
# therefore we need to invert the fee
# more about it https://github.com/ccxt/ccxt/issues/5909
'cost': Precise.string_neg(feeCostString),
'currency': feeCurrency,
}
orderId = self.safe_string(trade, 'order_id')
return self.safe_trade({
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': self.safe_string_2(trade, 'trade_id', 'ledger_id'),
'order': orderId,
'type': None,
'takerOrMaker': takerOrMaker,
'side': side,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
def fetch_trades(self, symbol: str, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int [since]: timestamp in ms of the earliest trade to fetch
:param int [limit]: the maximum amount of trades to fetch
:param dict [params]: extra parameters specific to the okcoin api endpoint
:returns Trade[]: a list of `trade structures <https://github.com/ccxt/ccxt/wiki/Manual#public-trades>`
"""
self.load_markets()
market = self.market(symbol)
method = market['type'] + 'GetInstrumentsInstrumentIdTrades'
if (limit is None) or (limit > 100):
limit = 100 # maximum = default = 100
request = {
'instrument_id': market['id'],
'limit': limit,
# from: 'id',
# to: 'id',
}
response = getattr(self, method)(self.extend(request, params))
#
# spot markets
#
# [
# {
# time: "2018-12-17T23:31:08.268Z",
# timestamp: "2018-12-17T23:31:08.268Z",
# trade_id: "409687906",
# price: "0.02677805",
# size: "0.923467",
# side: "sell"
# }
# ]
#
# futures markets, swap markets
#
# [
# {
# trade_id: "1989230840021013",
# side: "buy",
# price: "92.42",
# qty: "184", # missing in swap markets
# size: "5", # missing in futures markets
# timestamp: "2018-12-17T23:26:04.613Z"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
#
# spot markets
#
# {
# close: "0.02684545",
# high: "0.02685084",
# low: "0.02683312",
# open: "0.02683894",
# time: "2018-12-17T20:28:00.000Z",
# volume: "101.457222"
# }
#
# futures markets
#
# [
# 1545072720000,
# 0.3159,
# 0.3161,
# 0.3144,
# 0.3149,
# 22886,
# 725179.26172331,
# ]
#
if isinstance(ohlcv, list):
numElements = len(ohlcv)
volumeIndex = 6 if (numElements > 6) else 5
timestamp = self.safe_value(ohlcv, 0)
if isinstance(timestamp, str):
timestamp = self.parse8601(timestamp)
return [
timestamp, # timestamp
self.safe_number(ohlcv, 1), # Open
self.safe_number(ohlcv, 2), # High
self.safe_number(ohlcv, 3), # Low
self.safe_number(ohlcv, 4), # Close
# self.safe_number(ohlcv, 5), # Quote Volume
# self.safe_number(ohlcv, 6), # Base Volume
                self.safe_number(ohlcv, volumeIndex),  # Volume, okex will return base volume in the 7th element for futures markets
]
else:
return [
self.parse8601(self.safe_string(ohlcv, 'time')),
self.safe_number(ohlcv, 'open'), # Open
self.safe_number(ohlcv, 'high'), # High
self.safe_number(ohlcv, 'low'), # Low
self.safe_number(ohlcv, 'close'), # Close
self.safe_number(ohlcv, 'volume'), # Base Volume
]
def fetch_ohlcv(self, symbol: str, timeframe='1m', since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int [since]: timestamp in ms of the earliest candle to fetch
:param int [limit]: the maximum amount of candles to fetch
:param dict [params]: extra parameters specific to the okcoin api endpoint
:returns int[][]: A list of candles ordered, open, high, low, close, volume
"""
self.load_markets()
market = self.market(symbol)
duration = self.parse_timeframe(timeframe)
request = {
'instrument_id': market['id'],
'granularity': self.safe_string(self.timeframes, timeframe, timeframe),
}
options = self.safe_value(self.options, 'fetchOHLCV', {})
defaultType = self.safe_string(options, 'type', 'Candles') # Candles or HistoryCandles
type = self.safe_string(params, 'type', defaultType)
params = self.omit(params, 'type')
method = market['type'] + 'GetInstrumentsInstrumentId' + type
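        # the start/end window below is derived from since/limit: the recent
        # 'Candles' endpoint is queried forward from `since`, while the
        # 'HistoryCandles' endpoint takes the start/end pair in the opposite order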
if type == 'Candles':
if since is not None:
if limit is not None:
request['end'] = self.iso8601(self.sum(since, limit * duration * 1000))
request['start'] = self.iso8601(since)
else:
if limit is not None:
now = self.milliseconds()
request['start'] = self.iso8601(now - limit * duration * 1000)
request['end'] = self.iso8601(now)
elif type == 'HistoryCandles':
if market['option']:
raise NotSupported(self.id + ' fetchOHLCV() does not have ' + type + ' for ' + market['type'] + ' markets')
if since is not None:
if limit is None:
limit = 300 # default
request['start'] = self.iso8601(self.sum(since, limit * duration * 1000))
request['end'] = self.iso8601(since)
else:
if limit is not None:
now = self.milliseconds()
request['end'] = self.iso8601(now - limit * duration * 1000)
request['start'] = self.iso8601(now)
response = getattr(self, method)(self.extend(request, params))
#
# spot markets
#
# [
# {
# close: "0.02683401",
# high: "0.02683401",
# low: "0.02683401",
# open: "0.02683401",
# time: "2018-12-17T23:47:00.000Z",
# volume: "0"
# },
# {
# close: "0.02684545",
# high: "0.02685084",
# low: "0.02683312",
# open: "0.02683894",
# time: "2018-12-17T20:28:00.000Z",
# volume: "101.457222"
# }
# ]
#
# futures
#
# [
# [
# 1545090660000,
# 0.3171,
# 0.3174,
# 0.3171,
# 0.3173,
# 1648,
# 51930.38579450868
# ],
# [
# 1545072720000,
# 0.3159,
# 0.3161,
# 0.3144,
# 0.3149,
# 22886,
# 725179.26172331
# ]
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_account_balance(self, response):
#
# account
#
# [
# {
# balance: 0,
# available: 0,
# currency: "BTC",
# hold: 0
# },
# {
# balance: 0,
# available: 0,
# currency: "ETH",
# hold: 0
# }
# ]
#
# spot
#
# [
# {
# frozen: "0",
# hold: "0",
# id: "2149632",
# currency: "BTC",
# balance: "0.0000000497717339",
# available: "0.0000000497717339",
# holds: "0"
# },
# {
# frozen: "0",
# hold: "0",
# id: "2149632",
# currency: "ICN",
# balance: "0.00000000925",
# available: "0.00000000925",
# holds: "0"
# }
# ]
#
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_string(balance, 'balance')
account['used'] = self.safe_string(balance, 'hold')
account['free'] = self.safe_string(balance, 'available')
result[code] = account
return self.safe_balance(result)
def parse_futures_balance(self, response):
#
# {
# "info":{
# "eos":{
# "auto_margin":"0",
# "contracts": [
# {
# "available_qty":"40.37069445",
# "fixed_balance":"0",
# "instrument_id":"EOS-USD-190329",
# "margin_for_unfilled":"0",
# "margin_frozen":"0",
# "realized_pnl":"0",
# "unrealized_pnl":"0"
# },
# {
# "available_qty":"40.37069445",
# "fixed_balance":"14.54895721",
# "instrument_id":"EOS-USD-190628",
# "margin_for_unfilled":"0",
# "margin_frozen":"10.64042157",
# "realized_pnl":"-3.90853564",
# "unrealized_pnl":"-0.259"
# },
# ],
# "equity":"50.75220665",
# "margin_mode":"fixed",
# "total_avail_balance":"40.37069445"
# },
# }
# }
#
# their root field name is "info", so our info will contain their info
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
info = self.safe_value(response, 'info', {})
ids = list(info.keys())
for i in range(0, len(ids)):
id = ids[i]
code = self.safe_currency_code(id)
balance = self.safe_value(info, id, {})
account = self.account()
totalAvailBalance = self.safe_string(balance, 'total_avail_balance')
if self.safe_string(balance, 'margin_mode') == 'fixed':
contracts = self.safe_value(balance, 'contracts', [])
free = totalAvailBalance
for j in range(0, len(contracts)):
contract = contracts[j]
fixedBalance = self.safe_string(contract, 'fixed_balance')
realizedPnl = self.safe_string(contract, 'realized_pnl')
marginFrozen = self.safe_string(contract, 'margin_frozen')
marginForUnfilled = self.safe_string(contract, 'margin_for_unfilled')
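                    # per-contract contribution to the free balance:
                    # fixed_balance + realized_pnl - margin_frozen - margin_for_unfilled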
margin = Precise.string_sub(Precise.string_sub(Precise.string_add(fixedBalance, realizedPnl), marginFrozen), marginForUnfilled)
free = Precise.string_add(free, margin)
account['free'] = free
else:
realizedPnl = self.safe_string(balance, 'realized_pnl')
unrealizedPnl = self.safe_string(balance, 'unrealized_pnl')
marginFrozen = self.safe_string(balance, 'margin_frozen')
marginForUnfilled = self.safe_string(balance, 'margin_for_unfilled')
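                # for crossed margin mode the free balance is:
                # total_avail_balance + realized_pnl + unrealized_pnl - margin_frozen - margin_for_unfilled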
positive = Precise.string_add(Precise.string_add(totalAvailBalance, realizedPnl), unrealizedPnl)
account['free'] = Precise.string_sub(Precise.string_sub(positive, marginFrozen), marginForUnfilled)
# it may be incorrect to use total, free and used for swap accounts
account['total'] = self.safe_string(balance, 'equity')
result[code] = account
return self.safe_balance(result)
def parse_swap_balance(self, response):
#
# {
# "info": [
# {
# "equity":"3.0139",
# "fixed_balance":"0.0000",
# "instrument_id":"EOS-USD-SWAP",
# "margin":"0.5523",
# "margin_frozen":"0.0000",
# "margin_mode":"crossed",
# "margin_ratio":"1.0913",
# "realized_pnl":"-0.0006",
# "timestamp":"2019-03-25T03:46:10.336Z",
# "total_avail_balance":"3.0000",
# "unrealized_pnl":"0.0145"
# }
# ]
# }
#
# their root field name is "info", so our info will contain their info
result = {'info': response}
timestamp = None
info = self.safe_value(response, 'info', [])
for i in range(0, len(info)):
balance = info[i]
marketId = self.safe_string(balance, 'instrument_id')
symbol = self.safe_symbol(marketId)
balanceTimestamp = self.parse8601(self.safe_string(balance, 'timestamp'))
timestamp = balanceTimestamp if (timestamp is None) else max(timestamp, balanceTimestamp)
account = self.account()
# it may be incorrect to use total, free and used for swap accounts
account['total'] = self.safe_string(balance, 'equity')
account['free'] = self.safe_string(balance, 'total_avail_balance')
result[symbol] = account
result['timestamp'] = timestamp
result['datetime'] = self.iso8601(timestamp)
return self.safe_balance(result)
def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict [params]: extra parameters specific to the okcoin api endpoint
:returns dict: a `balance structure <https://github.com/ccxt/ccxt/wiki/Manual#balance-structure>`
"""
defaultType = self.safe_string_2(self.options, 'fetchBalance', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " fetchBalance() requires a type parameter(one of 'account', 'spot', 'futures', 'swap')")
self.load_markets()
suffix = 'Wallet' if (type == 'account') else 'Accounts'
method = type + 'Get' + suffix
query = self.omit(params, 'type')
response = getattr(self, method)(query)
#
# account
#
# [
# {
# balance: 0,
# available: 0,
# currency: "BTC",
# hold: 0
# },
# {
# balance: 0,
# available: 0,
# currency: "ETH",
# hold: 0
# }
# ]
#
# spot
#
# [
# {
# frozen: "0",
# hold: "0",
# id: "2149632",
# currency: "BTC",
# balance: "0.0000000497717339",
# available: "0.0000000497717339",
# holds: "0"
# },
# {
# frozen: "0",
# hold: "0",
# id: "2149632",
# currency: "ICN",
# balance: "0.00000000925",
# available: "0.00000000925",
# holds: "0"
# }
# ]
        #
# futures
#
# {
# "info":{
# "eos":{
# "auto_margin":"0",
# "contracts": [
# {
# "available_qty":"40.37069445",
# "fixed_balance":"0",
# "instrument_id":"EOS-USD-190329",
# "margin_for_unfilled":"0",
# "margin_frozen":"0",
# "realized_pnl":"0",
# "unrealized_pnl":"0"
# },
# {
# "available_qty":"40.37069445",
# "fixed_balance":"14.54895721",
# "instrument_id":"EOS-USD-190628",
# "margin_for_unfilled":"0",
# "margin_frozen":"10.64042157",
# "realized_pnl":"-3.90853564",
# "unrealized_pnl":"-0.259"
# },
# ],
# "equity":"50.75220665",
# "margin_mode":"fixed",
# "total_avail_balance":"40.37069445"
# },
# }
# }
#
# swap
#
# {
# "info": [
# {
# "equity":"3.0139",
# "fixed_balance":"0.0000",
# "instrument_id":"EOS-USD-SWAP",
# "margin":"0.5523",
# "margin_frozen":"0.0000",
# "margin_mode":"crossed",
# "margin_ratio":"1.0913",
# "realized_pnl":"-0.0006",
# "timestamp":"2019-03-25T03:46:10.336Z",
# "total_avail_balance":"3.0000",
# "unrealized_pnl":"0.0145"
# }
# ]
# }
#
return self.parse_balance_by_type(type, response)
def parse_balance_by_type(self, type, response):
if (type == 'account') or (type == 'spot'):
return self.parse_account_balance(response)
elif type == 'futures':
return self.parse_futures_balance(response)
elif type == 'swap':
return self.parse_swap_balance(response)
raise NotSupported(self.id + " fetchBalance does not support the '" + type + "' type(the type must be one of 'account', 'spot', 'futures', 'swap')")
def create_order(self, symbol: str, type: OrderType, side: OrderSide, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
        :param float [price]: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict [params]: extra parameters specific to the okcoin api endpoint
:returns dict: an `order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
self.load_markets()
market = self.market(symbol)
request = {
'instrument_id': market['id'],
# 'client_oid': 'abcdef1234567890', # [a-z0-9]{1,32}
            # 'order_type': '0',  # 0 = Normal limit order, 1 = Post only, 2 = Fill Or Kill, 3 = Immediate Or Cancel, 4 = Market for futures only
}
clientOrderId = self.safe_string_2(params, 'client_oid', 'clientOrderId')
if clientOrderId is not None:
request['client_oid'] = clientOrderId
params = self.omit(params, ['client_oid', 'clientOrderId'])
method = None
if market['futures'] or market['swap']:
size = self.number_to_string(amount) if market['futures'] else self.amount_to_precision(symbol, amount)
request = self.extend(request, {
'type': type, # 1:open long 2:open short 3:close long 4:close short for futures
'size': size,
# 'match_price': '0', # Order at best counter party price?(0:no 1:yes). The default is 0. If it is set, the price parameter will be ignored. When posting orders at best bid price, order_type can only be 0(regular order).
})
orderType = self.safe_string(params, 'order_type')
# order_type == '4' means a market order
isMarketOrder = (type == 'market') or (orderType == '4')
if isMarketOrder:
request['order_type'] = '4'
else:
request['price'] = self.price_to_precision(symbol, price)
if market['futures']:
request['leverage'] = '10' # or '20'
method = market['type'] + 'PostOrder'
else:
request = self.extend(request, {
'side': side,
'type': type, # limit/market
})
if type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['size'] = self.amount_to_precision(symbol, amount)
elif type == 'market':
# for market buy it requires the amount of quote currency to spend
if side == 'buy':
notional = self.safe_number(params, 'notional')
createMarketBuyOrderRequiresPrice = self.safe_value(self.options, 'createMarketBuyOrderRequiresPrice', True)
if createMarketBuyOrderRequiresPrice:
if price is not None:
if notional is None:
notional = amount * price
elif notional is None:
raise InvalidOrder(self.id + ' createOrder() requires the price argument with market buy orders to calculate total order cost(amount to spend), where cost = amount * price. Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options["createMarketBuyOrderRequiresPrice"] = False and supply the total cost value in the "amount" argument or in the "notional" extra parameter(the exchange-specific behaviour)')
else:
notional = amount if (notional is None) else notional
request['notional'] = self.cost_to_precision(symbol, notional)
else:
request['size'] = self.amount_to_precision(symbol, amount)
method = 'spotPostOrders'
response = getattr(self, method)(self.extend(request, params))
#
# {
# "client_oid":"oktspot79",
# "error_code":"",
# "error_message":"",
# "order_id":"2510789768709120",
# "result":true
# }
#
order = self.parse_order(response, market)
return self.extend(order, {
'type': type,
'side': side,
})
def cancel_order(self, id: str, symbol: Optional[str] = None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict [params]: extra parameters specific to the okcoin api endpoint
:returns dict: An `order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
type = None
if market['futures'] or market['swap']:
type = market['type']
else:
defaultType = self.safe_string_2(self.options, 'cancelOrder', 'defaultType', market['type'])
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " cancelOrder() requires a type parameter(one of 'spot', 'futures', 'swap').")
method = type + 'PostCancelOrder'
request = {
'instrument_id': market['id'],
}
if market['futures'] or market['swap']:
method += 'InstrumentId'
else:
method += 's'
clientOrderId = self.safe_string_2(params, 'client_oid', 'clientOrderId')
if clientOrderId is not None:
method += 'ClientOid'
request['client_oid'] = clientOrderId
else:
method += 'OrderId'
request['order_id'] = id
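        # the composed implicit method resolves to, for example,
        # spotPostCancelOrdersOrderId for spot markets or
        # futuresPostCancelOrderInstrumentIdOrderId for futures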
query = self.omit(params, ['type', 'client_oid', 'clientOrderId'])
response = getattr(self, method)(self.extend(request, query))
result = response if ('result' in response) else self.safe_value(response, market['id'], {})
#
# spot
#
# {
# "btc-usdt": [
# {
# "result":true,
# "client_oid":"a123",
# "order_id": "2510832677225473"
# }
# ]
# }
#
# futures, swap
#
# {
# "result": True,
# "client_oid": "oktfuture10", # missing if requested by order_id
# "order_id": "2517535534836736",
# "instrument_id": "EOS-USD-190628"
# }
#
return self.parse_order(result, market)
def parse_order_status(self, status):
statuses = {
'-2': 'failed',
'-1': 'canceled',
'0': 'open',
'1': 'open',
'2': 'closed',
'3': 'open',
'4': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order_side(self, side):
sides = {
'1': 'buy', # open long
'2': 'sell', # open short
'3': 'sell', # close long
'4': 'buy', # close short
}
return self.safe_string(sides, side, side)
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# "client_oid":"oktspot79",
# "error_code":"",
# "error_message":"",
# "order_id":"2510789768709120",
# "result":true
# }
#
# cancelOrder
#
# {
# "result": True,
# "client_oid": "oktfuture10", # missing if requested by order_id
# "order_id": "2517535534836736",
# # instrument_id is missing for spot/margin orders
# # available in futures and swap orders only
# "instrument_id": "EOS-USD-190628",
# }
#
# fetchOrder, fetchOrdersByState, fetchOpenOrders, fetchClosedOrders
#
# # spot orders
#
# {
# "client_oid":"oktspot76",
# "created_at":"2019-03-18T07:26:49.000Z",
# "filled_notional":"3.9734",
# "filled_size":"0.001", # filled_qty in futures and swap orders
        #         "funds":"",  # this is most likely the same
# "instrument_id":"BTC-USDT",
# "notional":"",
# "order_id":"2500723297813504",
# "order_type":"0",
# "price":"4013",
# "product_id":"BTC-USDT", # missing in futures and swap orders
# "side":"buy",
# "size":"0.001",
# "status":"filled",
# "state": "2",
# "timestamp":"2019-03-18T07:26:49.000Z",
# "type":"limit"
# }
#
# # futures and swap orders
#
# {
# "instrument_id":"EOS-USD-190628",
# "size":"10",
# "timestamp":"2019-03-20T10:04:55.000Z",
# "filled_qty":"10", # filled_size in spot orders
# "fee":"-0.00841043",
# "order_id":"2512669605501952",
# "price":"3.668",
# "price_avg":"3.567", # missing in spot orders
# "status":"2",
# "state": "2",
# "type":"4",
# "contract_val":"10",
# "leverage":"10", # missing in swap, spot orders
# "client_oid":"",
# "pnl":"1.09510794", # missing in swap, spot orders
# "order_type":"0"
# }
#
id = self.safe_string(order, 'order_id')
timestamp = self.parse8601(self.safe_string(order, 'timestamp'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'type')
if (side != 'buy') and (side != 'sell'):
side = self.parse_order_side(type)
marketId = self.safe_string(order, 'instrument_id')
market = self.safe_market(marketId, market)
amount = self.safe_string(order, 'size')
filled = self.safe_string_2(order, 'filled_size', 'filled_qty')
remaining = None
if amount is not None:
if filled is not None:
amount = Precise.string_max(amount, filled)
remaining = Precise.string_max('0', Precise.string_sub(amount, filled))
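                # the clamping above guards against inconsistent payloads where
                # the reported filled size would exceed the reported order size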
if type == 'market':
remaining = '0'
cost = self.safe_string_2(order, 'filled_notional', 'funds')
price = self.safe_string(order, 'price')
average = self.safe_string(order, 'price_avg')
if cost is None:
if filled is not None and average is not None:
cost = Precise.string_mul(average, filled)
else:
if (average is None) and (filled is not None) and Precise.string_gt(filled, '0'):
average = Precise.string_div(cost, filled)
status = self.parse_order_status(self.safe_string(order, 'state'))
feeCost = self.safe_number(order, 'fee')
fee = None
if feeCost is not None:
feeCurrency = None
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
clientOrderId = self.safe_string(order, 'client_oid')
if (clientOrderId is not None) and (len(clientOrderId) < 1):
clientOrderId = None # fix empty clientOrderId string
stopPrice = self.safe_number(order, 'trigger_price')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': market['symbol'],
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': stopPrice,
'triggerPrice': stopPrice,
'average': average,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': None,
}, market)
def fetch_order(self, id: str, symbol: Optional[str] = None, params={}):
"""
fetches information on an order made by the user
        :param str id: order id
        :param str symbol: unified symbol of the market the order was made in
:param dict [params]: extra parameters specific to the okcoin api endpoint
:returns dict: An `order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
defaultType = self.safe_string_2(self.options, 'fetchOrder', 'defaultType', market['type'])
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " fetchOrder() requires a type parameter(one of 'spot', 'futures', 'swap').")
instrumentId = 'InstrumentId' if (market['futures'] or market['swap']) else ''
method = type + 'GetOrders' + instrumentId
request = {
'instrument_id': market['id'],
# 'client_oid': 'abcdef12345', # optional, [a-z0-9]{1,32}
# 'order_id': id,
}
clientOid = self.safe_string(params, 'client_oid')
if clientOid is not None:
method += 'ClientOid'
request['client_oid'] = clientOid
else:
method += 'OrderId'
request['order_id'] = id
query = self.omit(params, 'type')
response = getattr(self, method)(self.extend(request, query))
#
# spot
#
# {
# "client_oid":"oktspot70",
# "created_at":"2019-03-15T02:52:56.000Z",
# "filled_notional":"3.8886",
# "filled_size":"0.001",
# "funds":"",
# "instrument_id":"BTC-USDT",
# "notional":"",
# "order_id":"2482659399697408",
# "order_type":"0",
# "price":"3927.3",
# "product_id":"BTC-USDT",
# "side":"buy",
# "size":"0.001",
# "status":"filled",
# "state": "2",
# "timestamp":"2019-03-15T02:52:56.000Z",
# "type":"limit"
# }
#
# futures, swap
#
# {
# "instrument_id":"EOS-USD-190628",
# "size":"10",
# "timestamp":"2019-03-20T02:46:38.000Z",
# "filled_qty":"10",
# "fee":"-0.0080819",
# "order_id":"2510946213248000",
# "price":"3.712",
# "price_avg":"3.712",
# "status":"2",
# "state": "2",
# "type":"2",
# "contract_val":"10",
# "leverage":"10",
# "client_oid":"", # missing in swap orders
# "pnl":"0", # missing in swap orders
# "order_type":"0"
# }
#
return self.parse_order(response)
def fetch_orders_by_state(self, state, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrdersByState() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
type = None
if market['futures'] or market['swap']:
type = market['type']
else:
defaultType = self.safe_string_2(self.options, 'fetchOrder', 'defaultType', market['type'])
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " fetchOrdersByState() requires a type parameter(one of 'spot', 'futures', 'swap').")
request = {
'instrument_id': market['id'],
# '-2': failed,
# '-1': cancelled,
            # '0': open,
# '1': partially filled,
# '2': fully filled,
# '3': submitting,
# '4': cancelling,
# '6': incomplete(open+partially filled),
# '7': complete(cancelled+fully filled),
'state': state,
}
method = type + 'GetOrders'
if market['futures'] or market['swap']:
method += 'InstrumentId'
query = self.omit(params, 'type')
response = getattr(self, method)(self.extend(request, query))
#
# spot
#
# [
        #         # in fact, this documented API response does not correspond
# # to their actual API response for spot markets
# # OKEX v3 API returns a plain array of orders(see below)
# [
# {
# "client_oid":"oktspot76",
# "created_at":"2019-03-18T07:26:49.000Z",
# "filled_notional":"3.9734",
# "filled_size":"0.001",
# "funds":"",
# "instrument_id":"BTC-USDT",
# "notional":"",
# "order_id":"2500723297813504",
# "order_type":"0",
# "price":"4013",
# "product_id":"BTC-USDT",
# "side":"buy",
# "size":"0.001",
# "status":"filled",
# "state": "2",
# "timestamp":"2019-03-18T07:26:49.000Z",
# "type":"limit"
# },
# ],
# {
# "before":"2500723297813504",
# "after":"2500650881647616"
# }
# ]
#
# futures, swap
#
# {
# "result":true, # missing in swap orders
# "order_info": [
# {
# "instrument_id":"EOS-USD-190628",
# "size":"10",
# "timestamp":"2019-03-20T10:04:55.000Z",
# "filled_qty":"10",
# "fee":"-0.00841043",
# "order_id":"2512669605501952",
# "price":"3.668",
# "price_avg":"3.567",
# "status":"2",
# "state": "2",
# "type":"4",
# "contract_val":"10",
# "leverage":"10", # missing in swap orders
# "client_oid":"",
# "pnl":"1.09510794", # missing in swap orders
# "order_type":"0"
# },
# ]
# }
#
orders = None
if market['swap'] or market['futures']:
orders = self.safe_value(response, 'order_info', [])
else:
orders = response
responseLength = len(response)
if responseLength < 1:
return []
        # in fact, this documented API response does not correspond
# to their actual API response for spot markets
# OKEX v3 API returns a plain array of orders
if responseLength > 1:
before = self.safe_value(response[1], 'before')
if before is not None:
orders = response[0]
return self.parse_orders(orders, market, since, limit)
def fetch_open_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetch all unfilled currently open orders
:param str symbol: unified market symbol
:param int [since]: the earliest time in ms to fetch open orders for
:param int [limit]: the maximum number of open orders structures to retrieve
:param dict [params]: extra parameters specific to the okcoin api endpoint
:returns Order[]: a list of `order structures <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
# '-2': failed,
# '-1': cancelled,
        # '0': open,
# '1': partially filled,
# '2': fully filled,
# '3': submitting,
# '4': cancelling,
# '6': incomplete(open+partially filled),
# '7': complete(cancelled+fully filled),
return self.fetch_orders_by_state('6', symbol, since, limit, params)
def fetch_closed_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetches information on multiple closed orders made by the user
:param str symbol: unified market symbol of the market orders were made in
:param int [since]: the earliest time in ms to fetch orders for
        :param int [limit]: the maximum number of order structures to retrieve
:param dict [params]: extra parameters specific to the okcoin api endpoint
:returns Order[]: a list of `order structures <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
# '-2': failed,
# '-1': cancelled,
        # '0': open,
# '1': partially filled,
# '2': fully filled,
# '3': submitting,
# '4': cancelling,
# '6': incomplete(open+partially filled),
# '7': complete(cancelled+fully filled),
return self.fetch_orders_by_state('7', symbol, since, limit, params)
def parse_deposit_address(self, depositAddress, currency=None):
#
# {
# address: '0x696abb81974a8793352cbd33aadcf78eda3cfdfa',
# currency: 'eth'
# tag: 'abcde12345', # will be missing if the token does not require a deposit tag
# payment_id: 'abcde12345', # will not be returned if the token does not require a payment_id
# # can_deposit: 1, # 0 or 1, documented but missing
# # can_withdraw: 1, # 0 or 1, documented but missing
# }
#
address = self.safe_string(depositAddress, 'address')
tag = self.safe_string_2(depositAddress, 'tag', 'payment_id')
tag = self.safe_string_2(depositAddress, 'memo', 'Memo', tag)
currencyId = self.safe_string(depositAddress, 'currency')
code = self.safe_currency_code(currencyId)
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': depositAddress,
}
def fetch_deposit_address(self, code: str, params={}):
"""
        fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict [params]: extra parameters specific to the okcoin api endpoint
:returns dict: an `address structure <https://github.com/ccxt/ccxt/wiki/Manual#address-structure>`
"""
self.load_markets()
parts = code.split('-')
currency = self.currency(parts[0])
request = {
'currency': currency['id'],
}
response = self.accountGetDepositAddress(self.extend(request, params))
#
# [
# {
# address: '0x696abb81974a8793352cbd33aadcf78eda3cfdfa',
# currency: 'eth'
# }
# ]
#
addressesByCode = self.parse_deposit_addresses(response, [currency['code']])
address = self.safe_value(addressesByCode, code)
if address is None:
raise InvalidAddress(self.id + ' fetchDepositAddress() cannot return nonexistent addresses, you should create withdrawal addresses with the exchange website first')
return address
def transfer(self, code: str, amount, fromAccount, toAccount, params={}):
"""
transfer currency internally between wallets on the same account
:param str code: unified currency code
:param float amount: amount to transfer
:param str fromAccount: account to transfer from
:param str toAccount: account to transfer to
:param dict [params]: extra parameters specific to the okcoin api endpoint
:returns dict: a `transfer structure <https://github.com/ccxt/ccxt/wiki/Manual#transfer-structure>`
"""
self.load_markets()
currency = self.currency(code)
accountsByType = self.safe_value(self.options, 'accountsByType', {})
fromId = self.safe_string(accountsByType, fromAccount, fromAccount)
toId = self.safe_string(accountsByType, toAccount, toAccount)
request = {
'amount': self.currency_to_precision(code, amount),
'currency': currency['id'],
'from': fromId, # 1 spot, 6 funding
'to': toId, # 1 spot, 6 funding
'type': '0', # 0 Transfer between accounts in the main account/sub_account, 1 main account to sub_account, 2 sub_account to main account
}
if fromId == 'main':
request['type'] = '1'
request['sub_account'] = toId
request['to'] = '0'
elif toId == 'main':
request['type'] = '2'
request['sub_account'] = fromId
request['from'] = '0'
request['to'] = '6'
response = self.accountPostTransfer(self.extend(request, params))
#
# {
# "transfer_id": "754147",
# "currency": "ETC",
# "from": "6",
# "amount": "0.1",
# "to": "1",
# "result": True
# }
#
return self.parse_transfer(response, currency)
def parse_transfer(self, transfer, currency=None):
#
# {
# "transfer_id": "754147",
# "currency": "ETC",
# "from": "6",
# "amount": "0.1",
# "to": "1",
# "result": True
# }
#
accountsById = self.safe_value(self.options, 'accountsById', {})
return {
'info': transfer,
'id': self.safe_string(transfer, 'transfer_id'),
'timestamp': None,
'datetime': None,
'currency': self.safe_currency_code(self.safe_string(transfer, 'currency'), currency),
'amount': self.safe_number(transfer, 'amount'),
'fromAccount': self.safe_string(accountsById, self.safe_string(transfer, 'from')),
'toAccount': self.safe_string(accountsById, self.safe_string(transfer, 'to')),
'status': self.parse_transfer_status(self.safe_string(transfer, 'result')),
}
def parse_transfer_status(self, status):
statuses = {
'true': 'ok',
}
return self.safe_string(statuses, status, 'failed')
def withdraw(self, code: str, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str tag:
:param dict [params]: extra parameters specific to the okcoin api endpoint
:returns dict: a `transaction structure <https://github.com/ccxt/ccxt/wiki/Manual#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
self.load_markets()
currency = self.currency(code)
if tag:
address = address + ':' + tag
fee = self.safe_string(params, 'fee')
if fee is None:
raise ArgumentsRequired(self.id + " withdraw() requires a 'fee' string parameter, network transaction fee must be ≥ 0. Withdrawals to OKCoin or OKEx are fee-free, please set '0'. Withdrawing to external digital asset address requires network transaction fee.")
request = {
'currency': currency['id'],
'to_address': address,
            'destination': '4',  # 2 = OKCoin International, 3 = OKEx, 4 = others
'amount': self.number_to_string(amount),
            'fee': fee,  # str. Network transaction fee ≥ 0. Withdrawals to OKCoin or OKEx are fee-free, please set '0'. Withdrawal to external digital asset address requires network transaction fee.
}
if 'password' in params:
request['trade_pwd'] = params['password']
elif 'trade_pwd' in params:
request['trade_pwd'] = params['trade_pwd']
elif self.password:
request['trade_pwd'] = self.password
query = self.omit(params, ['fee', 'password', 'trade_pwd'])
if not ('trade_pwd' in request):
raise ExchangeError(self.id + ' withdraw() requires self.password set on the exchange instance or a password / trade_pwd parameter')
response = self.accountPostWithdrawal(self.extend(request, query))
#
# {
# "amount":"0.1",
# "withdrawal_id":"67485",
# "currency":"btc",
# "result":true
# }
#
return self.parse_transaction(response, currency)
def fetch_deposits(self, code: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetch all deposits made to an account
:param str code: unified currency code
:param int [since]: the earliest time in ms to fetch deposits for
:param int [limit]: the maximum number of deposits structures to retrieve
:param dict [params]: extra parameters specific to the okcoin api endpoint
:returns dict[]: a list of `transaction structures <https://github.com/ccxt/ccxt/wiki/Manual#transaction-structure>`
"""
self.load_markets()
request = {}
method = 'accountGetDepositHistory'
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
method += 'Currency'
response = getattr(self, method)(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit, params)
def fetch_withdrawals(self, code: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetch all withdrawals made from an account
:param str code: unified currency code
:param int [since]: the earliest time in ms to fetch withdrawals for
:param int [limit]: the maximum number of withdrawals structures to retrieve
:param dict [params]: extra parameters specific to the okcoin api endpoint
:returns dict[]: a list of `transaction structures <https://github.com/ccxt/ccxt/wiki/Manual#transaction-structure>`
"""
self.load_markets()
request = {}
method = 'accountGetWithdrawalHistory'
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
method += 'Currency'
response = getattr(self, method)(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit, params)
def parse_transaction_status(self, status):
#
# deposit statuses
#
# {
# '0': 'waiting for confirmation',
# '1': 'confirmation account',
# '2': 'recharge success'
# }
#
# withdrawal statues
#
# {
# '-3': 'pending cancel',
# '-2': 'cancelled',
# '-1': 'failed',
# '0': 'pending',
# '1': 'sending',
# '2': 'sent',
# '3': 'email confirmation',
# '4': 'manual confirmation',
# '5': 'awaiting identity confirmation'
# }
#
statuses = {
'-3': 'pending',
'-2': 'canceled',
'-1': 'failed',
'0': 'pending',
'1': 'pending',
'2': 'ok',
'3': 'pending',
'4': 'pending',
'5': 'pending',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "amount":"0.1",
# "withdrawal_id":"67485",
# "currency":"btc",
# "result":true
# }
#
# fetchWithdrawals
#
# {
# amount: "4.72100000",
# withdrawal_id: "1729116",
# fee: "0.01000000eth",
# txid: "0xf653125bbf090bcfe4b5e8e7b8f586a9d87aa7de94598702758c0802b…",
# currency: "ETH",
# from: "7147338839",
# to: "0x26a3CB49578F07000575405a57888681249c35Fd",
# timestamp: "2018-08-17T07:03:42.000Z",
# status: "2"
# }
#
# fetchDeposits
#
# {
# "amount": "4.19511659",
# "txid": "14c9a8c925647cdb7e5b2937ea9aefe2b29b2c273150ad3f44b3b8a4635ed437",
# "currency": "XMR",
# "from": "",
# "to": "48PjH3ksv1fiXniKvKvyH5UtFs5WhfS2Vf7U3TwzdRJtCc7HJWvCQe56dRahyhQyTAViXZ8Nzk4gQg6o4BJBMUoxNy8y8g7",
# "tag": "1234567",
        #         "deposit_id": 11571659, <-- we can use this
# "timestamp": "2019-10-01T14:54:19.000Z",
# "status": "2"
# }
#
type = None
id = None
address = None
withdrawalId = self.safe_string(transaction, 'withdrawal_id')
addressFrom = self.safe_string(transaction, 'from')
addressTo = self.safe_string(transaction, 'to')
tagTo = self.safe_string(transaction, 'tag')
if withdrawalId is not None:
type = 'withdrawal'
id = withdrawalId
address = addressTo
else:
# the payment_id will appear on new deposits but appears to be removed from the response after 2 months
id = self.safe_string_2(transaction, 'payment_id', 'deposit_id')
type = 'deposit'
address = addressTo
currencyId = self.safe_string(transaction, 'currency')
code = self.safe_currency_code(currencyId)
amount = self.safe_number(transaction, 'amount')
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
txid = self.safe_string(transaction, 'txid')
timestamp = self.parse8601(self.safe_string(transaction, 'timestamp'))
feeCost = None
if type == 'deposit':
feeCost = 0
else:
if currencyId is not None:
feeWithCurrencyId = self.safe_string(transaction, 'fee')
if feeWithCurrencyId is not None:
# https://github.com/ccxt/ccxt/pull/5748
lowercaseCurrencyId = currencyId.lower()
feeWithoutCurrencyId = feeWithCurrencyId.replace(lowercaseCurrencyId, '')
feeCost = float(feeWithoutCurrencyId)
# todo parse tags
return {
'info': transaction,
'id': id,
'currency': code,
'amount': amount,
'network': None,
'addressFrom': addressFrom,
'addressTo': addressTo,
'address': address,
'tagFrom': None,
'tagTo': tagTo,
'tag': tagTo,
'status': status,
'type': type,
'updated': None,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': {
'currency': code,
'cost': feeCost,
},
}
def parse_my_trade(self, pair, market=None):
# check that trading symbols match in both entries
userTrade = self.safe_value(pair, 1)
otherTrade = self.safe_value(pair, 0)
firstMarketId = self.safe_string(otherTrade, 'instrument_id')
secondMarketId = self.safe_string(userTrade, 'instrument_id')
if firstMarketId != secondMarketId:
raise NotSupported(self.id + ' parseMyTrade() received unrecognized response format, differing instrument_ids in one fill, the exchange API might have changed, paste your verbose output: https://github.com/ccxt/ccxt/wiki/FAQ#what-is-required-to-get-help')
marketId = firstMarketId
market = self.safe_market(marketId, market)
symbol = market['symbol']
quoteId = market['quoteId']
side = None
amountString = None
costString = None
receivedCurrencyId = self.safe_string(userTrade, 'currency')
feeCurrencyId = None
if receivedCurrencyId == quoteId:
side = self.safe_string(otherTrade, 'side')
amountString = self.safe_string(otherTrade, 'size')
costString = self.safe_string(userTrade, 'size')
feeCurrencyId = self.safe_string(otherTrade, 'currency')
else:
side = self.safe_string(userTrade, 'side')
amountString = self.safe_string(userTrade, 'size')
costString = self.safe_string(otherTrade, 'size')
feeCurrencyId = self.safe_string(userTrade, 'currency')
id = self.safe_string(userTrade, 'trade_id')
priceString = self.safe_string(userTrade, 'price')
feeCostFirstString = self.safe_string(otherTrade, 'fee')
feeCostSecondString = self.safe_string(userTrade, 'fee')
feeCurrencyCodeFirst = self.safe_currency_code(self.safe_string(otherTrade, 'currency'))
feeCurrencyCodeSecond = self.safe_currency_code(self.safe_string(userTrade, 'currency'))
fee = None
fees = None
# fee is either a positive number(invitation rebate)
# or a negative number(transaction fee deduction)
# therefore we need to invert the fee
# more about it https://github.com/ccxt/ccxt/issues/5909
if (feeCostFirstString is not None) and not Precise.string_equals(feeCostFirstString, '0'):
if (feeCostSecondString is not None) and not Precise.string_equals(feeCostSecondString, '0'):
fees = [
{
'cost': Precise.string_neg(feeCostFirstString),
'currency': feeCurrencyCodeFirst,
},
{
'cost': Precise.string_neg(feeCostSecondString),
'currency': feeCurrencyCodeSecond,
},
]
else:
fee = {
'cost': Precise.string_neg(feeCostFirstString),
'currency': feeCurrencyCodeFirst,
}
elif (feeCostSecondString is not None) and not Precise.string_equals(feeCostSecondString, '0'):
fee = {
'cost': Precise.string_neg(feeCostSecondString),
'currency': feeCurrencyCodeSecond,
}
else:
fee = {
'cost': '0',
'currency': self.safe_currency_code(feeCurrencyId),
}
#
# simplified structures to show the underlying semantics
#
# # market/limit sell
#
# {
# "currency":"USDT",
# "fee":"-0.04647925", # ←--- fee in received quote currency
# "price":"129.13", # ←------ price
# "size":"30.98616393", # ←-- cost
# },
# {
# "currency":"ETH",
# "fee":"0",
# "price":"129.13",
# "size":"0.23996099", # ←--- amount
# },
#
# # market/limit buy
#
# {
# "currency":"ETH",
# "fee":"-0.00036049", # ←--- fee in received base currency
# "price":"129.16", # ←------ price
# "size":"0.240322", # ←----- amount
# },
# {
# "currency":"USDT",
# "fee":"0",
# "price":"129.16",
# "size":"31.03998952", # ←-- cost
# }
#
timestamp = self.parse8601(self.safe_string_2(userTrade, 'timestamp', 'created_at'))
takerOrMaker = self.safe_string_2(userTrade, 'exec_type', 'liquidity')
if takerOrMaker == 'M':
takerOrMaker = 'maker'
elif takerOrMaker == 'T':
takerOrMaker = 'taker'
orderId = self.safe_string(userTrade, 'order_id')
return self.safe_trade({
'info': pair,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': id,
'order': orderId,
'type': None,
'takerOrMaker': takerOrMaker,
'side': side,
'price': priceString,
'amount': amountString,
'cost': costString,
'fee': fee,
'fees': fees,
}, market)
def parse_my_trades(self, trades, market=None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
grouped = self.group_by(trades, 'trade_id')
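        # the fills endpoint returns two ledger entries per fill, one for each
        # currency leg, so the entries are grouped by trade_id and parsed pairwise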
tradeIds = list(grouped.keys())
result = []
for i in range(0, len(tradeIds)):
tradeId = tradeIds[i]
pair = grouped[tradeId]
# make sure it has exactly 2 trades, no more, no less
numTradesInPair = len(pair)
if numTradesInPair == 2:
trade = self.parse_my_trade(pair)
result.append(trade)
market = self.safe_market(None, market)
return self.filter_by_symbol_since_limit(result, market['symbol'], since, limit)
def fetch_my_trades(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetch all trades made by the user
:param str symbol: unified market symbol
:param int [since]: the earliest time in ms to fetch trades for
:param int [limit]: the maximum number of trades structures to retrieve
:param dict [params]: extra parameters specific to the okcoin api endpoint
:returns Trade[]: a list of `trade structures <https://github.com/ccxt/ccxt/wiki/Manual#trade-structure>`
"""
# okex actually returns ledger entries instead of fills here, so each fill in the order
# is represented by two trades with opposite buy/sell sides, not one :\
        # this aspect renders the 'fills' endpoint unusable for fetchOrderTrades
# until either OKEX fixes the API or we workaround self on our side somehow
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
if (limit is not None) and (limit > 100):
limit = 100
request = {
'instrument_id': market['id'],
# 'order_id': id, # string
# 'after': '1', # pagination of data to return records earlier than the requested ledger_id
            # 'before': '1',  # pagination of data to return records newer than the requested ledger_id
# 'limit': limit, # optional, number of results per request, default = maximum = 100
}
defaultType = self.safe_string_2(self.options, 'fetchMyTrades', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
method = type + 'GetFills'
response = getattr(self, method)(self.extend(request, query))
#
# [
# # sell
# {
# "created_at":"2020-03-29T11:55:25.000Z",
# "currency":"USDT",
# "exec_type":"T",
# "fee":"-0.04647925",
# "instrument_id":"ETH-USDT",
# "ledger_id":"10562924353",
# "liquidity":"T",
# "order_id":"4636470489136128",
# "price":"129.13",
# "product_id":"ETH-USDT",
# "side":"buy",
# "size":"30.98616393",
# "timestamp":"2020-03-29T11:55:25.000Z",
# "trade_id":"18551601"
# },
# {
# "created_at":"2020-03-29T11:55:25.000Z",
# "currency":"ETH",
# "exec_type":"T",
# "fee":"0",
# "instrument_id":"ETH-USDT",
# "ledger_id":"10562924352",
# "liquidity":"T",
# "order_id":"4636470489136128",
# "price":"129.13",
# "product_id":"ETH-USDT",
# "side":"sell",
# "size":"0.23996099",
# "timestamp":"2020-03-29T11:55:25.000Z",
# "trade_id":"18551601"
# },
# # buy
# {
# "created_at":"2020-03-29T11:55:16.000Z",
# "currency":"ETH",
# "exec_type":"T",
# "fee":"-0.00036049",
# "instrument_id":"ETH-USDT",
# "ledger_id":"10562922669",
# "liquidity":"T",
# "order_id": "4636469894136832",
# "price":"129.16",
# "product_id":"ETH-USDT",
# "side":"buy",
# "size":"0.240322",
# "timestamp":"2020-03-29T11:55:16.000Z",
# "trade_id":"18551600"
# },
# {
# "created_at":"2020-03-29T11:55:16.000Z",
# "currency":"USDT",
# "exec_type":"T",
# "fee":"0",
# "instrument_id":"ETH-USDT",
# "ledger_id":"10562922668",
# "liquidity":"T",
# "order_id":"4636469894136832",
# "price":"129.16",
# "product_id":"ETH-USDT",
# "side":"sell",
# "size":"31.03998952",
# "timestamp":"2020-03-29T11:55:16.000Z",
# "trade_id":"18551600"
# }
# ]
#
return self.parse_my_trades(response, market, since, limit, params)
def fetch_order_trades(self, id: str, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetch all the trades made from a single order
:param str id: order id
:param str symbol: unified market symbol
:param int [since]: the earliest time in ms to fetch trades for
:param int [limit]: the maximum number of trades to retrieve
:param dict [params]: extra parameters specific to the okcoin api endpoint
:returns dict[]: a list of `trade structures <https://github.com/ccxt/ccxt/wiki/Manual#trade-structure>`
"""
request = {
# 'instrument_id': market['id'],
'order_id': id,
# 'after': '1', # return the page after the specified page number
# 'before': '1', # return the page before the specified page number
# 'limit': limit, # optional, number of results per request, default = maximum = 100
}
return self.fetch_my_trades(symbol, since, limit, self.extend(request, params))
def fetch_position(self, symbol: str, params={}):
"""
fetch data on a single open contract trade position
:param str symbol: unified market symbol of the market the position is held in, default is None
:param dict [params]: extra parameters specific to the okcoin api endpoint
:returns dict: a `position structure <https://github.com/ccxt/ccxt/wiki/Manual#position-structure>`
"""
self.load_markets()
market = self.market(symbol)
method = None
request = {
'instrument_id': market['id'],
# 'order_id': id, # string
# 'after': '1', # pagination of data to return records earlier than the requested ledger_id
            # 'before': '1',  # pagination of data to return records newer than the requested ledger_id
# 'limit': limit, # optional, number of results per request, default = maximum = 100
}
type = market['type']
if (type == 'futures') or (type == 'swap'):
method = type + 'GetInstrumentIdPosition'
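            # resolves to an implicit API method name, e.g.
            # futuresGetInstrumentIdPosition or swapGetInstrumentIdPosition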
elif type == 'option':
underlying = self.safe_string(params, 'underlying')
if underlying is None:
raise ArgumentsRequired(self.id + ' fetchPosition() requires an underlying parameter for ' + type + ' market ' + symbol)
method = type + 'GetUnderlyingPosition'
else:
raise NotSupported(self.id + ' fetchPosition() does not support ' + type + ' market ' + symbol + ', supported market types are futures, swap or option')
response = getattr(self, method)(self.extend(request, params))
#
# futures
#
# crossed margin mode
#
# {
# "result": True,
# "holding": [
# {
# "long_qty": "2",
# "long_avail_qty": "2",
# "long_avg_cost": "8260",
# "long_settlement_price": "8260",
# "realised_pnl": "0.00020928",
# "short_qty": "2",
# "short_avail_qty": "2",
# "short_avg_cost": "8259.99",
# "short_settlement_price": "8259.99",
# "liquidation_price": "113.81",
# "instrument_id": "BTC-USD-191227",
# "leverage": "10",
# "created_at": "2019-09-25T07:58:42.129Z",
# "updated_at": "2019-10-08T14:02:51.029Z",
# "margin_mode": "crossed",
# "short_margin": "0.00242197",
# "short_pnl": "6.63E-6",
# "short_pnl_ratio": "0.002477997",
# "short_unrealised_pnl": "6.63E-6",
# "long_margin": "0.00242197",
# "long_pnl": "-6.65E-6",
# "long_pnl_ratio": "-0.002478",
# "long_unrealised_pnl": "-6.65E-6",
# "long_settled_pnl": "0",
# "short_settled_pnl": "0",
# "last": "8257.57"
# }
# ],
# "margin_mode": "crossed"
# }
#
# fixed margin mode
#
# {
# "result": True,
# "holding": [
# {
# "long_qty": "4",
# "long_avail_qty": "4",
# "long_margin": "0.00323844",
# "long_liqui_price": "7762.09",
# "long_pnl_ratio": "0.06052306",
# "long_avg_cost": "8234.43",
# "long_settlement_price": "8234.43",
# "realised_pnl": "-0.00000296",
# "short_qty": "2",
# "short_avail_qty": "2",
# "short_margin": "0.00241105",
# "short_liqui_price": "9166.74",
# "short_pnl_ratio": "0.03318052",
# "short_avg_cost": "8295.13",
# "short_settlement_price": "8295.13",
# "instrument_id": "BTC-USD-191227",
# "long_leverage": "15",
# "short_leverage": "10",
# "created_at": "2019-09-25T07:58:42.129Z",
# "updated_at": "2019-10-08T13:12:09.438Z",
# "margin_mode": "fixed",
# "short_margin_ratio": "0.10292507",
# "short_maint_margin_ratio": "0.005",
# "short_pnl": "7.853E-5",
# "short_unrealised_pnl": "7.853E-5",
# "long_margin_ratio": "0.07103743",
# "long_maint_margin_ratio": "0.005",
# "long_pnl": "1.9841E-4",
# "long_unrealised_pnl": "1.9841E-4",
# "long_settled_pnl": "0",
# "short_settled_pnl": "0",
# "last": "8266.99"
# }
# ],
# "margin_mode": "fixed"
# }
#
# swap
#
# crossed margin mode
#
# {
# "margin_mode": "crossed",
# "timestamp": "2019-09-27T03:49:02.018Z",
# "holding": [
# {
# "avail_position": "3",
# "avg_cost": "59.49",
# "instrument_id": "LTC-USD-SWAP",
# "last": "55.98",
# "leverage": "10.00",
# "liquidation_price": "4.37",
# "maint_margin_ratio": "0.0100",
# "margin": "0.0536",
# "position": "3",
# "realized_pnl": "0.0000",
# "unrealized_pnl": "0",
# "settled_pnl": "-0.0330",
# "settlement_price": "55.84",
# "side": "long",
# "timestamp": "2019-09-27T03:49:02.018Z"
# },
# ]
# }
#
# fixed margin mode
#
# {
# "margin_mode": "fixed",
# "timestamp": "2019-09-27T03:47:37.230Z",
# "holding": [
# {
# "avail_position": "20",
# "avg_cost": "8025.0",
# "instrument_id": "BTC-USD-SWAP",
# "last": "8113.1",
# "leverage": "15.00",
# "liquidation_price": "7002.6",
# "maint_margin_ratio": "0.0050",
# "margin": "0.0454",
# "position": "20",
# "realized_pnl": "-0.0001",
# "unrealized_pnl": "0",
# "settled_pnl": "0.0076",
# "settlement_price": "8279.2",
# "side": "long",
# "timestamp": "2019-09-27T03:47:37.230Z"
# }
# ]
# }
#
# option
#
# {
# "holding":[
# {
# "instrument_id":"BTC-USD-190927-12500-C",
# "position":"20",
# "avg_cost":"3.26",
# "avail_position":"20",
# "settlement_price":"0.017",
# "total_pnl":"50",
# "pnl_ratio":"0.3",
# "realized_pnl":"40",
# "unrealized_pnl":"10",
# "pos_margin":"100",
# "option_value":"70",
# "created_at":"2019-08-30T03:09:20.315Z",
# "updated_at":"2019-08-30T03:40:18.318Z"
# },
# {
# "instrument_id":"BTC-USD-190927-12500-P",
# "position":"20",
# "avg_cost":"3.26",
# "avail_position":"20",
# "settlement_price":"0.019",
# "total_pnl":"50",
# "pnl_ratio":"0.3",
# "realized_pnl":"40",
# "unrealized_pnl":"10",
# "pos_margin":"100",
# "option_value":"70",
# "created_at":"2019-08-30T03:09:20.315Z",
# "updated_at":"2019-08-30T03:40:18.318Z"
# }
# ]
# }
#
# todo unify parsePosition/parsePositions
return response
def fetch_positions(self, symbols: Optional[List[str]] = None, params={}):
"""
fetch all open positions
:param str[]|None symbols: not used by okcoin fetchPositions
:param dict [params]: extra parameters specific to the okcoin api endpoint
:returns dict[]: a list of `position structure <https://github.com/ccxt/ccxt/wiki/Manual#position-structure>`
"""
self.load_markets()
method = None
defaultType = self.safe_string_2(self.options, 'fetchPositions', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
if (type == 'futures') or (type == 'swap'):
method = type + 'GetPosition'
elif type == 'option':
underlying = self.safe_string(params, 'underlying')
if underlying is None:
raise ArgumentsRequired(self.id + ' fetchPositions() requires an underlying parameter for ' + type + ' markets')
method = type + 'GetUnderlyingPosition'
else:
raise NotSupported(self.id + ' fetchPositions() does not support ' + type + ' markets, supported market types are futures, swap or option')
params = self.omit(params, 'type')
response = getattr(self, method)(params)
#
# futures
#
# ...
#
#
# swap
#
# ...
#
# option
#
# {
# "holding":[
# {
# "instrument_id":"BTC-USD-190927-12500-C",
# "position":"20",
# "avg_cost":"3.26",
# "avail_position":"20",
# "settlement_price":"0.017",
# "total_pnl":"50",
# "pnl_ratio":"0.3",
# "realized_pnl":"40",
# "unrealized_pnl":"10",
# "pos_margin":"100",
# "option_value":"70",
# "created_at":"2019-08-30T03:09:20.315Z",
# "updated_at":"2019-08-30T03:40:18.318Z"
# },
# {
# "instrument_id":"BTC-USD-190927-12500-P",
# "position":"20",
# "avg_cost":"3.26",
# "avail_position":"20",
# "settlement_price":"0.019",
# "total_pnl":"50",
# "pnl_ratio":"0.3",
# "realized_pnl":"40",
# "unrealized_pnl":"10",
# "pos_margin":"100",
# "option_value":"70",
# "created_at":"2019-08-30T03:09:20.315Z",
# "updated_at":"2019-08-30T03:40:18.318Z"
# }
# ]
# }
#
# todo unify parsePosition/parsePositions
return response
def fetch_ledger(self, code: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetch the history of changes, actions done by the user or operations that altered balance of the user
:param str code: unified currency code, default is None
:param int [since]: timestamp in ms of the earliest ledger entry, default is None
        :param int [limit]: max number of ledger entries to return, default is None
:param dict [params]: extra parameters specific to the okcoin api endpoint
:returns dict: a `ledger structure <https://github.com/ccxt/ccxt/wiki/Manual#ledger-structure>`
"""
self.load_markets()
defaultType = self.safe_string_2(self.options, 'fetchLedger', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
suffix = '' if (type == 'account') else 'Accounts'
argument = ''
request = {
# 'from': 'id',
# 'to': 'id',
}
if limit is not None:
request['limit'] = limit
currency = None
if type == 'spot':
if code is None:
raise ArgumentsRequired(self.id + ' fetchLedger() requires a currency code argument for "' + type + '" markets')
argument = 'Currency'
currency = self.currency(code)
request['currency'] = currency['id']
elif type == 'futures':
if code is None:
raise ArgumentsRequired(self.id + " fetchLedger() requires an underlying symbol for '" + type + "' markets")
argument = 'Underlying'
            market = self.market(code)  # we intentionally put a market inside here for the futures ledgers
marketInfo = self.safe_value(market, 'info', {})
settlementCurrencyId = self.safe_string(marketInfo, 'settlement_currency')
settlementCurrencyCode = self.safe_currency_code(settlementCurrencyId)
currency = self.currency(settlementCurrencyCode)
underlyingId = self.safe_string(marketInfo, 'underlying')
request['underlying'] = underlyingId
elif type == 'swap':
if code is None:
raise ArgumentsRequired(self.id + " fetchLedger() requires a code argument(a market symbol) for '" + type + "' markets")
argument = 'InstrumentId'
market = self.market(code) # we intentionally put a market inside here for the swap ledgers
currency = self.currency(market['base'])
request['instrument_id'] = market['id']
#
# if type == 'margin':
# #
# # 3. Borrow
# # 4. Repayment
# # 5. Interest
# # 7. Buy
# # 8. Sell
# # 9. From capital account
# # 10. From C2C
# # 11. From Futures
# # 12. From Spot
# # 13. From ETT
# # 14. To capital account
# # 15. To C2C
# # 16. To Spot
# # 17. To Futures
# # 18. To ETT
# # 19. Mandatory Repayment
# # 20. From Piggybank
# # 21. To Piggybank
# # 22. From Perpetual
# # 23. To Perpetual
# # 24. Liquidation Fee
# # 54. Clawback
# # 59. Airdrop Return.
# #
# request['type'] = 'number' # All types will be returned if self filed is left blank
# }
#
elif type == 'account':
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
#
# #
# # 1. deposit
# # 2. withdrawal
# # 13. cancel withdrawal
# # 18. into futures account
# # 19. out of futures account
# # 20. into sub account
# # 21. out of sub account
# # 28. claim
# # 29. into ETT account
# # 30. out of ETT account
# # 31. into C2C account
# # 32. out of C2C account
# # 33. into margin account
# # 34. out of margin account
# # 37. into spot account
# # 38. out of spot account
# #
# request['type'] = 'number'
#
else:
raise NotSupported(self.id + " fetchLedger does not support the '" + type + "' type(the type must be one of 'account', 'spot', 'margin', 'futures', 'swap')")
method = type + 'Get' + suffix + argument + 'Ledger'
response = getattr(self, method)(self.extend(request, query))
#
# transfer funds transfer in/out
# trade funds moved result of a trade, spot accounts only
# rebate fee rebate fee schedule, spot accounts only
# match open long/open short/close long/close short(futures) or a change in the amount because of trades(swap)
# fee fee, futures only
# settlement settlement/clawback/settle long/settle short
# liquidation force close long/force close short/deliver close long/deliver close short
# funding funding fee, swap only
# margin a change in the amount after adjusting margin, swap only
#
# account
#
# [
# {
# "amount":0.00051843,
# "balance":0.00100941,
# "currency":"BTC",
# "fee":0,
# "ledger_id":8987285,
# "timestamp":"2018-10-12T11:01:14.000Z",
# "typename":"Get from activity"
# }
# ]
#
# spot
#
# [
# {
# "timestamp":"2019-03-18T07:08:25.000Z",
# "ledger_id":"3995334780",
# "created_at":"2019-03-18T07:08:25.000Z",
# "currency":"BTC",
# "amount":"0.0009985",
# "balance":"0.0029955",
# "type":"trade",
# "details":{
# "instrument_id":"BTC-USDT",
# "order_id":"2500650881647616",
# "product_id":"BTC-USDT"
# }
# }
# ]
#
# futures
#
# [
# {
# "ledger_id":"2508090544914461",
# "timestamp":"2019-03-19T14:40:24.000Z",
# "amount":"-0.00529521",
# "balance":"0",
# "currency":"EOS",
# "type":"fee",
# "details":{
# "order_id":"2506982456445952",
# "instrument_id":"EOS-USD-190628"
# }
# }
# ]
#
# swap
#
# [
# {
# "amount":"0.004742",
# "fee":"-0.000551",
# "type":"match",
# "instrument_id":"EOS-USD-SWAP",
# "ledger_id":"197429674941902848",
# "timestamp":"2019-03-25T05:56:31.286Z"
# },
# ]
#
responseLength = len(response)
if responseLength < 1:
return []
if type == 'swap':
ledgerEntries = self.parse_ledger(response)
return self.filter_by_symbol_since_limit(ledgerEntries, code, since, limit)
return self.parse_ledger(response, currency, since, limit)
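    # Hypothetical usage sketch (argument values are illustrative): the 'type'
    # param selects which ledger family is queried, mirroring the branches above:
    #
    #     exchange.fetch_ledger('BTC', None, 10, {'type': 'account'})  # funding account, code optional
    #     exchange.fetch_ledger('BTC', None, 10, {'type': 'spot'})     # spot account, code required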
def parse_ledger_entry_type(self, type):
types = {
'transfer': 'transfer', # funds transfer in/out
'trade': 'trade', # funds moved result of a trade, spot accounts only
'rebate': 'rebate', # fee rebate fee schedule, spot accounts only
'match': 'trade', # open long/open short/close long/close short(futures) or a change in the amount because of trades(swap)
'fee': 'fee', # fee, futures only
'settlement': 'trade', # settlement/clawback/settle long/settle short
'liquidation': 'trade', # force close long/force close short/deliver close long/deliver close short
'funding': 'fee', # funding fee, swap only
'margin': 'margin', # a change in the amount after adjusting margin, swap only
}
return self.safe_string(types, type, type)
def parse_ledger_entry(self, item, currency=None):
#
#
# account
#
# {
# "amount":0.00051843,
# "balance":0.00100941,
# "currency":"BTC",
# "fee":0,
# "ledger_id":8987285,
# "timestamp":"2018-10-12T11:01:14.000Z",
# "typename":"Get from activity"
# }
#
# spot
#
# {
# "timestamp":"2019-03-18T07:08:25.000Z",
# "ledger_id":"3995334780",
# "created_at":"2019-03-18T07:08:25.000Z",
# "currency":"BTC",
# "amount":"0.0009985",
# "balance":"0.0029955",
# "type":"trade",
# "details":{
# "instrument_id":"BTC-USDT",
# "order_id":"2500650881647616",
# "product_id":"BTC-USDT"
# }
# }
#
# futures
#
# {
# "ledger_id":"2508090544914461",
# "timestamp":"2019-03-19T14:40:24.000Z",
# "amount":"-0.00529521",
# "balance":"0",
# "currency":"EOS",
# "type":"fee",
# "details":{
# "order_id":"2506982456445952",
# "instrument_id":"EOS-USD-190628"
# }
# }
#
# swap
#
# {
# "amount":"0.004742",
# "fee":"-0.000551",
# "type":"match",
# "instrument_id":"EOS-USD-SWAP",
# "ledger_id":"197429674941902848",
# "timestamp":"2019-03-25T05:56:31.286Z"
# },
#
id = self.safe_string(item, 'ledger_id')
account = None
details = self.safe_value(item, 'details', {})
referenceId = self.safe_string(details, 'order_id')
referenceAccount = None
type = self.parse_ledger_entry_type(self.safe_string(item, 'type'))
code = self.safe_currency_code(self.safe_string(item, 'currency'), currency)
amount = self.safe_number(item, 'amount')
timestamp = self.parse8601(self.safe_string(item, 'timestamp'))
fee = {
'cost': self.safe_number(item, 'fee'),
'currency': code,
}
before = None
after = self.safe_number(item, 'balance')
status = 'ok'
marketId = self.safe_string(item, 'instrument_id')
symbol = self.safe_symbol(marketId)
return {
'info': item,
'id': id,
'account': account,
'referenceId': referenceId,
'referenceAccount': referenceAccount,
'type': type,
'currency': code,
'symbol': symbol,
'amount': amount,
'before': before, # balance before
'after': after, # balance after
'status': status,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
isArray = isinstance(params, list)
request = '/api/' + api + '/' + self.version + '/'
request += path if isArray else self.implode_params(path, params)
query = params if isArray else self.omit(params, self.extract_params(path))
url = self.implode_hostname(self.urls['api']['rest']) + request
type = self.get_path_authentication_type(path)
if (type == 'public') or (type == 'information'):
if query:
url += '?' + self.urlencode(query)
elif type == 'private':
self.check_required_credentials()
timestamp = self.iso8601(self.milliseconds())
headers = {
'OK-ACCESS-KEY': self.apiKey,
'OK-ACCESS-PASSPHRASE': self.password,
'OK-ACCESS-TIMESTAMP': timestamp,
# 'OK-FROM': '',
# 'OK-TO': '',
# 'OK-LIMIT': '',
}
auth = timestamp + method + request
if method == 'GET':
if query:
urlencodedQuery = '?' + self.urlencode(query)
url += urlencodedQuery
auth += urlencodedQuery
else:
if isArray or query:
body = self.json(query)
auth += body
headers['Content-Type'] = 'application/json'
signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha256, 'base64')
headers['OK-ACCESS-SIGN'] = signature
return {'url': url, 'method': method, 'body': body, 'headers': headers}
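    # A worked sketch of the signing scheme above (example values are
    # hypothetical): the prehash string is timestamp + method + request path,
    # plus the JSON body for non-GET requests, HMAC-SHA256'd with the API
    # secret and base64-encoded:
    #
    #     prehash = '2019-03-25T05:56:31.286Z' + 'GET' + '/api/spot/v3/accounts'
    #     sign = base64.b64encode(
    #         hmac.new(secret.encode(), prehash.encode(), hashlib.sha256).digest())
    #
    # which is what self.hmac(..., 'base64') produces for OK-ACCESS-SIGN.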
def get_path_authentication_type(self, path):
# https://github.com/ccxt/ccxt/issues/6651
        # a special case to handle the optionGetUnderlying endpoint
        # interfering with other endpoints whose paths contain the 'underlying' keyword
if path == 'underlying':
return 'public'
auth = self.safe_value(self.options, 'auth', {})
key = self.find_broadly_matched_key(auth, path)
return self.safe_string(auth, key, 'private')
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if not response:
return None # fallback to default error handler
feedback = self.id + ' ' + body
if code == 503:
# {"message":"name resolution failed"}
raise ExchangeNotAvailable(feedback)
#
# {"error_message":"Order does not exist","result":"true","error_code":"35029","order_id":"-1"}
#
message = self.safe_string(response, 'message')
errorCode = self.safe_string_2(response, 'code', 'error_code')
nonEmptyMessage = ((message is not None) and (message != ''))
nonZeroErrorCode = (errorCode is not None) and (errorCode != '0')
if nonEmptyMessage:
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
if nonZeroErrorCode:
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
if nonZeroErrorCode or nonEmptyMessage:
raise ExchangeError(feedback) # unknown message
return None
# ---- lionheart/django-pyodbc :: /tests/django20/transactions_regress/models.py ----
from django.db import models
class Mod(models.Model):
fld = models.IntegerField()
class SubMod(Mod):
cnt = models.IntegerField(unique=True)
class M2mA(models.Model):
others = models.ManyToManyField('M2mB')
class M2mB(models.Model):
fld = models.IntegerField()
# ---- Th4nat0s/Chall_Tools :: /f5discovery.py ----
#!/usr/bin/python
# v 0.1
# Copyleft Thanat0s
# http://Thanat0s.trollprod.org
#
# Licence GNU GPL
# A F5 cookie dissector/finder
import re
import sys
import cookielib
import urllib2
import struct
cookies = cookielib.LWPCookieJar()
handlers = [
urllib2.HTTPHandler(),
urllib2.HTTPSHandler(),
urllib2.HTTPCookieProcessor(cookies)
]
opener = urllib2.build_opener(*handlers)
def fetch(uri):
req = urllib2.Request(uri)
return opener.open(req)
def decode(ltmcook):
(host, port, end) = ltmcook.split('.')
(a, b, c, d) = [ord(i) for i in struct.pack("<I", int(host))]
p = [ord(i) for i in struct.pack("<I", int(port))]
return str(str(a)+'.'+str(b)+'.'+str(c)+'.'+str(d)), p[0]*256 + p[1]
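# Worked example (cookie value is hypothetical): for a cookie value of
# '1677787402.36895.0000',
#   host 1677787402 packs little-endian to bytes (10, 1, 1, 100) -> 10.1.1.100
#   port 36895 packs little-endian to bytes (0x1F, 0x90, 0, 0)   -> 0x1F90 = 8080
# so the cookie leaks the pool member 10.1.1.100:8080 behind the F5.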
if __name__ == '__main__':
if len(sys.argv) < 2:
    print 'Scan a website for an F5 LTM stickiness cookie'
    print 'Example:'
    print sys.argv[0] + ' http://thanat0s.trollprod.org'
sys.exit()
uri = sys.argv[1]
res = fetch(uri)
found = False
print '+ Search for Cookies..'
for cookie in cookies:
if re.match(r'^[\d]+\.[\d]+\.[\d]+$', cookie.value):
found = True
ip,port = decode(cookie.value)
print ('\t> %s Backend : %s:%s %s') % (cookie.name,ip,port,'<< Got a F5 one')
else:
print ('\t> %s') % (cookie.name)
if found:
    print 'An F5 device was detected'
# ---- mindspore-ai/models :: /research/cv/SPPNet/train.py ----
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
######################## train SPPNet example ########################
train SPPnet and get network model files(.ckpt) :
python train.py --train_path /YourDataPath --eval_path /YourValPath --device_id YourAscendId --train_model model
"""
import ast
import argparse
import os
import mindspore.nn as nn
from mindspore.communication.management import init, get_rank, get_group_size
from mindspore import dataset as de
from mindspore import context
from mindspore import Tensor
from mindspore.train import Model
from mindspore.context import ParallelMode
from mindspore.train.loss_scale_manager import DynamicLossScaleManager, FixedLossScaleManager
from mindspore.train.callback import LossMonitor, TimeMonitor
from mindspore.common import set_seed
from src.config import sppnet_mult_cfg, sppnet_single_cfg, zfnet_cfg
from src.dataset import create_dataset_imagenet
from src.generator_lr import warmup_cosine_annealing_lr
from src.sppnet import SppNet
from src.eval_callback import EvalCallBack, EvalCallBackMult
set_seed(44)
de.config.set_seed(44)
parser = argparse.ArgumentParser(description='MindSpore SPPNet')
parser.add_argument('--sink_size', type=int, default=-1, help='control the amount of data in each sink')
parser.add_argument('--train_model', type=str, default='sppnet_single', help='chose the training model',
choices=['sppnet_single', 'sppnet_mult', 'zfnet'])
parser.add_argument('--device_target', type=str, default="Ascend", help='chose the device for train',
choices=['Ascend', 'GPU'])
parser.add_argument('--train_path', type=str,
default="./imagenet_original/train",
help='path where the train dataset is saved')
parser.add_argument('--eval_path', type=str,
default="./imagenet_original/val",
help='path where the validate dataset is saved')
parser.add_argument('--is_distributed', type=int, default=0, help='distributed training')
parser.add_argument('--ckpt_path', type=str, default="./ckpt",
                    help='path where the trained ckpt files are saved')
parser.add_argument('--dataset_sink_mode', type=ast.literal_eval,
default=True, help='dataset_sink_mode is False or True')
parser.add_argument('--device_id', type=int, default=0, help='device id of Ascend. (Default: 0)')
parser.add_argument('--device_num', type=int, default=1)
args = parser.parse_args()
def apply_eval(eval_param):
"""construct eval function"""
eval_model = eval_param["model"]
eval_ds = eval_param["dataset"]
res = eval_model.eval(eval_ds)
return res
if __name__ == "__main__":
device_num = args.device_num
device_target = args.device_target
context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
context.set_context(save_graphs=False)
if args.is_distributed:
if args.device_target == "Ascend":
context.set_context(device_id=args.device_id)
init("hccl")
else:
assert args.device_target == "GPU"
init("nccl")
args.device_id = get_rank()
args.device_num = get_group_size()
context.reset_auto_parallel_context()
context.set_auto_parallel_context(device_num=args.device_num, parallel_mode=ParallelMode.DATA_PARALLEL)
else:
context.set_context(device_id=args.device_id)
if args.train_model == "zfnet":
cfg = zfnet_cfg
ds_train = create_dataset_imagenet(args.train_path, 'zfnet', cfg.batch_size)
network = SppNet(cfg.num_classes, phase='train', train_model=args.train_model)
prefix = "checkpoint_zfnet"
elif args.train_model == "sppnet_single":
cfg = sppnet_single_cfg
ds_train = create_dataset_imagenet(args.train_path, cfg.batch_size)
network = SppNet(cfg.num_classes, phase='train', train_model=args.train_model)
prefix = "checkpoint_sppnet"
else:
cfg = sppnet_mult_cfg
ds_train = create_dataset_imagenet(args.train_path, 'sppnet_mult', cfg.batch_size)
network = SppNet(cfg.num_classes, phase='train', train_model=args.train_model)
prefix = "checkpoint_sppnet"
if ds_train.get_dataset_size() == 0:
raise ValueError("Please check dataset size > 0 and batch_size <= dataset size")
loss_scale_manager = None
metrics = {'top_1_accuracy', 'top_5_accuracy'}
step_per_epoch = ds_train.get_dataset_size() if args.sink_size == -1 else args.sink_size
# loss function
loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
# learning rate generator
lr = Tensor(warmup_cosine_annealing_lr(lr=cfg.lr_init, steps_per_epoch=step_per_epoch,
warmup_epochs=cfg.warmup_epochs, max_epoch=cfg.epoch_size,
iteration_max=cfg.iteration_max, lr_min=cfg.lr_min))
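    # A common form of this schedule (assumption: the usual definition):
    # linear warmup to lr_init over warmup_epochs, then for global step t
    #     lr(t) = lr_min + 0.5 * (lr_init - lr_min) * (1 + cos(pi * t / iteration_max))
    # so the rate decays smoothly from lr_init down to lr_min.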
decay_params = []
no_decay_params = []
for x in network.trainable_params():
parameter_name = x.name
if parameter_name.endswith('.bias'):
no_decay_params.append(x)
elif parameter_name.endswith('.gamma'):
no_decay_params.append(x)
elif parameter_name.endswith('.beta'):
no_decay_params.append(x)
else:
decay_params.append(x)
params = [{'params': no_decay_params, 'weight_decay': 0.0, "lr": lr}, {'params': decay_params, "lr": lr}]
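    # Biases and BatchNorm gamma/beta are excluded from weight decay above;
    # decaying them usually hurts accuracy without reducing overfitting.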
# Optimizer
opt = nn.Momentum(params=params,
learning_rate=lr,
momentum=cfg.momentum,
weight_decay=cfg.weight_decay,
loss_scale=cfg.loss_scale)
if cfg.is_dynamic_loss_scale == 1:
loss_scale_manager = DynamicLossScaleManager(init_loss_scale=65536, scale_factor=2, scale_window=2000)
else:
loss_scale_manager = FixedLossScaleManager(cfg.loss_scale, drop_overflow_update=False)
model = Model(network, loss_fn=loss, optimizer=opt, metrics=metrics, amp_level="O2", keep_batchnorm_fp32=False,
loss_scale_manager=loss_scale_manager)
if device_num > 1:
ckpt_save_dir = os.path.join(args.ckpt_path + "_" + str(get_rank()))
else:
ckpt_save_dir = os.path.join(args.ckpt_path)
# callback
eval_dataset = create_dataset_imagenet(args.eval_path, cfg.batch_size, training=False)
evalParamDict = {"model": model, "dataset": eval_dataset}
if args.train_model == "sppnet_mult":
eval_cb = EvalCallBackMult(apply_eval, evalParamDict, eval_start_epoch=1, ckpt_directory=ckpt_save_dir)
else:
eval_cb = EvalCallBack(apply_eval, evalParamDict, eval_start_epoch=1, train_model_name=args.train_model,
ckpt_directory=ckpt_save_dir)
loss_cb = LossMonitor(per_print_times=step_per_epoch)
time_cb = TimeMonitor(data_size=step_per_epoch)
print("============== Starting Training ==============")
if args.train_model == "sppnet_mult":
ds_train_180 = create_dataset_imagenet(args.train_path, 'sppnet_mult', cfg.batch_size,
training=True, image_size=180)
for per_epoch in range(cfg.epoch_size):
print("================ Epoch:{} ==================".format(per_epoch+1))
if per_epoch % 2 == 0:
cb = [time_cb, loss_cb, eval_cb]
model.train(1, ds_train, callbacks=cb, dataset_sink_mode=False, sink_size=args.sink_size)
else:
cb = [time_cb, loss_cb]
model.train(1, ds_train_180, callbacks=cb, dataset_sink_mode=False, sink_size=args.sink_size)
else:
cb = [time_cb, loss_cb, eval_cb]
model.train(cfg.epoch_size, ds_train, callbacks=cb, dataset_sink_mode=True, sink_size=args.sink_size)
# ---- chromium/chromium :: /tools/usb_gadget/composite_gadget.py ----
# Copyright 2014 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A composite USB gadget is built from multiple USB features.
"""
import gadget
import usb_constants
import usb_descriptors
class CompositeGadget(gadget.Gadget):
"""Basic functionality for a composite USB device.
Composes multiple USB features into a single device.
"""
def __init__(self, device_desc, features):
"""Create a USB gadget device.
Args:
device_desc: USB device descriptor.
features: USB device features.
"""
# dicts mapping interface numbers to features for FS and HS configurations
self._fs_interface_feature_map = {}
self._hs_interface_feature_map = {}
fs_config_desc = usb_descriptors.ConfigurationDescriptor(
bmAttributes=0x80,
MaxPower=50)
hs_config_desc = usb_descriptors.ConfigurationDescriptor(
bmAttributes=0x80,
MaxPower=50)
for feature in features:
for fs_interface in feature.GetFullSpeedInterfaces():
fs_config_desc.AddInterface(fs_interface)
self._fs_interface_feature_map[fs_interface.bInterfaceNumber] = feature
for hs_interface in feature.GetHighSpeedInterfaces():
hs_config_desc.AddInterface(hs_interface)
self._hs_interface_feature_map[hs_interface.bInterfaceNumber] = feature
super(CompositeGadget, self).__init__(
device_desc, fs_config_desc, hs_config_desc)
self._features = features
def Connected(self, chip, speed):
super(CompositeGadget, self).Connected(chip, speed)
for feature in self._features:
feature.Connected(self)
def Disconnected(self):
super(CompositeGadget, self).Disconnected()
for feature in self._features:
feature.Disconnected()
def _GetInterfaceFeatureMap(self):
if self.GetSpeed() == usb_constants.Speed.FULL:
return self._fs_interface_feature_map
elif self.GetSpeed() == usb_constants.Speed.HIGH:
return self._hs_interface_feature_map
else:
raise RuntimeError('Device is not connected.')
def ReceivePacket(self, endpoint, data):
interface = self.GetInterfaceForEndpoint(endpoint)
feature = self._GetInterfaceFeatureMap()[interface]
feature.ReceivePacket(endpoint, data)
def _GetFeatureForIndex(self, recipient, index):
interface = None
if recipient == usb_constants.Recipient.INTERFACE:
interface = index
elif recipient == usb_constants.Recipient.ENDPOINT:
interface = self.GetInterfaceForEndpoint(index)
if interface is not None:
return self._GetInterfaceFeatureMap().get(interface)
return None
def StandardControlRead(self, recipient, request, value, index, length):
response = super(CompositeGadget, self).StandardControlRead(
recipient, request, value, index, length)
if response is not None:
return response
feature = self._GetFeatureForIndex(recipient, index)
if feature:
return feature.StandardControlRead(
recipient, request, value, index, length)
def StandardControlWrite(self, recipient, request, value, index, data):
response = super(CompositeGadget, self).StandardControlWrite(
recipient, request, value, index, data)
if response is not None:
return response
feature = self._GetFeatureForIndex(recipient, index)
if feature:
return feature.StandardControlWrite(
recipient, request, value, index, data)
def ClassControlRead(self, recipient, request, value, index, length):
response = super(CompositeGadget, self).ClassControlRead(
recipient, request, value, index, length)
if response is not None:
return response
feature = self._GetFeatureForIndex(recipient, index)
if feature:
return feature.ClassControlRead(recipient, request, value, index, length)
def ClassControlWrite(self, recipient, request, value, index, data):
response = super(CompositeGadget, self).ClassControlWrite(
recipient, request, value, index, data)
if response is not None:
return response
feature = self._GetFeatureForIndex(recipient, index)
if feature:
return feature.ClassControlWrite(recipient, request, value, index, data)
def VendorControlRead(self, recipient, request, value, index, length):
response = super(CompositeGadget, self).VendorControlRead(
recipient, request, value, index, length)
if response is not None:
return response
feature = self._GetFeatureForIndex(recipient, index)
if feature:
return feature.VendorControlRead(recipient, request, value, index, length)
def VendorControlWrite(self, recipient, request, value, index, data):
response = super(CompositeGadget, self).VendorControlWrite(
recipient, request, value, index, data)
if response is not None:
return response
feature = self._GetFeatureForIndex(recipient, index)
if feature:
return feature.VendorControlWrite(recipient, request, value, index, data)
class CompositeFeature(object):
def __init__(self, fs_interface_descs, hs_interface_descs):
self._gadget = None
self._fs_interface_descs = fs_interface_descs
self._hs_interface_descs = hs_interface_descs
def GetFullSpeedInterfaces(self):
return self._fs_interface_descs
def GetHighSpeedInterfaces(self):
return self._hs_interface_descs
def Connected(self, my_gadget):
self._gadget = my_gadget
def Disconnected(self):
self._gadget = None
def IsConnected(self):
return self._gadget is not None
def SendPacket(self, endpoint, data):
if self._gadget is None:
raise RuntimeError('Device is not connected.')
self._gadget.SendPacket(endpoint, data)
def HaltEndpoint(self, endpoint):
if self._gadget is None:
raise RuntimeError('Device is not connected.')
self._gadget.HaltEndpoint(endpoint)
def GetDescriptor(self, recipient, typ, index, lang, length):
_ = recipient, typ, index, lang, length
return None
def StandardControlRead(self, recipient, request, value, index, length):
"""Handle standard USB control transfers.
Args:
recipient: Request recipient (interface or endpoint)
request: bRequest field of the setup packet.
value: wValue field of the setup packet.
index: wIndex field of the setup packet.
length: Maximum amount of data the host expects the device to return.
Returns:
A buffer to return to the USB host with len <= length on success or
None to stall the pipe.
"""
_ = recipient, request, value, index, length
return None
def ClassControlRead(self, recipient, request, value, index, length):
"""Handle class-specific control transfers.
Args:
recipient: Request recipient (interface or endpoint)
request: bRequest field of the setup packet.
value: wValue field of the setup packet.
index: wIndex field of the setup packet.
length: Maximum amount of data the host expects the device to return.
Returns:
A buffer to return to the USB host with len <= length on success or
None to stall the pipe.
"""
_ = recipient, request, value, index, length
return None
def VendorControlRead(self, recipient, request, value, index, length):
"""Handle vendor-specific control transfers.
Args:
recipient: Request recipient (interface or endpoint)
request: bRequest field of the setup packet.
value: wValue field of the setup packet.
index: wIndex field of the setup packet.
length: Maximum amount of data the host expects the device to return.
Returns:
A buffer to return to the USB host with len <= length on success or
None to stall the pipe.
"""
_ = recipient, request, value, index, length
return None
def StandardControlWrite(self, recipient, request, value, index, data):
"""Handle standard USB control transfers.
Args:
recipient: Request recipient (interface or endpoint)
request: bRequest field of the setup packet.
value: wValue field of the setup packet.
index: wIndex field of the setup packet.
data: Data stage of the request.
Returns:
True on success, None to stall the pipe.
"""
_ = recipient, request, value, index, data
return None
def ClassControlWrite(self, recipient, request, value, index, data):
"""Handle class-specific control transfers.
Args:
recipient: Request recipient (interface or endpoint)
request: bRequest field of the setup packet.
value: wValue field of the setup packet.
index: wIndex field of the setup packet.
data: Data stage of the request.
Returns:
True on success, None to stall the pipe.
"""
_ = recipient, request, value, index, data
return None
def VendorControlWrite(self, recipient, request, value, index, data):
"""Handle vendor-specific control transfers.
Args:
recipient: Request recipient (interface or endpoint)
request: bRequest field of the setup packet.
value: wValue field of the setup packet.
index: wIndex field of the setup packet.
data: Data stage of the request.
Returns:
True on success, None to stall the pipe.
"""
_ = recipient, request, value, index, data
return None
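# A minimal usage sketch (hypothetical feature, not part of this tool): a
# concrete feature passes its interface descriptors to CompositeFeature's
# constructor and implements ReceivePacket; CompositeGadget then routes
# packets for that feature's interfaces back to it.
#
#   class EchoFeature(CompositeFeature):
#     """Echoes each OUT packet back on the matching IN endpoint."""
#     def ReceivePacket(self, endpoint, data):
#       self.SendPacket(endpoint | 0x80, data)  # 0x80 = USB IN direction bit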
# ---- spcl/dace :: /tests/library/einsum_blas_test.py ----
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
"""
These tests are mostly from daceml, testing that the einsums for the BERT encoder are correctly specialized to BLAS
nodes.
"""
import pytest
import numpy as np
import dace
from dace.library import change_default
from dace.libraries import blas
MKL_AND_CUBLAS = [pytest.param("cuBLAS", marks=pytest.mark.gpu), pytest.param("MKL", marks=pytest.mark.mkl)]
def test_change_default():
old_default = blas.default_implementation
blas.default_implementation = "hello"
with change_default(blas, "MKL"):
assert blas.default_implementation == "MKL"
assert blas.default_implementation == "hello"
blas.default_implementation = old_default
def assert_used_environment(sdfg, impl):
implementation_to_env = {
"MKL": blas.environments.IntelMKL.full_class_path(),
"cuBLAS": blas.environments.cuBLAS.full_class_path()
}
all_tasklets = (n for n, _ in sdfg.all_nodes_recursive() if isinstance(n, dace.nodes.Tasklet))
environments = {env for n in all_tasklets for env in n.environments}
assert implementation_to_env[impl] in environments
@pytest.mark.mkl
def test_gemm_fails_storage_mkl():
with change_default(blas, "MKL"):
with pytest.raises(ValueError) as info:
@dace.program
def test_failing_mkl(A: dace.float32[10, 5], B: dace.float32[5, 3], C: dace.float32[10, 3]):
C[:] = A @ B
sdfg = test_failing_mkl.to_sdfg()
sdfg.apply_gpu_transformations()
A = np.random.rand(10, 5).astype(np.float32)
B = np.random.rand(5, 3).astype(np.float32)
C = np.zeros((10, 3)).astype(np.float32)
sdfg(A=A, B=B, C=C)
assert "cannot access" in str(info.value)
@pytest.mark.parametrize("impl", MKL_AND_CUBLAS)
def test_simple(impl):
A_desc = dace.float32[10, 5]
B_desc = dace.float32[5, 3]
C_desc = dace.float32[10, 3]
with change_default(blas, impl):
@dace.program
def test_simple_einsum(A: A_desc, B: B_desc, C: C_desc):
C[:] = np.einsum("ik,kj->ij", A, B)
A = np.random.rand(*A_desc.shape).astype(np.float32)
B = np.random.rand(*B_desc.shape).astype(np.float32)
C = np.zeros(C_desc.shape).astype(np.float32)
sdfg: dace.SDFG = test_simple_einsum.to_sdfg()
sdfg.name = impl + "_einsum_simple"
if impl == "cuBLAS":
sdfg.apply_gpu_transformations()
sdfg.expand_library_nodes()
assert_used_environment(sdfg, impl)
sdfg(A=A, B=B, C=C)
assert np.allclose(A @ B, C)
@pytest.mark.parametrize("impl", MKL_AND_CUBLAS)
def test_3x2(impl):
A_desc = dace.float32[8, 10, 12]
B_desc = dace.float32[12, 5]
C_desc = dace.float32[8, 10, 5]
with change_default(blas, impl):
@dace.program
def test_3x2(A: A_desc, B: B_desc, C: C_desc):
C[:] = np.einsum("aik,kj->aij", A, B)
A = np.random.rand(*A_desc.shape).astype(np.float32)
B = np.random.rand(*B_desc.shape).astype(np.float32)
C = np.zeros(C_desc.shape).astype(np.float32)
sdfg: dace.SDFG = test_3x2.to_sdfg()
sdfg.name = impl + "_einsum_3x2"
if impl == "cuBLAS":
sdfg.apply_gpu_transformations()
sdfg.expand_library_nodes()
assert_used_environment(sdfg, impl)
sdfg(A=A, B=B, C=C)
assert np.allclose(A @ B, C)
@pytest.mark.parametrize("impl", MKL_AND_CUBLAS)
def test_4x4(impl):
A_desc = dace.float32[8, 12, 5, 3]
B_desc = dace.float32[8, 12, 3, 6]
C_desc = dace.float32[8, 12, 5, 6]
with change_default(blas, impl):
@dace.program
def test_4x4(A: A_desc, B: B_desc, C: C_desc):
C[:] = np.einsum("abik,abkj->abij", A, B)
A = np.random.rand(*A_desc.shape).astype(np.float32)
B = np.random.rand(*B_desc.shape).astype(np.float32)
C = np.zeros(C_desc.shape).astype(np.float32)
sdfg: dace.SDFG = test_4x4.to_sdfg()
sdfg.name = impl + "_einsum_4x4"
if impl == "cuBLAS":
sdfg.apply_gpu_transformations()
sdfg.expand_library_nodes()
assert_used_environment(sdfg, impl)
sdfg(A=A, B=B, C=C)
assert np.allclose(A @ B, C)
# ---- DingGuodong/LinuxBashShellScriptForOps :: /functions/performance/timeMeasure/timeMeasure.py ----
#!/usr/bin/python
# encoding: utf-8
# -*- coding: utf8 -*-
"""
Created by PyCharm.
File: LinuxBashShellScriptForOps:timeMeasure.py
User: Guodong
Create Date: 2017/1/16
Create Time: 9:50
Refer: http://www.jb51.net/article/63244.htm
Others:
1. 'time' command in UNIX system or Linux system
2. cProfile Module in Python
3. line_profiler Module in Python
4. memory_profiler Module in Python
"永远不要使用 except: 语句来捕获所有异常, 也不要捕获 Exception 或者 StandardError ,
除非你打算重新触发该异常, 或者你已经在当前线程的最外层(记得还是要打印一条错误消息).
在异常这方面, Python非常宽容, except: 真的会捕获包括Python语法错误在内的任何错误.
使用 except: 很容易隐藏真正的bug." -- [《Google Python 风格指南》]
(https://google-styleguide.readthedocs.io/zh_CN/latest/google-python-styleguide/python_language_rules.html)
Tips: exception不要太宽泛,且try-except应该放在发生exception最近的地方,不然debug起来可能比较费劲。
"""
def fn_timer(func):
from functools import wraps
@wraps(func)
def function_timer(*args, **kwargs):
import time
time_begin = time.time()
result = func(*args, **kwargs)
time_end = time.time()
print "Total time running {function_name}: {time_spent} seconds".format(function_name=func.func_name,
time_spent=(time_end - time_begin))
return result
return function_timer
def fn_timer_py2py3(func):
"""
    Decorator that measures a function's wall-clock execution time.
https://stackoverflow.com/questions/8885663/how-to-format-a-floating-number-to-fixed-width-in-python
:param func:
:return:
"""
from functools import wraps
@wraps(func)
def func_timer(*args, **kwargs):
import time
time_begin = time.time()
result = None
try:
result = func(*args, **kwargs)
        except Exception as e:
            print(e)  # e.message is Python 2 only; printing e works on both
time_end = time.time()
print("Total time running {func_name}: {time_spent:16.8f} seconds".format(func_name=func.__name__,
time_spent=time_end - time_begin))
return result
return func_timer
@fn_timer_py2py3
@fn_timer
def _random_sort(n):
import random
return sorted([random.random() for _ in range(n)])
if __name__ == "__main__":
_random_sort(2000000)
# ---- probml/pyprobml :: /deprecated/scripts/transposed_conv_torch.py ----
# Based on http://d2l.ai/chapter_computer-vision/transposed-conv.html
import superimport
import torch
from torch import nn
import numpy as np
def trans_conv(X, K):
h, w = K.shape
Y = torch.zeros((X.shape[0] + h - 1, X.shape[1] + w - 1))
for i in range(X.shape[0]):
for j in range(X.shape[1]):
Y[i:i + h, j:j + w] += X[i, j] * K
return Y
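# In other words, transposed convolution scatter-adds: each input element
# X[i, j] stamps a copy of the kernel, scaled by X[i, j], into the
# (H + h - 1) x (W + w - 1) output; this is the adjoint of the usual
# convolution's gather.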
# Example from D2L fig 13.10.1
X = torch.tensor([[0., 1], [2, 3]])
K = torch.tensor([[0., 1], [2, 3]])
Y = trans_conv(X, K)
print(Y)
X, K = X.reshape(1, 1, 2, 2), K.reshape(1, 1, 2, 2)
tconv = nn.ConvTranspose2d(1, 1, kernel_size=2, bias=False)
tconv.weight.data = K
Y2 = tconv(X)
#print(Y2)
assert torch.allclose(Y, Y2)
'''
X, K = X.reshape(1, 1, 2, 2), K.reshape(1, 1, 2, 2)
tconv = nn.ConvTranspose2d(1, 1, kernel_size=2, padding = 1, bias=False)
tconv.weight.data = K
Y2 = tconv(X)
print('Y2', Y2)
'''
# Transposed Matrix multiplication
K = torch.tensor([[1,2], [3, 4]])
def kernel2matrix(K):
k, W = torch.zeros(5), torch.zeros((4, 9))
k[:2], k[3:5] = K[0, :], K[1, :]
W[0, :5], W[1, 1:6], W[2, 3:8], W[3, 4:] = k, k, k, k
return W
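# W is the 2x2 kernel's 4x9 "unrolled" matrix: forward conv of a 3x3 input x
# is W @ x.reshape(-1), so transposed conv of a 2x2 y is W.T @ y.reshape(-1),
# which is exactly what the assertion below checks.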
W = kernel2matrix(K)
X = torch.tensor([[0.0, 1], [2, 3]])
Y = trans_conv(X, K)
Y2 = torch.mv(W.T, X.reshape(-1)).reshape(3, 3)
assert torch.allclose(Y, Y2)
# Example from Geron fig 14.27
X = torch.ones((2,3))
K = torch.ones(3,3)
X, K = X.reshape(1, 1, 2, 3), K.reshape(1, 1, 3, 3)
tconv = nn.ConvTranspose2d(1, 1, kernel_size=3, stride=2, bias=False)
tconv.weight.data = K
Y2 = tconv(X)
print(Y2.shape)
# ---- lavie/runlike :: /runlike/runlike.py ----
#!/usr/bin/env python
import click
import sys
try:
from .inspector import Inspector
except (ValueError, ImportError):
from inspector import Inspector
@click.command(
help="Shows command line necessary to run copy of existing Docker container.")
@click.argument("container", required=False)
@click.option(
"--no-name",
is_flag=True,
help="Do not include container name in output")
@click.option("-p", "--pretty", is_flag=True)
@click.option("-s", "--stdin", is_flag=True)
def cli(container, no_name, pretty, stdin):
# TODO: -i, -t, -d as added options that override the inspection
if container or stdin:
ins = Inspector(container, no_name, pretty)
if container:
ins.inspect()
elif stdin:
raw_json = click.get_text_stream('stdin').read()
ins.set_container_facts(raw_json)
print(ins.format_cli())
else:
raise click.UsageError("usage error")
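# Example usage (container name is hypothetical):
#   $ runlike -p my_nginx
#   $ docker inspect my_nginx | runlike -s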
def main():
cli()
if __name__ == "__main__":
main()
# ---- xdxie/WordArt :: /mmocr/models/textdet/detectors/ocr_mask_rcnn.py ----
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.detectors import MaskRCNN
from mmocr.core import seg2boundary
from mmocr.models.builder import DETECTORS
from .text_detector_mixin import TextDetectorMixin
@DETECTORS.register_module()
class OCRMaskRCNN(TextDetectorMixin, MaskRCNN):
"""Mask RCNN tailored for OCR."""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
text_repr_type='quad',
show_score=False,
init_cfg=None):
TextDetectorMixin.__init__(self, show_score)
MaskRCNN.__init__(
self,
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
assert text_repr_type in ['quad', 'poly']
self.text_repr_type = text_repr_type
def get_boundary(self, results):
"""Convert segmentation into text boundaries.
Args:
results (tuple): The result tuple. The first element is
segmentation while the second is its scores.
Returns:
dict: A result dict containing 'boundary_result'.
"""
assert isinstance(results, tuple)
instance_num = len(results[1][0])
boundaries = []
for i in range(instance_num):
seg = results[1][0][i]
score = results[0][0][i][-1]
boundary = seg2boundary(seg, self.text_repr_type, score)
if boundary is not None:
boundaries.append(boundary)
results = dict(boundary_result=boundaries)
return results
def simple_test(self, img, img_metas, proposals=None, rescale=False):
results = super().simple_test(img, img_metas, proposals, rescale)
boundaries = self.get_boundary(results[0])
boundaries = boundaries if isinstance(boundaries,
list) else [boundaries]
return boundaries
# ---- microsoft/tf2-gnn :: /tf2_gnn/test/layers/test_RGAT.py ----
"""Tests for the RGAT message passing layer."""
import tensorflow as tf
import pytest
from tf2_gnn.layers.message_passing import MessagePassingInput, RGAT
# fmt: off
shape_test_data = [
(
tf.TensorShape(dims=(None, 3)),
tuple(tf.TensorShape(dims=(None, 2)) for _ in range(3)),
16,
8,
),
(
tf.TensorShape(dims=(None, 1)),
tuple(tf.TensorShape(dims=(None, 2)) for _ in range(1)),
2,
1,
),
(
tf.TensorShape(dims=(None, 7)),
tuple(tf.TensorShape(dims=(None, 2)) for _ in range(14)),
64,
4,
),
]
# fmt: on
@pytest.mark.parametrize(
"node_embedding_shape,adjacency_list_shapes,hidden_dim,num_heads", shape_test_data
)
def test_rgat_layer_has_expected_number_of_trainable_variables(
node_embedding_shape, adjacency_list_shapes, hidden_dim, num_heads
):
# Given:
rgat_params = RGAT.get_default_hyperparameters()
rgat_params["hidden_dim"] = hidden_dim
rgat_params["num_heads"] = num_heads
rgat_layer = RGAT(rgat_params)
# When:
rgat_layer.build(
MessagePassingInput(
node_embeddings=node_embedding_shape, adjacency_lists=adjacency_list_shapes
)
)
trainable_vars = rgat_layer.trainable_variables
all_vars = rgat_layer.variables
# Then:
    # There should be 1 dense layer and 1 attention weight per edge type
assert len(trainable_vars) == 2 * len(adjacency_list_shapes)
assert len(all_vars) == len(trainable_vars) # There should be no un-trainable variables.
for trainable_var in trainable_vars:
if "kernel" in trainable_var.name:
assert tuple(trainable_var.shape.as_list()) == (node_embedding_shape[-1], hidden_dim)
elif "attention" in trainable_var.name:
assert tuple(trainable_var.shape.as_list()) == (num_heads, 2 * hidden_dim / num_heads)
else:
assert False # There should be no other trainable variable types.
# ---- dimagi/commcare-hq :: /corehq/pillows/domain.py ----
from corehq.apps.change_feed.consumer.feed import KafkaChangeFeed, KafkaCheckpointEventHandler
from corehq.apps.change_feed import topics
from corehq.apps.domain.models import Domain
from corehq.apps.es.domains import domain_adapter
from corehq.util.doc_processor.couch import CouchDocumentProvider
from pillowtop.checkpoints.manager import get_checkpoint_for_elasticsearch_pillow
from pillowtop.pillow.interface import ConstructedPillow
from pillowtop.processors import ElasticProcessor
from pillowtop.reindexer.reindexer import ResumableBulkElasticPillowReindexer, ReindexerFactory
def get_domain_kafka_to_elasticsearch_pillow(pillow_id='KafkaDomainPillow', num_processes=1,
process_num=0, **kwargs):
"""Domain pillow to replicate documents to ES
Processors:
- :py:class:`pillowtop.processors.elastic.ElasticProcessor`
"""
assert pillow_id == 'KafkaDomainPillow', 'Pillow ID is not allowed to change'
checkpoint = get_checkpoint_for_elasticsearch_pillow(pillow_id, domain_adapter.index_name, [topics.DOMAIN])
domain_processor = ElasticProcessor(domain_adapter)
change_feed = KafkaChangeFeed(
topics=[topics.DOMAIN], client_id='domains-to-es', num_processes=num_processes, process_num=process_num
)
return ConstructedPillow(
name=pillow_id,
checkpoint=checkpoint,
change_feed=change_feed,
processor=domain_processor,
change_processed_event_handler=KafkaCheckpointEventHandler(
checkpoint=checkpoint, checkpoint_frequency=100, change_feed=change_feed
),
)
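# Flow sketch: the pillow tails the Kafka DOMAIN topic, feeds each change
# through ElasticProcessor into the domain index, and persists its offset via
# the checkpoint every 100 processed changes (checkpoint_frequency above).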
class DomainReindexerFactory(ReindexerFactory):
slug = 'domain'
arg_contributors = [
ReindexerFactory.resumable_reindexer_args,
ReindexerFactory.elastic_reindexer_args
]
def build(self):
iteration_key = "DomainToElasticsearchPillow_{}_reindexer".format(domain_adapter.index_name)
doc_provider = CouchDocumentProvider(iteration_key, [Domain])
options = {
'chunk_size': 5
}
options.update(self.options)
return ResumableBulkElasticPillowReindexer(
doc_provider,
domain_adapter,
pillow=get_domain_kafka_to_elasticsearch_pillow(),
**options
)
# ---- pulumi/pulumi-azure-native :: /sdk/python/pulumi_azure_native/costmanagement/v20190301preview/cloud_connector.py ----
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = ['CloudConnectorArgs', 'CloudConnector']
@pulumi.input_type
class CloudConnectorArgs:
def __init__(__self__, *,
billing_model: Optional[pulumi.Input[Union[str, 'ConnectorBillingModel']]] = None,
connector_name: Optional[pulumi.Input[str]] = None,
credentials_key: Optional[pulumi.Input[str]] = None,
credentials_secret: Optional[pulumi.Input[str]] = None,
default_management_group_id: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
report_id: Optional[pulumi.Input[str]] = None,
subscription_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a CloudConnector resource.
:param pulumi.Input[Union[str, 'ConnectorBillingModel']] billing_model: Connector billing model
:param pulumi.Input[str] connector_name: Connector Name.
:param pulumi.Input[str] credentials_key: Credentials authentication key (eg AWS ARN)
:param pulumi.Input[str] credentials_secret: Credentials secret (eg AWS ExternalId)
:param pulumi.Input[str] default_management_group_id: Default ManagementGroupId
:param pulumi.Input[str] display_name: Connector DisplayName
:param pulumi.Input[str] kind: Connector kind (eg aws)
:param pulumi.Input[str] report_id: Identifying source report. (For AWS this is a CUR report name, defined with Daily and with Resources)
:param pulumi.Input[str] subscription_id: Billing SubscriptionId
"""
if billing_model is not None:
pulumi.set(__self__, "billing_model", billing_model)
if connector_name is not None:
pulumi.set(__self__, "connector_name", connector_name)
if credentials_key is not None:
pulumi.set(__self__, "credentials_key", credentials_key)
if credentials_secret is not None:
pulumi.set(__self__, "credentials_secret", credentials_secret)
if default_management_group_id is not None:
pulumi.set(__self__, "default_management_group_id", default_management_group_id)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if report_id is not None:
pulumi.set(__self__, "report_id", report_id)
if subscription_id is not None:
pulumi.set(__self__, "subscription_id", subscription_id)
@property
@pulumi.getter(name="billingModel")
def billing_model(self) -> Optional[pulumi.Input[Union[str, 'ConnectorBillingModel']]]:
"""
Connector billing model
"""
return pulumi.get(self, "billing_model")
@billing_model.setter
def billing_model(self, value: Optional[pulumi.Input[Union[str, 'ConnectorBillingModel']]]):
pulumi.set(self, "billing_model", value)
@property
@pulumi.getter(name="connectorName")
def connector_name(self) -> Optional[pulumi.Input[str]]:
"""
Connector Name.
"""
return pulumi.get(self, "connector_name")
@connector_name.setter
def connector_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "connector_name", value)
@property
@pulumi.getter(name="credentialsKey")
def credentials_key(self) -> Optional[pulumi.Input[str]]:
"""
Credentials authentication key (eg AWS ARN)
"""
return pulumi.get(self, "credentials_key")
@credentials_key.setter
def credentials_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "credentials_key", value)
@property
@pulumi.getter(name="credentialsSecret")
def credentials_secret(self) -> Optional[pulumi.Input[str]]:
"""
Credentials secret (eg AWS ExternalId)
"""
return pulumi.get(self, "credentials_secret")
@credentials_secret.setter
def credentials_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "credentials_secret", value)
@property
@pulumi.getter(name="defaultManagementGroupId")
def default_management_group_id(self) -> Optional[pulumi.Input[str]]:
"""
Default ManagementGroupId
"""
return pulumi.get(self, "default_management_group_id")
@default_management_group_id.setter
def default_management_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "default_management_group_id", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
Connector DisplayName
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Connector kind (eg aws)
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter(name="reportId")
def report_id(self) -> Optional[pulumi.Input[str]]:
"""
Identifying source report. (For AWS this is a CUR report name, defined with Daily and with Resources)
"""
return pulumi.get(self, "report_id")
@report_id.setter
def report_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "report_id", value)
@property
@pulumi.getter(name="subscriptionId")
def subscription_id(self) -> Optional[pulumi.Input[str]]:
"""
Billing SubscriptionId
"""
return pulumi.get(self, "subscription_id")
@subscription_id.setter
def subscription_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "subscription_id", value)
class CloudConnector(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
billing_model: Optional[pulumi.Input[Union[str, 'ConnectorBillingModel']]] = None,
connector_name: Optional[pulumi.Input[str]] = None,
credentials_key: Optional[pulumi.Input[str]] = None,
credentials_secret: Optional[pulumi.Input[str]] = None,
default_management_group_id: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
report_id: Optional[pulumi.Input[str]] = None,
subscription_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
The Connector model definition
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Union[str, 'ConnectorBillingModel']] billing_model: Connector billing model
:param pulumi.Input[str] connector_name: Connector Name.
:param pulumi.Input[str] credentials_key: Credentials authentication key (eg AWS ARN)
:param pulumi.Input[str] credentials_secret: Credentials secret (eg AWS ExternalId)
:param pulumi.Input[str] default_management_group_id: Default ManagementGroupId
:param pulumi.Input[str] display_name: Connector DisplayName
:param pulumi.Input[str] kind: Connector kind (eg aws)
:param pulumi.Input[str] report_id: Identifying source report. (For AWS this is a CUR report name, defined with Daily and with Resources)
:param pulumi.Input[str] subscription_id: Billing SubscriptionId
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[CloudConnectorArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The Connector model definition
:param str resource_name: The name of the resource.
:param CloudConnectorArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(CloudConnectorArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
billing_model: Optional[pulumi.Input[Union[str, 'ConnectorBillingModel']]] = None,
connector_name: Optional[pulumi.Input[str]] = None,
credentials_key: Optional[pulumi.Input[str]] = None,
credentials_secret: Optional[pulumi.Input[str]] = None,
default_management_group_id: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
report_id: Optional[pulumi.Input[str]] = None,
subscription_id: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = CloudConnectorArgs.__new__(CloudConnectorArgs)
__props__.__dict__["billing_model"] = billing_model
__props__.__dict__["connector_name"] = connector_name
__props__.__dict__["credentials_key"] = credentials_key
__props__.__dict__["credentials_secret"] = credentials_secret
__props__.__dict__["default_management_group_id"] = default_management_group_id
__props__.__dict__["display_name"] = display_name
__props__.__dict__["kind"] = kind
__props__.__dict__["report_id"] = report_id
__props__.__dict__["subscription_id"] = subscription_id
__props__.__dict__["collection_info"] = None
__props__.__dict__["created_on"] = None
__props__.__dict__["days_trial_remaining"] = None
__props__.__dict__["external_billing_account_id"] = None
__props__.__dict__["modified_on"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provider_billing_account_display_name"] = None
__props__.__dict__["provider_billing_account_id"] = None
__props__.__dict__["status"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:costmanagement:CloudConnector"), pulumi.Alias(type_="azure-native:costmanagement/v20180801preview:CloudConnector")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(CloudConnector, __self__).__init__(
'azure-native:costmanagement/v20190301preview:CloudConnector',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'CloudConnector':
"""
Get an existing CloudConnector resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = CloudConnectorArgs.__new__(CloudConnectorArgs)
__props__.__dict__["billing_model"] = None
__props__.__dict__["collection_info"] = None
__props__.__dict__["created_on"] = None
__props__.__dict__["credentials_key"] = None
__props__.__dict__["days_trial_remaining"] = None
__props__.__dict__["default_management_group_id"] = None
__props__.__dict__["display_name"] = None
__props__.__dict__["external_billing_account_id"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["modified_on"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provider_billing_account_display_name"] = None
__props__.__dict__["provider_billing_account_id"] = None
__props__.__dict__["report_id"] = None
__props__.__dict__["status"] = None
__props__.__dict__["subscription_id"] = None
__props__.__dict__["type"] = None
return CloudConnector(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="billingModel")
def billing_model(self) -> pulumi.Output[Optional[str]]:
"""
Connector billing model
"""
return pulumi.get(self, "billing_model")
@property
@pulumi.getter(name="collectionInfo")
def collection_info(self) -> pulumi.Output['outputs.ConnectorCollectionInfoResponse']:
"""
Collection information
"""
return pulumi.get(self, "collection_info")
@property
@pulumi.getter(name="createdOn")
def created_on(self) -> pulumi.Output[str]:
"""
Connector definition creation datetime
"""
return pulumi.get(self, "created_on")
@property
@pulumi.getter(name="credentialsKey")
def credentials_key(self) -> pulumi.Output[Optional[str]]:
"""
Credentials authentication key (eg AWS ARN)
"""
return pulumi.get(self, "credentials_key")
@property
@pulumi.getter(name="daysTrialRemaining")
def days_trial_remaining(self) -> pulumi.Output[int]:
"""
Number of days remaining of trial
"""
return pulumi.get(self, "days_trial_remaining")
@property
@pulumi.getter(name="defaultManagementGroupId")
def default_management_group_id(self) -> pulumi.Output[Optional[str]]:
"""
Default ManagementGroupId
"""
return pulumi.get(self, "default_management_group_id")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[Optional[str]]:
"""
Connector DisplayName
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="externalBillingAccountId")
def external_billing_account_id(self) -> pulumi.Output[str]:
"""
Associated ExternalBillingAccountId
"""
return pulumi.get(self, "external_billing_account_id")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Connector kind (eg aws)
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter(name="modifiedOn")
def modified_on(self) -> pulumi.Output[str]:
"""
Connector last modified datetime
"""
return pulumi.get(self, "modified_on")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Connector name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="providerBillingAccountDisplayName")
def provider_billing_account_display_name(self) -> pulumi.Output[str]:
"""
The display name of the providerBillingAccountId as defined on the external provider
"""
return pulumi.get(self, "provider_billing_account_display_name")
@property
@pulumi.getter(name="providerBillingAccountId")
def provider_billing_account_id(self) -> pulumi.Output[str]:
"""
Connector providerBillingAccountId, determined from credentials (eg AWS Consolidated account number)
"""
return pulumi.get(self, "provider_billing_account_id")
@property
@pulumi.getter(name="reportId")
def report_id(self) -> pulumi.Output[Optional[str]]:
"""
Identifying source report. (For AWS this is a CUR report name, defined with Daily and with Resources)
"""
return pulumi.get(self, "report_id")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
"""
Connector status
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="subscriptionId")
def subscription_id(self) -> pulumi.Output[Optional[str]]:
"""
Billing SubscriptionId
"""
return pulumi.get(self, "subscription_id")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Connector type
"""
return pulumi.get(self, "type")
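# A minimal, hedged usage sketch (not part of the generated SDK): the resource
# name, ARN, external id and report name below are illustrative placeholders,
# not values defined anywhere in this module.
#
#   connector = CloudConnector(
#       "awsConnector",
#       kind="aws",
#       credentials_key="arn:aws:iam::123456789012:role/CostManagement",  # hypothetical ARN
#       credentials_secret="example-external-id",                         # hypothetical ExternalId
#       report_id="daily-cur-report",                                     # hypothetical CUR report name
#       display_name="AWS cost connector")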
---- /statsmodels/regression/tests/results/lme_r_results.py (statsmodels/statsmodels, BSD-3-Clause) ----
import numpy as np
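# Hedged note (an inference from the variable names, not documentation shipped
# with the file): these look like reference results for linear mixed models,
# with `coef`/`vcov`/`cov_re`/`scale`/`loglike` recorded per fit, `ml`/`reml`
# marking the estimation method, and a trailing index selecting the test case.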
coef_ml_drf_0 = np.array([-0.9887517])
vcov_ml_drf_0 = np.array([0.001317148]).reshape(1, 1, order='F')
cov_re_ml_drf_0 = np.array([0.2522485]).reshape(1, 1, order='F')
scale_ml_drf_0 = np.array([0.2718486])
loglike_ml_drf_0 = np.array([-240.1254])
ranef_mean_ml_drf_0 = np.array([0.04977167])
ranef_condvar_ml_drf_0 = np.array([0.130841])
coef_reml_drf_0 = np.array([-0.9887533])
vcov_reml_drf_0 = np.array([0.001323559]).reshape(1, 1, order='F')
cov_re_reml_drf_0 = np.array([0.2524129]).reshape(1, 1, order='F')
scale_reml_drf_0 = np.array([0.2733467])
loglike_reml_drf_0 = np.array([-242.5214])
ranef_mean_reml_drf_0 = np.array([0.04964696])
ranef_condvar_reml_drf_0 = np.array([0.1312315])
coef_ml_drf_1 = np.array([-0.9115929])
vcov_ml_drf_1 = np.array([0.01340632]).reshape(1, 1, order='F')
cov_re_ml_drf_1 = np.array([0]).reshape(1, 1, order='F')
scale_ml_drf_1 = np.array([4.050921])
loglike_ml_drf_1 = np.array([-538.0763])
ranef_mean_ml_drf_1 = np.array([0])
ranef_condvar_ml_drf_1 = np.array([0])
coef_reml_drf_1 = np.array([-0.9115929])
vcov_reml_drf_1 = np.array([0.01345931]).reshape(1, 1, order='F')
cov_re_reml_drf_1 = np.array([2.839777e-14]).reshape(1, 1, order='F')
scale_reml_drf_1 = np.array([4.066932])
loglike_reml_drf_1 = np.array([-539.3124])
ranef_mean_reml_drf_1 = np.array([2.424384e-14])
ranef_condvar_reml_drf_1 = np.array([2.839777e-14])
coef_ml_drf_2 = np.array([-1.012044, 0.9789052])
vcov_ml_drf_2 = np.array([
0.00117849, 1.458744e-05,
1.458744e-05, 0.001054926]).reshape(2, 2, order='F')
cov_re_ml_drf_2 = np.array([0.1596058]).reshape(1, 1, order='F')
scale_ml_drf_2 = np.array([0.2129146])
loglike_ml_drf_2 = np.array([-200.319])
ranef_mean_ml_drf_2 = np.array([0.3197174])
ranef_condvar_ml_drf_2 = np.array([0.09122291])
coef_reml_drf_2 = np.array([-1.012137, 0.9790792])
vcov_reml_drf_2 = np.array([
0.001190455, 1.482666e-05,
1.482666e-05, 0.001066002]).reshape(2, 2, order='F')
cov_re_reml_drf_2 = np.array([0.1595015]).reshape(1, 1, order='F')
scale_reml_drf_2 = np.array([0.2154276])
loglike_reml_drf_2 = np.array([-205.275])
ranef_mean_reml_drf_2 = np.array([0.3172978])
ranef_condvar_reml_drf_2 = np.array([0.09164674])
coef_ml_drf_3 = np.array([-1.028053, 0.8602685])
vcov_ml_drf_3 = np.array([
0.01398831, 0.001592619, 0.001592619, 0.01602274]).reshape(2, 2, order='F')
cov_re_ml_drf_3 = np.array([0.8130996]).reshape(1, 1, order='F')
scale_ml_drf_3 = np.array([3.100447])
loglike_ml_drf_3 = np.array([-477.1707])
ranef_mean_ml_drf_3 = np.array([-0.2641747])
ranef_condvar_ml_drf_3 = np.array([0.6441656])
coef_reml_drf_3 = np.array([-1.027583, 0.8605714])
vcov_reml_drf_3 = np.array([
0.01411922, 0.001607343, 0.001607343, 0.01617574]).reshape(2, 2, order='F')
cov_re_reml_drf_3 = np.array([0.8117898]).reshape(1, 1, order='F')
scale_reml_drf_3 = np.array([3.13369])
loglike_reml_drf_3 = np.array([-479.5354])
ranef_mean_reml_drf_3 = np.array([-0.2614875])
ranef_condvar_reml_drf_3 = np.array([0.6447625])
coef_ml_drf_4 = np.array([-1.005151, -0.003657404, 1.054786])
vcov_ml_drf_4 = np.array([
0.001190639, -5.327162e-05, 5.992985e-05, -5.327162e-05,
0.001460303, -2.662532e-05, 5.992985e-05, -2.662532e-05,
0.00148609]).reshape(3, 3, order='F')
cov_re_ml_drf_4 = np.array([0.1703249]).reshape(1, 1, order='F')
scale_ml_drf_4 = np.array([0.251763])
loglike_ml_drf_4 = np.array([-231.6389])
ranef_mean_ml_drf_4 = np.array([-0.2063164])
ranef_condvar_ml_drf_4 = np.array([0.0459578])
coef_reml_drf_4 = np.array([-1.005067, -0.003496032, 1.054666])
vcov_reml_drf_4 = np.array([
0.001206925, -5.4182e-05, 6.073475e-05, -5.4182e-05,
0.001479871, -2.723494e-05, 6.073475e-05, -2.723494e-05,
0.001506198]).reshape(3, 3, order='F')
cov_re_reml_drf_4 = np.array([0.1705659]).reshape(1, 1, order='F')
scale_reml_drf_4 = np.array([0.2556394])
loglike_reml_drf_4 = np.array([-238.761])
ranef_mean_reml_drf_4 = np.array([-0.2055303])
ranef_condvar_reml_drf_4 = np.array([0.04649027])
coef_ml_drf_5 = np.array([-0.8949725, 0.08141558, 1.052529])
vcov_ml_drf_5 = np.array([
0.01677563, 0.0008077524, -0.001255011, 0.0008077524,
0.01719346, 0.0009266736, -0.001255011, 0.0009266736,
0.01608435]).reshape(3, 3, order='F')
cov_re_ml_drf_5 = np.array([0.3444677]).reshape(1, 1, order='F')
scale_ml_drf_5 = np.array([4.103944])
loglike_ml_drf_5 = np.array([-579.4568])
ranef_mean_ml_drf_5 = np.array([0.08254713])
ranef_condvar_ml_drf_5 = np.array([0.3177935])
coef_reml_drf_5 = np.array([-0.8946164, 0.08134261, 1.052486])
vcov_reml_drf_5 = np.array([
0.0169698, 0.0008162714, -0.001268635, 0.0008162714,
0.01739219, 0.0009345538, -0.001268635, 0.0009345538,
0.01627074]).reshape(3, 3, order='F')
cov_re_reml_drf_5 = np.array([0.3420993]).reshape(1, 1, order='F')
scale_reml_drf_5 = np.array([4.155737])
loglike_reml_drf_5 = np.array([-582.8377])
ranef_mean_reml_drf_5 = np.array([0.08111449])
ranef_condvar_reml_drf_5 = np.array([0.3160797])
coef_ml_drf_6 = np.array([-0.8885425])
vcov_ml_drf_6 = np.array([0.002443738]).reshape(1, 1, order='F')
cov_re_ml_drf_6 = np.array([
0.2595201, 0.04591071,
0.04591071, 2.204612]).reshape(2, 2, order='F')
scale_ml_drf_6 = np.array([0.243133])
loglike_ml_drf_6 = np.array([-382.551])
ranef_mean_ml_drf_6 = np.array([-0.0597406, 0.6037288])
ranef_condvar_ml_drf_6 = np.array([
0.2420741, 0.2222169,
0.2222169, 0.4228908]).reshape(2, 2, order='F')
coef_reml_drf_6 = np.array([-0.8883881])
vcov_reml_drf_6 = np.array([0.002461581]).reshape(1, 1, order='F')
cov_re_reml_drf_6 = np.array([
0.2595767, 0.04590012,
0.04590012, 2.204822]).reshape(2, 2, order='F')
scale_reml_drf_6 = np.array([0.2453537])
loglike_reml_drf_6 = np.array([-384.6373])
ranef_mean_reml_drf_6 = np.array([-0.05969892, 0.6031793])
ranef_condvar_reml_drf_6 = np.array([
0.2421365, 0.2221108,
0.2221108, 0.4244443]).reshape(2, 2, order='F')
coef_ml_irf_6 = np.array([-0.8874992])
vcov_ml_irf_6 = np.array([0.002445505]).reshape(1, 1, order='F')
cov_re_ml_irf_6 = np.array([
0.2587624, 0,
0, 2.188653]).reshape(2, 2, order='F')
scale_ml_irf_6 = np.array([0.2432694])
loglike_ml_irf_6 = np.array([-382.6581])
coef_reml_irf_6 = np.array([-0.8873394])
vcov_reml_irf_6 = np.array([0.002463375]).reshape(1, 1, order='F')
cov_re_reml_irf_6 = np.array([
0.2588157, 0,
0, 2.188876]).reshape(2, 2, order='F')
scale_reml_irf_6 = np.array([0.2454935])
loglike_reml_irf_6 = np.array([-384.7441])
coef_ml_drf_7 = np.array([-0.9645281])
vcov_ml_drf_7 = np.array([0.01994]).reshape(1, 1, order='F')
cov_re_ml_drf_7 = np.array([
0.2051329, 0.0734377,
0.0734377, 3.285381]).reshape(2, 2, order='F')
scale_ml_drf_7 = np.array([3.423247])
loglike_ml_drf_7 = np.array([-587.7101])
ranef_mean_ml_drf_7 = np.array([0.07007965, -0.2920284])
ranef_condvar_ml_drf_7 = np.array([
0.1823183, 0.02247519,
0.02247519, 1.125011]).reshape(2, 2, order='F')
coef_reml_drf_7 = np.array([-0.9647862])
vcov_reml_drf_7 = np.array([0.02002546]).reshape(1, 1, order='F')
cov_re_reml_drf_7 = np.array([
0.2056226, 0.0726139,
0.0726139, 3.2876]).reshape(2, 2, order='F')
scale_reml_drf_7 = np.array([3.440244])
loglike_reml_drf_7 = np.array([-588.7476])
ranef_mean_reml_drf_7 = np.array([0.07000628, -0.2916737])
ranef_condvar_reml_drf_7 = np.array([
0.1828266, 0.02229138,
0.02229138, 1.128947]).reshape(2, 2, order='F')
coef_ml_irf_7 = np.array([-0.9665524])
vcov_ml_irf_7 = np.array([0.01998144]).reshape(1, 1, order='F')
cov_re_ml_irf_7 = np.array([
0.2021561, 0, 0, 3.270735]).reshape(2, 2, order='F')
scale_ml_irf_7 = np.array([3.423186])
loglike_ml_irf_7 = np.array([-587.7456])
coef_reml_irf_7 = np.array([-0.9667854])
vcov_reml_irf_7 = np.array([0.02006657]).reshape(1, 1, order='F')
cov_re_reml_irf_7 = np.array([
0.2026938, 0, 0, 3.273129]).reshape(2, 2, order='F')
scale_reml_irf_7 = np.array([3.440197])
loglike_reml_irf_7 = np.array([-588.782])
coef_ml_drf_8 = np.array([-1.083882, 0.8955623])
vcov_ml_drf_8 = np.array([
0.002491643, 0.0001693531,
0.0001693531, 0.00253309]).reshape(2, 2, order='F')
cov_re_ml_drf_8 = np.array([
0.1506188, 0.126091, 0.126091, 2.485462]).reshape(2, 2, order='F')
scale_ml_drf_8 = np.array([0.2586519])
loglike_ml_drf_8 = np.array([-363.6234])
ranef_mean_ml_drf_8 = np.array([0.2852326, -0.5047804])
ranef_condvar_ml_drf_8 = np.array([
0.05400391, 0.002330104,
0.002330104, 0.122761]).reshape(2, 2, order='F')
coef_reml_drf_8 = np.array([-1.083938, 0.8956893])
vcov_reml_drf_8 = np.array([
0.002528969, 0.0001712206,
0.0001712206, 0.002573335]).reshape(2, 2, order='F')
cov_re_reml_drf_8 = np.array([
0.1505098, 0.1256311,
0.1256311, 2.484219]).reshape(2, 2, order='F')
scale_reml_drf_8 = np.array([0.2635901])
loglike_reml_drf_8 = np.array([-367.7667])
ranef_mean_reml_drf_8 = np.array([0.2829798, -0.5042857])
ranef_condvar_reml_drf_8 = np.array([
0.05463632, 0.002393538,
0.002393538, 0.1249828]).reshape(2, 2, order='F')
coef_ml_irf_8 = np.array([-1.079481, 0.898216])
vcov_ml_irf_8 = np.array([
0.002511536, 0.0001812511,
0.0001812511, 0.002573405]).reshape(2, 2, order='F')
cov_re_ml_irf_8 = np.array([
0.1498568, 0, 0, 2.403849]).reshape(2, 2, order='F')
scale_ml_irf_8 = np.array([0.2605245])
loglike_ml_irf_8 = np.array([-364.4824])
coef_reml_irf_8 = np.array([-1.07952, 0.8983678])
vcov_reml_irf_8 = np.array([
0.002549354, 0.0001833386,
0.0001833386, 0.002614672]).reshape(2, 2, order='F')
cov_re_reml_irf_8 = np.array([
0.1497193, 0, 0, 2.403076]).reshape(2, 2, order='F')
scale_reml_irf_8 = np.array([0.2655558])
loglike_reml_irf_8 = np.array([-368.6141])
coef_ml_drf_9 = np.array([-1.272698, 0.8617471])
vcov_ml_drf_9 = np.array([
0.02208544, 0.001527479, 0.001527479, 0.02597528]).reshape(2, 2, order='F')
cov_re_ml_drf_9 = np.array([
0.510175, 0.08826114, 0.08826114, 3.342888]).reshape(2, 2, order='F')
scale_ml_drf_9 = np.array([3.722112])
loglike_ml_drf_9 = np.array([-589.8274])
ranef_mean_ml_drf_9 = np.array([0.03253644, 0.224043])
ranef_condvar_ml_drf_9 = np.array([
0.3994872, 0.02478884, 0.02478884, 1.195077]).reshape(2, 2, order='F')
coef_reml_drf_9 = np.array([-1.272483, 0.861814])
vcov_reml_drf_9 = np.array([
0.02228589, 0.001535598, 0.001535598, 0.0262125]).reshape(2, 2, order='F')
cov_re_reml_drf_9 = np.array([
0.5123204, 0.08897376, 0.08897376, 3.341722]).reshape(2, 2, order='F')
scale_reml_drf_9 = np.array([3.764058])
loglike_reml_drf_9 = np.array([-591.7188])
ranef_mean_reml_drf_9 = np.array([0.03239688, 0.2230525])
ranef_condvar_reml_drf_9 = np.array([
0.401762, 0.02521271, 0.02521271, 1.203536]).reshape(2, 2, order='F')
coef_ml_irf_9 = np.array([-1.277018, 0.86395])
vcov_ml_irf_9 = np.array([
0.02205706, 0.001509887, 0.001509887, 0.02599941]).reshape(2, 2, order='F')
cov_re_ml_irf_9 = np.array([
0.5086816, 0, 0, 3.312757]).reshape(2, 2, order='F')
scale_ml_irf_9 = np.array([3.72105])
loglike_ml_irf_9 = np.array([-589.8628])
coef_reml_irf_9 = np.array([-1.276822, 0.8640243])
vcov_reml_irf_9 = np.array([
0.02225705, 0.001517774, 0.001517774, 0.02623682]).reshape(2, 2, order='F')
cov_re_reml_irf_9 = np.array([
0.5107725, 0, 0, 3.31152]).reshape(2, 2, order='F')
scale_reml_irf_9 = np.array([3.762967])
loglike_reml_irf_9 = np.array([-591.7543])
coef_ml_drf_10 = np.array([-0.9419566, -0.02359824, 1.085796])
vcov_ml_drf_10 = np.array([
0.001963536, -0.0003221793, 0.0001950186, -0.0003221793,
0.002534251, 0.0004107718, 0.0001950186, 0.0004107718,
0.002580736]).reshape(3, 3, order='F')
cov_re_ml_drf_10 = np.array([
0.2040541, 0.09038325, 0.09038325, 2.218903]).reshape(2, 2, order='F')
scale_ml_drf_10 = np.array([0.2558286])
loglike_ml_drf_10 = np.array([-379.6591])
ranef_mean_ml_drf_10 = np.array([0.03876325, -0.725853])
ranef_condvar_ml_drf_10 = np.array([
0.1988816, 0.1872403, 0.1872403, 0.4052274]).reshape(2, 2, order='F')
coef_reml_drf_10 = np.array([-0.9426367, -0.02336203, 1.085733])
vcov_reml_drf_10 = np.array([
0.002011348, -0.0003300612, 0.0002002948, -0.0003300612,
0.002589149, 0.000418987, 0.0002002948, 0.000418987,
0.002637433]).reshape(3, 3, order='F')
cov_re_reml_drf_10 = np.array([
0.2034827, 0.09063836, 0.09063836, 2.219191]).reshape(2, 2, order='F')
scale_reml_drf_10 = np.array([0.2630213])
loglike_reml_drf_10 = np.array([-386.0008])
ranef_mean_reml_drf_10 = np.array([0.03838686, -0.7240812])
ranef_condvar_reml_drf_10 = np.array([
0.1983981, 0.1865469, 0.1865469, 0.4100937]).reshape(2, 2, order='F')
coef_ml_irf_10 = np.array([-0.9441033, -0.01755913, 1.088568])
vcov_ml_irf_10 = np.array([
0.001960114, -0.0003215658, 0.0001944005, -0.0003215658,
0.00253441, 0.0004061179, 0.0001944005, 0.0004061179,
0.002589158]).reshape(3, 3, order='F')
cov_re_ml_irf_10 = np.array([
0.2032228, 0, 0, 2.192893]).reshape(2, 2, order='F')
scale_ml_irf_10 = np.array([0.2553399])
loglike_ml_irf_10 = np.array([-380.162])
coef_reml_irf_10 = np.array([-0.9448257, -0.01722993, 1.088557])
vcov_reml_irf_10 = np.array([
0.00200783, -0.0003294349, 0.0001996613, -0.0003294349,
0.00258937, 0.0004141667, 0.0001996613, 0.0004141667,
0.002646242]).reshape(3, 3, order='F')
cov_re_reml_irf_10 = np.array([
0.2026653, 0, 0, 2.193124]).reshape(2, 2, order='F')
scale_reml_irf_10 = np.array([0.2625147])
loglike_reml_irf_10 = np.array([-386.5024])
coef_ml_drf_11 = np.array([-1.36971, 0.1596278, 0.8588724])
vcov_ml_drf_11 = np.array([
0.0232326, 0.00172214, 0.002275343, 0.00172214,
0.02318941, 0.0004755663, 0.002275343, 0.0004755663,
0.02123474]).reshape(3, 3, order='F')
cov_re_ml_drf_11 = np.array([
0.3719096, 0.332198, 0.332198, 1.120588]).reshape(2, 2, order='F')
scale_ml_drf_11 = np.array([4.849781])
loglike_ml_drf_11 = np.array([-601.6432])
ranef_mean_ml_drf_11 = np.array([-0.4256917, -0.3907759])
ranef_condvar_ml_drf_11 = np.array([
0.2987928, 0.1992074, 0.1992074, 0.7477486]).reshape(2, 2, order='F')
coef_reml_drf_11 = np.array([-1.370236, 0.1597671, 0.8585994])
vcov_reml_drf_11 = np.array([
0.02351795, 0.001749756, 0.002301599, 0.001749756,
0.02346869, 0.0004785668, 0.002301599, 0.0004785668,
0.02149093]).reshape(3, 3, order='F')
cov_re_reml_drf_11 = np.array([
0.3680346, 0.3324419, 0.3324419, 1.118623]).reshape(2, 2, order='F')
scale_reml_drf_11 = np.array([4.922222])
loglike_reml_drf_11 = np.array([-604.5746])
ranef_mean_reml_drf_11 = np.array([-0.4168539, -0.3879533])
ranef_condvar_reml_drf_11 = np.array([
0.2965372, 0.2010191, 0.2010191, 0.7503986]).reshape(2, 2, order='F')
coef_ml_irf_11 = np.array([-1.370117, 0.1414964, 0.8466083])
vcov_ml_irf_11 = np.array([
0.02319951, 0.001705996, 0.002265252, 0.001705996,
0.02345623, 0.000514879, 0.002265252, 0.000514879,
0.02153162]).reshape(3, 3, order='F')
cov_re_ml_irf_11 = np.array([
0.4004789, 0, 0, 1.108087]).reshape(2, 2, order='F')
scale_ml_irf_11 = np.array([4.78776])
loglike_ml_irf_11 = np.array([-602.308])
coef_reml_irf_11 = np.array([-1.370663, 0.1417561, 0.8464232])
vcov_reml_irf_11 = np.array([
0.02348548, 0.001734072, 0.002291519, 0.001734072,
0.02373715, 0.0005177618, 0.002291519, 0.0005177618,
0.02178966]).reshape(3, 3, order='F')
cov_re_reml_irf_11 = np.array([
0.3966454, 0, 0, 1.106551]).reshape(2, 2, order='F')
scale_reml_irf_11 = np.array([4.860342])
loglike_reml_irf_11 = np.array([-605.2274])
---- /visgraph/dbcore.py (vivisect/vivisect, Apache-2.0) ----
'''
Visgraph supports backing the graph objects with a postgres db.
'''
import psycopg2
import collections
import visgraph.graphcore as vg_graphcore
init_db = '''
DROP TABLE IF EXISTS vg_edges;
CREATE TABLE vg_edges (
eid BIGSERIAL,
n1 BIGINT,
n2 BIGINT,
created TIMESTAMP DEFAULT NOW(),
PRIMARY KEY (eid)
);
CREATE INDEX vg_edges_idx_n1 ON vg_edges (n1);
CREATE INDEX vg_edges_idx_n2 ON vg_edges (n2);
DROP TABLE IF EXISTS vg_edge_props;
CREATE TABLE vg_edge_props (
eid BIGINT,
pname VARCHAR(256) NOT NULL,
intval BIGINT,
strval VARCHAR(1024),
created TIMESTAMP DEFAULT NOW(),
PRIMARY KEY (eid, pname)
);
CREATE INDEX vg_edge_eid_idx ON vg_edge_props (eid);
CREATE INDEX vg_edge_pname_intval ON vg_edge_props (pname, intval);
CREATE INDEX vg_edge_pname_strval ON vg_edge_props (pname, strval);
DROP TABLE IF EXISTS vg_nodes;
CREATE TABLE vg_nodes (
nid BIGSERIAL,
created TIMESTAMP DEFAULT NOW(),
PRIMARY KEY(nid)
);
DROP TABLE IF EXISTS vg_node_props;
CREATE TABLE vg_node_props (
nid BIGINT NOT NULL,
pname VARCHAR(255) NOT NULL,
intval BIGINT,
strval VARCHAR(1024),
created TIMESTAMP DEFAULT NOW(),
PRIMARY KEY (nid,pname)
);
CREATE INDEX vg_node_nid_idx ON vg_node_props (nid);
CREATE INDEX vg_node_pname_intval ON vg_node_props (pname, intval);
CREATE INDEX vg_node_pname_strval ON vg_node_props (pname, strval);
'''
# Example database credentials...
default_dbinfo = {
'user':'visgraph',
'password':'ohhai!',
'database':'visgraph',
# Add host if you want...
}
def initGraphDb(dbinfo):
db = psycopg2.connect(**dbinfo)
c = db.cursor()
c.execute(init_db)
c.close()
db.commit()
db.close()
# Rollback transactions on exception
def rollsafe(f):
def doroll(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception:
try:
args[0].db.rollback()
except Exception:
pass
raise
doroll.__doc__ = f.__doc__
doroll.__name__ = f.__name__
return doroll
class DbGraphStore:
'''
A DbGraphStore object may be used for all the standard management
of node and edge information but may not be used for path queries.
FIXME possibly make it able to do path queries but *really* slow?
Use the buildSubGraph() API to pull path-searchable graphs out of
the DBGraphStore.
'''
def __init__(self, dbinfo=None):
if dbinfo is False:
return
if dbinfo is None:
dbinfo = default_dbinfo
self.dbinfo = dbinfo
self.db = psycopg2.connect(**dbinfo)
self.autocommit = True
@rollsafe
def _doSelect(self, query, *args):
'''
For now, a fetchall based select wrapper.
'''
c = self.db.cursor()
c.execute(query, args)
res = c.fetchall()
c.close()
if self.autocommit:
self.db.commit()
return res
@rollsafe
def _doInsert(self, query, *args):
'''
Standard insert wrapper.
'''
c = self.db.cursor()
c.execute(query, args)
c.close()
if self.autocommit:
self.db.commit()
@rollsafe
def _doUpdate(self, query, *args):
'''
Do an update with 'returning' syntax to know
if an update was made.
'''
res = []
c = self.db.cursor()
c.execute(query, args)
res = c.fetchall()
c.close()
if self.autocommit:
self.db.commit()
return res
@rollsafe
def _doInsertRetId(self, query, *args):
'''
Insert with a returning value.
'''
c = self.db.cursor()
c.execute(query, args)
hid = c.fetchall()[0][0]
c.close()
if self.autocommit:
self.db.commit()
return hid
@rollsafe
def _doInsertRetIds(self, query, *args):
'''
Insert with a returning list of IDs.
'''
c = self.db.cursor()
c.execute(query, args)
rows = c.fetchall()
c.close()
if self.autocommit:
self.db.commit()
return [ r[0] for r in rows ]
def _doCommit(self):
self.db.commit()
def addNode(self, nodeid=None, ninfo=None, **kwargs):
if nodeid is not None:
raise Exception('DbGraphStore Manages nodeid!')
q = 'INSERT INTO vg_nodes DEFAULT VALUES RETURNING nid'
nid = self._doInsertRetId(q)
if ninfo is not None:
kwargs.update(ninfo)
for key,val in kwargs.items():
self.setNodeProp(nid, key, val)
return nid
def delEdge(self, eid):
'''
Delete an edge from the graph database.
Example: g.delEdge(eid)
'''
q = '''
DELETE FROM
vg_edge_props
WHERE
eid = %s
'''
self._doInsert(q, eid)
q = '''
DELETE FROM
vg_edges
WHERE
eid = %s
'''
self._doInsert(q, eid)
def delNode(self, nid):
'''
Delete the given node (and its edges) from the graph database.
Example: g.delNode(nid)
NOTE: this will delete any edges which go to or from nid!
'''
# Delete edge properties to and from
q = '''
DELETE FROM
vg_edge_props
USING
vg_edges
WHERE
vg_edges.n1 = %s
AND
vg_edges.eid = vg_edge_props.eid
'''
self._doInsert(q, nid)
q = '''
DELETE FROM
vg_edge_props
USING
vg_edges
WHERE
vg_edges.n2 = %s
AND
vg_edges.eid = vg_edge_props.eid
'''
self._doInsert(q, nid)
# Delete edges to and from
q = '''
DELETE FROM
vg_edges
WHERE
vg_edges.n1 = %s
'''
self._doInsert(q, nid)
q = '''
DELETE FROM
vg_edges
WHERE
vg_edges.n2 = %s
'''
self._doInsert(q, nid)
# Delete from node properties
q = '''
DELETE FROM
vg_node_props
WHERE
nid = %s
'''
self._doInsert(q, nid)
q = '''
DELETE FROM
vg_nodes
WHERE
nid = %s
'''
self._doInsert(q, nid)
def setNodeProp(self, nid, pname, value):
if isinstance(value, bool):
value = int(value)
if isinstance(value, int):
q = 'UPDATE vg_node_props SET intval=%s,created=NOW() WHERE nid=%s and pname=%s RETURNING nid'
q1 = 'INSERT INTO vg_node_props (nid, pname, intval) VALUES (%s,%s,%s)'
else:
q = 'UPDATE vg_node_props SET strval=%s,created=NOW() WHERE nid=%s and pname=%s RETURNING nid'
q1 = 'INSERT INTO vg_node_props (nid, pname, strval) VALUES (%s,%s,%s)'
# return a value to see if we actually did the update...
res = self._doSelect(q, value, nid, pname)
if len(res) == 0:
self._doInsert(q1, nid, pname, value)
if self.autocommit:
self.db.commit()
def getNodeProp(self, nid, pname, default=None):
q = 'SELECT intval,strval from vg_node_props WHERE nid=%s AND pname=%s'
res = self._doSelect(q, nid, pname)
if len(res) == 0:
return default
intval, strval = res[0]
if intval is not None:
return intval
return strval
def delNodeProp(self, nid, pname):
q = 'DELETE FROM vg_node_props WHERE nid=%s AND pname=%s'
self._doInsert(q, nid, pname)
def getNodeProps(self, nid):
ret = {}
q = 'SELECT pname,intval,strval FROM vg_node_props WHERE nid=%s'
for pname,intval,strval in self._doSelect(q, nid):
if intval is not None:
ret[pname] = intval
else:
ret[pname] = strval
return ret
def getNodesProps(self, nids):
ret = collections.defaultdict(dict)
q = 'SELECT nid,pname,intval,strval FROM vg_node_props WHERE nid IN %s'
for nid,pname,intval,strval in self._doSelect(q, tuple(nids)):
if intval is not None:
ret[nid][pname] = intval
else:
ret[nid][pname] = strval
return ret.items()
def addEdge(self, fromid, toid, eid=None, einfo=None):
if eid is not None:
raise Exception('DbGraphStore Manages eid!')
if fromid is None:
raise Exception('Invalid from id (None)!')
if toid is None:
raise Exception('Invalid to id (None)!')
q = 'INSERT INTO vg_edges (n1, n2) VALUES (%s, %s) RETURNING eid'
eid = self._doInsertRetId(q, fromid, toid)
if einfo is not None:
for key,val in einfo.items():
self.setEdgeProp(eid, key, val)
return eid
def getRefsFrom(self, nodeid):
'''
Return a list of edges which originate with us.
Example: for eid, fromid, toid, einfo in g.getRefsFrom(id)
'''
q = '''
SELECT
vg_edges.*,
vg_edge_props.*
FROM
vg_edges,
vg_edge_props
WHERE
vg_edges.n1 = %s AND
vg_edges.eid = vg_edge_props.eid
'''
refs = {}
res = self._doSelect(q, nodeid)
for eid,n1,n2,created,eid1,pname,intval,strval,created1 in res:
r = refs.get(eid)
if r is None:
r = (eid, n1, n2, {})
refs[eid] = r
if intval is not None:
r[3][pname] = intval
else:
r[3][pname] = strval
return refs.values()
def getRefsTo(self, nodeid):
'''
Return a list of edges which terminate at us.
Example: for eid, fromid, toid, einfo in g.getRefsTo(id)
'''
q = '''
SELECT
vg_edges.*,
vg_edge_props.*
FROM
vg_edges,
vg_edge_props
WHERE
vg_edges.n2 = %s AND
vg_edges.eid = vg_edge_props.eid
'''
refs = {}
res = self._doSelect(q, nodeid)
for eid,n1,n2,created,eid1,pname,intval,strval,created1 in res:
r = refs.get(eid)
if r is None:
r = (eid, n1, n2, {})
refs[eid] = r
if intval is not None:
r[3][pname] = intval
else:
r[3][pname] = strval
return refs.values()
def getRefsFromBulk(self, nids):
'''
Return a list of edges which originate with us.
Supply a list of node ids to get refs.
Example: for eid, fromid, toid, einfo in g.getRefsFromBulk(nids)
'''
q = '''
SELECT
vg_edges.eid, vg_edges.n1, vg_edges.n2,
vg_edge_props.pname, vg_edge_props.intval, vg_edge_props.strval
FROM
vg_edges,
vg_edge_props
WHERE
vg_edges.n1 IN (%s) AND
vg_edges.eid = vg_edge_props.eid
'''
if not nids:
return []
refs = {}
qend = ','.join( ['%s',] * len(nids))
q = q % qend
res = self._doSelect(q, *nids)
for eid, n1, n2, pname, intval, strval in res:
r = refs.get(eid)
if r is None:
r = (eid, n1, n2, {})
refs[eid] = r
if intval is not None:
r[3][pname] = intval
else:
r[3][pname] = strval
return list(refs.values())
def getRefsToBulk(self, nids):
'''
Return a list of edges which terminate at us.
Supply a list of node ids to get refs.
Example: for eid, fromid, toid, einfo in g.getRefsToBulk(nids)
'''
q = '''
SELECT
vg_edges.eid, vg_edges.n1, vg_edges.n2,
vg_edge_props.pname, vg_edge_props.intval, vg_edge_props.strval
FROM
vg_edges,
vg_edge_props
WHERE
vg_edges.n2 IN (%s) AND
vg_edges.eid = vg_edge_props.eid
'''
if not nids:
return []
refs = {}
qend = ','.join( ['%s',] * len(nids))
q = q % qend
res = self._doSelect(q, *nids)
for eid, n1, n2, pname, intval, strval in res:
r = refs.get(eid)
if r is None:
r = (eid, n1, n2, {})
refs[eid] = r
if intval is not None:
r[3][pname] = intval
else:
r[3][pname] = strval
return list(refs.values())
def setEdgeProp(self, eid, pname, value):
if isinstance(value, bool):
value = int(value)
if isinstance(value, int):
q = 'UPDATE vg_edge_props SET intval=%s WHERE eid=%s and pname=%s RETURNING eid'
q1 = 'INSERT INTO vg_edge_props (eid, pname, intval) VALUES (%s,%s,%s)'
else:
q = 'UPDATE vg_edge_props SET strval=%s WHERE eid=%s and pname=%s RETURNING eid'
q1 = 'INSERT INTO vg_edge_props (eid, pname, strval) VALUES (%s,%s,%s)'
# return a value to see if we actually did the update...
res = self._doSelect(q, value, eid, pname)
if len(res) == 0:
self._doInsert(q1, eid, pname, value)
def getEdgeProp(self, eid, pname, default=None):
q = 'SELECT intval,strval from vg_edge_props WHERE eid=%s AND pname=%s'
res = self._doSelect(q, eid, pname)
if len(res) == 0:
return default
intval, strval = res[0]
if intval is not None:
return intval
return strval
def getEdge(self, eid):
'''
Get the edge tuple ( eid, n1, n2, nprops ) for the given edge by id.
'''
q = 'SELECT eid,n1,n2 FROM vg_edges WHERE eid=%s'
res = self._doSelect( q, eid )
if not res:
raise Exception('Invalid Edge Id: %s' % eid)
e,n1,n2 = res[0]
return (eid, n1, n2, self.getEdgeProps( eid ) )
def getEdgeProps(self, eid):
'''
Retrieve the properties dictionary for the given edge id.
'''
ret = {}
q = 'SELECT pname,intval,strval FROM vg_edge_props WHERE eid=%s'
for pname,intval,strval in self._doSelect(q, eid):
if intval is not None:
ret[pname] = intval
else:
ret[pname] = strval
return ret
def searchNodes(self, propname, propval=None):
'''
Return (but do not cache forward) the nid's of nodes which
have a property with the following name (and optionally, value).
Example:
for nid in g.searchNodes('woot', 10)
print(g.getNodeProp(nid, 'name'))
NOTE: This is specific to the DbGraphStore...
'''
if propval is None:
q = 'SELECT nid FROM vg_node_props WHERE pname=%s'
qargs = (propname,)
elif isinstance(propval, int):
q = 'SELECT nid FROM vg_node_props WHERE pname=%s AND intval=%s'
qargs = (propname, propval)
else:
q = 'SELECT nid FROM vg_node_props WHERE pname=%s AND strval=%s'
qargs = (propname, propval)
c = self.db.cursor()
c.execute(q, qargs)
for row in c:
yield row[0]
c.close()
def buildSubGraph(self):
'''
Return a subgraph which may be used to populate from the DB and
do path searching.
'''
return DbSubGraph(self.dbinfo)
class DbSubGraph(DbGraphStore, vg_graphcore.Graph):
'''
A subgraph in the database is basically a forward cached instance of selected
nodes and edges in an in-memory graph (visgraph.graphcore.Graph). This object
may then be used for traditional path tracing without going back to the database.
Any modifications to graph element properties *will* be synchronized back to the
database backing the given subgraph.
'''
def __init__(self, dbinfo):
vg_graphcore.Graph.__init__(self)
DbGraphStore.__init__(self, dbinfo)
def addNode(self, nodeid=None, ninfo=None, **kwargs):
# Do *both*
nid = DbGraphStore.addNode(self, nodeid=nodeid, ninfo=ninfo, **kwargs)
vg_graphcore.Graph.addNode(self, nodeid=nid, ninfo=None, **kwargs)
return nid
def addEdge(self, fromid, toid, einfo):
eid = DbGraphStore.addEdge(self, fromid, toid, einfo=einfo)
vg_graphcore.Graph.addEdge(self, fromid, toid, eid=eid, einfo=None)
return eid
def useEdges(self, **kwargs):
'''
Pull some edges from the DbStore backing this subgraph into the actual
visgraph.graphcore.Graph instance so path traversal is possible.
'''
done = {}
for key,val in kwargs.items():
if isinstance(val, int):
# FIXME is vg_edges.eid faster or vg_edge_props?
q = 'SELECT vg_edges.eid,n1,n2 FROM vg_edge_props,vg_edges WHERE pname=%s AND intval=%s AND vg_edges.eid=vg_edge_props.eid'
else:
q = 'SELECT vg_edges.eid,n1,n2 FROM vg_edge_props,vg_edges WHERE pname=%s AND strval=%s AND vg_edges.eid=vg_edge_props.eid'
for eid,n1,n2 in self._doSelect(q, key, val):
done[eid] = (eid, n1, n2)
# Add the nodes for these edges, then mirror the edges in-memory
for eid, n1, n2 in done.values():
if vg_graphcore.Graph.getNode(self, n1) is None:
vg_graphcore.Graph.addNode(self, nodeid=n1)
if vg_graphcore.Graph.getNode(self, n2) is None:
vg_graphcore.Graph.addNode(self, nodeid=n2)
vg_graphcore.Graph.addEdge(self, n1, n2, eid=eid)
def expandNode(self, nid, maxdepth=1):
'''
Add *all* the edges (and adjacent nodes) by traversing this nodes
edges to the specified depth...
'''
todo = [(nid, 0),]
if vg_graphcore.Graph.getNode(self, nid) is None:
vg_graphcore.Graph.addNode(self, nodeid=nid)
while len(todo):
nid,depth = todo.pop()
if depth > maxdepth:
continue
# Do expansion based on the *database*
q = 'SELECT eid,n2 FROM vg_edges WHERE n1=%s'
for eid, n2 in self._doSelect(q, nid):
if vg_graphcore.Graph.getNode(self, n2) is None:
vg_graphcore.Graph.addNode(self, nodeid=n2)
if vg_graphcore.Graph.getEdge(self, eid) is None:
vg_graphcore.Graph.addEdge(self, nid, n2, eid=eid)
ndepth = depth+1
if ndepth < maxdepth:
todo.append((n2, ndepth))
# pullNode?
# expandNode?
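# A minimal, hedged usage sketch (assumes a reachable postgres instance with the
# credentials from default_dbinfo; the 'name' and 'weight' properties are
# illustrative, not values used elsewhere in this module):
#
#   initGraphDb(default_dbinfo)
#   store = DbGraphStore(default_dbinfo)
#   n1 = store.addNode(name='entry')
#   n2 = store.addNode(name='exit')
#   eid = store.addEdge(n1, n2, einfo={'weight': 1})
#   sub = store.buildSubGraph()
#   sub.useEdges(weight=1)        # pull matching edges into the in-memory graph
#   sub.expandNode(n1, maxdepth=2)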
---- /examples/styling/visuals/specifying_colors.py (bokeh/bokeh, BSD-3-Clause) ----
import numpy as np
from bokeh.plotting import figure, show
x = [1, 2, 3]
y1 = [1, 4, 2]
y2 = [2, 1, 4]
y3 = [4, 3, 2]
# use a single RGBA color
single_color = (255, 0, 0, 0.5)
# use a list of different colors
list_of_colors = [
"hsl(60deg 100% 50% / 1.0)",
"rgba(0, 0, 255, 0.9)",
"LightSeaGreen",
]
# use a series of color values as a numpy array
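# (each uint32 appears to pack one color as 0xRRGGBBAA, judging by the values below)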
numpy_array_of_colors = np.array(
[
0xFFFF00FF,
0x00FF00FF,
0xFF000088,
],
np.uint32,
)
p = figure(title="Specifying colors")
# add glyphs to plot
p.line(x, y1, line_color=single_color)
p.circle(x, y2, radius=0.12, color=list_of_colors)
p.triangle(x, y3, size=30, fill_color=numpy_array_of_colors)
show(p)
---- /lib/pymedphys/_dicom/rtplan/__init__.py (pymedphys/pymedphys, Apache-2.0) ----
from .adjust import convert_to_one_fraction_group
from .build import (
build_control_points,
merge_beam_sequences,
replace_beam_sequence,
replace_fraction_group,
restore_trailing_zeros,
)
from .core import (
get_beam_indices_of_fraction_group,
get_cp_attribute_leaning_on_prior,
get_fraction_group_beam_sequence_and_meterset,
get_fraction_group_index,
get_gantry_angles_from_dicom,
get_leaf_jaw_positions_for_type,
get_metersets_from_dicom,
get_surface_entry_point,
get_surface_entry_point_with_fallback,
require_gantries_be_zero,
)
---- /tick/hawkes/inference/tests/hawkes_conditional_law_test.py (X-DataInitiative/tick, BSD-3-Clause) ----
# License: BSD 3 clause
import os
import unittest
import numpy as np
from numpy.random import random, randint
from tick.base.inference import InferenceTest
from tick.hawkes.inference import HawkesConditionalLaw
class Test(InferenceTest):
def setUp(self):
self.dim = 2
np.random.seed(320982)
self.timestamps = [
np.cumsum(random(randint(20, 25))) * 10 for _ in range(self.dim)
]
self.model = HawkesConditionalLaw(n_quad=5)
self.model.fit(self.timestamps)
def test_hawkes_conditional_law_norm(self):
"""...Test HawkesConditionalLaw kernels norm estimation
"""
np.testing.assert_array_almost_equal(
self.model.kernels_norms,
[[-0.81130911, -1.12992177], [-1.16313257, -1.72348019]])
def test_hawkes_conditional_law_kernels(self):
"""...Test HawkesConditionalLaw kernel estimation
"""
saved_phi_path = os.path.join(
os.path.dirname(__file__),
'hawkes_conditional_law_test-kernels.npy')
saved_phi = np.load(saved_phi_path)
np.testing.assert_array_almost_equal(self.model.kernels, saved_phi)
def test_hawkes_conditional_law_baseline(self):
"""...Test HawkesConditionalLaw baseline estimation
"""
np.testing.assert_array_almost_equal(self.model.baseline,
[0.61213243, 0.808886425])
def test_hawkes_conditional_mean_intensity(self):
"""...Test HawkesConditionalLaw mean intensity estimation
"""
np.testing.assert_array_almost_equal(self.model.mean_intensity,
[0.208121177, 0.208121177])
def test_hawkes_quad_method(self):
"""...Test HawkesConditionalLaw estimates with different quadrature
methods
"""
model = HawkesConditionalLaw(n_quad=5, quad_method='gauss')
model.fit(self.timestamps)
np.testing.assert_array_almost_equal(
model.kernels_norms,
[[-0.81130911, -1.12992177], [-1.16313257, -1.72348019]])
model = HawkesConditionalLaw(n_quad=5, quad_method='gauss-')
model.fit(self.timestamps)
np.testing.assert_array_almost_equal(
model.kernels_norms,
[[-77.76904711, 0.69985519], [-42.87140913, 0.13607425]])
model = HawkesConditionalLaw(n_quad=5, quad_method='lin')
model.fit(self.timestamps)
np.testing.assert_array_almost_equal(
model.kernels_norms,
[[7.92561315, 1.74540188], [-28.57048537, 10.77926367]])
model = HawkesConditionalLaw(n_quad=5, quad_method='log')
model.fit(self.timestamps)
np.testing.assert_array_almost_equal(
model.kernels_norms,
[[35.70738975, 18.96902121], [-51.69638233, -30.33936597]])
def test_hawkes_claw_method(self):
"""...Test HawkesConditionalLaw estimates with different conditional
law methods
"""
model = HawkesConditionalLaw(n_quad=5, claw_method='lin')
model.incremental_fit(self.timestamps, compute=False)
model.compute()
np.testing.assert_array_almost_equal(
model.kernels_norms,
[[-0.81130911, -1.12992177], [-1.16313257, -1.72348019]])
model = HawkesConditionalLaw(n_quad=5, claw_method='log')
model.incremental_fit(self.timestamps)
np.testing.assert_array_almost_equal(
model.kernels_norms,
[[0.46108403, -0.09467477], [-0.04787463, -3.82917571]])
def test_incremental_fit(self):
# This should not raise a warning
self.model.incremental_fit(self.timestamps, compute=False)
msg = r'compute\(\) method was already called, computed ' \
r'kernels will be updated.'
with self.assertWarnsRegex(UserWarning, msg):
self.model.incremental_fit(self.timestamps, compute=True)
new_model = HawkesConditionalLaw(n_quad=5, claw_method='lin')
new_model.incremental_fit(self.timestamps, compute=False)
# This should not raise a warning
new_model.incremental_fit(self.timestamps, compute=True)
with self.assertWarnsRegex(UserWarning, msg):
new_model.incremental_fit(self.timestamps, compute=True)
if __name__ == "__main__":
unittest.main()
---- /script/talk/source/t400120.py (vawser/Cinders-DS3, no license) ----
#-------------------------------------------
#-- Fire Keeper
#-------------------------------------------
# -*- coding: utf-8 -*-
def t400120_1():
""" State 0,1 """
assert GetCurrentStateElapsedTime() > 1
""" State 2 """
while True:
call = t400120_x14()
assert IsClientPlayer() == 1
""" State 3 """
call = t400120_x15()
assert not IsClientPlayer()
def t400120_x0(action2=_):
""" State 0,1 """
OpenGenericDialog(8, action2, 3, 4, 2)
assert not CheckSpecificPersonGenericDialogIsOpen(0)
""" State 2 """
if GetGenericDialogButtonResult() == 1:
""" State 3 """
return 0
else:
""" State 4 """
return 1
def t400120_x1(z4=6120, flag4=1015, flag5=6000, flag6=6000, flag7=6000, flag8=6000):
""" State 0,1 """
while True:
assert (not GetOneLineHelpStatus() and not IsTalkingToSomeoneElse() and not IsClientPlayer()
and not IsPlayerDead() and not IsCharacterDisabled())
""" State 3 """
assert (GetEventStatus(flag4) == 1 or GetEventStatus(flag5) == 1 or GetEventStatus(flag6) ==
1 or GetEventStatus(flag7) == 1 or GetEventStatus(flag8) == 1)
""" State 2 """
if (not (not GetOneLineHelpStatus() and not IsTalkingToSomeoneElse() and not IsClientPlayer()
and not IsPlayerDead() and not IsCharacterDisabled())):
pass
elif (not GetEventStatus(flag4) and not GetEventStatus(flag5) and not GetEventStatus(flag6) and
not GetEventStatus(flag7) and not GetEventStatus(flag8)):
pass
elif CheckActionButtonArea(z4):
break
""" State 4 """
return 0
def t400120_x2():
""" State 0,1 """
if not CheckSpecificPersonTalkHasEnded(0):
""" State 7 """
ClearTalkProgressData()
StopEventAnimWithoutForcingConversationEnd(0)
""" State 6 """
ReportConversationEndToHavokBehavior()
else:
pass
""" State 2 """
if CheckSpecificPersonGenericDialogIsOpen(0) == 1:
""" State 3 """
ForceCloseGenericDialog()
else:
pass
""" State 4 """
if CheckSpecificPersonMenuIsOpen(-1, 0) == 1 and not CheckSpecificPersonGenericDialogIsOpen(0):
""" State 5 """
ForceCloseMenu()
else:
pass
""" State 8 """
return 0
def t400120_x3():
""" State 0,1 """
ClearTalkProgressData()
StopEventAnimWithoutForcingConversationEnd(0)
ForceCloseGenericDialog()
ForceCloseMenu()
ReportConversationEndToHavokBehavior()
""" State 2 """
return 0
def t400120_x4(text3=12002600, z3=74000115, flag3=0, mode3=1):
""" State 0,5 """
assert t400120_x3() and CheckSpecificPersonTalkHasEnded(0) == 1
""" State 2 """
SetEventState(z3, 1)
""" State 1 """
# talk:12002600:
TalkToPlayer(text3, -1, -1, flag3)
assert CheckSpecificPersonTalkHasEnded(0) == 1
""" State 4 """
if not mode3:
pass
else:
""" State 3 """
ReportConversationEndToHavokBehavior()
""" State 6 """
return 0
def t400120_x5(text2=_, z2=_, flag2=0, mode2=0):
""" State 0,5 """
assert t400120_x3() and CheckSpecificPersonTalkHasEnded(0) == 1
""" State 1 """
TalkToPlayer(text2, -1, -1, flag2)
assert CheckSpecificPersonTalkHasEnded(0) == 1
""" State 4 """
if not mode2:
pass
else:
""" State 3 """
ReportConversationEndToHavokBehavior()
""" State 2 """
SetEventState(z2, 1)
""" State 6 """
return 0
def t400120_x6(text1=_, flag1=0, mode1=_):
""" State 0,4 """
assert t400120_x3() and CheckSpecificPersonTalkHasEnded(0) == 1
""" State 1 """
TalkToPlayer(text1, -1, -1, flag1)
assert CheckSpecificPersonTalkHasEnded(0) == 1
""" State 3 """
if not mode1:
pass
else:
""" State 2 """
ReportConversationEndToHavokBehavior()
""" State 5 """
return 0
def t400120_x7(action1=_):
""" State 0,1 """
OpenGenericDialog(7, action1, 1, 0, 1)
assert not CheckSpecificPersonGenericDialogIsOpen(0)
""" State 2 """
return 0
def t400120_x8(goods1=2138, goods2=390, goods3=2002, goods5=2000, goods6=2016):
""" State 0,8 """
c1110()
""" State 1 """
while True:
ClearTalkListData()
""" State 2 """
#---- Normal
# Level Up
AddTalkListData(1, 15002000, -1)
# Heal the Dark Sigil
AddTalkListData(4, 15002004, 74000125)
# Eyes of a Fire Keeper
AddTalkListDataIf(ComparePlayerInventoryNumber(3, goods1, 2, 0, 0) == 1, 10, 15002001, -1)
# Fire Keeper Soul
AddTalkListDataIf(ComparePlayerInventoryNumber(3, goods2, 2, 0, 0) == 1, 16, 15002005, -1)
# Form Betrothal
#AddTalkListDataIf(GetEventStatus(25008020) == 0 and ComparePlayerInventoryNumber(3, goods5, 2, 0, 0) == 1 and GetEventStatus(25009850) == 0, 30, 15015040, -1)
# Flirt
#AddTalkListDataIf(GetEventStatus(25008020) == 1 and GetEventStatus(25009850) == 0, 31, 15015041, -1)
# Divorce
#AddTalkListDataIf(GetEventStatus(25008020) == 1 and GetEventStatus(25009850) == 0, 32, 15015042, -1)
# Seduce
AddTalkListDataIf(GetEventStatus(25008020) == 1 and GetEventStatus(25009850) == 0, 33, 15015043, -1)
# Talk
AddTalkListData(20, 15000000, -1)
# Leave
AddTalkListData(99, 15000005, -1)
assert (not CheckSpecificPersonGenericDialogIsOpen(2) and not (CheckSpecificPersonMenuIsOpen(-1,
2) == 1 and not CheckSpecificPersonGenericDialogIsOpen(2)))
""" State 3 """
ShowShopMessage(1)
if GetTalkListEntryResult() == 1:
""" State 4 """
if GetEventStatus(2051) == 1 or IsMultiplayerInProgress() == 1:
pass
else:
""" State 13 """
# talk:12000200:Very well.
assert t400120_x6(text1=12000200, flag1=0, mode1=0)
""" State 11 """
def WhilePaused():
SetTalkTime(0.1)
assert not GetEventStatus(74000137) and not GetEventStatus(74000138)
""" State 19 """
SetEventState(74000135, 1)
call = t400120_x25()
def ExitPause():
SetEventState(74000135, 0)
SetEventState(74000136, 0)
if call.Get() == 1:
""" State 21 """
Label('L0')
return 0
elif call.Done():
continue
elif GetTalkListEntryResult() == 16:
""" State 5,17 """
assert t400120_x22()
return 0
elif GetTalkListEntryResult() == 20:
""" State 7,15 """
assert t400120_x20()
return 0
# Form Betrothal
elif GetTalkListEntryResult() == 30:
SetEventState(25008020, 1)
PlayerEquipmentQuantityChange(3, 2000, -1)
assert t400120_x6(text1=10101030, flag1=0, mode1=0)
continue
# Flirt
elif GetTalkListEntryResult() == 31:
# Good
if GetEventStatus(25008900):
assert t400120_x6(text1=10101000, flag1=0, mode1=0)
GetItemFromItemLot(90010)
# Neutral
elif GetEventStatus(25008901):
assert t400120_x6(text1=10101010, flag1=0, mode1=0)
# Bad
elif GetEventStatus(25008902):
assert t400120_x6(text1=10101020, flag1=0, mode1=0)
continue
# Divorce
elif GetTalkListEntryResult() == 32:
assert t400120_x6(text1=10101020, flag1=0, mode1=0)
SetEventState(25008020, 0)
return 0
# Seduce
elif GetTalkListEntryResult() == 33:
# BONK!
assert t400120_x29(text1=12003600)
#OpenGenericDialog(1, 99030720, 0, 0, 0)
return 0
elif not (CheckSpecificPersonMenuIsOpen(1, 0) == 1 and not CheckSpecificPersonGenericDialogIsOpen(0)):
""" State 6,14 """
assert t400120_x19()
Goto('L0')
elif GetTalkListEntryResult() == 10:
""" State 9,16 """
def ExitPause():
SetEventState(74000122, 0)
assert t400120_x21(goods1=goods1)
continue
elif GetTalkListEntryResult() == 4:
""" State 10 """
if GetEventStatus(2051) == 1 or IsMultiplayerInProgress() == 1:
pass
else:
""" State 18 """
# goods:490:Dark Sigil
assert t400120_x23(goods3=490, z1=0)
continue
""" State 12,20 """
assert t400120_x7(action1=13002040)
def t400120_x9():
if GetEventStatus(25009850) == 0:
""" State 0,1 """
if GetEventStatus(1002) == 1:
""" State 8 """
if GetEventStatus(74000121) == 1:
""" State 11 """
Label('L0')
""" State 20 """
# talk:12003150:I'm truly sorry.
assert t400120_x6(text1=12003150, flag1=0, mode1=0)
""" State 6 """
Label('L1')
SetEventState(74000121, 0)
elif GetEventStatus(50006020) == 1:
""" State 12,22 """
# talk:12002000:Ashen one, link the fire.
assert t400120_x6(text1=12002000, flag1=0, mode1=0)
else:
""" State 13,21 """
# talk:12002200:Ashen one, if, when thou peerest upon the First Flame...
assert t400120_x6(text1=12002200, flag1=0, mode1=0)
elif GetEventStatus(1001) == 1:
""" State 7 """
if GetEventStatus(74000121) == 1:
Goto('L0')
elif not GetEventStatus(74000101):
""" State 9,19 """
# talk:12001300:The five lords sit their five thrones.
assert t400120_x5(text2=12001300, z2=74000101, flag2=0, mode2=0)
else:
""" State 10,18 """
# talk:12001400:Ashen one, with the Lords as thy witness, bend thy knee afore the bonfire's coiled sword.
assert t400120_x6(text1=12001400, flag1=0, mode1=0)
else:
""" State 2 """
if GetEventStatus(74000121) == 1:
""" State 5,17 """
# talk:12003100:I'm truly sorry.
assert t400120_x6(text1=12003100, flag1=0, mode1=0)
Goto('L1')
elif not GetEventStatus(74000100):
""" State 3,16 """
# talk:12000000:Welcome to the bonfire, Unkindled One.
assert t400120_x5(text2=12000000, z2=74000100, flag2=0, mode2=0)
else:
""" State 4 """
if not GetEventStatus(131):
""" State 23 """
# talk:12003200:Ashen one.
SetEventState(131, 1)
assert t400120_x6(text1=12003200, flag1=0, mode1=0)
else:
""" State 15 """
# talk:12000100:Welcome home, ashen one.
assert t400120_x6(text1=12000100, flag1=0, mode1=0)
""" State 14 """
# goods:2138:Eyes of a Fire Keeper, goods:390:Fire Keeper Soul
assert t400120_x8(goods1=2138, goods2=390, goods3=2002, goods5=2000, goods6=2016)
""" State 24 """
return 0
def t400120_x10():
""" State 0,6 """
assert t400120_x2()
""" State 3 """
assert GetCurrentStateElapsedFrames() > 1
""" State 1 """
assert not GetEventStatus(1016) and not GetEventStatus(1017)
""" State 2 """
if GetDistanceToPlayer() < 10:
""" State 4,8 """
call = t400120_x18()
if call.Done():
pass
elif GetDistanceToPlayer() > 12:
""" State 7 """
assert t400120_x2()
else:
""" State 5 """
pass
""" State 9 """
return 0
def t400120_x11():
""" State 0,1 """
if GetEventStatus(1018) == 1:
""" State 2 """
pass
else:
""" State 3 """
if GetDistanceToPlayer() < 10:
""" State 4 """
if GetEventStatus(50006020) == 1:
""" State 6,9 """
# talk:12002900:
call = t400120_x6(text1=12002900, flag1=0, mode1=1)
if call.Done():
Goto('L0')
elif GetDistanceToPlayer() > 12:
pass
else:
""" State 7,10 """
# talk:12002950:
call = t400120_x6(text1=12002950, flag1=0, mode1=1)
if call.Done():
Goto('L0')
elif GetDistanceToPlayer() > 12:
pass
""" State 8 """
assert t400120_x2()
else:
""" State 5 """
pass
""" State 11 """
Label('L0')
return 0
def t400120_x12():
""" State 0,1,2 """
assert t400120_x2()
""" State 3 """
return 0
def t400120_x13():
""" State 0,1 """
if (CheckSpecificPersonMenuIsOpen(-1, 0) == 1 and not CheckSpecificPersonMenuIsOpen(12, 0) and not
CheckSpecificPersonGenericDialogIsOpen(0)):
""" State 2,5 """
call = t400120_x19()
if call.Done():
pass
elif GetDistanceToPlayer() > 12:
""" State 4 """
Label('L0')
assert t400120_x2()
else:
""" State 3 """
Goto('L0')
""" State 6 """
return 0
def t400120_x14():
""" State 0,1 """
while True:
call = t400120_x16()
assert not GetEventStatus(1000) and not GetEventStatus(1001) and not GetEventStatus(1002)
""" State 2 """
call = t400120_x17()
assert GetEventStatus(1000) == 1 or GetEventStatus(1001) == 1 or GetEventStatus(1002) == 1
def t400120_x15():
""" State 0,1 """
assert t400120_x2()
""" State 2 """
return 0
def t400120_x16():
""" State 0,1 """
call = t400120_x26()
assert CheckSelfDeath() == 1
""" State 2 """
t400120_x11()
def t400120_x17():
""" State 0 """
def t400120_x18():
""" State 0,1 """
if not GetEventStatus(74000115):
""" State 2,5 """
# talk:12002600:
assert t400120_x4(text3=12002600, z3=74000115, flag3=0, mode3=1)
else:
""" State 3,6 """
# talk:12002700:
assert t400120_x6(text1=12002700, flag1=0, mode1=1)
""" State 4 """
SetEventState(74000115, 0)
""" State 7 """
return 0
def t400120_x19():
""" State 0,1 """
if GetEventStatus(1002) == 1:
""" State 5 """
if GetEventStatus(50006020) == 1:
""" State 6,10 """
# talk:12002100:Ashen one, may the flames guide thee.
assert t400120_x6(text1=12002100, flag1=0, mode1=1)
else:
""" State 7,11 """
# talk:12002300:Mayst thou thy peace discov'r.
assert t400120_x6(text1=12002300, flag1=0, mode1=1)
else:
""" State 2 """
if GetEventStatus(50006020) == 1:
""" State 3,8 """
# talk:12000400:Farewell, ashen one.
assert t400120_x6(text1=12000400, flag1=0, mode1=1)
else:
""" State 4,9 """
# talk:12000500:Farewell, ashen one.
assert t400120_x6(text1=12000500, flag1=0, mode1=1)
""" State 12 """
return 0
def t400120_x20():
""" State 0,1 """
if GetEventStatus(50006020) == 1:
""" State 2 """
if GetEventStatus(9300) == 1 and not GetEventStatus(1002):
""" State 4,20 """
# talk:12000700:Ashen one, may I pose thee a question?
assert t400120_x6(text1=12000700, flag1=0, mode1=0)
else:
""" State 3,19 """
# talk:12000600:Ashen one, to be unkindled is to be a vessel for souls.
assert t400120_x6(text1=12000600, flag1=0, mode1=0)
else:
""" State 5 """
if not GetEventStatus(74000130) and not GetEventStatus(74000131):
""" State 7,21 """
# talk:12000800:Ashen one, my thanks for the eyes thou'st given.
assert (t400120_x6(text1=12000800, flag1=0, mode1=0) and (not CheckSpecificPersonGenericDialogIsOpen(2)
and not (CheckSpecificPersonMenuIsOpen(-1, 2) == 1 and not CheckSpecificPersonGenericDialogIsOpen(2))))
""" State 14 """
ClearTalkListData()
""" State 15 """
# action:14002000:Wish for a world without flame
AddTalkListData(1, 14002000, -1)
# action:14002001:Decline
AddTalkListData(2, 14002001, -1)
""" State 13 """
OpenConversationChoicesMenu(0)
assert not (CheckSpecificPersonMenuIsOpen(12, 0) == 1 and not CheckSpecificPersonGenericDialogIsOpen(0))
""" State 17 """
if GetTalkListEntryResult() == 1:
""" State 12,24 """
# talk:12000900:Of course.
assert t400120_x5(text2=12000900, z2=74000130, flag2=0, mode2=0)
elif GetTalkListEntryResult() == 2:
""" State 16,25 """
# talk:12001100:Of course not.
assert t400120_x5(text2=12001100, z2=74000131, flag2=0, mode2=0)
else:
""" State 18 """
pass
else:
""" State 6 """
if GetEventStatus(74000130) == 1:
""" State 8 """
if not GetEventStatus(74000132) or GetEventStatus(74000110) == 1:
""" State 10,23 """
# talk:12001000:Ashen one, if thine heart should bend...
assert t400120_x6(text1=12001000, flag1=0, mode1=0)
else:
""" State 11,26 """
# talk:12001900:Ashen one, forgive me if this soundeth strange.
assert t400120_x5(text2=12001900, z2=74000110, flag2=0, mode2=0)
else:
""" State 9,22 """
# talk:12001200:Ashen one, kill me, and take these eyes away.
assert t400120_x6(text1=12001200, flag1=0, mode1=0)
""" State 27 """
return 0
def t400120_x21(goods1=2138):
""" State 0,6 """
# action:12002000:Give <?gdsparam@2138?>?
call = t400120_x0(action2=12002000)
if call.Get() == 0:
""" State 2,1 """
# goods:2138:Eyes of a Fire Keeper
PlayerEquipmentQuantityChange(3, goods1, -1)
""" State 3 """
SetEventState(50006020, 0)
""" State 5 """
SetEventState(74000122, 1)
""" State 7 """
# talk:12003300:...Ashen one, are these...
assert t400120_x6(text1=12003300, flag1=0, mode1=0)
elif call.Done():
""" State 4 """
pass
""" State 8 """
return 0
def t400120_x22(goods2=390):
""" State 0,5 """
# action:12002001:Give <?gdsparam@390?>?
call = t400120_x0(action2=12002001)
if call.Get() == 0:
""" State 2,1 """
# goods:390:Fire Keeper Soul
PlayerEquipmentQuantityChange(3, goods2, -1)
""" State 3 """
SetEventState(74000125, 1)
""" State 6 """
# talk:12003400:...Ashen one, this is...
assert (t400120_x6(text1=12003400, flag1=0, mode1=0) and (not CheckSpecificPersonGenericDialogIsOpen(2)
and not (CheckSpecificPersonMenuIsOpen(-1, 2) == 1 and not CheckSpecificPersonGenericDialogIsOpen(2))))
""" State 7 """
# action:13002030:The Fire Keeper is now able to heal the dark sigil
assert t400120_x7(action1=13002030)
""" State 8 """
# talk:12003500:Forgive me, sister.
assert t400120_x6(text1=12003500, flag1=0, mode1=0)
elif call.Done():
""" State 4 """
pass
""" State 9 """
return 0
def t400120_x23(goods3=490, z1=0):
""" State 0,1 """
# goods:490:Dark Sigil
if ComparePlayerInventoryNumber(3, goods3, 0, 0, 0) == 1:
""" State 3,20 """
# action:13002021:You have no dark sigil
assert t400120_x7(action1=13002021)
else:
""" State 2,24 """
assert t400120_x24(goods3=goods3, z1=z1)
""" State 10 """
SetMessageTagValue(0, GetLevelUpSoulCost(GetStatus(33), GetStatus(33) + GetWorkValue(z1)))
""" State 21 """
# action:12002002:Requires <?evntAcquittalPrice?> souls. \nWill you choose to heal the dark sigil?
call = t400120_x0(action2=12002002)
if call.Get() == 0:
""" State 6 """
if ComparePlayerStatus(8, 2, GetLevelUpSoulCost(GetStatus(33), GetStatus(33) + GetWorkValue(z1))):
""" State 4,13 """
TurnCharacterToFaceEntity(-1, 10000, -1)
assert GetCurrentStateElapsedFrames() > 1 and GetWhetherChrEventAnimHasEnded(10000) == 1
""" State 12 """
TurnCharacterToFaceEntity(69010, 10000, -1)
assert GetCurrentStateElapsedTime() > 1.5
""" State 8 """
# goods:490:Dark Sigil
PlayerEquipmentQuantityChange(3, goods3, -1 * GetWorkValue(z1))
""" State 9 """
ChangePlayerStats(8, 1, GetLevelUpSoulCost(GetStatus(33), GetStatus(33) + GetWorkValue(z1)))
""" State 11 """
SetEventState(74000124, 1)
""" State 14 """
if GetPlayerChrType() == 8:
""" State 15,18 """
GiveSpEffectToPlayer(3093)
else:
""" State 16,17 """
ChangePlayerStats(31, 5, 0)
""" State 19 """
assert GetCurrentStateElapsedTime() > 3
""" State 23 """
# action:13002020:Dark sigil has been healed
assert t400120_x7(action1=13002020) and GetWhetherChrEventAnimHasEnded(10000) == 1
else:
""" State 5,22 """
# action:13000050:Insufficient souls
assert t400120_x7(action1=13000050)
elif call.Done():
""" State 7 """
pass
""" State 25 """
return 0
def t400120_x24(goods3=490, z1=0):
""" State 0,1 """
# goods:490:Dark Sigil
SetWorkValue(z1, GetItemHeldNumLimit(3, goods3))
""" State 5 """
while True:
# goods:490:Dark Sigil
if ComparePlayerInventoryNumber(3, goods3, 0, GetWorkValue(z1), 0) or GetWorkValue(z1) <= 0:
break
else:
""" State 3,2 """
SetWorkValue(z1, GetWorkValue(z1) - 1)
""" State 4,6 """
return 0
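# Illustrative reading (not game script; the engine-call semantics are assumed):
# the loop above resolves the player's actual held sigil count by counting down
# from the stack limit until the inventory comparison succeeds, roughly:
#
#   def held_count(stack_limit, inventory_matches):
#       n = stack_limit
#       while n > 0 and not inventory_matches(n):
#           n -= 1
#       return n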
def t400120_x25():
""" State 0,3 """
if DoesSelfHaveSpEffect(150) == 1 or DoesSelfHaveSpEffect(152) == 1:
""" State 4 """
SetEventState(74000136, 1)
""" State 2 """
if not GetEventStatus(74000135):
pass
elif GetEventStatus(74000137) == 1 and GetEventStatus(74000138) == 1:
""" State 1 """
OpenSoul()
assert not (CheckSpecificPersonMenuIsOpen(10, 0) == 1 and not CheckSpecificPersonGenericDialogIsOpen(0))
""" State 5 """
return 0
elif not GetEventStatus(74000135):
pass
""" State 6 """
return 1
def t400120_x26():
""" State 0,5 """
while True:
call = t400120_x1(z4=6120, flag4=1015, flag5=6000, flag6=6000, flag7=6000, flag8=6000)
if call.Done():
""" State 3 """
SetEventState(74000139, 1)
call = t400120_x9()
if call.Done():
pass
elif IsAttackedBySomeone() == 1:
""" State 1 """
Label('L0')
call = t400120_x10()
def ExitPause():
RemoveMyAggro()
if call.Done():
pass
elif IsPlayerDead() == 1:
break
elif IsPlayerDead() == 1:
break
elif GetDistanceToPlayer() > 3 or GetPlayerYDistance() > 0.25:
""" State 4 """
call = t400120_x13()
if call.Done() and (GetDistanceToPlayer() < 2.5 and GetPlayerYDistance() < 0.249):
pass
elif IsAttackedBySomeone() == 1:
Goto('L0')
elif IsAttackedBySomeone() == 1:
Goto('L0')
elif IsPlayerDead() == 1:
break
""" State 2 """
t400120_x12()
def t400120_x27(action2=_):
""" State 0,1 """
OpenGenericDialog(8, action2, 1, 0, 1)
assert not CheckSpecificPersonGenericDialogIsOpen(0)
""" State 2 """
if GetGenericDialogButtonResult() == 1:
""" State 3 """
return 0
else:
""" State 4 """
return 1
def t400120_x28(action2=_):
""" State 0,1 """
OpenGenericDialog(8, action2, 1, 0, 1)
assert not CheckSpecificPersonGenericDialogIsOpen(0)
""" State 2 """
if GetGenericDialogButtonResult() == 1:
""" State 3 """
return 0
else:
""" State 4 """
return 1
# BONK!
def t400120_x29(text1=_):
""" State 0,4 """
assert t400120_x3() and CheckSpecificPersonTalkHasEnded(0) == 1
""" State 1 """
TalkToPlayer(text1, -1, -1, 0)
assert CheckSpecificPersonTalkHasEnded(0) == 1
""" State 3 """
ReportConversationEndToHavokBehavior()
SetEventState(25008903, 1)
""" State 5 """
return 0
# --- file: selenium__examples/screenshot_from_local_file_with_fixed_window_size.py
# --- repo: gil9red/SimplePyScripts (license: CC-BY-4.0)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "ipetrash"
import time
from pathlib import Path
# pip install selenium
from selenium import webdriver
file_name = "file_test.html"
driver = webdriver.Firefox()
driver.set_window_size(500, 500)  # fixed window size -> deterministic screenshot dimensions
# Path.as_uri() yields a well-formed file:// URL (also correct for Windows drive letters)
driver.get(Path(file_name).resolve().as_uri())
time.sleep(5)  # crude wait to let the page finish rendering before the capture
driver.save_screenshot(file_name + ".png")
driver.quit()
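# Hedged variant (not in the original script): for unattended runs, Firefox can
# be started headless; the flag below is the usual geckodriver switch and is
# assumed to be supported by the installed Selenium/Firefox versions.
#
#   options = webdriver.FirefoxOptions()
#   options.add_argument("--headless")
#   driver = webdriver.Firefox(options=options)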
# --- file: tests/k8s/test_errors.py
# --- repo: nolar/kopf (license: MIT)
import aiohttp
import pytest
from kopf._cogs.clients.auth import APIContext, authenticated
from kopf._cogs.clients.errors import APIClientError, APIConflictError, APIError, \
APIForbiddenError, APINotFoundError, \
APIServerError, check_response
@authenticated
async def get_it(url: str, *, context: APIContext) -> object:
    # Minimal authenticated GET helper; check_response() is the function under
    # test and raises the APIError hierarchy for error statuses.
    response = await context.session.get(url)
    await check_response(response)
    return await response.json()
def test_aiohttp_is_not_leaked_outside():
assert not issubclass(APIError, aiohttp.ClientError)
def test_exception_without_payload():
exc = APIError(None, status=456)
assert exc.status == 456
assert exc.code is None
assert exc.message is None
assert exc.details is None
def test_exception_with_payload():
exc = APIError({"message": "msg", "code": 123, "details": {"a": "b"}}, status=456)
assert exc.status == 456
assert exc.code == 123
assert exc.message == "msg"
assert exc.details == {"a": "b"}
@pytest.mark.parametrize('status', [200, 202, 300, 304])
async def test_no_error_on_success(
resp_mocker, aresponses, hostname, status):
resp = aresponses.Response(
status=status,
headers={'Content-Type': 'application/json'},
text='{"kind": "Status", "code": "xxx", "message": "msg"}',
)
aresponses.add(hostname, '/', 'get', resp_mocker(return_value=resp))
await get_it(f"http://{hostname}/")
# Note: 401 is wrapped into a LoginError and is tested elsewhere.
@pytest.mark.parametrize('status, exctype', [
(403, APIForbiddenError),
(404, APINotFoundError),
(409, APIConflictError),
(400, APIClientError),
(403, APIClientError),
(404, APIClientError),
(500, APIServerError),
(503, APIServerError),
(400, APIError),
(500, APIError),
(666, APIError),
])
async def test_error_with_payload(
resp_mocker, aresponses, hostname, status, exctype):
resp = aresponses.Response(
status=status,
headers={'Content-Type': 'application/json'},
text='{"kind": "Status", "code": 123, "message": "msg", "details": {"a": "b"}}',
)
aresponses.add(hostname, '/', 'get', resp_mocker(return_value=resp))
with pytest.raises(APIError) as err:
await get_it(f"http://{hostname}/")
assert not isinstance(err.value, aiohttp.ClientResponseError)
assert isinstance(err.value, exctype)
assert err.value.status == status
assert err.value.code == 123
assert err.value.message == 'msg'
assert err.value.details == {'a': 'b'}
@pytest.mark.parametrize('status', [400, 500, 666])
async def test_error_with_nonjson_payload(
resp_mocker, aresponses, hostname, status):
resp = aresponses.Response(
status=status,
headers={'Content-Type': 'application/json'},
text='unparsable json',
)
aresponses.add(hostname, '/', 'get', resp_mocker(return_value=resp))
with pytest.raises(APIError) as err:
await get_it(f"http://{hostname}/")
assert err.value.status == status
assert err.value.code is None
assert err.value.message is None
assert err.value.details is None
@pytest.mark.parametrize('status', [400, 500, 666])
async def test_error_with_parseable_nonk8s_payload(
resp_mocker, aresponses, hostname, status):
resp = aresponses.Response(
status=status,
headers={'Content-Type': 'application/json'},
text='{"kind": "NonStatus", "code": "xxx", "message": "msg"}',
)
aresponses.add(hostname, '/', 'get', resp_mocker(return_value=resp))
with pytest.raises(APIError) as err:
await get_it(f"http://{hostname}/")
assert err.value.status == status
assert err.value.code is None
assert err.value.message is None
assert err.value.details is None
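# Illustrative aside (not part of the suite): as test_exception_with_payload
# shows above, APIError accepts a decoded Kubernetes Status body directly, so a
# payload can be wrapped without going through an HTTP response:
#
#   exc = APIError({"message": "msg", "code": 123, "details": {"a": "b"}}, status=409)
#   assert exc.status == 409 and exc.message == "msg"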
# --- file: src/autogpt_plugins/bing_search/bing_search.py
# --- repo: Significant-Gravitas/Auto-GPT-Plugins (license: MIT)
import json
import os
import re
import requests
def clean_text(text: str) -> str:
cleaned_text = re.sub("<[^>]*>", "", text) # Remove HTML tags
    cleaned_text = cleaned_text.replace(
        "\\n", " "
    )  # Replace literal "\n" escape sequences left in the snippet text with spaces
return cleaned_text
def _bing_search(query: str, num_results=8) -> str:
"""
Perform a Bing search and return the results as a JSON string.
"""
subscription_key = os.getenv("BING_API_KEY")
# Bing Search API endpoint
search_url = "https://api.bing.microsoft.com/v7.0/search"
headers = {"Ocp-Apim-Subscription-Key": subscription_key}
params = {
"q": query,
"count": num_results,
"textDecorations": True,
"textFormat": "HTML",
}
response = requests.get(search_url, headers=headers, params=params)
response.raise_for_status()
    data = response.json()
    # Extract the search result items from the response; use distinct names so
    # the raw payload is not shadowed by the item list
    web_pages = data.get("webPages", {})
    items = web_pages.get("value", [])
    # Create a list of search result dictionaries with 'title', 'href', and 'body' keys
    search_results_list = [
        {
            "title": clean_text(item["name"]),
            "href": item["url"],
            "body": clean_text(item["snippet"]),
        }
        for item in items
    ]
# Return the search results as a JSON string
return json.dumps(search_results_list, ensure_ascii=False, indent=4)
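# Minimal usage sketch (not part of the original plugin); assumes a valid Bing
# Search v7 key is exported as BING_API_KEY before running the module directly.
if __name__ == "__main__":
    print(_bing_search("openstack swift", num_results=3))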
# --- file: tests/test_app/library/loans/admin.py
# --- repo: farridav/django-jazzmin (license: MIT)
from django.contrib import admin
from django.urls import path
from .models import BookLoan, Library
from .views import CustomView
class BookLoanInline(admin.StackedInline):
model = BookLoan
extra = 1
readonly_fields = ("id", "duration")
fields = (
"book",
"imprint",
"status",
"due_back",
"borrower",
"loan_start",
"duration",
)
@admin.register(BookLoan)
class BookLoanAdmin(admin.ModelAdmin):
list_display = ("book", "status", "borrower", "due_back", "id")
list_filter = ("status", "due_back")
autocomplete_fields = ("borrower",)
search_fields = ("book__title",)
readonly_fields = ("id",)
fieldsets = (
(None, {"fields": ("book", "imprint", "id")}),
("Availability", {"fields": ("status", "due_back", "duration", "borrower")}),
)
    def get_urls(self):
        """
        Add a custom view, to demonstrate extending a ModelAdmin's URLconf.
        """
urls = super().get_urls()
return urls + [path("custom_view", CustomView.as_view(), name="custom_view")]
def response_change(self, request, obj):
ret = super().response_change(request, obj)
if "reserve" in request.POST:
obj.status = "r"
obj.save()
return ret
@admin.register(Library)
class LibraryAdmin(admin.ModelAdmin):
list_display = ("name", "address", "librarian")
# --- file: generate_keyczart.py
# --- repo: grahamgilbert/Crypt-Server (license: Apache-2.0)
import keyczar  # noqa: F401 -- fail fast if python-keyczar (which provides the keyczart CLI) is not installed
import subprocess
import os
directory = os.path.join(os.path.dirname(os.path.abspath(__file__)), "keyset")
if not os.path.exists(directory):
os.makedirs(directory)
if not os.listdir(directory):
    location_string = "--location={}".format(directory)
    # Create an empty keyset for encryption/decryption (purpose "crypt")...
    cmd = ["keyczart", "create", location_string, "--purpose=crypt", "--name=crypt"]
    subprocess.check_call(cmd)
    # ...then add an active primary key so the keyset is immediately usable.
    cmd = ["keyczart", "addkey", location_string, "--status=primary"]
    subprocess.check_call(cmd)
else:
print("Keyset directory already has something in there. Skipping key generation.")
# --- file: pronto/utils/io.py
# --- repo: althonos/pronto (license: MIT)
import bz2
import codecs
import gzip
import io
import lzma
import typing
import urllib.request
import warnings
from http.client import HTTPResponse
from typing import BinaryIO, ByteString, Dict, Optional, Union, cast
import chardet
MAGIC_GZIP = bytearray([0x1F, 0x8B])
MAGIC_LZMA = bytearray([0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00, 0x00])
MAGIC_BZIP2 = bytearray([0x42, 0x5A, 0x68])
class BufferedReader(io.BufferedReader):
"""A patch for `io.BufferedReader` supporting `http.client.HTTPResponse`."""
def read(self, size: Optional[int] = -1) -> bytes:
try:
return super(BufferedReader, self).read(size)
except ValueError:
if typing.cast(io.BufferedReader, self.closed):
return b""
raise
class EncodedFile(codecs.StreamRecoder):
def __init__(
self,
file: BinaryIO,
data_encoding: str,
file_encoding: Optional[str] = None,
errors: str = "strict",
):
if file_encoding is None:
file_encoding = data_encoding
data_info = codecs.lookup(data_encoding)
file_info = codecs.lookup(file_encoding)
super().__init__(
file,
data_info.encode,
data_info.decode,
file_info.streamreader,
file_info.streamwriter,
errors,
)
# Add attributes to simplify introspection
self.data_encoding = data_encoding
self.file_encoding = file_encoding
    def read(self, size: Optional[int] = -1) -> bytes:
        # Normalize CRLF to LF so downstream parsers always see Unix newlines.
        chunk = super().read(-1 if size is None else size)
        return chunk.replace(b"\r\n", b"\n")
    def readinto(self, buffer: ByteString) -> int:
        # Only request half the buffer: recoding to UTF-8 may expand the bytes.
        chunk = self.read(len(buffer) // 2)
        typing.cast(bytearray, buffer)[: len(chunk)] = chunk
        return len(chunk)
def get_handle(path: str, timeout: int = 2) -> BinaryIO:
"""Given a path or URL, get a binary handle for that path."""
    try:
        return open(path, "rb", buffering=0)
    except Exception:
        # Not a local file -- fall back to treating `path` as a URL.
        headers = {"Keep-Alive": f"timeout={timeout}"}
        request = urllib.request.Request(path, headers=headers)
        res: HTTPResponse = urllib.request.urlopen(request, timeout=timeout)
        if res.status != 200:
            raise ValueError(f"could not open {path}: {res.status} ({res.msg})")
if res.headers.get("Content-Encoding") in {"gzip", "deflate"}:
f = gzip.GzipFile(filename=res.geturl(), mode="rb", fileobj=res)
return typing.cast(BinaryIO, f)
return res
def get_location(reader: BinaryIO) -> Optional[str]:
"""Given a binary file-handle, try to extract the path/URL to the file."""
return (
getattr(reader, "name", None)
or getattr(reader, "url", None)
or getattr(reader, "geturl", lambda: None)()
)
def decompress(
reader: io.RawIOBase, path: Optional[str] = None, encoding: Optional[str] = None
) -> BinaryIO:
"""Given a binary file-handle, decompress it if it is compressed."""
buffered = BufferedReader(reader)
# Decompress the stream if it is compressed
if buffered.peek().startswith(MAGIC_GZIP):
decompressed = BufferedReader(
typing.cast(
io.RawIOBase,
gzip.GzipFile(mode="rb", fileobj=typing.cast(BinaryIO, buffered)),
)
)
elif buffered.peek().startswith(MAGIC_LZMA):
decompressed = BufferedReader(
typing.cast(
io.RawIOBase, lzma.LZMAFile(typing.cast(BinaryIO, buffered), mode="rb")
)
)
elif buffered.peek().startswith(MAGIC_BZIP2):
decompressed = BufferedReader(
typing.cast(
io.RawIOBase, bz2.BZ2File(typing.cast(BinaryIO, buffered), mode="rb")
)
)
else:
decompressed = buffered
# Attempt to detect the encoding and decode the stream
det: Dict[str, Union[str, float]] = chardet.detect(decompressed.peek())
confidence = 1.0 if encoding is not None else cast(float, det["confidence"])
encoding = encoding if encoding is not None else cast(str, det["encoding"])
if encoding == "ascii":
encoding = "utf-8"
if confidence < 1.0:
warnings.warn(
f"unsound encoding, assuming {encoding} ({confidence:.0%} confidence)",
UnicodeWarning,
stacklevel=3,
)
if encoding == "utf-8":
return typing.cast(BinaryIO, decompressed)
else:
return typing.cast(
BinaryIO,
BufferedReader(
typing.cast(
io.RawIOBase,
EncodedFile(
typing.cast(typing.BinaryIO, decompressed),
"UTF-8",
typing.cast(str, det["encoding"]),
),
)
),
)
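# Usage sketch (illustrative, not part of pronto; "go.obo.gz" is a placeholder
# path): chaining the helpers above yields a decompressed, UTF-8 binary handle
# from either a local file or a URL.
#
#   handle = decompress(get_handle("go.obo.gz"))
#   first_line = handle.readline()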
# --- file: dts/airbyte/airbyte-integrations/connectors/source-rki-covid/source_rki_covid/source.py
# --- repo: alldatacenter/alldata (licenses: MIT, Apache-2.0, BSD-3-Clause, Elastic-2.0)
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from abc import ABC
from datetime import datetime
from typing import Any, Iterable, List, Mapping, MutableMapping, Optional, Tuple
import requests
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from airbyte_cdk.sources.streams.http import HttpStream
# Basic full refresh stream
class RkiCovidStream(HttpStream, ABC):
url_base = "https://api.corona-zahlen.org/"
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
return None
def request_params(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None
) -> MutableMapping[str, Any]:
"""
TODO: Override this method to define any query parameters to be set. Remove this method if you don't need to define request params.
Usually contains common params e.g. pagination size etc.
"""
return {}
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
"""
TODO: Override this method to define how a response is parsed.
:return an iterable containing each record in the response
"""
yield response.json()
# class that contains main source germany | full-refresh
class Germany(RkiCovidStream):
"""Docs: https://api.corona-zahlen.org/germany"""
primary_key = None
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
return "germany/"
# class that contains main source states | full-refresh
class GermanyStates(RkiCovidStream):
"""Docs: https://api.corona-zahlen.org/states"""
primary_key = None
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
if response.json():
for key, value in response.json().get("data").items():
yield value
return [{}]
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
return "states/"
# class that contains source age-groups in germany. | full-refresh
class GermanyAgeGroups(RkiCovidStream):
"""Docs: https://api.corona-zahlen.org/germany/age-groups"""
primary_key = None
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
yield response.json().get("data")
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
return "germany/age-groups"
# class that contains main source states | full-refresh
class GermanyStatesAgeGroups(RkiCovidStream):
"""Docs: https://api.corona-zahlen.org/states/age-groups"""
primary_key = None
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
if response.json():
for key, value in response.json().get("data").items():
record = {"abbreviation": key}
for grp, data in value.items():
record.update({grp: data})
yield record
return [{}]
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
return "states/age-groups"
# Basic incremental stream
class IncrementalRkiCovidStream(RkiCovidStream, ABC):
state_checkpoint_interval = None
    @property
    def cursor_field(self) -> str:
        """
        TODO
        Override to return the cursor field used by this stream, e.g. an API entity might always use created_at as the cursor field. This is
        usually id- or date-based. This field's presence tells the framework this is an incremental stream. Required for incremental.
        :return str: The name of the cursor field.
        """
        return []
    def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
        """
        Override to determine the latest state after reading the latest record. This typically compares the cursor_field of the latest record
        with the current state and picks the more recent cursor. This is how a stream's state is determined. Required for incremental.
        """
        return {}
# source: germany/history/cases/:days | Incremental
class GermanyHistoryCases(IncrementalRkiCovidStream):
"""Docs: https://api.corona-zahlen.org/germany/germany/history/cases/:days"""
primary_key = None
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.start_date = config.get("start_date")
@property
def source_defined_cursor(self) -> bool:
return False
@property
def cursor_field(self) -> str:
return "date"
def date_to_int(self, start_date) -> int:
diff = datetime.now() - datetime.strptime(start_date, "%Y-%m-%d")
if diff.days <= 0:
return 1
return diff.days
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
if not current_stream_state:
current_stream_state = {self.cursor_field: self.start_date}
return {self.cursor_field: max(latest_record.get(self.cursor_field, ""), current_stream_state.get(self.cursor_field, ""))}
def read_records(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping[str, Any]]:
records = super().read_records(stream_state=stream_state, **kwargs)
if stream_state:
for record in records:
if record[self.cursor_field] > stream_state.get(self.cursor_field):
yield record
else:
yield from records
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
if response.json().get("data"):
return response.json().get("data")
return [{}]
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
if self.start_date:
return "germany/history/cases/" + str(self.date_to_int(self.start_date))
return "germany/history/cases/"
# source: germany/history/incidence/:days | Incremental
class GermanHistoryIncidence(IncrementalRkiCovidStream):
"""Docs: https://api.corona-zahlen.org/germany/germany/history/incidence/:days"""
primary_key = None
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.start_date = config.get("start_date")
@property
def source_defined_cursor(self) -> bool:
return False
@property
def cursor_field(self) -> str:
return "date"
def date_to_int(self, start_date) -> int:
diff = datetime.now() - datetime.strptime(start_date, "%Y-%m-%d")
if diff.days <= 0:
return 1
return diff.days
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
if not current_stream_state:
current_stream_state = {self.cursor_field: self.start_date}
return {self.cursor_field: max(latest_record.get(self.cursor_field, ""), current_stream_state.get(self.cursor_field, ""))}
def read_records(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping[str, Any]]:
records = super().read_records(stream_state=stream_state, **kwargs)
if stream_state:
for record in records:
if record[self.cursor_field] > stream_state.get(self.cursor_field):
yield record
else:
yield from records
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
if response.json().get("data"):
return response.json().get("data")
return [{}]
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
if self.start_date:
return "germany/history/incidence/" + str(self.date_to_int(self.start_date))
return "germany/history/incidence/"
# source: germany/history/deaths/:days | Incremental
class GermanHistoryDeaths(IncrementalRkiCovidStream):
"""Docs: https://api.corona-zahlen.org/germany/germany/history/deaths/:days"""
primary_key = None
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.start_date = config.get("start_date")
@property
def source_defined_cursor(self) -> bool:
return False
@property
def cursor_field(self) -> str:
return "date"
def date_to_int(self, start_date) -> int:
diff = datetime.now() - datetime.strptime(start_date, "%Y-%m-%d")
if diff.days <= 0:
return 1
return diff.days
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
if not current_stream_state:
current_stream_state = {self.cursor_field: self.start_date}
return {self.cursor_field: max(latest_record.get(self.cursor_field, ""), current_stream_state.get(self.cursor_field, ""))}
def read_records(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping[str, Any]]:
records = super().read_records(stream_state=stream_state, **kwargs)
if stream_state:
for record in records:
if record[self.cursor_field] > stream_state.get(self.cursor_field):
yield record
else:
yield from records
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
if response.json().get("data"):
return response.json().get("data")
return [{}]
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
if self.start_date:
return "germany/history/deaths/" + str(self.date_to_int(self.start_date))
return "germany/history/deaths/"
# source: germany/history/recovered/:days | Incremental
class GermanHistoryRecovered(IncrementalRkiCovidStream):
"""Docs: https://api.corona-zahlen.org/germany/germany/history/recovered/:days"""
primary_key = None
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.start_date = config.get("start_date")
@property
def source_defined_cursor(self) -> bool:
return False
@property
def cursor_field(self) -> str:
return "date"
def date_to_int(self, start_date) -> int:
diff = datetime.now() - datetime.strptime(start_date, "%Y-%m-%d")
if diff.days <= 0:
return 1
return diff.days
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
if not current_stream_state:
current_stream_state = {self.cursor_field: self.start_date}
return {self.cursor_field: max(latest_record.get(self.cursor_field, ""), current_stream_state.get(self.cursor_field, ""))}
def read_records(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping[str, Any]]:
records = super().read_records(stream_state=stream_state, **kwargs)
if stream_state:
for record in records:
if record[self.cursor_field] > stream_state.get(self.cursor_field):
yield record
else:
yield from records
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
if response.json().get("data"):
return response.json().get("data")
return [{}]
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
if self.start_date:
return "germany/history/recovered/" + str(self.date_to_int(self.start_date))
return "germany/history/recovered/"
# source: germany/history/frozen-incidence/:days | Incremental
class GermanHistoryFrozenIncidence(IncrementalRkiCovidStream):
"""Docs: https://api.corona-zahlen.org/germany/germany/history/frozen-incidence/:days"""
primary_key = None
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.start_date = config.get("start_date")
@property
def source_defined_cursor(self) -> bool:
return False
@property
def cursor_field(self) -> str:
return "date"
def date_to_int(self, start_date) -> int:
diff = datetime.now() - datetime.strptime(start_date, "%Y-%m-%d")
if diff.days <= 0:
return 1
return diff.days
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
if not current_stream_state:
current_stream_state = {self.cursor_field: self.start_date}
return {self.cursor_field: max(latest_record.get(self.cursor_field, ""), current_stream_state.get(self.cursor_field, ""))}
def read_records(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping[str, Any]]:
records = super().read_records(stream_state=stream_state, **kwargs)
if stream_state:
for record in records:
if record[self.cursor_field] > stream_state.get(self.cursor_field):
yield record
else:
yield from records
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
if response.json().get("data"):
return response.json().get("data").get("history")
return [{}]
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
if self.start_date:
return "germany/history/frozen-incidence/" + str(self.date_to_int(self.start_date))
return "germany/history/frozen-incidence/"
# source: germany/history/hospitalization/:days | Incremental
class GermanHistoryHospitalization(IncrementalRkiCovidStream):
"""Docs: https://api.corona-zahlen.org/germany/germany/history/hospitalization/:days"""
primary_key = None
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.start_date = config.get("start_date")
@property
def source_defined_cursor(self) -> bool:
return False
@property
def cursor_field(self) -> str:
return "date"
def date_to_int(self, start_date) -> int:
diff = datetime.now() - datetime.strptime(start_date, "%Y-%m-%d")
if diff.days <= 0:
return 1
return diff.days
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
if not current_stream_state:
current_stream_state = {self.cursor_field: self.start_date}
return {self.cursor_field: max(latest_record.get(self.cursor_field, ""), current_stream_state.get(self.cursor_field, ""))}
def read_records(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping[str, Any]]:
records = super().read_records(stream_state=stream_state, **kwargs)
if stream_state:
for record in records:
if record[self.cursor_field] > stream_state.get(self.cursor_field):
yield record
else:
yield from records
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
if response.json().get("data"):
return response.json().get("data")
return [{}]
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
if self.start_date:
return "germany/history/hospitalization/" + str(self.date_to_int(self.start_date))
return "germany/history/hospitalization/"
# STATES FULL-REFRESH.
# source: states/history/cases/:days | FULL-REFRESH
class ByStateRkiCovidStream(RkiCovidStream, ABC):
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
if response.json().get("data"):
for key, value in response.json().get("data").items():
for record in value.get("history"):
record.update({"name": value.get("name"), "abbreviation": key})
yield record
return [{}]
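# Illustrative shape note (payload assumed from the API docstrings above):
# parse_response flattens the per-state mapping into one record per history
# entry, each tagged with the state's "name" and "abbreviation", e.g.:
#
#   {"data": {"BW": {"name": "Baden-Wuerttemberg", "history": [{"cases": 1}]}}}
#   # -> yields {"cases": 1, "name": "Baden-Wuerttemberg", "abbreviation": "BW"}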
class StatesHistoryCases(ByStateRkiCovidStream):
"""Docs: https://api.corona-zahlen.org/germany/states/history/cases/:days"""
primary_key = None
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.start_date = config.get("start_date")
def date_to_int(self, start_date) -> int:
diff = datetime.now() - datetime.strptime(start_date, "%Y-%m-%d")
if diff.days <= 0:
return 1
return diff.days
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
if self.start_date:
return "states/history/cases/" + str(self.date_to_int(self.start_date))
return "states/history/cases/"
# source: states/history/incidence/:days | FULL-REFRESH
class StatesHistoryIncidence(ByStateRkiCovidStream):
"""Docs: https://api.corona-zahlen.org/germany/states/history/incidence/:days"""
primary_key = None
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.start_date = config.get("start_date")
def date_to_int(self, start_date) -> int:
diff = datetime.now() - datetime.strptime(start_date, "%Y-%m-%d")
if diff.days <= 0:
return 1
return diff.days
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
if self.start_date:
return "states/history/incidence/" + str(self.date_to_int(self.start_date))
return "states/history/incidence/"
# source: states/history/frozen-incidence/:days | FULL-REFRESH
class StatesHistoryFrozenIncidence(ByStateRkiCovidStream):
"""Docs: https://api.corona-zahlen.org/germany/states/history/frozen-incidence/:days"""
primary_key = None
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.start_date = config.get("start_date")
def date_to_int(self, start_date) -> int:
diff = datetime.now() - datetime.strptime(start_date, "%Y-%m-%d")
if diff.days <= 0:
return 1
return diff.days
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
if self.start_date:
return "states/history/frozen-incidence/" + str(self.date_to_int(self.start_date))
return "states/history/frozen-incidence/"
# source: states/history/deaths/:days | FULL-REFRESH
class StatesHistoryDeaths(ByStateRkiCovidStream):
"""Docs: https://api.corona-zahlen.org/germany/states/history/deaths/:days"""
primary_key = None
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.start_date = config.get("start_date")
def date_to_int(self, start_date) -> int:
diff = datetime.now() - datetime.strptime(start_date, "%Y-%m-%d")
if diff.days <= 0:
return 1
return diff.days
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
if self.start_date:
return "states/history/deaths/" + str(self.date_to_int(self.start_date))
return "states/history/deaths/"
# source: states/history/recovered/:days | FULL-REFRESH
class StatesHistoryRecovered(ByStateRkiCovidStream):
"""Docs: https://api.corona-zahlen.org/germany/states/history/recovered/:days"""
primary_key = None
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.start_date = config.get("start_date")
def date_to_int(self, start_date) -> int:
diff = datetime.now() - datetime.strptime(start_date, "%Y-%m-%d")
if diff.days <= 0:
return 1
return diff.days
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
if self.start_date:
return "states/history/recovered/" + str(self.date_to_int(self.start_date))
return "states/history/recovered/"
# source: states/history/hospitalization/:days | FULL-REFRESH
class StatesHistoryHospitalization(ByStateRkiCovidStream):
"""Docs: https://api.corona-zahlen.org/germany/states/history/hospitalization/:days"""
primary_key = None
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.start_date = config.get("start_date")
def date_to_int(self, start_date) -> int:
diff = datetime.now() - datetime.strptime(start_date, "%Y-%m-%d")
if diff.days <= 0:
return 1
return diff.days
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
if self.start_date:
return "states/history/hospitalization/" + str(self.date_to_int(self.start_date))
return "states/history/hospitalization/"
# Source
class SourceRkiCovid(AbstractSource):
def check_connection(self, logger, config) -> Tuple[bool, any]:
"""
Testing connection availability for the connector.
:param config: the user-input config object conforming to the connector's spec.json
:param logger: logger object
:return Tuple[bool, any]: (True, None) if the input config can be used to connect to the API successfully, (False, error) otherwise.
"""
try:
req = requests.get(RkiCovidStream.url_base + "germany")
if req.status_code == 200:
return True, None
return False, req.reason
except Exception:
return False, "There is a problem in source check connection."
def streams(self, config: Mapping[str, Any]) -> List[Stream]:
"""
:param config: A Mapping of the user input configuration as defined in the connector spec.
"""
# Streams For Germany
streams = [
Germany(),
GermanyAgeGroups(),
GermanyHistoryCases(config=config),
GermanHistoryIncidence(config=config),
GermanHistoryDeaths(config=config),
GermanHistoryRecovered(config=config),
GermanHistoryFrozenIncidence(config=config),
GermanHistoryHospitalization(config=config),
]
# Streams For States Of Germany
streams.extend(
[
GermanyStates(),
GermanyStatesAgeGroups(),
StatesHistoryCases(config=config),
StatesHistoryIncidence(config=config),
StatesHistoryFrozenIncidence(config=config),
StatesHistoryDeaths(config=config),
StatesHistoryRecovered(config=config),
StatesHistoryHospitalization(config=config),
]
)
return streams
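# Illustrative aside (not part of the connector): every history stream above
# turns the configured start_date into the ":days" path suffix the
# corona-zahlen API expects, via date_to_int() (days elapsed, clamped to >= 1):
#
#   stream = GermanyHistoryCases(config={"start_date": "2022-01-01"})
#   stream.path()  # -> "germany/history/cases/<days since 2022-01-01>"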
# --- file: third_party/tflite/src/tensorflow/python/lib/core/custom_float_test.py
# --- repo: iridium-browser/iridium-browser (licenses: Apache-2.0 and others)
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for custom float Python types."""
import collections
import copy
import itertools
import math
import sys
from typing import Type
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
# pylint: disable=unused-import,g-bad-import-order
from tensorflow.python.framework import dtypes
from tensorflow.python.lib.core import _pywrap_float8
from tensorflow.python.lib.core import _pywrap_custom_casts
from tensorflow.python.platform import test
from tensorflow.tsl.python.lib.core import pywrap_bfloat16
bfloat16 = pywrap_bfloat16.bfloat16_type()
float8_e4m3b11 = pywrap_bfloat16.float8_e4m3b11_type()
float8_e4m3fn = _pywrap_float8.TF_float8_e4m3fn_type()
float8_e5m2 = _pywrap_float8.TF_float8_e5m2_type()
_pywrap_custom_casts.TF_register_custom_casts()
def numpy_assert_allclose(a, b, float_type, **kwargs):
a = a.astype(np.float32) if a.dtype == float_type else a
b = b.astype(np.float32) if b.dtype == float_type else b
return np.testing.assert_allclose(a, b, **kwargs)
def numpy_promote_types(
a: Type[np.generic], b: Type[np.generic], float_type: Type[np.generic],
next_largest_fp_type: Type[np.generic]) -> Type[np.generic]:
if a == float_type and b == float_type:
return float_type
if a == float_type:
a = next_largest_fp_type
if b == float_type:
b = next_largest_fp_type
return np.promote_types(a, b)
def truncate(x, float_type):
if isinstance(x, np.ndarray):
return x.astype(float_type).astype(np.float32)
else:
return type(x)(float_type(x))
def test_binary_operation(a, b, op, float_type):
a = float_type(a)
b = float_type(b)
expected = op(np.float32(a), np.float32(b))
result = op(a, b)
if math.isnan(expected):
if not math.isnan(result):
raise AssertionError("%s expected to be nan." % repr(result))
else:
np.testing.assert_equal(
truncate(expected, float_type=float_type), float(result))
def dtype_has_inf(dtype):
"""Determines if the dtype has an `inf` representation."""
inf = float("inf")
is_inf = False
try:
x = dtype(inf)
is_inf = np.isinf(x)
except (OverflowError, ValueError):
pass
return is_inf
# Configure bounds and properties for our custom types, to be used in tests
# below.
FLOAT_EPSILON = {
bfloat16: float.fromhex("1.0p-7"),
float8_e4m3b11: float.fromhex("1.0p-3"),
float8_e4m3fn: float.fromhex("1.0p-3"),
float8_e5m2: float.fromhex("1.0p-2"),
}
FLOAT_MAX = {
bfloat16: float.fromhex("1.FEp127"),
float8_e4m3b11: float.fromhex("1.Ep4"),
float8_e4m3fn: float.fromhex("1.Cp8"),
float8_e5m2: float.fromhex("1.Cp15"),
}
FLOAT_SMALLEST_SUBNORMAL = {
bfloat16: float.fromhex("1.0p-133"),
float8_e4m3b11: float.fromhex("1.0p-13"),
float8_e4m3fn: float.fromhex("1.0p-9"),
float8_e5m2: float.fromhex("1.0p-16"),
}
FLOAT_SMALLEST_NORMAL = {
bfloat16: float.fromhex("1.0p-126"),
float8_e4m3b11: float.fromhex("1.0p-10"),
float8_e4m3fn: float.fromhex("1.0p-6"),
float8_e5m2: float.fromhex("1.0p-14"),
}
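# Illustrative cross-check (not part of the original test): the epsilons above
# follow from each format's explicit mantissa width, eps = 2 ** -mantissa_bits
# (7 bits for bfloat16, 3 for float8_e4m3*, 2 for float8_e5m2).
assert FLOAT_EPSILON[bfloat16] == 2.0 ** -7
assert FLOAT_EPSILON[float8_e5m2] == 2.0 ** -2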
# Values that should round trip exactly to float and back.
# pylint: disable=g-complex-comprehension
FLOAT_VALUES = {
dtype: [
0.0, 1.0, -1.0, 0.5, -0.5, FLOAT_EPSILON[dtype],
1.0 + FLOAT_EPSILON[dtype], 1.0 - FLOAT_EPSILON[dtype],
-1.0 - FLOAT_EPSILON[dtype], -1.0 + FLOAT_EPSILON[dtype], 3.5, 4, 5, 7,
FLOAT_MAX[dtype], -FLOAT_MAX[dtype], float("nan"), float("-nan"),
float("inf") if dtype_has_inf(dtype) else 0.0,
float("-inf") if dtype_has_inf(dtype) else 0.0
] for dtype in FLOAT_EPSILON.keys()
}
# Values that should round trip exactly to integer and back.
INT_VALUES = {
bfloat16: [0, 1, 2, 10, 34, 47, 128, 255, 256, 512],
float8_e4m3b11:
list(range(0, 30, 2)) + list(range(1, 15, 2)),
float8_e4m3fn: [
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18, 20, 22,
24, 26, 28, 30, 32, 36, 40, 44, 48, 52, 56, 60, 64, 72, 80, 88, 96, 104,
112, 120, 128, 144, 160, 176, 192, 208, 224, 240, 256, 288, 320, 352,
384, 416, 448
],
float8_e5m2: [
0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32, 40, 48, 56,
64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 448, 512, 640, 768,
896, 1024, 1280, 1536, 1792, 2048, 2560, 3072, 3584, 4096, 5120, 6144,
7168, 8192, 10240, 12288, 14336, 16384, 20480, 24576, 28672, 32768,
40960, 49152, 57344
]
}
BITS_TYPE = {
bfloat16: np.uint16,
float8_e4m3b11: np.uint8,
float8_e4m3fn: np.uint8,
float8_e5m2: np.uint8
}
# pylint: disable=g-complex-comprehension
@parameterized.named_parameters(({
"testcase_name": "_" + dtype.__name__,
"float_type": dtype
} for dtype in [bfloat16, float8_e4m3b11, float8_e4m3fn, float8_e5m2]))
class CustomFloatTest(parameterized.TestCase):
"""Tests the non-numpy Python methods of the custom float type."""
def testRoundTripToFloat(self, float_type):
for v in FLOAT_VALUES[float_type]:
np.testing.assert_equal(v, float(float_type(v)))
def testRoundTripNumpyTypes(self, float_type):
for dtype in [np.float16, np.float32, np.float64, np.longdouble]:
for f in FLOAT_VALUES[float_type]:
np.testing.assert_equal(dtype(f), dtype(float_type(dtype(f))))
np.testing.assert_equal(float(dtype(f)), float(float_type(dtype(f))))
np.testing.assert_equal(dtype(f), dtype(float_type(np.array(f, dtype))))
np.testing.assert_equal(
dtype(np.array(FLOAT_VALUES[float_type], float_type)),
np.array(FLOAT_VALUES[float_type], dtype))
def testRoundTripToInt(self, float_type):
for v in INT_VALUES[float_type]:
self.assertEqual(v, int(float_type(v)))
self.assertEqual(-v, int(float_type(-v)))
def testRoundTripToNumpy(self, float_type):
for dtype in [
float_type, np.float16, np.float32, np.float64, np.longdouble
]:
with self.subTest(dtype.__name__):
for v in FLOAT_VALUES[float_type]:
np.testing.assert_equal(dtype(v), dtype(float_type(dtype(v))))
          np.testing.assert_equal(float(dtype(v)), float(float_type(dtype(v))))
np.testing.assert_equal(
dtype(v), dtype(float_type(np.array(v, dtype))))
if dtype != float_type:
np.testing.assert_equal(
np.array(FLOAT_VALUES[float_type], dtype),
float_type(np.array(FLOAT_VALUES[float_type],
dtype)).astype(dtype))
def testBetweenCustomTypes(self, float_type):
for dtype in [bfloat16, float8_e4m3b11, float8_e4m3fn, float8_e5m2]:
x = np.array(FLOAT_VALUES[float_type], dtype=dtype)
y = x.astype(float_type)
z = x.astype(float).astype(float_type)
numpy_assert_allclose(y, z, float_type=float_type)
def testStr(self, float_type):
for value in FLOAT_VALUES[float_type]:
self.assertEqual("%.6g" % float(float_type(value)),
str(float_type(value)))
def testFromStr(self, float_type):
self.assertEqual(float_type(1.2), float_type("1.2"))
self.assertTrue(np.isnan(float_type("nan")))
self.assertTrue(np.isnan(float_type("-nan")))
if dtype_has_inf(float_type):
self.assertEqual(float_type(float("inf")), float_type("inf"))
self.assertEqual(float_type(float("-inf")), float_type("-inf"))
def testRepr(self, float_type):
for value in FLOAT_VALUES[float_type]:
self.assertEqual("%.6g" % float(float_type(value)),
repr(float_type(value)))
def testItem(self, float_type):
self.assertIsInstance(float_type(0).item(), float)
def testHashZero(self, float_type):
"""Tests that negative zero and zero hash to the same value."""
self.assertEqual(hash(float_type(-0.0)), hash(float_type(0.0)))
def testHashNumbers(self, float_type):
for value in np.extract(
np.isfinite(FLOAT_VALUES[float_type]), FLOAT_VALUES[float_type]):
with self.subTest(value):
self.assertEqual(hash(value), hash(float_type(value)), str(value))
def testHashNan(self, float_type):
for name, nan in [("PositiveNan", float_type(float("nan"))),
("NegativeNan", float_type(float("-nan")))]:
with self.subTest(name):
nan_hash = hash(nan)
nan_object_hash = object.__hash__(nan)
# The hash of a NaN is either 0 or a hash of the object pointer.
self.assertIn(nan_hash, (sys.hash_info.nan, nan_object_hash),
str(nan))
def testHashInf(self, float_type):
if dtype_has_inf(float_type):
self.assertEqual(sys.hash_info.inf, hash(float_type(float("inf"))), "inf")
self.assertEqual(-sys.hash_info.inf, hash(float_type(float("-inf"))),
"-inf")
# Tests for Python operations
def testNegate(self, float_type):
for v in FLOAT_VALUES[float_type]:
np.testing.assert_equal(
float(float_type(-float(float_type(v)))), float(-float_type(v)))
def testAdd(self, float_type):
for a, b in [(0, 0), (1, 0), (1, -1), (2, 3.5), (3.5, -2.25),
(float("inf"), -2.25), (float("-inf"), -2.25),
(3.5, float("nan"))]:
test_binary_operation(a, b, op=lambda a, b: a + b, float_type=float_type)
def testAddScalarTypePromotion(self, float_type):
"""Tests type promotion against Numpy scalar values."""
types = [float_type, np.float16, np.float32, np.float64, np.longdouble]
for lhs_type in types:
for rhs_type in types:
expected_type = numpy_promote_types(
lhs_type,
rhs_type,
float_type=float_type,
next_largest_fp_type=np.float32)
actual_type = type(lhs_type(3.5) + rhs_type(2.25))
self.assertEqual(expected_type, actual_type)
def testAddArrayTypePromotion(self, float_type):
self.assertEqual(np.float32,
type(float_type(3.5) + np.array(2.25, np.float32)))
self.assertEqual(np.float32,
type(np.array(3.5, np.float32) + float_type(2.25)))
def testSub(self, float_type):
for a, b in [(0, 0), (1, 0), (1, -1), (2, 3.5), (3.5, -2.25),
(-2.25, float("inf")), (-2.25, float("-inf")),
(3.5, float("nan"))]:
test_binary_operation(a, b, op=lambda a, b: a - b, float_type=float_type)
def testMul(self, float_type):
for a, b in [(0, 0), (1, 0), (1, -1), (3.5, -2.25), (float("inf"), -2.25),
(float("-inf"), -2.25), (3.5, float("nan"))]:
test_binary_operation(a, b, op=lambda a, b: a * b, float_type=float_type)
def testDiv(self, float_type):
for a, b in [(0, 0), (1, 0), (1, -1), (2, 3.5), (3.5, -2.25),
(float("inf"), -2.25), (float("-inf"), -2.25),
(3.5, float("nan"))]:
test_binary_operation(a, b, op=lambda a, b: a / b, float_type=float_type)
def testLess(self, float_type):
for v in FLOAT_VALUES[float_type]:
for w in FLOAT_VALUES[float_type]:
self.assertEqual(v < w, float_type(v) < float_type(w))
def testLessEqual(self, float_type):
for v in FLOAT_VALUES[float_type]:
for w in FLOAT_VALUES[float_type]:
self.assertEqual(v <= w, float_type(v) <= float_type(w))
def testGreater(self, float_type):
for v in FLOAT_VALUES[float_type]:
for w in FLOAT_VALUES[float_type]:
self.assertEqual(v > w, float_type(v) > float_type(w))
def testGreaterEqual(self, float_type):
for v in FLOAT_VALUES[float_type]:
for w in FLOAT_VALUES[float_type]:
self.assertEqual(v >= w, float_type(v) >= float_type(w))
def testEqual(self, float_type):
for v in FLOAT_VALUES[float_type]:
for w in FLOAT_VALUES[float_type]:
self.assertEqual(v == w, float_type(v) == float_type(w))
def testNotEqual(self, float_type):
for v in FLOAT_VALUES[float_type]:
for w in FLOAT_VALUES[float_type]:
self.assertEqual(v != w, float_type(v) != float_type(w))
def testNan(self, float_type):
a = np.isnan(float_type(float("nan")))
self.assertTrue(a)
numpy_assert_allclose(
np.array([1.0, a]), np.array([1.0, a]), float_type=float_type)
a = np.array(
[float_type(1.34375),
float_type(1.4375),
float_type(float("nan"))],
dtype=float_type)
b = np.array(
[float_type(1.3359375),
float_type(1.4375),
float_type(float("nan"))],
dtype=float_type)
numpy_assert_allclose(
a,
b,
rtol=0.1,
atol=0.1,
equal_nan=True,
err_msg="",
verbose=True,
float_type=float_type)
def testSort(self, float_type):
# Note: np.sort doesn't work properly with NaNs since they always compare
# False.
values_to_sort = np.float32(
[x for x in FLOAT_VALUES[float_type] if not np.isnan(x)])
sorted_f32 = np.sort(values_to_sort)
sorted_float_type = np.sort(values_to_sort.astype(float_type)) # pylint: disable=too-many-function-args
np.testing.assert_equal(sorted_f32, np.float32(sorted_float_type))
def testArgmax(self, float_type):
values_to_sort = np.float32(
float_type(np.float32(FLOAT_VALUES[float_type])))
argmax_f32 = np.argmax(values_to_sort)
argmax_float_type = np.argmax(values_to_sort.astype(float_type)) # pylint: disable=too-many-function-args
np.testing.assert_equal(argmax_f32, argmax_float_type)
def testArgmaxOnNan(self, float_type):
"""Ensures we return the right thing for multiple NaNs."""
one_with_nans = np.array(
[1.0, float("nan"), float("nan")], dtype=np.float32)
np.testing.assert_equal(
np.argmax(one_with_nans.astype(float_type)), np.argmax(one_with_nans))
def testArgmaxOnNegativeInfinity(self, float_type):
"""Ensures we return the right thing for negative infinities."""
inf = np.array([float("-inf")], dtype=np.float32)
np.testing.assert_equal(np.argmax(inf.astype(float_type)), np.argmax(inf))
def testArgmin(self, float_type):
values_to_sort = np.float32(
float_type(np.float32(FLOAT_VALUES[float_type])))
argmin_f32 = np.argmin(values_to_sort)
argmin_float_type = np.argmin(values_to_sort.astype(float_type)) # pylint: disable=too-many-function-args
np.testing.assert_equal(argmin_f32, argmin_float_type)
def testArgminOnNan(self, float_type):
"""Ensures we return the right thing for multiple NaNs."""
one_with_nans = np.array(
[1.0, float("nan"), float("nan")], dtype=np.float32)
np.testing.assert_equal(
np.argmin(one_with_nans.astype(float_type)), np.argmin(one_with_nans))
def testArgminOnPositiveInfinity(self, float_type):
"""Ensures we return the right thing for positive infinities."""
inf = np.array([float("inf")], dtype=np.float32)
np.testing.assert_equal(np.argmin(inf.astype(float_type)), np.argmin(inf))
def testDtypeFromString(self, float_type):
assert np.dtype(float_type.__name__) == np.dtype(float_type)
BinaryOp = collections.namedtuple("BinaryOp", ["op"])
UNARY_UFUNCS = [
np.negative, np.positive, np.absolute, np.fabs, np.rint, np.sign,
np.conjugate, np.exp, np.exp2, np.expm1, np.log, np.log10, np.log1p,
np.log2, np.sqrt, np.square, np.cbrt, np.reciprocal, np.sin, np.cos, np.tan,
np.arcsin, np.arccos, np.arctan, np.sinh, np.cosh, np.tanh, np.arcsinh,
np.arccosh, np.arctanh, np.deg2rad, np.rad2deg, np.floor, np.ceil, np.trunc
]
BINARY_UFUNCS = [
np.add, np.subtract, np.multiply, np.divide, np.logaddexp, np.logaddexp2,
np.floor_divide, np.power, np.remainder, np.fmod, np.heaviside, np.arctan2,
np.hypot, np.maximum, np.minimum, np.fmax, np.fmin, np.copysign
]
BINARY_PREDICATE_UFUNCS = [
np.equal, np.not_equal, np.less, np.greater, np.less_equal,
np.greater_equal, np.logical_and, np.logical_or, np.logical_xor
]
# pylint: disable=g-complex-comprehension
@parameterized.named_parameters(({
"testcase_name": "_" + dtype.__name__,
"float_type": dtype
} for dtype in [bfloat16, float8_e4m3b11, float8_e4m3fn, float8_e5m2]))
class CustomFloatNumPyTest(parameterized.TestCase):
"""Tests NumPy integration of the custom float types."""
def testDtype(self, float_type):
self.assertEqual(float_type, np.dtype(float_type))
def testDeepCopyDoesNotAlterHash(self, float_type):
# For context, see https://github.com/google/jax/issues/4651. If the hash
# value of the type descriptor is not initialized correctly, a deep copy
# can change the type hash.
dtype = np.dtype(float_type)
h = hash(dtype)
_ = copy.deepcopy(dtype)
self.assertEqual(h, hash(dtype))
def testArray(self, float_type):
x = np.array([[1, 2, 3]], dtype=float_type)
self.assertEqual(float_type, x.dtype)
self.assertEqual("[[1 2 3]]", str(x))
np.testing.assert_equal(x, x)
numpy_assert_allclose(x, x, float_type=float_type)
self.assertTrue((x == x).all())
def testComparisons(self, float_type):
x = np.array([30, 7, -30], dtype=np.float32)
bx = x.astype(float_type)
y = np.array([17, 7, 0], dtype=np.float32)
by = y.astype(float_type)
np.testing.assert_equal(x == y, bx == by)
np.testing.assert_equal(x != y, bx != by)
np.testing.assert_equal(x < y, bx < by)
np.testing.assert_equal(x > y, bx > by)
np.testing.assert_equal(x <= y, bx <= by)
np.testing.assert_equal(x >= y, bx >= by)
def testEqual2(self, float_type):
a = np.array([31], float_type)
b = np.array([15], float_type)
self.assertFalse(a.__eq__(b))
def testCanCast(self, float_type):
allowed_casts = [
(np.bool_, float_type),
(np.int8, float_type),
(np.uint8, float_type),
(float_type, np.float32),
(float_type, np.float64),
(float_type, np.longdouble),
(float_type, np.complex64),
(float_type, np.complex128),
(float_type, np.clongdouble),
]
all_dtypes = [
np.float16, np.float32, np.float64, np.longdouble, np.int8, np.int16,
np.int32, np.int64, np.complex64, np.complex128, np.clongdouble,
np.uint8, np.uint16, np.uint32, np.uint64, np.intc, np.int_,
np.longlong, np.uintc, np.ulonglong
]
for d in all_dtypes:
with self.subTest(d.__name__):
self.assertEqual((float_type, d) in allowed_casts,
np.can_cast(float_type, d))
self.assertEqual((d, float_type) in allowed_casts,
np.can_cast(d, float_type))
def testCasts(self, float_type):
for dtype in [
np.float16, np.float32, np.float64, np.longdouble, np.int8, np.int16,
np.int32, np.int64, np.complex64, np.complex128, np.clongdouble,
np.uint8, np.uint16, np.uint32, np.uint64, np.intc, np.int_,
np.longlong, np.uintc, np.ulonglong
]:
x = np.array([[1, 2, 3]], dtype=dtype)
y = x.astype(float_type)
z = y.astype(dtype)
self.assertTrue(np.all(x == y))
self.assertEqual(float_type, y.dtype)
self.assertTrue(np.all(x == z))
self.assertEqual(dtype, z.dtype)
def testConformNumpyComplex(self, float_type):
for dtype in [np.complex64, np.complex128, np.clongdouble]:
x = np.array([1.5, 2.5 + 2.j, 3.5], dtype=dtype)
y_np = x.astype(np.float32)
y_tf = x.astype(float_type)
numpy_assert_allclose(y_np, y_tf, atol=2e-2, float_type=float_type)
z_np = y_np.astype(dtype)
z_tf = y_tf.astype(dtype)
numpy_assert_allclose(z_np, z_tf, atol=2e-2, float_type=float_type)
def testArange(self, float_type):
np.testing.assert_equal(
np.arange(100, dtype=np.float32).astype(float_type),
np.arange(100, dtype=float_type))
np.testing.assert_equal(
np.arange(-8, 8, 1, dtype=np.float32).astype(float_type),
np.arange(-8, 8, 1, dtype=float_type))
np.testing.assert_equal(
np.arange(-0., -2., -0.25, dtype=np.float32).astype(float_type),
np.arange(-0., -2., -0.25, dtype=float_type))
np.testing.assert_equal(
np.arange(-16., 16., 2., dtype=np.float32).astype(float_type),
np.arange(-16., 16., 2., dtype=float_type))
def testUnaryUfunc(self, float_type):
for op in UNARY_UFUNCS:
with self.subTest(op.__name__):
rng = np.random.RandomState(seed=42)
x = rng.randn(3, 7, 10).astype(float_type)
numpy_assert_allclose(
op(x).astype(np.float32),
truncate(op(x.astype(np.float32)), float_type=float_type),
rtol=1e-4,
float_type=float_type)
def testBinaryUfunc(self, float_type):
for op in BINARY_UFUNCS:
with self.subTest(op.__name__):
rng = np.random.RandomState(seed=42)
x = rng.randn(3, 7, 10).astype(float_type)
y = rng.randn(4, 1, 7, 10).astype(float_type)
numpy_assert_allclose(
op(x, y).astype(np.float32),
truncate(
op(x.astype(np.float32), y.astype(np.float32)),
float_type=float_type),
rtol=1e-4,
float_type=float_type)
def testBinaryPredicateUfunc(self, float_type):
for op in BINARY_PREDICATE_UFUNCS:
with self.subTest(op.__name__):
rng = np.random.RandomState(seed=42)
x = rng.randn(3, 7).astype(float_type)
y = rng.randn(4, 1, 7).astype(float_type)
np.testing.assert_equal(
op(x, y), op(x.astype(np.float32), y.astype(np.float32)))
def testPredicateUfunc(self, float_type):
for op in [np.isfinite, np.isinf, np.isnan, np.signbit, np.logical_not]:
with self.subTest(op.__name__):
rng = np.random.RandomState(seed=42)
shape = (3, 7, 10)
posinf_flips = rng.rand(*shape) < 0.1
neginf_flips = rng.rand(*shape) < 0.1
nan_flips = rng.rand(*shape) < 0.1
vals = rng.randn(*shape)
vals = np.where(posinf_flips, np.inf, vals)
vals = np.where(neginf_flips, -np.inf, vals)
vals = np.where(nan_flips, np.nan, vals)
vals = vals.astype(float_type)
np.testing.assert_equal(op(vals), op(vals.astype(np.float32)))
def testDivmod(self, float_type):
rng = np.random.RandomState(seed=42)
x = rng.randn(3, 7).astype(float_type)
y = rng.randn(4, 1, 7).astype(float_type)
o1, o2 = np.divmod(x, y)
e1, e2 = np.divmod(x.astype(np.float32), y.astype(np.float32))
numpy_assert_allclose(
o1,
truncate(e1, float_type=float_type),
rtol=1e-2,
float_type=float_type)
numpy_assert_allclose(
o2,
truncate(e2, float_type=float_type),
rtol=1e-2,
float_type=float_type)
def testModf(self, float_type):
rng = np.random.RandomState(seed=42)
x = rng.randn(3, 7).astype(float_type)
o1, o2 = np.modf(x)
e1, e2 = np.modf(x.astype(np.float32))
numpy_assert_allclose(
o1.astype(np.float32),
truncate(e1, float_type=float_type),
rtol=1e-2,
float_type=float_type)
numpy_assert_allclose(
o2.astype(np.float32),
truncate(e2, float_type=float_type),
rtol=1e-2,
float_type=float_type)
def testLdexp(self, float_type):
rng = np.random.RandomState(seed=42)
x = rng.randn(3, 7).astype(float_type)
y = rng.randint(-50, 50, (1, 7)).astype(np.int32)
self.assertEqual(np.ldexp(x, y).dtype, x.dtype)
numpy_assert_allclose(
np.ldexp(x, y).astype(np.float32),
truncate(np.ldexp(x.astype(np.float32), y), float_type=float_type),
rtol=1e-2,
atol=1e-6,
float_type=float_type)
def testFrexp(self, float_type):
rng = np.random.RandomState(seed=42)
x = rng.randn(3, 7).astype(float_type)
mant1, exp1 = np.frexp(x)
mant2, exp2 = np.frexp(x.astype(np.float32))
np.testing.assert_equal(exp1, exp2)
numpy_assert_allclose(mant1, mant2, rtol=1e-2, float_type=float_type)
def testCopySign(self, float_type):
for bits in list(range(1, 128)):
with self.subTest(bits):
bits_type = BITS_TYPE[float_type]
val = bits_type(bits).view(float_type)
val_with_sign = np.copysign(val, float_type(-1))
val_with_sign_bits = val_with_sign.view(bits_type)
num_bits = np.iinfo(bits_type).bits
np.testing.assert_equal(bits | (1 << (num_bits - 1)),
val_with_sign_bits)
def testNextAfter(self, float_type):
one = np.array(1., dtype=float_type)
two = np.array(2., dtype=float_type)
zero = np.array(0., dtype=float_type)
nan = np.array(np.nan, dtype=float_type)
np.testing.assert_equal(
np.nextafter(one, two) - one, FLOAT_EPSILON[float_type])
np.testing.assert_equal(
np.nextafter(one, zero) - one, -FLOAT_EPSILON[float_type] / 2)
np.testing.assert_equal(np.isnan(np.nextafter(nan, one)), True)
np.testing.assert_equal(np.isnan(np.nextafter(one, nan)), True)
np.testing.assert_equal(np.nextafter(one, one), one)
smallest_denormal = FLOAT_SMALLEST_SUBNORMAL[float_type]
np.testing.assert_equal(np.nextafter(zero, one), smallest_denormal)
np.testing.assert_equal(np.nextafter(zero, -one), -smallest_denormal)
for a, b in itertools.permutations([0., nan], 2):
np.testing.assert_equal(
np.nextafter(
np.array(a, dtype=np.float32), np.array(b, dtype=np.float32)),
np.nextafter(
np.array(a, dtype=float_type), np.array(b, dtype=float_type)))
def testSpacing(self, float_type):
# Sweep a variety of binades to see that spacing gives the proper ULP.
with self.subTest(name="Subnormals"):
for i in range(
int(np.log2(FLOAT_SMALLEST_SUBNORMAL[float_type])),
int(np.log2(FLOAT_SMALLEST_NORMAL[float_type]))):
power_of_two = float_type(2.0**i)
distance = FLOAT_SMALLEST_SUBNORMAL[float_type]
np.testing.assert_equal(np.spacing(power_of_two), distance)
np.testing.assert_equal(np.spacing(-power_of_two), -distance)
# Normals have a distance which depends on their binade.
with self.subTest(name="Normals"):
for i in range(
int(np.log2(FLOAT_SMALLEST_NORMAL[float_type])),
int(np.log2(FLOAT_MAX[float_type]))):
power_of_two = float_type(2.0**i)
distance = FLOAT_EPSILON[float_type] * power_of_two
np.testing.assert_equal(np.spacing(power_of_two), distance)
np.testing.assert_equal(np.spacing(-power_of_two), -distance)
# Check that spacing agrees with arithmetic involving nextafter.
with self.subTest(name="NextAfter"):
for x in FLOAT_VALUES[float_type]:
x_float_type = float_type(x)
spacing = np.spacing(x_float_type)
toward = np.copysign(float_type(2.0 * np.abs(x) + 1), x_float_type)
nextup = np.nextafter(x_float_type, toward)
if np.isnan(spacing):
self.assertTrue(np.isnan(nextup - x_float_type))
else:
np.testing.assert_equal(spacing, nextup - x_float_type)
# Check that spacing for special values gives the correct answer.
with self.subTest(name="NonFinite"):
nan = float_type(float("nan"))
np.testing.assert_equal(np.spacing(nan), np.spacing(np.float32(nan)))
if dtype_has_inf(float_type):
inf = float_type(float("inf"))
np.testing.assert_equal(np.spacing(inf), np.spacing(np.float32(inf)))
if __name__ == "__main__":
absltest.main()
|
0905befc579b61a02fdd335d35d7ed3f0c02883f
|
82b05fc158acbb10263a9e2415caf31ed4ea1ff4
|
/graphbrain/constants.py
|
e89ea8474993d01e50e063d6ecba1c182666faf8
|
[
"MIT"
] |
permissive
|
graphbrain/graphbrain
|
e655de5c9f7d755b7a34649a461762d7def501ff
|
8cb019eeea4bfba036f66ca742f1b4c3fc2c9c6a
|
refs/heads/master
| 2023-09-04T04:07:04.985162
| 2023-07-19T12:41:20
| 2023-07-19T12:41:20
| 51,751,006
| 534
| 60
|
MIT
| 2023-03-10T21:32:47
| 2016-02-15T11:25:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,103
|
py
|
constants.py
|
# Pre-defined system atoms
sequence_connector = '_seq'
sequence_attrs_connector = '_seq_attrs'
compound_noun_builder = '+/B/.'
lemma_connector = '_lemma'
coref_connector = '_coref'
coref_res_connector = '_coref_res'
main_coref_connector = '_main_coref'
type_of_connector = '_type_of'
is_connector = '_is'
possessive_builder = 'poss/Bp.am/.'
gender_connector = '_gender'
number_connector = '_number'
animacy_connector = '_animacy'
singular_plural_connector = '_sng_pl'
parser_coref_connector = '_pcoref'
inference_srcs_connector = '_infsrcs'
list_or_matches_builder = 'list/J/.'
# Pre-defined entity keys
coref_set_id_key = 'coref'
# Logo
ascii_logo = r"""
_ _ _
| | | | (_)
__ _ _ __ __ _ _ __ | |__ | |__ _ __ __ _ _ _ __
/ _` | '__/ _` | '_ \| '_ \| '_ \| '__/ _` | | '_ \
| (_| | | | (_| | |_) | | | | |_) | | | (_| | | | | |
\__, |_| \__,_| .__/|_| |_|_.__/|_| \__,_|_|_| |_|
__/ | | |
|___/ |_|
"""
|
ed55c4d3c89f3d665c5a570b1a28c25bf81d26ac
|
f791462fb1286607d16459c1602d133f8d8c8b59
|
/examples/zero_inflated_poisson.py
|
b216563345d6a319a0a96b88c314042558414d25
|
[
"Apache-2.0"
] |
permissive
|
pyro-ppl/numpyro
|
b071ed2bd93be41bafc3da8764c9f5617f996d92
|
ca96eca8e8e1531e71ba559ef7a8ad3b4b68cbc2
|
refs/heads/master
| 2023-09-03T15:56:13.252692
| 2023-08-28T14:32:25
| 2023-08-28T14:32:25
| 170,580,540
| 1,941
| 219
|
Apache-2.0
| 2023-09-04T11:26:11
| 2019-02-13T21:13:59
|
Python
|
UTF-8
|
Python
| false
| false
| 5,856
|
py
|
zero_inflated_poisson.py
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
"""
Example: Zero-Inflated Poisson regression model
================================================
In this example, we model and predict how many fish are caught by visitors to a state park.
Many groups of visitors catch zero fish, either because they did not fish at all or because
they were unlucky. We would like to explicitly model this bimodal behavior (zero versus non-zero)
and ascertain which variables contribute to each behavior.
We answer this question by fitting a zero-inflated Poisson regression model. We use MAP,
VI and MCMC as estimation methods. Finally, from the MCMC samples, we identify the variables that
contribute to the zero and non-zero components of the zero-inflated Poisson likelihood.
"""
import argparse
import os
import random
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
import jax.numpy as jnp
from jax.random import PRNGKey
import jax.scipy as jsp
import numpyro
import numpyro.distributions as dist
from numpyro.infer import MCMC, NUTS, SVI, Predictive, Trace_ELBO, autoguide
matplotlib.use("Agg") # noqa: E402
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
def model(X, Y):
D_X = X.shape[1]
b1 = numpyro.sample("b1", dist.Normal(0.0, 1.0).expand([D_X]).to_event(1))
b2 = numpyro.sample("b2", dist.Normal(0.0, 1.0).expand([D_X]).to_event(1))
q = jsp.special.expit(jnp.dot(X, b1[:, None])).reshape(-1)
lam = jnp.exp(jnp.dot(X, b2[:, None]).reshape(-1))
with numpyro.plate("obs", X.shape[0]):
numpyro.sample("Y", dist.ZeroInflatedPoisson(gate=q, rate=lam), obs=Y)
def run_mcmc(model, args, X, Y):
kernel = NUTS(model)
mcmc = MCMC(
kernel,
num_warmup=args.num_warmup,
num_samples=args.num_samples,
num_chains=args.num_chains,
progress_bar=False if "NUMPYRO_SPHINXBUILD" in os.environ else True,
)
mcmc.run(PRNGKey(1), X, Y)
mcmc.print_summary()
return mcmc.get_samples()
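# Note (added): in run_svi below, AutoDelta fits a point estimate for each
# latent variable (i.e. MAP), while AutoDiagonalNormal fits a mean-field
# Gaussian approximation (variational inference).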
def run_svi(model, guide_family, args, X, Y):
if guide_family == "AutoDelta":
guide = autoguide.AutoDelta(model)
elif guide_family == "AutoDiagonalNormal":
guide = autoguide.AutoDiagonalNormal(model)
optimizer = numpyro.optim.Adam(0.001)
svi = SVI(model, guide, optimizer, Trace_ELBO())
svi_results = svi.run(PRNGKey(1), args.maxiter, X=X, Y=Y)
params = svi_results.params
return params, guide
def main(args):
set_seed(args.seed)
# prepare dataset
df = pd.read_stata("http://www.stata-press.com/data/r11/fish.dta")
df["intercept"] = 1
cols = ["livebait", "camper", "persons", "child", "intercept"]
mask = np.random.randn(len(df)) < args.train_size
df_train = df[mask]
df_test = df[~mask]
X_train = jnp.asarray(df_train[cols].values)
y_train = jnp.asarray(df_train["count"].values)
X_test = jnp.asarray(df_test[cols].values)
y_test = jnp.asarray(df_test["count"].values)
print("run MAP.")
map_params, map_guide = run_svi(model, "AutoDelta", args, X_train, y_train)
print("run VI.")
vi_params, vi_guide = run_svi(model, "AutoDiagonalNormal", args, X_train, y_train)
print("run MCMC.")
posterior_samples = run_mcmc(model, args, X_train, y_train)
# evaluation
def svi_predict(model, guide, params, args, X):
predictive = Predictive(
model=model, guide=guide, params=params, num_samples=args.num_samples
)
predictions = predictive(PRNGKey(1), X=X, Y=None)
svi_predictions = jnp.rint(predictions["Y"].mean(0))
return svi_predictions
map_predictions = svi_predict(model, map_guide, map_params, args, X_test)
vi_predictions = svi_predict(model, vi_guide, vi_params, args, X_test)
predictive = Predictive(model, posterior_samples=posterior_samples)
predictions = predictive(PRNGKey(1), X=X_test, Y=None)
mcmc_predictions = jnp.rint(predictions["Y"].mean(0))
print(
"MAP RMSE: ",
mean_squared_error(y_test.to_py(), map_predictions.to_py(), squared=False),
)
print(
"VI RMSE: ",
mean_squared_error(y_test.to_py(), vi_predictions.to_py(), squared=False),
)
print(
"MCMC RMSE: ",
mean_squared_error(y_test.to_py(), mcmc_predictions.to_py(), squared=False),
)
# make plot
fig, axes = plt.subplots(2, 1, figsize=(6, 6), constrained_layout=True)
def add_fig(var_name, title, ax):
ax.set_title(title)
ax.violinplot(
[posterior_samples[var_name][:, i].to_py() for i in range(len(cols))]
)
ax.set_xticks(np.arange(1, len(cols) + 1))
ax.set_xticklabels(cols, rotation=45, fontsize=10)
add_fig("b1", "Coefficients for probability of catching fish", axes[0])
add_fig("b2", "Coefficients for the number of fish caught", axes[1])
plt.savefig("zip_fish.png")
if __name__ == "__main__":
parser = argparse.ArgumentParser("Zero-Inflated Poisson Regression")
parser.add_argument("--seed", nargs="?", default=42, type=int)
parser.add_argument("-n", "--num-samples", nargs="?", default=2000, type=int)
parser.add_argument("--num-warmup", nargs="?", default=1000, type=int)
parser.add_argument("--num-chains", nargs="?", default=1, type=int)
parser.add_argument("--num-data", nargs="?", default=100, type=int)
parser.add_argument("--maxiter", nargs="?", default=5000, type=int)
parser.add_argument("--train-size", nargs="?", default=0.8, type=float)
parser.add_argument("--device", default="cpu", type=str, help='use "cpu" or "gpu".')
args = parser.parse_args()
numpyro.set_platform(args.device)
numpyro.set_host_device_count(args.num_chains)
main(args)
|
96637f09ebfa7426b343f7d1b42598e2a1987f0e
|
86c9e888f522ede1b231ad324c234555268224cc
|
/tests/block/test_block_2.py
|
a2aea4dab6ddf695da6844de0c18f88d36fd02e2
|
[
"BSD-3-Clause"
] |
permissive
|
python-lz4/python-lz4
|
4502344a73340ad9207a1d824ac6d3407bdcb22c
|
58df0834b57d485f2483bf5ccf1007c313b25557
|
refs/heads/master
| 2023-08-15T10:16:21.588003
| 2023-01-01T16:18:26
| 2023-01-01T16:18:26
| 57,201,963
| 250
| 85
|
BSD-3-Clause
| 2023-01-01T16:18:27
| 2016-04-27T09:39:00
|
C
|
UTF-8
|
Python
| false
| false
| 1,816
|
py
|
test_block_2.py
|
import pytest
import sys
import lz4.block
import psutil
import os
# This test requires allocating a big lump of memory. In order to
# avoid a massive memory allocation during byte compilation, we have
# to declare a variable for the size of the buffer we're going to
# create outside the scope of the function below. See:
# https://bugs.python.org/issue21074
_4GB = 0x100000000 # 4GB
# This test will be killed on Travis due to the 3GB memory limit
# there. Unfortunately psutil reports the host memory, not the memory
# available to the container, and so can't be used to detect available
# memory, so instead, as an ugly hack for detecting we're on Travis we
# check for the TRAVIS environment variable being set. This is quite
# fragile.
@pytest.mark.skipif(
os.environ.get('TRAVIS') is not None,
reason='Skipping test on Travis due to insufficient memory'
)
@pytest.mark.skipif(
sys.maxsize < 0xffffffff,
reason='Py_ssize_t too small for this test'
)
@pytest.mark.skipif(
psutil.virtual_memory().available < _4GB,
reason='Insufficient system memory for this test'
)
def test_huge():
try:
huge = b'\0' * _4GB
except MemoryError:
pytest.skip('Insufficient system memory for this test')
with pytest.raises(
OverflowError, match='Input too large for LZ4 API'
):
lz4.block.compress(huge)
with pytest.raises(
OverflowError, match='Dictionary too large for LZ4 API'
):
lz4.block.compress(b'', dict=huge)
with pytest.raises(
OverflowError, match='Input too large for LZ4 API'
):
lz4.block.decompress(huge)
with pytest.raises(
OverflowError, match='Dictionary too large for LZ4 API'
):
lz4.block.decompress(b'', dict=huge)
def test_dummy():
pass
|
ce89018488971db547ab6b75fb6b9c03bdf83b18
|
afbae26b958b5ef20548402a65002dcc8e55b66a
|
/release/stubs.min/Autodesk/Revit/DB/__init___parts/StructuralAsset.py
|
4c50f48b07aaa8a2fd6033546f3686bf428bd1df
|
[
"MIT"
] |
permissive
|
gtalarico/ironpython-stubs
|
d875cb8932c7644f807dc6fde9dd513d159e4f5c
|
c7f6a6cb197e3949e40a4880a0b2a44e72d0a940
|
refs/heads/master
| 2023-07-12T01:43:47.295560
| 2022-05-23T18:12:06
| 2022-05-23T18:12:06
| 95,340,553
| 235
| 88
|
NOASSERTION
| 2023-07-05T06:36:28
| 2017-06-25T05:30:46
|
Python
|
UTF-8
|
Python
| false
| false
| 9,799
|
py
|
StructuralAsset.py
|
class StructuralAsset(object,IDisposable):
"""
Represents the properties of a material pertinent to structural analysis.
StructuralAsset(name: str,structuralAssetClass: StructuralAssetClass)
"""
def Copy(self):
"""
Copy(self: StructuralAsset) -> StructuralAsset
Produces a copy of the asset.
Returns: A copy of the asset.
"""
pass
def Dispose(self):
""" Dispose(self: StructuralAsset) """
pass
def Equals(self,*__args):
"""
Equals(self: StructuralAsset,other: StructuralAsset) -> bool
Determines whether this structural asset is equal to another.
other: The structural asset with which to compare this structural asset.
Returns: True if the given structural asset is equal to this one,otherwise false.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: StructuralAsset,disposing: bool) """
pass
def SetPoissonRatio(self,poissonRatio):
"""
SetPoissonRatio(self: StructuralAsset,poissonRatio: float)
Sets the Poisson ratio of the asset.
"""
pass
def SetShearModulus(self,shearModulus):
"""
SetShearModulus(self: StructuralAsset,shearModulus: float)
Sets the shear modulus of the asset.
"""
pass
def SetThermalExpansionCoefficient(self,thermalExpCoeff):
"""
SetThermalExpansionCoefficient(self: StructuralAsset,thermalExpCoeff: float)
Sets the thermal expansion coefficient of the asset.
"""
pass
def SetYoungModulus(self,youngModulus):
"""
SetYoungModulus(self: StructuralAsset,youngModulus: float)
Sets the Young's modulus of the asset.
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __eq__(self,*args):
""" x.__eq__(y) <==> x==y """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,name,structuralAssetClass):
""" __new__(cls: type,name: str,structuralAssetClass: StructuralAssetClass) """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
Behavior=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Flag indicating whether elements of this material behave isotropically or orthotropically.
Get: Behavior(self: StructuralAsset) -> StructuralBehavior
Set: Behavior(self: StructuralAsset)=value
"""
ConcreteBendingReinforcement=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The bending reinforcement of the asset.
Get: ConcreteBendingReinforcement(self: StructuralAsset) -> float
Set: ConcreteBendingReinforcement(self: StructuralAsset)=value
"""
ConcreteCompression=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The compression strength of concrete-based assets.
Get: ConcreteCompression(self: StructuralAsset) -> float
Set: ConcreteCompression(self: StructuralAsset)=value
"""
ConcreteShearReinforcement=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The shear reinforcement of the asset.
Get: ConcreteShearReinforcement(self: StructuralAsset) -> float
Set: ConcreteShearReinforcement(self: StructuralAsset)=value
"""
ConcreteShearStrengthReduction=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The shear strength reduction of the asset.
Get: ConcreteShearStrengthReduction(self: StructuralAsset) -> float
Set: ConcreteShearStrengthReduction(self: StructuralAsset)=value
"""
DampingRatio=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The damping ratio of the asset.
Get: DampingRatio(self: StructuralAsset) -> float
Set: DampingRatio(self: StructuralAsset)=value
"""
Density=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The density of the asset.
Get: Density(self: StructuralAsset) -> float
Set: Density(self: StructuralAsset)=value
"""
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: StructuralAsset) -> bool
"""
Lightweight=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Flag indicating whether the asset describes a material that is light-weight or not.
Get: Lightweight(self: StructuralAsset) -> bool
Set: Lightweight(self: StructuralAsset)=value
"""
MetalReductionFactor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The reduction factor of the asset.
Get: MetalReductionFactor(self: StructuralAsset) -> float
Set: MetalReductionFactor(self: StructuralAsset)=value
"""
MetalResistanceCalculationStrength=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The resistance calculation strength of the asset.
Get: MetalResistanceCalculationStrength(self: StructuralAsset) -> float
Set: MetalResistanceCalculationStrength(self: StructuralAsset)=value
"""
MinimumTensileStrength=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The minimum tensile strength of the asset.
Get: MinimumTensileStrength(self: StructuralAsset) -> float
Set: MinimumTensileStrength(self: StructuralAsset)=value
"""
MinimumYieldStress=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The minimum yield stress of the asset.
Get: MinimumYieldStress(self: StructuralAsset) -> float
Set: MinimumYieldStress(self: StructuralAsset)=value
"""
Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The name of the structural asset.
Get: Name(self: StructuralAsset) -> str
Set: Name(self: StructuralAsset)=value
"""
PoissonRatio=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The Poisson ratio of the asset.
Get: PoissonRatio(self: StructuralAsset) -> XYZ
Set: PoissonRatio(self: StructuralAsset)=value
"""
ShearModulus=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The shear modulus of the asset.
Get: ShearModulus(self: StructuralAsset) -> XYZ
Set: ShearModulus(self: StructuralAsset)=value
"""
StructuralAssetClass=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The type of material that this structural asset describes (e.g. wood,concrete,metal.)
Get: StructuralAssetClass(self: StructuralAsset) -> StructuralAssetClass
"""
SubClass=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The sub-class of the asset.
Get: SubClass(self: StructuralAsset) -> str
Set: SubClass(self: StructuralAsset)=value
"""
ThermalExpansionCoefficient=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The thermal expansion coefficient of the asset.
Get: ThermalExpansionCoefficient(self: StructuralAsset) -> XYZ
Set: ThermalExpansionCoefficient(self: StructuralAsset)=value
"""
WoodBendingStrength=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The bending strength of the asset.
Get: WoodBendingStrength(self: StructuralAsset) -> float
Set: WoodBendingStrength(self: StructuralAsset)=value
"""
WoodGrade=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The grade of wood used in a wood-based asset.
Get: WoodGrade(self: StructuralAsset) -> str
Set: WoodGrade(self: StructuralAsset)=value
"""
WoodParallelCompressionStrength=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The parallel compression strength of the asset.
Get: WoodParallelCompressionStrength(self: StructuralAsset) -> float
Set: WoodParallelCompressionStrength(self: StructuralAsset)=value
"""
WoodParallelShearStrength=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The parallel shear strength of the asset.
Get: WoodParallelShearStrength(self: StructuralAsset) -> float
Set: WoodParallelShearStrength(self: StructuralAsset)=value
"""
WoodPerpendicularCompressionStrength=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The perpendicular compression strength of the asset.
Get: WoodPerpendicularCompressionStrength(self: StructuralAsset) -> float
Set: WoodPerpendicularCompressionStrength(self: StructuralAsset)=value
"""
WoodPerpendicularShearStrength=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The perpendicular shear strength of the asset.
Get: WoodPerpendicularShearStrength(self: StructuralAsset) -> float
Set: WoodPerpendicularShearStrength(self: StructuralAsset)=value
"""
WoodSpecies=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The species of wood used in a wood-based asset.
Get: WoodSpecies(self: StructuralAsset) -> str
Set: WoodSpecies(self: StructuralAsset)=value
"""
YoungModulus=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The Young's modulus of the asset.
Get: YoungModulus(self: StructuralAsset) -> XYZ
Set: YoungModulus(self: StructuralAsset)=value
"""
|
d80bf8a35efabc883a17d6662e83bd13784253f8
|
dfa1a1a263eab3ac8bbcb2a00297da7fc82bccfd
|
/src/graphs/zombie_in_a_matrix.py
|
7d3699821aa76845ce55d02f9a61179715dc5fb9
|
[] |
no_license
|
monpro/algorithm
|
07e79e7a85ca9fe86fac0b3c740de2f2037f5e89
|
a330e92191642e2965939a06b050ca84d4ed11a6
|
refs/heads/master
| 2021-07-01T03:49:59.040611
| 2020-08-25T12:36:54
| 2020-08-25T12:36:54
| 143,118,129
| 102
| 0
| null | 2020-05-31T04:22:46
| 2018-08-01T07:12:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,103
|
py
|
zombie_in_a_matrix.py
|
class Solution:
"""
@param grid: a 2D integer grid
@return: an integer
"""
def zombie(self, grid):
# write your code here
if grid == []:
return 0
delta = [[0, 1], [1, 0], [0, -1], [-1, 0]]
queue = []
row, column = len(grid), len(grid[0])
result = 0
for i in range(row):
for j in range(column):
if grid[i][j] == 1:
queue.append((i, j))
        # Multi-source BFS: each while iteration processes one "day", turning
        # every human cell adjacent to a zombie into a zombie.
while queue != []:
next_day_queue = []
for i, j in queue:
for dx, dy in delta:
if 0 <= i + dx < row and 0 <= j + dy < column and grid[i + dx][j + dy] == 0:
next_day_queue.append((i + dx, j + dy))
grid[i + dx][j + dy] = 1
result += 1
queue = next_day_queue
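        # After the BFS: any cell still 0 was never reachable, so return -1.
        # Otherwise the last while iteration counted a day with no new
        # infections, hence the result - 1 below.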
for i in range(row):
for j in range(column):
if grid[i][j] == 0:
return -1
return result - 1
|
e6dfda080edbf66157bc688b28ae01a4e96e359e
|
6fdb4eaf5b0e6dbd7db4bf947547541e9aebf110
|
/app-testing/tests/protocol_analyze_test.py
|
f513820e07c0124201bf691ec6df2fc357ee6ff9
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
Opentrons/opentrons
|
874321e01149184960eeaeaa31b1d21719a1ceda
|
026b523c8c9e5d45910c490efb89194d72595be9
|
refs/heads/edge
| 2023-09-02T02:51:49.579906
| 2023-08-31T16:02:45
| 2023-08-31T16:02:45
| 38,644,841
| 326
| 174
|
Apache-2.0
| 2023-09-14T21:47:20
| 2015-07-06T20:41:01
|
Python
|
UTF-8
|
Python
| false
| false
| 4,311
|
py
|
protocol_analyze_test.py
|
"""Test the Protocol Landing of the page."""
import os
import pytest
from automation.data.protocol import Protocol
from automation.data.protocols import Protocols
from automation.driver.drag_drop import drag_and_drop_file
from automation.menus.left_menu import LeftMenu
from automation.pages.labware_landing import LabwareLanding
from automation.pages.protocol_landing import ProtocolLanding
from rich.console import Console
from selenium.webdriver.chrome.webdriver import WebDriver
def _what_protocols() -> list[Protocol]:
"""Use the environment variable to select which protocols are used in the test."""
protocols: Protocols = Protocols()
protocols_to_test: str = os.getenv("APP_ANALYSIS_TEST_PROTOCOLS", "upload_protocol")
    tests: list[Protocol] = []
for protocol_name in [x.strip() for x in protocols_to_test.split(",")]:
        tests.append(getattr(protocols, protocol_name))
return tests
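# Illustrative (added): APP_ANALYSIS_TEST_PROTOCOLS="upload_protocol,other"
# would yield [protocols.upload_protocol, protocols.other]; any attribute name
# other than the default "upload_protocol" is hypothetical here.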
@pytest.mark.parametrize(
"protocol",
_what_protocols(),
)
def test_analyses(
driver: WebDriver,
console: Console,
request: pytest.FixtureRequest,
protocol: Protocol,
) -> None:
"""Analyze a protocol in the app and validate its details."""
labware_landing: LabwareLanding = LabwareLanding(driver, console, request.node.nodeid)
left_menu: LeftMenu = LeftMenu(driver, console, request.node.nodeid)
protocol_landing: ProtocolLanding = ProtocolLanding(driver, console, request.node.nodeid)
# Upload labware if any
if protocol.custom_labware:
for labware in protocol.labware_paths:
left_menu.navigate("labware")
labware_landing.click_import_button()
assert labware_landing.get_import_custom_labware_definition_header().is_displayed()
assert labware_landing.get_choose_file_button().is_displayed()
console.print(
f"uploading labware: {labware.resolve()}",
style="white on blue",
)
drag_and_drop_file(labware_landing.get_drag_drop_file_button(), labware)
if labware_landing.get_success_toast_message(
filename=labware.name
) or labware_landing.get_duplicate_error_toast_message(filename=labware.name):
console.print(
f"{labware.name} uploaded to app.",
style="white on blue",
)
else:
raise AssertionError("No toast message that the labware was uploaded.")
left_menu.base.click(left_menu.protocols)
# Clean up any protocols that did not get deleted
protocol_landing.delete_all_protocols()
console.print(f"uploading protocol: {protocol.file_path.resolve()}", style="white on blue")
drag_and_drop_file(
protocol_landing.get_drag_drop_file_button(),
protocol.file_path,
)
analysis_timeout: int = 61
assert protocol_landing.wait_until_loading_data_gone(
timeout_sec=analysis_timeout
), f"Analysis took more than {analysis_timeout} seconds."
# look for analysis error if the protocol should have one
if protocol.app_error:
error_link = protocol_landing.get_error_details_safe()
assert error_link is not None, "No analysis error but was expecting one."
protocol_landing.base.click_webelement(error_link)
error_details = protocol_landing.get_popout_error().text
assert error_details == protocol.app_analysis_error
protocol_landing.click_popout_close()
else:
error_link = protocol_landing.get_error_details_safe()
if error_link is not None:
protocol_landing.base.click_webelement(error_link)
error_details = protocol_landing.get_popout_error().text
raise AssertionError(f"Unexpected analysis error: {error_details}")
# Verifying elements on Protocol Landing Page
    # TODO: make the next lookup safe and report the protocol name when it is not found
assert protocol_landing.get_deckMap_protocol_landing(protocol_name=protocol.protocol_name).is_displayed()
assert (
protocol_landing.get_protocol_name_text_protocol_landing(protocol_name=protocol.protocol_name)
== protocol.protocol_name
)
# TODO validate robot
# TODO verify modules
# No cleanup, do at the beginning of the test.
|
73f5f26784274cac3ac5934818ba44874ffbfdcb
|
2469e5c76c0f70ac64b0f333ad20ccc84a760522
|
/algos/ppo/ppo_utils/util.py
|
6f2735cc7f9d22629cb0d2d1060272a589ee1be9
|
[
"MIT"
] |
permissive
|
facebookresearch/nocturne
|
8454c10643ea4ff063e10d5f83a5a0d2d90ad1c2
|
ae0a4e361457caf6b7e397675cc86f46161405ed
|
refs/heads/main
| 2023-05-24T02:25:48.963308
| 2022-10-15T19:19:29
| 2022-10-15T19:19:29
| 503,843,020
| 216
| 21
|
MIT
| 2023-04-22T16:46:06
| 2022-06-15T16:28:46
|
Python
|
UTF-8
|
Python
| false
| false
| 690
|
py
|
util.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Code modified from https://github.com/marlbenchmark/on-policy
import copy
import numpy as np
import torch
import torch.nn as nn
def init(module, weight_init, bias_init, gain=1):
weight_init(module.weight.data, gain=gain)
bias_init(module.bias.data)
return module
def get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def check(input):
output = torch.from_numpy(input) if type(input) == np.ndarray else input
return output
|
b06d9e6c226fc1cae1330129e28a4dfcf6f9b94f
|
098e9d4eed49a0e4573d67022d78e85fd6d2944a
|
/utils/video_feature/merge_align_i3d.py
|
fb18c539255dfa91d6375f82404139131e3109c2
|
[
"MIT"
] |
permissive
|
jayleicn/TVRetrieval
|
36db2714b7a0c16c5fdfc2a69dd213bdc41e0670
|
d99a9ea7e724249047d6357f2a607c7ae256f8c6
|
refs/heads/master
| 2022-09-16T05:26:28.845738
| 2022-08-20T22:13:18
| 2022-08-20T22:13:18
| 236,402,810
| 141
| 28
|
MIT
| 2021-06-22T23:18:00
| 2020-01-27T01:41:06
|
Python
|
UTF-8
|
Python
| false
| false
| 3,930
|
py
|
merge_align_i3d.py
|
"""
Merge i3d features from all shows. Meanwhile, align it with the imagenet feature
so that they have the same number of feature vectors.
"""
import os
import h5py
import numpy as np
from tqdm import tqdm
from collections import Counter
def convert_for_single_h5(src_h5, tgt_h5, align_h5_key2len, debug=False):
"""
Args:
src_h5: h5py.File object, containing the frame level features
tgt_h5: h5py.File object, containing the clip level features
align_h5_key2len: dict, {key: len}, each value indicates the length (L) of the array (L, D)
debug:
Returns:
"""
for k, feat in tqdm(src_h5.items()):
if k in align_h5_key2len:
if len(feat) != align_h5_key2len[k]:
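                # Zero-pad when feat is shorter than the aligned length and
                # truncate when it is longer; NumPy slice clamping covers both.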
align_len = align_h5_key2len[k]
aligned_feat = np.zeros((align_h5_key2len[k], feat.shape[1]), dtype=np.float32)
aligned_feat[:len(feat)] = feat[:align_len]
feat = aligned_feat
tgt_h5.create_dataset(k, data=feat, dtype=np.float32)
else:
print("Skipping {}".format(k))
if debug:
break
def get_clip2frm_idx_mapping(clip_length=1.5, max_video_length=300):
""" This function depends on how the features are extracted.
    original features are extracted from video frames (fps=30):
        frames [3, 13, 23] within each second.
Args:
clip_length: float,
max_video_length: int,
Returns:
{clip_idx1 (int): [frm_idx0, frm_idx1, ...],
...
}
"""
# frame 0 in the feature is actually the frame 3 in the original video, so its
# corresponding time is 3 / 30 = 0.1s. More generally ==> [0.1, 0.43, 0.77] + n.
frm2seconds = np.concatenate([
np.array([3, 13, 23]) / 30. + offset for offset in np.arange(0, max_video_length)], axis=0)
clip_boundaries = np.arange(0, max_video_length, clip_length)
# no need to worry about search boundary.
# indexed as clip_boundaries_in_frm_idx[idx]:clip_boundaries_in_frm_idx[idx+1]
clip_boundaries_in_frm_idx = np.searchsorted(frm2seconds, clip_boundaries)
return clip_boundaries_in_frm_idx
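# Worked example (added): with clip_length=1.5, clip_boundaries is
# [0.0, 1.5, 3.0, ...] while frm2seconds starts [0.1, 0.43, 0.77, 1.1, 1.43,
# 1.77, 2.1, ...], so np.searchsorted returns [0, 5, 9, ...]: feature frames
# 0-4 belong to clip 0, frames 5-8 to clip 1, and so on.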
def main_convert():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--src_h5_files", type=str, nargs='+', help="frm .h5 file paths")
parser.add_argument("--tgt_h5_file", type=str, help=".h5 path to stores the converted data")
parser.add_argument("--align_h5_file", type=str, help=".h5 path to the file to align at length dim")
parser.add_argument("--check_alignment_only", action="store_true", help="Check alignment only")
parser.add_argument("--debug", action="store_true")
args = parser.parse_args()
with h5py.File(args.align_h5_file, "r") as align_h5:
align_h5_key2len = {k: len(v) for k, v in tqdm(align_h5.items(), desc="[Get Length] Loop over align h5")}
src_h5_key2len = {}
for src_f in args.src_h5_files:
with h5py.File(src_f, "r") as src_h5:
for k, v in tqdm(src_h5.items(), desc="[Get length] Loop over one of the src h5"):
src_h5_key2len[k] = len(v)
not_found_keys = list(set(align_h5_key2len.keys()) - set(src_h5_key2len.keys()))
diff_key2len = {k: align_h5_key2len[k] - src_h5_key2len[k] for k in align_h5_key2len if k in src_h5_key2len}
diff_counter = Counter(list(diff_key2len.values()))
print("Not found keys total {}, examples: {}".format(len(not_found_keys), not_found_keys[:3]))
print("diff_counter {}".format(diff_counter.most_common()))
if not args.check_alignment_only:
assert not os.path.exists(args.tgt_h5_file)
with h5py.File(args.tgt_h5_file, "a") as tgt_h5:
for src_f in args.src_h5_files:
with h5py.File(src_f, "r") as src_h5:
convert_for_single_h5(src_h5, tgt_h5, align_h5_key2len, debug=args.debug)
if __name__ == '__main__':
main_convert()
|
97f2f98e1dfdd03f77343bb48829b21630562480
|
44d7c1c8c50b3008125298a4b76350cdfde2e229
|
/sr_model/Experimental_root/models/__init__.py
|
367d9611607dffe04cb99e615bdfc821947c5a3d
|
[
"MIT"
] |
permissive
|
Meta-Portrait/MetaPortrait
|
f501c0cab2443cc76b82d96f07b4d79e6a85052b
|
4c3c9e38d000f7db50c3a7ab736161f7aecc741a
|
refs/heads/main
| 2023-05-23T16:26:24.479909
| 2023-05-21T02:11:34
| 2023-05-21T02:11:34
| 578,117,887
| 357
| 22
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 517
|
py
|
__init__.py
|
import importlib
from basicsr.utils import scandir
from os import path as osp
# automatically scan and import model modules for registry
# scan all the files that end with '_model.py' under the model folder
model_folder = osp.dirname(osp.abspath(__file__))
model_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(model_folder) if v.endswith('_model.py')]
# import all the model modules
_model_modules = [importlib.import_module(f'Experimental_root.models.{file_name}') for file_name in model_filenames]
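# Usage note (added, illustrative): a hypothetical file `my_sr_model.py`
# placed in this folder would be discovered by the scan above and imported as
# `Experimental_root.models.my_sr_model`, letting any basicsr registry
# decorators inside it run at import time.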
|
75ab6690d77e99948cf4bf3eaf9bc3afd8d4ff8e
|
af1306b0f3e99a2862c067fcdac3c47e7924422e
|
/scripts/transform2cfradial.py
|
f6c5335b2bae698f5271a8ba73123114002217ca
|
[
"MIT"
] |
permissive
|
YvZheng/pycwr
|
39b2345e2e9fa5313c7f0cd43060480e0d0558e6
|
8e2fc9b4b6434887fbb3f9450b1bb88ef0305e17
|
refs/heads/master
| 2023-09-04T08:11:55.758433
| 2023-08-30T03:35:05
| 2023-08-30T03:35:05
| 210,337,815
| 184
| 73
|
MIT
| 2023-08-15T06:55:02
| 2019-09-23T11:27:32
|
Python
|
UTF-8
|
Python
| false
| false
| 775
|
py
|
transform2cfradial.py
|
from pycwr.io import read_auto
import pyart
import sys
import os
def save_cfradial(china_radar_file, save_file=None):
"""
:param china_radar_file: radar data filename
:param save_file: savename of cfradial format data
:return:
"""
radar = read_auto(china_radar_file).ToPyartRadar()
if save_file is None:
save_file = china_radar_file + ".nc"
pyart.io.write_cfradial(save_file, radar)
return 0
if __name__ == "__main__":
if len(sys.argv) == 1:
print("warning using!!! example: transfrom2cfradial filename savename")
elif not os.path.exists(sys.argv[1]):
print("file is not exist!!!")
elif len(sys.argv) == 2:
save_cfradial(sys.argv[1])
else:
save_cfradial(sys.argv[1], sys.argv[2])
|
19dde5a7fdc1921d386ea78e7ea4876375843c75
|
a902290fb3b911676358ae4d93f83061a6c2bd0f
|
/InvenTree/stock/migrations/0042_auto_20200523_0121.py
|
66db1441e3cf32008c08cfc6570a45ec619cfbb7
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
inventree/InvenTree
|
a15e54182c9bfafdf5348cc9a66da1004e23e760
|
e88a8e99a5f0b201c67a95cba097c729f090d5e2
|
refs/heads/master
| 2023-09-03T19:32:35.438375
| 2023-08-30T00:25:40
| 2023-08-30T00:25:40
| 85,894,461
| 3,077
| 549
|
MIT
| 2023-09-14T14:21:01
| 2017-03-23T01:44:10
|
Python
|
UTF-8
|
Python
| false
| false
| 564
|
py
|
0042_auto_20200523_0121.py
|
# Generated by Django 3.0.5 on 2020-05-23 01:21
from django.db import migrations, models
import stock.models
class Migration(migrations.Migration):
dependencies = [
('stock', '0041_stockitemtestresult_notes'),
]
operations = [
migrations.AlterField(
model_name='stockitemtestresult',
name='attachment',
field=models.FileField(blank=True, help_text='Test result attachment', null=True, upload_to=stock.models.rename_stock_item_test_result_attachment, verbose_name='Attachment'),
),
]
|
767264cb26a1502b23883c66917de54afa528485
|
2d05050d0ada29f7680b4df20c10bb85b0530e45
|
/python/tvm/autotvm/record.py
|
cde78d1dbc312218e20bb419afcd27bdcad5786f
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Zlib",
"LLVM-exception",
"BSD-2-Clause"
] |
permissive
|
apache/tvm
|
87cb617f9a131fa44e1693303aaddf70e7a4c403
|
d75083cd97ede706338ab413dbc964009456d01b
|
refs/heads/main
| 2023-09-04T11:24:26.263032
| 2023-09-04T07:26:00
| 2023-09-04T07:26:00
| 70,746,484
| 4,575
| 1,903
|
Apache-2.0
| 2023-09-14T19:06:33
| 2016-10-12T22:20:28
|
Python
|
UTF-8
|
Python
| false
| false
| 12,207
|
py
|
record.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=superfluous-parens, redefined-outer-name, pointless-string-statement
# pylint: disable=consider-using-enumerate,invalid-name
"""Tuning record and serialization format"""
import argparse
import base64
from io import TextIOBase
import logging
import pickle
import json
import time
from typing import Union
import os
import itertools
from collections import OrderedDict
import numpy as np
from .. import build, lower
from ..target import Target
from ..contrib import popen_pool
from .. import __version__
from . import task
from .task import ConfigEntity, ApplyHistoryBest
from .measure import MeasureInput, MeasureResult
AUTOTVM_LOG_VERSION = 0.2
_old_version_warning = True
logger = logging.getLogger("autotvm")
try: # convert unicode to str for python2
_unicode = unicode
except NameError:
_unicode = ()
try:
_long = long
except NameError:
_long = int
def measure_str_key(inp, include_config=True):
"""get unique str key for MeasureInput
Parameters
----------
inp: autotvm.measure.MeasureInput
input for the measure
include_config: bool, optional
whether includes config in the str key
Returns
-------
key: str
The str representation of key
"""
config_str = str(inp.config) if include_config else ""
return "".join(
[str(inp.target), inp.task.name, str(inp.task.args), str(inp.task.kwargs), config_str]
)
def encode(inp, result, protocol="json"):
"""encode (MeasureInput, MeasureResult) pair to a string
Parameters
----------
inp: autotvm.measure.MeasureInput
result: autotvm.measure.MeasureResult
pair of input/result
protocol: str
log protocol, json or pickle
Returns
-------
row: str
a row in the logger file
"""
if protocol == "json":
json_dict = {
"input": (str(inp.target), inp.task.name, inp.task.args, inp.task.kwargs),
"config": inp.config.to_json_dict(),
"result": (
result.costs if result.error_no == 0 else (1e9,),
result.error_no,
result.all_cost,
result.timestamp,
),
"version": AUTOTVM_LOG_VERSION,
"tvm_version": __version__,
}
return json.dumps(json_dict)
if protocol == "pickle":
row = (
str(inp.target),
str(
base64.b64encode(
pickle.dumps([inp.task.name, inp.task.args, inp.task.kwargs])
).decode()
),
str(base64.b64encode(pickle.dumps(inp.config)).decode()),
str(base64.b64encode(pickle.dumps(tuple(result))).decode()),
str(AUTOTVM_LOG_VERSION),
str(__version__),
)
return "\t".join(row)
raise RuntimeError("Invalid log protocol: " + protocol)
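# For illustration (added; all field values below are made up), a JSON row
# produced by encode() has the shape:
#   {"input": ["llvm -keys=cpu", "conv2d_nchw.x86", [...], {...}],
#    "config": {...},
#    "result": [[0.0012], 0, 1.53, 1690000000.0],
#    "version": 0.2, "tvm_version": "0.14.0"}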
def decode(row, protocol="json"):
"""Decode encoded record string to python object
Parameters
----------
row : str
a row in the logger file
protocol : str
log protocol, json or pickle
Returns
-------
ret : tuple(autotvm.measure.MeasureInput, autotvm.measure.MeasureResult), or None
The tuple of input and result, or None if input uses old version log format.
"""
# pylint: disable=unused-variable
global _old_version_warning
if protocol == "json":
row = json.loads(row)
if "v" in row and row["v"] == 0.1:
if _old_version_warning:
logger.warning("AutoTVM log version 0.1 is no longer supported.")
_old_version_warning = False
return None
tgt, task_name, task_args, task_kwargs = row["input"]
tgt = str(tgt)
if "-target" in tgt:
logger.warning('"-target" is deprecated, use "-mtriple" instead.')
tgt = tgt.replace("-target", "-mtriple")
tgt = Target(str(tgt))
def clean_json_to_python(x):
"""1. Convert all list in x to tuple (hashable)
2. Convert unicode to str for python2
"""
if isinstance(x, list):
return tuple([clean_json_to_python(a) for a in x])
if isinstance(x, _unicode):
return str(x)
if isinstance(x, (_long, int)):
return int(x)
return x
tsk = task.Task(clean_json_to_python(task_name), clean_json_to_python(task_args))
config = ConfigEntity.from_json_dict(row["config"])
inp = MeasureInput(tgt, tsk, config)
result = MeasureResult(*[tuple(x) if isinstance(x, list) else x for x in row["result"]])
config.cost = np.mean(result.costs)
return inp, result
if protocol == "pickle":
items = row.split("\t")
if len(items) == 4:
if _old_version_warning:
logger.warning("AutoTVM log version 0.1 is no longer supported.")
_old_version_warning = False
return None
tgt = Target(items[0])
task_tuple = pickle.loads(base64.b64decode(items[1].encode()))
config = pickle.loads(base64.b64decode(items[2].encode()))
result = MeasureResult(*pickle.loads(base64.b64decode(items[3].encode())))
config.cost = np.mean(result.costs)
tsk = task.Task(task_tuple[0], task_tuple[1])
return MeasureInput(tgt, tsk, config), result
raise RuntimeError("Invalid log protocol: " + protocol)
def load_from_buffer(file: TextIOBase):
"""Generator: load records from buffer.
This is a generator that yields the records.
Parameters
----------
file: io.TextIOBase
Yields
------
input: autotvm.measure.MeasureInput
result: autotvm.measure.MeasureResult
"""
for row in file:
if row and not row.startswith("#"):
ret = decode(row)
if ret is None:
continue
yield ret
def load_from_file(filepath: Union[str, bytes, os.PathLike]):
"""Generator: load records from path.
This is a generator that yields the records.
Parameters
----------
filepath: str, bytes, or os.PathLike
Yields
------
input: autotvm.measure.MeasureInput
result: autotvm.measure.MeasureResult
"""
with open(filepath) as f:
for row in f:
if row and not row.startswith("#"):
ret = decode(row)
if ret is None:
continue
yield ret
def split_workload(in_file, clean=True):
"""Split a log file into separate files, each of which contains only a single workload
This function can also delete duplicated records in log file
Parameters
----------
in_file: str
input filename
clean: bool
whether delete duplicated items
"""
tic = time.time()
lines = list(open(in_file).readlines())
logger.info("start converting...")
pool = popen_pool.PopenPoolExecutor()
lines = [rec for rec in pool.map(decode, lines) if rec is not None]
logger.info("map done %.2f", time.time() - tic)
wkl_dict = OrderedDict()
for inp, res in lines:
wkl = measure_str_key(inp, False)
if wkl not in wkl_dict:
wkl_dict[wkl] = []
wkl_dict[wkl].append([inp, res])
if clean:
for i, (k, v) in enumerate(wkl_dict.items()):
# clean duplicated items
added = set()
cleaned = []
for inp, res in v:
str_key = measure_str_key(inp)
if str_key in added:
continue
added.add(str_key)
cleaned.append([inp, res])
# write to file
logger.info("Key: %s\tValid: %d\tDup: %d\t", k, len(cleaned), len(v) - len(cleaned))
            with open(in_file + f".{i:03d}.wkl", "w") as fout:  # was args.i, a NameError when called as a library function
for inp, res in cleaned:
fout.write(encode(inp, res) + "\n")
else:
for i, (k, v) in enumerate(wkl_dict.items()):
logger.info("Key: %s\tNum: %d", k, len(v))
            with open(in_file + f".{i:03d}.wkl", "w") as fout:  # was args.i, a NameError when called as a library function
for inp, res in v:
fout.write(encode(inp, res) + "\n")
def pick_best(in_file, out_file):
"""
Pick the best entries from a file and store them to another file.
This function distills the useful log entries from a large log file.
If out_file already exists, the best entries from both
in_file and out_file will be saved.
Parameters
----------
in_file: str
The filename of input
out_file: str or file
The filename of output
"""
context = load_from_file(in_file)
if os.path.isfile(out_file):
out_context = load_from_file(out_file)
context = itertools.chain(context, out_context)
context, context_clone = itertools.tee(context)
best_context = ApplyHistoryBest(context)
best_set = set()
for v in best_context.best_by_model.values():
best_set.add(measure_str_key(v[0]))
for v in best_context.best_by_targetkey.values():
best_set.add(measure_str_key(v[0]))
logger.info("Extract %d best records from the %s", len(best_set), in_file)
fout = open(out_file, "w") if isinstance(out_file, str) else out_file
for inp, res in context_clone:
if measure_str_key(inp) in best_set:
fout.write(encode(inp, res) + "\n")
best_set.remove(measure_str_key(inp))
"""
Usage:
This record executable module has three modes.
* Print log file in readable format
e.g. python -m tvm.autotvm.record --mode read --i collect_conv.log --begin 0 --end 5 --ir --code
* Extract history best from a large log file
e.g. python -m tvm.autotvm.record --mode pick --i collect.log
* Split a log file into separate files, each of which contains only a single wkl
e.g. python -m tvm.autotvm.record --mode split --i collect.log
"""
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--mode", choices=["read", "pick", "split"], default="read")
parser.add_argument("--i", type=str, help="input file")
parser.add_argument("--o", type=str, default=None, help="output file")
parser.add_argument("--begin", type=int, default=0)
parser.add_argument("--end", type=int, default=5)
parser.add_argument("--ir", action="store_true")
parser.add_argument("--code", action="store_true")
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
if args.mode == "pick":
args.o = args.o or args.i + ".best.log"
pick_best(args.i, args.o)
elif args.mode == "read":
for i, (inp, result) in enumerate(load_from_file(args.i)):
if args.begin <= i < args.end:
with inp.target:
s, arg_bufs = inp.task.instantiate(inp.config)
print("")
print(inp.target, inp.task, inp.config)
print(result)
if args.ir:
with inp.target:
print(lower(s, arg_bufs, simple_mode=True))
if args.code:
with inp.target:
func = build(s, arg_bufs)
print(func.imported_modules[0].get_source())
elif args.mode == "split":
split_workload(args.i)

blob_id: 4f82829aa25c21d0b34c727189810c710c285578
directory_id: 2481cde6506743565dff2b405a2396daf208ab3e
path: /src/clist/migrations/0047_resource_icon.py
content_id: aa101b57e48949003a46bfc8337ff59023a5ff62
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: aropan/clist
snapshot_id: 4819a3036d179595e4df8c646aff2ed593b9dad3
revision_id: 5c805b2af71acee97f993f19d8d4e229f7f5b411
branch_name: refs/heads/master
visit_date: 2023-08-31T11:15:17.987776
revision_date: 2023-08-27T21:51:14
committer_date: 2023-08-27T21:52:16
github_id: 187111853
star_events_count: 276
fork_events_count: 35
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-06T18:42:53
gha_created_at: 2019-05-16T22:57:03
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 407
extension: py
filename: 0047_resource_icon.py
content:
# Generated by Django 2.2.13 on 2020-07-19 11:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clist', '0046_auto_20200712_2220'),
]
operations = [
migrations.AddField(
model_name='resource',
name='icon',
field=models.CharField(blank=True, max_length=255, null=True),
),
]

blob_id: d5c40491eb58f3409a35c195c888d900b84ba82f
directory_id: 0b134572e3ac3903ebb44df6d4138cbab9d3327c
path: /app/tests/evaluation_tests/test_tasks.py
content_id: 50f3f1c2a9e2b09bbf9765f7a6bde38c8b3053fc
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: comic/grand-challenge.org
snapshot_id: 660de3bafaf8f4560317f1dfd9ae9585ec272896
revision_id: dac25f93b395974b32ba2a8a5f9e19b84b49e09d
branch_name: refs/heads/main
visit_date: 2023-09-01T15:57:14.790244
revision_date: 2023-08-31T14:23:04
committer_date: 2023-08-31T14:23:04
github_id: 4557968
star_events_count: 135
fork_events_count: 53
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T13:41:03
gha_created_at: 2012-06-05T09:26:39
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 14620
extension: py
filename: test_tasks.py
content:
from pathlib import Path
import pytest
import requests
from actstream.actions import unfollow
from django.conf import settings
from django.core.cache import cache
from django.test import TestCase
from django.utils.html import format_html
from redis.exceptions import LockError
from grandchallenge.algorithms.models import Job
from grandchallenge.components.models import ComponentInterface
from grandchallenge.components.tasks import (
push_container_image,
validate_docker_image,
)
from grandchallenge.evaluation.models import Evaluation, Method
from grandchallenge.evaluation.tasks import set_evaluation_inputs
from grandchallenge.notifications.models import Notification
from grandchallenge.profiles.templatetags.profiles import user_profile_link
from tests.algorithms_tests.factories import (
AlgorithmImageFactory,
AlgorithmJobFactory,
)
from tests.archives_tests.factories import ArchiveFactory, ArchiveItemFactory
from tests.components_tests.factories import ComponentInterfaceValueFactory
from tests.evaluation_tests.factories import (
EvaluationFactory,
MethodFactory,
SubmissionFactory,
)
from tests.utils import recurse_callbacks
@pytest.mark.django_db
def test_submission_evaluation(
client,
evaluation_image,
submission_file,
settings,
django_capture_on_commit_callbacks,
):
# Override the celery settings
settings.task_eager_propagates = (True,)
settings.task_always_eager = (True,)
# Upload a submission and create an evaluation
eval_container, sha256 = evaluation_image
with django_capture_on_commit_callbacks() as callbacks:
method = MethodFactory(image__from_path=eval_container)
recurse_callbacks(
callbacks=callbacks,
django_capture_on_commit_callbacks=django_capture_on_commit_callbacks,
)
# We should not be able to download methods
with pytest.raises(NotImplementedError):
_ = method.image.url
# This will create an evaluation, and we'll wait for it to be executed
with django_capture_on_commit_callbacks() as callbacks:
submission = SubmissionFactory(
predictions_file__from_path=submission_file, phase=method.phase
)
recurse_callbacks(
callbacks=callbacks,
django_capture_on_commit_callbacks=django_capture_on_commit_callbacks,
)
# The evaluation method should return the correct answer
assert len(submission.evaluation_set.all()) == 1
evaluation = submission.evaluation_set.first()
assert evaluation.stdout.endswith("Greetings from stdout")
assert evaluation.stderr.endswith('warn("Hello from stderr")')
assert "UserWarning: Could not google: [Errno " in evaluation.stderr
assert evaluation.error_message == ""
assert evaluation.status == evaluation.SUCCESS
assert (
evaluation.outputs.get(interface__slug="metrics-json-file").value[
"acc"
]
== 0.5
)
# Try with a csv file
with django_capture_on_commit_callbacks() as callbacks:
submission = SubmissionFactory(
predictions_file__from_path=Path(__file__).parent
/ "resources"
/ "submission.csv",
phase=method.phase,
)
recurse_callbacks(
callbacks=callbacks,
django_capture_on_commit_callbacks=django_capture_on_commit_callbacks,
)
evaluation = submission.evaluation_set.first()
assert len(submission.evaluation_set.all()) == 1
assert evaluation.status == evaluation.SUCCESS
assert (
evaluation.outputs.get(interface__slug="metrics-json-file").value[
"acc"
]
== 0.5
)
@pytest.mark.django_db
def test_method_validation(evaluation_image):
"""The validator should set the correct sha256 and set the ready bit."""
container, sha256 = evaluation_image
method = MethodFactory(image__from_path=container)
# The method factory fakes the sha256 on creation
assert method.image_sha256 != sha256
assert method.is_manifest_valid is None
assert method.is_in_registry is False
assert method.can_execute is False
validate_docker_image(
pk=method.pk,
app_label=method._meta.app_label,
model_name=method._meta.model_name,
mark_as_desired=False,
)
method = Method.objects.get(pk=method.pk)
assert method.image_sha256 == sha256
assert method.is_manifest_valid is True
assert method.is_in_registry is True
assert method.can_execute is True
@pytest.mark.django_db
def test_container_pushing(evaluation_image):
container, sha256 = evaluation_image
method = MethodFactory(image__from_path=container, is_manifest_valid=True)
push_container_image(instance=method)
response = requests.get(
f"http://{settings.COMPONENTS_REGISTRY_URL}/v2/_catalog"
)
assert response.status_code == 200
assert "localhost/evaluation/method" in response.json()["repositories"]
response = requests.get(
f"http://{settings.COMPONENTS_REGISTRY_URL}/v2/localhost/evaluation/method/tags/list"
)
assert response.status_code == 200
assert str(method.pk) in response.json()["tags"]
@pytest.mark.django_db
def test_method_validation_invalid_dockerfile(alpine_images):
"""Uploading two images in a tar archive should fail."""
method = MethodFactory(image__from_path=alpine_images)
assert method.is_manifest_valid is None
validate_docker_image(
pk=method.pk,
app_label=method._meta.app_label,
model_name=method._meta.model_name,
mark_as_desired=False,
)
method = Method.objects.get(pk=method.pk)
assert method.is_manifest_valid is False
assert "should only have 1 image" in method.status
@pytest.mark.django_db
def test_method_validation_root_dockerfile(root_image):
"""Uploading two images in a tar archive should fail."""
method = MethodFactory(image__from_path=root_image)
assert method.is_manifest_valid is None
validate_docker_image(
pk=method.pk,
app_label=method._meta.app_label,
model_name=method._meta.model_name,
mark_as_desired=False,
)
method = Method.objects.get(pk=method.pk)
assert method.is_manifest_valid is False
assert "runs as root" in method.status
@pytest.mark.django_db
def test_method_validation_not_a_docker_tar(submission_file):
"""Upload something that isn't a docker file should be invalid."""
method = MethodFactory(image__from_path=submission_file)
assert method.is_manifest_valid is None
validate_docker_image(
pk=method.pk,
app_label=method._meta.app_label,
model_name=method._meta.model_name,
mark_as_desired=False,
)
method = Method.objects.get(pk=method.pk)
assert method.is_manifest_valid is False
assert "manifest.json not found" in method.status
class TestSetEvaluationInputs(TestCase):
def setUp(self):
interface = ComponentInterface.objects.get(
slug="generic-medical-image"
)
archive = ArchiveFactory()
ais = ArchiveItemFactory.create_batch(2)
archive.items.set(ais)
input_civs = ComponentInterfaceValueFactory.create_batch(
2, interface=interface
)
output_civs = ComponentInterfaceValueFactory.create_batch(
2, interface=interface
)
for ai, civ in zip(ais, input_civs, strict=True):
ai.values.set([civ])
alg = AlgorithmImageFactory()
submission = SubmissionFactory(algorithm_image=alg)
submission.phase.archive = archive
submission.phase.save()
submission.phase.algorithm_inputs.set([interface])
jobs = []
for inpt, output in zip(input_civs, output_civs, strict=True):
j = AlgorithmJobFactory(status=Job.SUCCESS, algorithm_image=alg)
j.inputs.set([inpt])
j.outputs.set([output])
jobs.append(j)
self.evaluation = EvaluationFactory(
submission=submission, status=Evaluation.EXECUTING_PREREQUISITES
)
self.jobs = jobs
self.output_civs = output_civs
def test_set_evaluation_inputs(self):
set_evaluation_inputs(evaluation_pk=self.evaluation.pk)
self.evaluation.refresh_from_db()
assert self.evaluation.status == self.evaluation.PENDING
assert self.evaluation.error_message == ""
assert self.evaluation.inputs.count() == 3
assert self.evaluation.input_prefixes == {
str(civ.pk): f"{alg.pk}/output/"
for alg, civ in zip(self.jobs, self.output_civs, strict=True)
}
@pytest.mark.django_db
def test_non_zip_submission_failure(
client,
evaluation_image,
submission_file,
settings,
django_capture_on_commit_callbacks,
):
# Override the celery settings
settings.task_eager_propagates = (True,)
settings.task_always_eager = (True,)
# Upload a submission and create an evaluation
eval_container, sha256 = evaluation_image
method = MethodFactory(
image__from_path=eval_container,
image_sha256=sha256,
is_manifest_valid=True,
is_in_registry=True,
is_desired_version=True,
)
# Try with a 7z file
with django_capture_on_commit_callbacks(execute=True):
submission = SubmissionFactory(
predictions_file__from_path=Path(__file__).parent
/ "resources"
/ "submission.7z",
phase=method.phase,
)
# The evaluation method should return the correct answer
assert len(submission.evaluation_set.all()) == 1
evaluation = submission.evaluation_set.first()
assert evaluation.error_message.endswith(
"7z-compressed files are not supported."
)
assert evaluation.status == evaluation.FAILURE
@pytest.mark.django_db
def test_evaluation_notifications(
client,
evaluation_image,
submission_file,
settings,
django_capture_on_commit_callbacks,
):
# Override the celery settings
settings.task_eager_propagates = (True,)
settings.task_always_eager = (True,)
# Try to upload a submission without a method in place
with django_capture_on_commit_callbacks(execute=True):
submission = SubmissionFactory(
predictions_file__from_path=submission_file
)
    # A missing method should result in a notification for the challenge
    # admins. There are 2 notifications here; the second is about admin
    # addition to the challenge, and both notifications are for the admin.
for notification in Notification.objects.all():
assert notification.user == submission.phase.challenge.creator
assert (
"there is no valid evaluation method"
in Notification.objects.filter(message="missing method")
.get()
.print_notification(user=submission.phase.challenge.creator)
)
# Add method and upload a submission
eval_container, sha256 = evaluation_image
with django_capture_on_commit_callbacks() as callbacks:
method = MethodFactory(image__from_path=eval_container)
recurse_callbacks(
callbacks=callbacks,
django_capture_on_commit_callbacks=django_capture_on_commit_callbacks,
)
# clear notifications for easier testing later
Notification.objects.all().delete()
# create submission and wait for it to be evaluated
with django_capture_on_commit_callbacks() as callbacks:
submission = SubmissionFactory(
predictions_file__from_path=submission_file, phase=method.phase
)
recurse_callbacks(
callbacks=callbacks,
django_capture_on_commit_callbacks=django_capture_on_commit_callbacks,
)
# creator of submission and admins of challenge should get notification
# about successful submission
recipients = list(submission.phase.challenge.get_admins())
recipients.append(submission.creator)
assert Notification.objects.count() == len(recipients)
for recipient in recipients:
assert str(recipient) in str(Notification.objects.all())
result_string = format_html(
'<a href="{}">result</a>', submission.get_absolute_url()
)
submission_string = format_html(
'<a href="{}">submission</a>', submission.get_absolute_url()
)
challenge_string = format_html(
'<a href="{}">{}</a>',
submission.phase.challenge.get_absolute_url(),
submission.phase.challenge.short_name,
)
assert (
f"There is a new {result_string} for {challenge_string}"
in Notification.objects.filter(user=recipients[0])
.get()
.print_notification(user=recipients[0])
)
assert (
f"Your {submission_string} to {challenge_string} succeeded"
in Notification.objects.filter(user=recipients[1])
.get()
.print_notification(user=recipients[1])
)
Notification.objects.all().delete()
# update evaluation status to failed
evaluation = submission.evaluation_set.first()
evaluation.update_status(status=evaluation.FAILURE)
assert evaluation.status == evaluation.FAILURE
# notifications for admin and creator of submission
assert Notification.objects.count() == len(recipients)
for recipient in recipients:
assert str(recipient) in str(Notification.objects.all())
assert f"The {submission_string} from {user_profile_link(Notification.objects.filter(user=recipients[0]).get().actor)} to {challenge_string} failed" in Notification.objects.filter(
user=recipients[0]
).get().print_notification(
user=recipients[0]
)
assert (
f"Your {submission_string} to {challenge_string} failed"
in Notification.objects.filter(user=recipients[1])
.get()
.print_notification(user=recipients[1])
)
# check that when admin unsubscribed from phase, they no longer
# receive notifications about activity related to that phase
Notification.objects.all().delete()
unfollow(user=submission.phase.challenge.creator, obj=submission.phase)
evaluation.update_status(status=evaluation.SUCCESS)
assert str(submission.phase.challenge.creator) not in str(
Notification.objects.all()
)
def test_cache_lock():
# Used in create_algorithm_jobs_for_evaluation
with cache.lock("foo", timeout=5, blocking_timeout=1):
try:
with cache.lock("foo", timeout=5, blocking_timeout=1):
raise RuntimeError("Test failed, shouldn't hit this line")
except LockError:
assert True

blob_id: 66338ec0910b8b4513c22ee5fa767d1122767f39
directory_id: ff4ce3522d502248f56b32438b303c3301185709
path: /cwltool/singularity_utils.py
content_id: e4cc889182accfb85cf6a3aca43fc78eae0cd425
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: common-workflow-language/cwltool
snapshot_id: d8304f3dcd6e31bda6d0ea11452b692987e39b28
revision_id: bd89c5694685bff46bf56fb32316c8f6fe0d799d
branch_name: refs/heads/main
visit_date: 2023-08-24T09:43:39.331516
revision_date: 2023-08-23T15:05:17
committer_date: 2023-08-23T16:45:11
github_id: 43816051
star_events_count: 336
fork_events_count: 258
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-13T10:55:19
gha_created_at: 2015-10-07T13:03:05
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1094
extension: py
filename: singularity_utils.py
content:
"""Support for executing Docker format containers using Singularity {2,3}.x or Apptainer 1.x."""
import os
import os.path
from subprocess import DEVNULL, PIPE, Popen, TimeoutExpired # nosec
from typing import Optional
_USERNS: Optional[bool] = None
def singularity_supports_userns() -> bool:
"""Confirm if the version of Singularity install supports the --userns flag."""
global _USERNS # pylint: disable=global-statement
if _USERNS is None:
try:
hello_image = os.path.join(os.path.dirname(__file__), "hello.simg")
result = Popen( # nosec
["singularity", "exec", "--userns", hello_image, "true"],
stderr=PIPE,
stdout=DEVNULL,
universal_newlines=True,
).communicate(timeout=60)[1]
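            # These stderr messages indicate the container itself started to
            # run (i.e. the --userns flag was accepted); an unsupported flag
            # would fail with a different error and leave _USERNS False.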
_USERNS = (
"No valid /bin/sh" in result
or "/bin/sh doesn't exist in container" in result
or "executable file not found in" in result
)
except TimeoutExpired:
_USERNS = False
return _USERNS
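
# Call-site sketch (hypothetical): only add the flag when it is supported.
#
#   extra_args = ["--userns"] if singularity_supports_userns() else []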

blob_id: ee39cbbdc6c4f5b16768b4cb648d98d42a85d1b1
directory_id: b8d80a23cb27af08a1c4d34b478c76228ae5fbb4
path: /insights/tests/parsers/test_networkmanager_config.py
content_id: 042599855f55ef6e094a62a861bfffdb02b6de7a
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: RedHatInsights/insights-core
snapshot_id: bb243e2bf8a52446fefb95ebe05478d6e35efe2e
revision_id: b0ea07fc3f4dd8801b505fe70e9b36e628152c4a
branch_name: refs/heads/master
visit_date: 2023-09-04T21:15:40.456257
revision_date: 2023-09-04T10:46:56
committer_date: 2023-09-04T10:46:56
github_id: 92518221
star_events_count: 144
fork_events_count: 290
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T02:40:13
gha_created_at: 2017-05-26T14:23:11
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 5114
extension: py
filename: test_networkmanager_config.py
content:
from insights.parsers.networkmanager_config import NetworkManagerConfig
from insights.parsers import networkmanager_config
from insights.tests import context_wrap
import doctest
NETWORKMANAGER_CONF = """
# Configuration file for NetworkManager.
#
# See "man 5 NetworkManager.conf" for details.
#
# The directories /usr/lib/NetworkManager/conf.d/ and /var/run/NetworkManager/conf.d/
# can contain additional configuration snippets installed by packages. These files are
# read before NetworkManager.conf and have thus lowest priority.
# The directory /etc/NetworkManager/conf.d/ can contain additional configuration
# snippets. Those snippets are merged last and overwrite the settings from this main
# file.
#
# The files within one conf.d/ directory are read in asciibetical order.
#
# If /etc/NetworkManager/conf.d/ contains a file with the same name as
# /usr/lib/NetworkManager/conf.d/, the latter file is shadowed and thus ignored.
# Hence, to disable loading a file from /usr/lib/NetworkManager/conf.d/ you can
# put an empty file to /etc with the same name. The same applies with respect
# to the directory /var/run/NetworkManager/conf.d where files in /var/run shadow
# /usr/lib and are themselves shadowed by files under /etc.
#
# If two files define the same key, the one that is read afterwards will overwrite
# the previous one.
[main]
#plugins=ifcfg-rh,ibft
dhcp=dhclient
[logging]
# When debugging NetworkManager, enabling debug logging is of great help.
#
# Logfiles contain no passwords and little sensitive information. But please
# check before posting the file online. You can also personally hand over the
# logfile to a NM developer to treat it confidential. Meet us on #nm on freenode.
# Please post full logfiles except minimal modifications of private data.
#
# You can also change the log-level at runtime via
# $ nmcli general logging level TRACE domains ALL
# However, usually it's cleaner to enable debug logging
# in the configuration and restart NetworkManager so that
# debug logging is enabled from the start.
#
# You will find the logfiles in syslog, for example via
# $ journalctl -u NetworkManager
#
# Note that debug logging of NetworkManager can be quite verbose. Some messages
# might be rate-limited by the logging daemon (see RateLimitIntervalSec, RateLimitBurst
# in man journald.conf).
#
#level=TRACE
#domains=ALL
"""
NETWORKMANAGER_CONF_NOTMATCH = """
# Configuration file for NetworkManager.
#
# See "man 5 NetworkManager.conf" for details.
#
# The directories /usr/lib/NetworkManager/conf.d/ and /var/run/NetworkManager/conf.d/
# can contain additional configuration snippets installed by packages. These files are
# read before NetworkManager.conf and have thus lowest priority.
# The directory /etc/NetworkManager/conf.d/ can contain additional configuration
# snippets. Those snippets are merged last and overwrite the settings from this main
# file.
#
# The files within one conf.d/ directory are read in asciibetical order.
#
# If /etc/NetworkManager/conf.d/ contains a file with the same name as
# /usr/lib/NetworkManager/conf.d/, the latter file is shadowed and thus ignored.
# Hence, to disable loading a file from /usr/lib/NetworkManager/conf.d/ you can
# put an empty file to /etc with the same name. The same applies with respect
# to the directory /var/run/NetworkManager/conf.d where files in /var/run shadow
# /usr/lib and are themselves shadowed by files under /etc.
#
# If two files define the same key, the one that is read afterwards will overwrite
# the previous one.
[logging]
# When debugging NetworkManager, enabling debug logging is of great help.
#
# Logfiles contain no passwords and little sensitive information. But please
# check before posting the file online. You can also personally hand over the
# logfile to a NM developer to treat it confidential. Meet us on #nm on freenode.
# Please post full logfiles except minimal modifications of private data.
#
# You can also change the log-level at runtime via
# $ nmcli general logging level TRACE domains ALL
# However, usually it's cleaner to enable debug logging
# in the configuration and restart NetworkManager so that
# debug logging is enabled from the start.
#
# You will find the logfiles in syslog, for example via
# $ journalctl -u NetworkManager
#
# Note that debug logging of NetworkManager can be quite verbose. Some messages
# might be rate-limited by the logging daemon (see RateLimitIntervalSec, RateLimitBurst
# in man journald.conf).
#
#level=TRACE
domains=ALL
"""
def test_networkmanager_config_match():
result = NetworkManagerConfig(context_wrap(NETWORKMANAGER_CONF))
assert result.get('main', 'dhcp') == 'dhclient'
def test_networkmanager_config_notmatch():
result = NetworkManagerConfig(context_wrap(NETWORKMANAGER_CONF_NOTMATCH))
assert result.has_option('main', 'dhcp') is False
def test_networkmanager_config_doc_examples():
env = {
'networkmanager_config_obj': NetworkManagerConfig(context_wrap(NETWORKMANAGER_CONF)),
}
failed, total = doctest.testmod(networkmanager_config, globs=env)
assert failed == 0
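
# Usage sketch outside the test harness (context_wrap fakes the file content):
#
#   cfg = NetworkManagerConfig(context_wrap(NETWORKMANAGER_CONF))
#   cfg.get('main', 'dhcp')         # -> 'dhclient'
#   cfg.has_option('main', 'dhcp')  # -> True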

blob_id: 48249e1f40d9b05badcdf1fb5358968ec2206616
directory_id: 6923f79f1eaaba0ab28b25337ba6cb56be97d32d
path: /A_Primer_on_Scientific_Programming_with_Python/diffeq/inverse_function.py
content_id: bf283384079fbeb7fba41146398585c794d779fb
detected_licenses: []
license_type: no_license
repo_name: burakbayramli/books
snapshot_id: 9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
revision_id: 5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
branch_name: refs/heads/master
visit_date: 2023-08-17T05:31:08.885134
revision_date: 2023-08-14T10:05:37
committer_date: 2023-08-14T10:05:37
github_id: 72460321
star_events_count: 223
fork_events_count: 174
gha_license_id: null
gha_event_created_at: 2022-10-24T12:15:06
gha_created_at: 2016-10-31T17:24:00
gha_language: Jupyter Notebook
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1088
extension: py
filename: inverse_function.py
content:
"""
Given a set of coordinates in the array xcoor, and a Python function
f(x), compute the inverse of f(x), call it g(x), and return the array
g(xcoor). The computation is done point by point. We have
f(g(x)) = x
which holds at every point xi:
f(g(xi)) = xi
We do not know the value g(xi) so let us call it gamma. Then
we have the (generally) nonlinear equation
f(gamma) = xi
for gamma, which can be solved by, e.g., Newton's method.
"""
from Newton import Newton
from scitools.std import *
def f(x):
return x**2 - 1
def F(gamma):
return f(gamma) - xi
def dFdx(gamma):
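    # Central-difference approximation of dF/dgamma with step h; kept
    # numerical so it works for any f (for f(x) = x**2 - 1 the exact
    # derivative would be 2*gamma).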
return (F(gamma+h) - F(gamma-h))/(2*h)
h = 1E-6
x = linspace(0.01, 3, 21)
g = zeros(len(x))
for i in range(len(x)):
xi = x[i]
# Compute start value (use last g[i-1] if possible)
if i == 0:
gamma0 = x[0]
else:
gamma0 = g[i-1]
gamma, n, F_value = Newton(F, gamma0, dFdx)
g[i] = gamma
plot(x, f(x), 'r-', x, g, 'b-',
title='f1', legend=('original', 'inverse'),
hardcopy='tmp.eps')
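
# Sanity check (a sketch): the computed inverse should satisfy f(g(x)) ~= x,
# so the residual below is expected to be near zero.
#   print(abs(f(g) - x).max())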

blob_id: 73fe3967672eb07a5267bb50ba9e590587433aba
directory_id: 3ec38f732b21b0a00e822dac730bdc1748902144
path: /scar/providers/aws/clients/iam.py
content_id: e85783990bb41d28fff00a456c63b47df6d9fe64
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: grycap/scar
snapshot_id: e5594c1eb79a0730409c97d48bc511757a05dcbd
revision_id: e6c8b06a43b310d2c1e58d7826239e259dd826d7
branch_name: refs/heads/master
visit_date: 2023-08-22T00:39:28.004454
revision_date: 2023-05-22T11:01:10
committer_date: 2023-05-22T11:01:10
github_id: 91441209
star_events_count: 613
fork_events_count: 59
gha_license_id: Apache-2.0
gha_event_created_at: 2022-11-29T06:36:46
gha_created_at: 2017-05-16T09:35:57
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2092
extension: py
filename: iam.py
content:
# Copyright (C) GRyCAP - I3M - UPV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module with the class necessary to manage the
IAM creation, deletion and configuration."""
from typing import Dict
from botocore.exceptions import ClientError
from scar.providers.aws.clients import BotoClient
from scar.exceptions import exception, GetUserInfoError
import scar.logger as logger
from scar.utils import StrUtils
class IAMClient(BotoClient):
"""A low-level client representing aws Identity and Access Management (IAMClient).
DOC_URL: https://boto3.readthedocs.io/en/latest/reference/services/iam.html"""
# Parameter used by the parent to create the appropriate boto3 client
_BOTO_CLIENT_NAME = 'iam'
_USER_NAME_REGEX = r'(?<=user\/)(\S+)'
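    # e.g. an ARN fragment "user/alice" in the error text yields "alice"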
@exception(logger)
def get_user_info(self) -> Dict:
"""Retrieves information about the specified IAM user,
including the user's creation date, path, unique ID, and ARN."""
try:
return self.client.get_user()
except ClientError as cerr:
if cerr.response['Error']['Code'] == 'AccessDenied':
# If the user doesn't have access rights to IAMClient
# we can find the user name in the error response
user_name = StrUtils.find_expression(str(cerr), self._USER_NAME_REGEX)
return {'UserName' : user_name,
'User' : {'UserName' : user_name,
'UserId' : ''}}
raise cerr
except Exception as ex:
raise GetUserInfoError(error_msg=ex)