Dataset schema (one row per source file):

| field | type | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 – 996k |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 – 245 |
| max_stars_repo_name | string | length 6 – 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64, nullable | 1 – 191k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24 |
| max_issues_repo_path | string | length 4 – 245 |
| max_issues_repo_name | string | length 6 – 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64, nullable | 1 – 67k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24 |
| max_forks_repo_path | string | length 4 – 245 |
| max_forks_repo_name | string | length 6 – 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64, nullable | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24 |
| content | string | length 4 – 996k |
| avg_line_length | float64 | 1.33 – 58.2k |
| max_line_length | int64 | 2 – 323k |
| alphanum_fraction | float64 | 0 – 0.97 |
| content_no_comment | string | length 0 – 946k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |
hexsha: 790594ceb0379d0282ab529d7d592d700eff1a8c | size: 6,702 | ext: py | lang: Python
max_stars_repo: syslog.py @ Bun/ha-syslog-devtracker (head c789679dee8d9e07616a9661ac05d3663eb0b4b9, licenses ["MIT"]) | max_stars_count: null | stars events: null – null
max_issues_repo: syslog.py @ Bun/ha-syslog-devtracker (head c789679dee8d9e07616a9661ac05d3663eb0b4b9, licenses ["MIT"]) | max_issues_count: null | issues events: null – null
max_forks_repo: syslog.py @ Bun/ha-syslog-devtracker (head c789679dee8d9e07616a9661ac05d3663eb0b4b9, licenses ["MIT"]) | max_forks_count: 1 | forks events: 2019-11-21T09:40:36.000Z – 2019-11-21T09:40:36.000Z
content:
"""
Support for Syslog-based networking devices.
For now, support is limited to hostapd and dnsmasq.
Example syslog lines:
<30>Dec 31 13:03:21 router hostapd: wlan1: STA a4:77:33:e3:17:7c WPA: group key handshake completed (RSN)
<29>Dec 31 13:05:15 router hostapd: wlan0: AP-STA-CONNECTED 64:20:0c:37:52:82
<30>Dec 31 13:15:22 router hostapd: wlan0: STA 64:20:0c:37:52:82 IEEE 802.11: disassociated
<30>Dec 31 13:15:23 router hostapd: wlan0: STA 64:20:0c:37:52:82 IEEE 802.11: deauthenticated due to inactivity (timer DEAUTH/REMOVE)
<29>Dec 31 13:20:15 router hostapd: wlan0: AP-STA-CONNECTED 64:20:0c:37:52:82
<30>Dec 31 13:02:33 router dnsmasq-dhcp[1601]: DHCPACK(br-lan) 192.168.0.101 f4:6d:04:ae:ac:d7 leon-pc
"""
from asyncio import coroutine
from collections import namedtuple
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.device_tracker import PLATFORM_SCHEMA, SOURCE_TYPE_ROUTER
from homeassistant.const import CONF_HOST, CONF_PORT, CONF_DEVICES
from homeassistant.helpers.event import async_track_time_interval
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
#vol.Optional(CONF_WHITELIST): cv.string, # ACL
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=514): cv.port,
# mac => name
vol.Required(CONF_DEVICES): {cv.string: cv.string},
# TODO: TCP vs UDP
# TODO: periodically ARP ping wired devices
})
Event = namedtuple('Event', 'mac kind is_sta reason')
STA_EVENTS = {
'WPA: group key handshake completed': 'home',
'WPA: pairwise key handshake completed': 'home',
'deauthenticated due to local deauth request': 'not_home',
'IEEE 802.11: disconnected due to excessive missing ACKs': 'timeout',
'IEEE 802.11: disassociated due to inactivity': 'timeout',
'IEEE 802.11: deauthenticated due to inactivity': 'timeout',
# Ignored, should be covered by AP-STA-*
'IEEE 802.11: associated': '',
'IEEE 802.11: authenticated': '',
'IEEE 802.11: disassociated': '',
}
def _skip_date_tokens(tokens):
"""
Based on RFC 3164 + RFC 5424 and real-world logs
"""
if tokens and tokens[0].startswith('<'):
tokens.pop(0)
while tokens and (not tokens[0] or tokens[0][:1].isdigit()):
tokens.pop(0)
def _find_process(tokens):
while tokens:
token = tokens.pop(0)
if token.endswith(':'):
c = token.find('[')
if c > -1:
return token[:c]
return token[:-1]
def _remove_param(tokens):
i = len(tokens) - 1
while i > 0:
if tokens[i].startswith('('):
return tokens[:i]
i -= 1
return tokens
def parse_syslog_line(line):
"""Parses lines created by hostapd and dnsmasq DHCP"""
tokens = line.split(' ')
_skip_date_tokens(tokens)
process = _find_process(tokens)
if not process or not tokens:
_LOGGER.debug('Unable to process line: %r', line)
return
if process == 'hostapd':
# <iface>: AP-STA-<event>: <mac>
if len(tokens) == 3:
if tokens[1] == 'AP-STA-CONNECTED':
return Event(tokens[2], 'home', True, tokens[1])
elif tokens[1] == 'AP-STA-DISCONNECTED':
# Disconnected, but we might get the real reason later
return Event(tokens[2], 'timeout', True, tokens[1])
elif len(tokens) > 4 and tokens[1] == 'STA':
# <iface>: STA <mac> WPA: <...>
# <iface>: STA <mac> IEEE 802.11: <...>
suffix = ' '.join(_remove_param(tokens[3:]))
for consider, status in STA_EVENTS.items():
if suffix.endswith(consider):
if status == '':
return
return Event(tokens[2], status, True, suffix)
_LOGGER.warning('Unhandled line: %r', line)
elif process == 'dnsmasq-dhcp':
if len(tokens) >= 3:
# <event>(<iface>) <ip> <mac> <name>
if tokens[0].startswith('DHCPACK('):
return Event(tokens[2], 'home', False, tokens[0])
class SyslogScanner:
def __init__(self, hass, async_see, devices):
self.hass = hass
self.devices = devices
self.wireless_devices = set()
self.async_see = async_see
# TODO: consider marking all devices as offline after start
self.debug_marked = {}
#async_track_time_interval(hass, self.scan_online_devices,
# timedelta(minutes=1))
@coroutine
def scan_online_devices(self, now=None):
_LOGGER.info('Check online devices')
for mac, name in self.devices.items():
if mac in self.wireless_devices:
continue
_LOGGER.info('Check %r', mac)
def process_line(self, line):
event = parse_syslog_line(line.rstrip('\n'))
if not event:
return
_LOGGER.info('%r', event)
mac = event.mac.replace(':', '')
if event.is_sta:
self.wireless_devices.add(mac)
device = self.devices.get(mac)
if not device:
# Automatic tracking
device = self.devices[mac] = mac
consider_home = None
state = event.kind
if event.kind == 'timeout':
state = 'not_home'
# TODO: this feature has not been added yet
consider_home = timedelta(minutes=5)
if self.debug_marked.get(device) != state:
_LOGGER.info('Mark %r as %r [%s]', device, state, consider_home)
self.debug_marked[device] = state
self.hass.async_add_job(self.async_see(dev_id=device,
source_type=SOURCE_TYPE_ROUTER,
mac=event.mac,
#consider_home=consider_home,
location_name=state))
class SyslogScannerUDP(SyslogScanner):
def connection_made(self, transport):
self.transport = transport
def datagram_received(self, data, addr):
message = data.decode('utf8', 'replace')
self.process_line(message)
@coroutine
def async_setup_scanner(hass, config, async_see, discovery_info=None):
bind = (config[CONF_HOST], config[CONF_PORT])
_LOGGER.info('Listening on %s:%s', bind[0], bind[1])
proto = lambda: SyslogScannerUDP(hass, async_see, config[CONF_DEVICES])
listen = hass.loop.create_datagram_endpoint(proto, local_addr=bind)
hass.async_add_job(listen)
return True
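A quick, illustrative driver for the parser above (not part of the original file), run against the sample lines from the module docstring:

# Illustrative sketch: feed the docstring's sample lines to parse_syslog_line().
if __name__ == '__main__':
    samples = [
        '<29>Dec 31 13:05:15 router hostapd: wlan0: AP-STA-CONNECTED 64:20:0c:37:52:82',
        '<30>Dec 31 13:15:23 router hostapd: wlan0: STA 64:20:0c:37:52:82 '
        'IEEE 802.11: deauthenticated due to inactivity (timer DEAUTH/REMOVE)',
        '<30>Dec 31 13:02:33 router dnsmasq-dhcp[1601]: DHCPACK(br-lan) 192.168.0.101 f4:6d:04:ae:ac:d7 leon-pc',
    ]
    for sample in samples:
        print(parse_syslog_line(sample))
    # Expected, given STA_EVENTS and the AP-STA-* branches:
    #   Event(mac='64:20:0c:37:52:82', kind='home', is_sta=True, reason='AP-STA-CONNECTED')
    #   Event(mac='64:20:0c:37:52:82', kind='timeout', is_sta=True,
    #         reason='IEEE 802.11: deauthenticated due to inactivity')
    #   Event(mac='f4:6d:04:ae:ac:d7', kind='home', is_sta=False, reason='DHCPACK(br-lan)')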
avg_line_length: 35.089005 | max_line_length: 137 | alphanum_fraction: 0.609669
content_no_comment:
from asyncio import coroutine
from collections import namedtuple
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.device_tracker import PLATFORM_SCHEMA, SOURCE_TYPE_ROUTER
from homeassistant.const import CONF_HOST, CONF_PORT, CONF_DEVICES
from homeassistant.helpers.event import async_track_time_interval
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=514): cv.port,
vol.Required(CONF_DEVICES): {cv.string: cv.string},
})
Event = namedtuple('Event', 'mac kind is_sta reason')
STA_EVENTS = {
'WPA: group key handshake completed': 'home',
'WPA: pairwise key handshake completed': 'home',
'deauthenticated due to local deauth request': 'not_home',
'IEEE 802.11: disconnected due to excessive missing ACKs': 'timeout',
'IEEE 802.11: disassociated due to inactivity': 'timeout',
'IEEE 802.11: deauthenticated due to inactivity': 'timeout',
'IEEE 802.11: associated': '',
'IEEE 802.11: authenticated': '',
'IEEE 802.11: disassociated': '',
}
def _skip_date_tokens(tokens):
if tokens and tokens[0].startswith('<'):
tokens.pop(0)
while tokens and (not tokens[0] or tokens[0][:1].isdigit()):
tokens.pop(0)
def _find_process(tokens):
while tokens:
token = tokens.pop(0)
if token.endswith(':'):
c = token.find('[')
if c > -1:
return token[:c]
return token[:-1]
def _remove_param(tokens):
i = len(tokens) - 1
while i > 0:
if tokens[i].startswith('('):
return tokens[:i]
i -= 1
return tokens
def parse_syslog_line(line):
tokens = line.split(' ')
_skip_date_tokens(tokens)
process = _find_process(tokens)
if not process or not tokens:
_LOGGER.debug('Unable to process line: %r', line)
return
if process == 'hostapd':
if len(tokens) == 3:
if tokens[1] == 'AP-STA-CONNECTED':
return Event(tokens[2], 'home', True, tokens[1])
elif tokens[1] == 'AP-STA-DISCONNECTED':
return Event(tokens[2], 'timeout', True, tokens[1])
elif len(tokens) > 4 and tokens[1] == 'STA':
suffix = ' '.join(_remove_param(tokens[3:]))
for consider, status in STA_EVENTS.items():
if suffix.endswith(consider):
if status == '':
return
return Event(tokens[2], status, True, suffix)
_LOGGER.warning('Unhandled line: %r', line)
elif process == 'dnsmasq-dhcp':
if len(tokens) >= 3:
if tokens[0].startswith('DHCPACK('):
return Event(tokens[2], 'home', False, tokens[0])
class SyslogScanner:
def __init__(self, hass, async_see, devices):
self.hass = hass
self.devices = devices
self.wireless_devices = set()
self.async_see = async_see
self.debug_marked = {}
@coroutine
def scan_online_devices(self, now=None):
_LOGGER.info('Check online devices')
for mac, name in self.devices.items():
if mac in self.wireless_devices:
continue
_LOGGER.info('Check %r', mac)
def process_line(self, line):
event = parse_syslog_line(line.rstrip('\n'))
if not event:
return
_LOGGER.info('%r', event)
mac = event.mac.replace(':', '')
if event.is_sta:
self.wireless_devices.add(mac)
device = self.devices.get(mac)
if not device:
device = self.devices[mac] = mac
consider_home = None
state = event.kind
if event.kind == 'timeout':
state = 'not_home'
consider_home = timedelta(minutes=5)
if self.debug_marked.get(device) != state:
_LOGGER.info('Mark %r as %r [%s]', device, state, consider_home)
self.debug_marked[device] = state
self.hass.async_add_job(self.async_see(dev_id=device,
source_type=SOURCE_TYPE_ROUTER,
mac=event.mac,
location_name=state))
class SyslogScannerUDP(SyslogScanner):
def connection_made(self, transport):
self.transport = transport
def datagram_received(self, data, addr):
message = data.decode('utf8', 'replace')
self.process_line(message)
@coroutine
def async_setup_scanner(hass, config, async_see, discovery_info=None):
bind = (config[CONF_HOST], config[CONF_PORT])
_LOGGER.info('Listening on %s:%s', bind[0], bind[1])
proto = lambda: SyslogScannerUDP(hass, async_see, config[CONF_DEVICES])
listen = hass.loop.create_datagram_endpoint(proto, local_addr=bind)
hass.async_add_job(listen)
return True
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790595cff4f39bec191db0f87f33bacf84dd9dd8 | size: 1,008 | ext: py | lang: Python
max_stars_repo: preprocess/watershed.py @ essential2189/Cell-Based-Model (head f01c3fcb45e69baa4dc8216b8b5a092f56cfa38e, licenses ["MIT"]) | max_stars_count: null | stars events: null – null
max_issues_repo: preprocess/watershed.py @ essential2189/Cell-Based-Model (head f01c3fcb45e69baa4dc8216b8b5a092f56cfa38e, licenses ["MIT"]) | max_issues_count: null | issues events: null – null
max_forks_repo: preprocess/watershed.py @ essential2189/Cell-Based-Model (head f01c3fcb45e69baa4dc8216b8b5a092f56cfa38e, licenses ["MIT"]) | max_forks_count: null | forks events: null – null
content:
#-*-coding:utf-8-*-
import numpy as np
import cv2
import gc
from tqdm import tqdm
def watershed(opencv_image):
top_n_label = 2
gray = cv2.cvtColor(opencv_image, cv2.COLOR_BGR2GRAY)
print('convert gray end')
gray[gray == 0] = 255
_, cvt_img = cv2.threshold(gray, 225, 255, cv2.THRESH_BINARY_INV)
del(gray)
print('threshold end')
ret, markers = cv2.connectedComponents(cvt_img)
print('connectedComponents end')
label_dict = dict()
for i in tqdm(range(ret)):
if i == 0:
continue
label_dict[i] = len(markers[markers == i])
sort_label_list = sorted(label_dict.items(), key=lambda item: item[1], reverse=True)
print('label end')
result = np.zeros(markers.shape)
for ins in tqdm(sort_label_list[:top_n_label]):
result[markers == ins[0]] = 255
print(result.shape)
print('top n label end')
del(ret)
del(markers)
del(sort_label_list)
del(label_dict)
del(cvt_img)
return result
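A minimal usage sketch for the function above (illustrative only; the file names are placeholders):

# Illustrative sketch: keep the two largest non-background components of an image.
if __name__ == '__main__':
    image = cv2.imread('slide.png')   # placeholder input path
    mask = watershed(image)           # float array holding 0 / 255 values
    cv2.imwrite('mask.png', mask.astype(np.uint8))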
avg_line_length: 21.446809 | max_line_length: 88 | alphanum_fraction: 0.641865
content_no_comment:
import numpy as np
import cv2
import gc
from tqdm import tqdm
def watershed(opencv_image):
top_n_label = 2
gray = cv2.cvtColor(opencv_image, cv2.COLOR_BGR2GRAY)
print('convert gray end')
gray[gray == 0] = 255
_, cvt_img = cv2.threshold(gray, 225, 255, cv2.THRESH_BINARY_INV)
del(gray)
print('threshold end')
ret, markers = cv2.connectedComponents(cvt_img)
print('connectedComponents end')
label_dict = dict()
for i in tqdm(range(ret)):
if i == 0:
continue
label_dict[i] = len(markers[markers == i])
sort_label_list = sorted(label_dict.items(), key=lambda item: item[1], reverse=True)
print('label end')
result = np.zeros(markers.shape)
for ins in tqdm(sort_label_list[:top_n_label]):
result[markers == ins[0]] = 255
print(result.shape)
print('top n label end')
del(ret)
del(markers)
del(sort_label_list)
del(label_dict)
del(cvt_img)
return result
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790595dd224074cc2fcfbece35f8a00f05b5ad51 | size: 2,192 | ext: py | lang: Python
max_stars_repo: setup.py @ GenusGeoff/tiingo-python (head 969cbca4124b0ad366d476ea55a31e55677aea7c, licenses ["MIT"]) | max_stars_count: null | stars events: null – null
max_issues_repo: setup.py @ GenusGeoff/tiingo-python (head 969cbca4124b0ad366d476ea55a31e55677aea7c, licenses ["MIT"]) | max_issues_count: null | issues events: null – null
max_forks_repo: setup.py @ GenusGeoff/tiingo-python (head 969cbca4124b0ad366d476ea55a31e55677aea7c, licenses ["MIT"]) | max_forks_count: null | forks events: null – null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script. Based on Jeff Knupp's Demo + Cookiecutter"""
import io
import os
from setuptools import setup, find_packages
def read(*filenames, **kwargs):
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = []
for filename in filenames:
with io.open(filename, encoding=encoding) as f:
buf.append(f.read())
return sep.join(buf)
NAME = 'tiingo'
AUTHOR = "Cameron Yick"
EMAIL = 'cameron.yick@gmail.com'
URL = 'https://github.com/hydrosquall/tiingo-python'
DESCRIPTION = "REST Client for Tiingo Data Platform API"
LONG_DESCRIPTION = read('README.rst', 'HISTORY.rst')
requirements = [
'requests',
]
setup_requirements = [
'pytest-runner',
]
test_requirements = [
'pytest',
'vcrpy',
]
# Metadata about the module
# Load the package's __version__.py module as a dictionary.
# Via https://github.com/kennethreitz/setup.py/blob/master/setup.py
here = os.path.abspath(os.path.dirname(__file__))
about = {}
with open(os.path.join(here, NAME, '__version__.py')) as f:
exec(f.read(), about)
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=EMAIL,
url=URL,
packages=find_packages(include=[NAME]),
include_package_data=True,
install_requires=requirements,
extras_require={'pandas': ['pandas>=0.18']},
license="MIT license",
zip_safe=False,
keywords=['tiingo', 'finance', 'stocks', 'rest'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Financial and Insurance Industry',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Topic :: Office/Business :: Financial :: Investment',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
test_suite='tests',
tests_require=test_requirements,
setup_requires=setup_requirements,
)
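The exec(f.read(), about) step above assumes a tiingo/__version__.py whose job is to define __version__; a minimal sketch of such a file (the version string is a placeholder, not the real tiingo version):

# tiingo/__version__.py (sketch only; the real file may carry more metadata)
__version__ = '0.0.0'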
avg_line_length: 27.4 | max_line_length: 67 | alphanum_fraction: 0.654197
content_no_comment:
import io
import os
from setuptools import setup, find_packages
def read(*filenames, **kwargs):
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = []
for filename in filenames:
with io.open(filename, encoding=encoding) as f:
buf.append(f.read())
return sep.join(buf)
NAME = 'tiingo'
AUTHOR = "Cameron Yick"
EMAIL = 'cameron.yick@gmail.com'
URL = 'https://github.com/hydrosquall/tiingo-python'
DESCRIPTION = "REST Client for Tiingo Data Platform API"
LONG_DESCRIPTION = read('README.rst', 'HISTORY.rst')
requirements = [
'requests',
]
setup_requirements = [
'pytest-runner',
]
test_requirements = [
'pytest',
'vcrpy',
]
# Via https://github.com/kennethreitz/setup.py/blob/master/setup.py
here = os.path.abspath(os.path.dirname(__file__))
about = {}
with open(os.path.join(here, NAME, '__version__.py')) as f:
exec(f.read(), about)
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=EMAIL,
url=URL,
packages=find_packages(include=[NAME]),
include_package_data=True,
install_requires=requirements,
extras_require={'pandas': ['pandas>=0.18']},
license="MIT license",
zip_safe=False,
keywords=['tiingo', 'finance', 'stocks', 'rest'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Financial and Insurance Industry',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Topic :: Office/Business :: Financial :: Investment',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
test_suite='tests',
tests_require=test_requirements,
setup_requires=setup_requirements,
)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 7905967b16f5e68d83f5d748d613ba0a86cdfb67 | size: 15,637 | ext: py | lang: Python
max_stars_repo: sdk/python/pulumi_azure_native/documentdb/v20210615/private_endpoint_connection.py @ polivbr/pulumi-azure-native (head 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7, licenses ["Apache-2.0"]) | max_stars_count: null | stars events: null – null
max_issues_repo: sdk/python/pulumi_azure_native/documentdb/v20210615/private_endpoint_connection.py @ polivbr/pulumi-azure-native (head 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7, licenses ["Apache-2.0"]) | max_issues_count: null | issues events: null – null
max_forks_repo: sdk/python/pulumi_azure_native/documentdb/v20210615/private_endpoint_connection.py @ polivbr/pulumi-azure-native (head 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7, licenses ["Apache-2.0"]) | max_forks_count: null | forks events: null – null
content:
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['PrivateEndpointConnectionArgs', 'PrivateEndpointConnection']
@pulumi.input_type
class PrivateEndpointConnectionArgs:
def __init__(__self__, *,
account_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
group_id: Optional[pulumi.Input[str]] = None,
private_endpoint: Optional[pulumi.Input['PrivateEndpointPropertyArgs']] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs']] = None,
provisioning_state: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a PrivateEndpointConnection resource.
:param pulumi.Input[str] account_name: Cosmos DB database account name.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] group_id: Group id of the private endpoint.
:param pulumi.Input['PrivateEndpointPropertyArgs'] private_endpoint: Private endpoint which the connection belongs to.
:param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection.
:param pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs'] private_link_service_connection_state: Connection State of the Private Endpoint Connection.
:param pulumi.Input[str] provisioning_state: Provisioning state of the private endpoint.
"""
pulumi.set(__self__, "account_name", account_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if group_id is not None:
pulumi.set(__self__, "group_id", group_id)
if private_endpoint is not None:
pulumi.set(__self__, "private_endpoint", private_endpoint)
if private_endpoint_connection_name is not None:
pulumi.set(__self__, "private_endpoint_connection_name", private_endpoint_connection_name)
if private_link_service_connection_state is not None:
pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> pulumi.Input[str]:
"""
Cosmos DB database account name.
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> Optional[pulumi.Input[str]]:
"""
Group id of the private endpoint.
"""
return pulumi.get(self, "group_id")
@group_id.setter
def group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_id", value)
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> Optional[pulumi.Input['PrivateEndpointPropertyArgs']]:
"""
Private endpoint which the connection belongs to.
"""
return pulumi.get(self, "private_endpoint")
@private_endpoint.setter
def private_endpoint(self, value: Optional[pulumi.Input['PrivateEndpointPropertyArgs']]):
pulumi.set(self, "private_endpoint", value)
@property
@pulumi.getter(name="privateEndpointConnectionName")
def private_endpoint_connection_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the private endpoint connection.
"""
return pulumi.get(self, "private_endpoint_connection_name")
@private_endpoint_connection_name.setter
def private_endpoint_connection_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_endpoint_connection_name", value)
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> Optional[pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs']]:
"""
Connection State of the Private Endpoint Connection.
"""
return pulumi.get(self, "private_link_service_connection_state")
@private_link_service_connection_state.setter
def private_link_service_connection_state(self, value: Optional[pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs']]):
pulumi.set(self, "private_link_service_connection_state", value)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[pulumi.Input[str]]:
"""
Provisioning state of the private endpoint.
"""
return pulumi.get(self, "provisioning_state")
@provisioning_state.setter
def provisioning_state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "provisioning_state", value)
class PrivateEndpointConnection(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']]] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
A private endpoint connection
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: Cosmos DB database account name.
:param pulumi.Input[str] group_id: Group id of the private endpoint.
:param pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']] private_endpoint: Private endpoint which the connection belongs to.
:param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection.
:param pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']] private_link_service_connection_state: Connection State of the Private Endpoint Connection.
:param pulumi.Input[str] provisioning_state: Provisioning state of the private endpoint.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PrivateEndpointConnectionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A private endpoint connection
:param str resource_name: The name of the resource.
:param PrivateEndpointConnectionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PrivateEndpointConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']]] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PrivateEndpointConnectionArgs.__new__(PrivateEndpointConnectionArgs)
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__.__dict__["account_name"] = account_name
__props__.__dict__["group_id"] = group_id
__props__.__dict__["private_endpoint"] = private_endpoint
__props__.__dict__["private_endpoint_connection_name"] = private_endpoint_connection_name
__props__.__dict__["private_link_service_connection_state"] = private_link_service_connection_state
__props__.__dict__["provisioning_state"] = provisioning_state
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:documentdb/v20210615:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20190801preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20190801preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210115:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210115:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210301preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210301preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210315:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210315:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210401preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210401preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210415:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210415:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210515:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210515:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210701preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210701preview:PrivateEndpointConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PrivateEndpointConnection, __self__).__init__(
'azure-native:documentdb/v20210615:PrivateEndpointConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateEndpointConnection':
"""
Get an existing PrivateEndpointConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = PrivateEndpointConnectionArgs.__new__(PrivateEndpointConnectionArgs)
__props__.__dict__["group_id"] = None
__props__.__dict__["name"] = None
__props__.__dict__["private_endpoint"] = None
__props__.__dict__["private_link_service_connection_state"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
return PrivateEndpointConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> pulumi.Output[Optional[str]]:
"""
Group id of the private endpoint.
"""
return pulumi.get(self, "group_id")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> pulumi.Output[Optional['outputs.PrivateEndpointPropertyResponse']]:
"""
Private endpoint which the connection belongs to.
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> pulumi.Output[Optional['outputs.PrivateLinkServiceConnectionStatePropertyResponse']]:
"""
Connection State of the Private Endpoint Connection.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
Provisioning state of the private endpoint.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
avg_line_length: 53.00678 | max_line_length: 1,676 | alphanum_fraction: 0.703716
content_no_comment:
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['PrivateEndpointConnectionArgs', 'PrivateEndpointConnection']
@pulumi.input_type
class PrivateEndpointConnectionArgs:
def __init__(__self__, *,
account_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
group_id: Optional[pulumi.Input[str]] = None,
private_endpoint: Optional[pulumi.Input['PrivateEndpointPropertyArgs']] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs']] = None,
provisioning_state: Optional[pulumi.Input[str]] = None):
pulumi.set(__self__, "account_name", account_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if group_id is not None:
pulumi.set(__self__, "group_id", group_id)
if private_endpoint is not None:
pulumi.set(__self__, "private_endpoint", private_endpoint)
if private_endpoint_connection_name is not None:
pulumi.set(__self__, "private_endpoint_connection_name", private_endpoint_connection_name)
if private_link_service_connection_state is not None:
pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "group_id")
@group_id.setter
def group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_id", value)
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> Optional[pulumi.Input['PrivateEndpointPropertyArgs']]:
return pulumi.get(self, "private_endpoint")
@private_endpoint.setter
def private_endpoint(self, value: Optional[pulumi.Input['PrivateEndpointPropertyArgs']]):
pulumi.set(self, "private_endpoint", value)
@property
@pulumi.getter(name="privateEndpointConnectionName")
def private_endpoint_connection_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "private_endpoint_connection_name")
@private_endpoint_connection_name.setter
def private_endpoint_connection_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_endpoint_connection_name", value)
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> Optional[pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs']]:
return pulumi.get(self, "private_link_service_connection_state")
@private_link_service_connection_state.setter
def private_link_service_connection_state(self, value: Optional[pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs']]):
pulumi.set(self, "private_link_service_connection_state", value)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "provisioning_state")
@provisioning_state.setter
def provisioning_state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "provisioning_state", value)
class PrivateEndpointConnection(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']]] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
...
@overload
def __init__(__self__,
resource_name: str,
args: PrivateEndpointConnectionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PrivateEndpointConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']]] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PrivateEndpointConnectionArgs.__new__(PrivateEndpointConnectionArgs)
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__.__dict__["account_name"] = account_name
__props__.__dict__["group_id"] = group_id
__props__.__dict__["private_endpoint"] = private_endpoint
__props__.__dict__["private_endpoint_connection_name"] = private_endpoint_connection_name
__props__.__dict__["private_link_service_connection_state"] = private_link_service_connection_state
__props__.__dict__["provisioning_state"] = provisioning_state
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:documentdb/v20210615:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20190801preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20190801preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210115:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210115:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210301preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210301preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210315:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210315:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210401preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210401preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210415:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210415:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210515:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210515:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210701preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210701preview:PrivateEndpointConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PrivateEndpointConnection, __self__).__init__(
'azure-native:documentdb/v20210615:PrivateEndpointConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateEndpointConnection':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = PrivateEndpointConnectionArgs.__new__(PrivateEndpointConnectionArgs)
__props__.__dict__["group_id"] = None
__props__.__dict__["name"] = None
__props__.__dict__["private_endpoint"] = None
__props__.__dict__["private_link_service_connection_state"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
return PrivateEndpointConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "group_id")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> pulumi.Output[Optional['outputs.PrivateEndpointPropertyResponse']]:
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> pulumi.Output[Optional['outputs.PrivateLinkServiceConnectionStatePropertyResponse']]:
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
return pulumi.get(self, "type")
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790596a93c5dcc1ad2ee157fcfea3482b9f652a8 | size: 7,032 | ext: py | lang: Python
max_stars_repo: configs/litehrnet/ocr_litehrxv3_split_aux-border_256x256_40k_hrf_amsoftmax.py @ openvinotoolkit/mmsegmentation (head 9f50fc158be50594ea4aecf0a07ea652c91ec846, licenses ["Apache-2.0"]) | max_stars_count: 3 | stars events: 2021-12-21T07:25:13.000Z – 2022-02-07T01:59:19.000Z
max_issues_repo: configs/litehrnet/ocr_litehrxv3_split_aux-border_256x256_40k_hrf_amsoftmax.py @ openvinotoolkit/mmsegmentation (head 9f50fc158be50594ea4aecf0a07ea652c91ec846, licenses ["Apache-2.0"]) | max_issues_count: 13 | issues events: 2021-12-10T15:08:56.000Z – 2022-03-23T08:58:03.000Z
max_forks_repo: configs/litehrnet/ocr_litehrxv3_split_aux-border_256x256_40k_hrf_amsoftmax.py @ evgeny-izutov/mmsegmentation (head 9f50fc158be50594ea4aecf0a07ea652c91ec846, licenses ["Apache-2.0"]) | max_forks_count: 3 | forks events: 2021-11-11T23:16:51.000Z – 2021-12-08T23:49:29.000Z
content:
_base_ = [
'../_base_/models/fcn_litehrxv3_no-aggregator.py', '../_base_/datasets/hrf_extra.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_step_40k_ml.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='CascadeEncoderDecoder',
num_stages=2,
decode_head=[
dict(type='FCNHead',
in_channels=[18, 60, 80, 160, 320],
in_index=[0, 1, 2, 3, 4],
input_transform='multiple_select',
channels=60,
kernel_size=1,
num_convs=0,
concat_input=False,
dropout_ratio=-1,
num_classes=2,
norm_cfg=norm_cfg,
align_corners=False,
enable_aggregator=True,
aggregator_min_channels=60,
aggregator_merge_norm=None,
aggregator_use_concat=False,
enable_out_norm=False,
enable_loss_equalizer=True,
loss_decode=[
dict(type='CrossEntropyLoss',
use_sigmoid=False,
loss_jitter_prob=0.01,
sampler=dict(type='MaxPoolingPixelSampler', ratio=0.25, p=1.7),
loss_weight=4.0),
dict(type='GeneralizedDiceLoss',
smooth=1.0,
gamma=5.0,
alpha=0.5,
beta=0.5,
focal_gamma=1.0,
loss_jitter_prob=0.01,
loss_weight=4.0),
]),
dict(type='OCRHead',
in_channels=[18, 60, 80, 160, 320],
in_index=[0, 1, 2, 3, 4],
input_transform='multiple_select',
channels=60,
ocr_channels=60,
sep_conv=True,
dropout_ratio=-1,
num_classes=2,
norm_cfg=norm_cfg,
align_corners=False,
enable_aggregator=True,
aggregator_min_channels=60,
aggregator_merge_norm=None,
aggregator_use_concat=False,
enable_out_norm=True,
loss_decode=[
dict(type='AMSoftmaxLoss',
scale_cfg=dict(
type='PolyScalarScheduler',
start_scale=30,
end_scale=5,
num_iters=30000,
power=1.2
),
margin_type='cos',
margin=0.5,
gamma=2.0,
t=1.0,
target_loss='ce',
pr_product=False,
conf_penalty_weight=dict(
type='PolyScalarScheduler',
start_scale=0.2,
end_scale=0.15,
num_iters=20000,
power=1.2
),
loss_jitter_prob=0.01,
border_reweighting=False,
sampler=dict(type='MaxPoolingPixelSampler', ratio=0.25, p=1.7),
loss_weight=1.0),
]),
],
auxiliary_head=[
dict(type='AuxOCRHead',
in_channels=[18, 60, 80, 160, 320],
in_index=[0, 1, 2, 3, 4],
input_transform='multiple_select',
channels=60,
ocr_channels=60,
sep_conv=True,
dropout_ratio=-1,
num_classes=2,
norm_cfg=norm_cfg,
align_corners=False,
enable_aggregator=True,
aggregator_min_channels=60,
aggregator_merge_norm=None,
aggregator_use_concat=False,
enable_out_norm=True,
loss_decode=[
dict(type='AMSoftmaxLoss',
scale_cfg=dict(
type='PolyScalarScheduler',
start_scale=30,
end_scale=5,
num_iters=30000,
power=1.2
),
margin_type='cos',
margin=0.5,
gamma=2.0,
t=1.0,
target_loss='ce',
pr_product=False,
conf_penalty_weight=dict(
type='PolyScalarScheduler',
start_scale=0.2,
end_scale=0.15,
num_iters=20000,
power=1.2
),
loss_jitter_prob=0.01,
border_reweighting=False,
sampler=dict(type='MaxPoolingPixelSampler', ratio=0.25, p=1.7),
loss_weight=1.0),
]),
dict(type='FCNHead',
in_channels=[18, 60, 80, 160, 320],
in_index=[0, 1, 2, 3, 4],
input_transform='multiple_select',
channels=60,
kernel_size=1,
num_convs=0,
concat_input=False,
dropout_ratio=-1,
num_classes=2,
norm_cfg=norm_cfg,
align_corners=False,
enable_aggregator=True,
aggregator_min_channels=60,
aggregator_merge_norm=None,
aggregator_use_concat=False,
enable_out_norm=False,
enable_loss_equalizer=True,
loss_target='gt_class_borders',
loss_decode=[
dict(type='CrossEntropyLoss',
use_sigmoid=False,
loss_jitter_prob=0.01,
sampler=dict(type='MaxPoolingPixelSampler', ratio=0.1, p=1.7),
loss_weight=5.0),
dict(type='GeneralizedDiceLoss',
smooth=1.0,
gamma=5.0,
alpha=0.5,
beta=0.5,
focal_gamma=1.0,
loss_jitter_prob=0.01,
loss_weight=5.0),
]),
],
train_cfg=dict(
mix_loss=dict(
enable=False,
weight=0.1
),
mutual_loss=[
dict(type='MutualLoss',
head_a_name='decode_1',
head_b_name='aux_0',
sampler=dict(type='OHEMPixelSampler', kept_ratio=0.1),
loss_weight=2.0),
],
loss_reweighting=dict(
weights={'decode_0.loss_seg': 0.5,
'decode_1.loss_seg': 1.0,
'aux_0.loss_seg': 0.9,
'aux_1.loss_seg': 0.5,
'loss_mutual': 0.5},
momentum=0.1
),
),
test_cfg=dict(
mode='slide',
crop_size=(1024, 1024),
stride=(680, 680)
),
)
evaluation = dict(
metric='mDice',
)
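Configs in this family are normally consumed through mmcv; a minimal, illustrative load (assumes mmcv is installed and uses the config path from the record above):

# Illustrative sketch: load and inspect the merged config.
from mmcv import Config

cfg = Config.fromfile(
    'configs/litehrnet/ocr_litehrxv3_split_aux-border_256x256_40k_hrf_amsoftmax.py')
print(cfg.model.type)                 # CascadeEncoderDecoder
print(cfg.evaluation.metric)          # mDice
print(cfg.model.test_cfg.crop_size)   # (1024, 1024)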
avg_line_length: 35.695431 | max_line_length: 89 | alphanum_fraction: 0.436576
content_no_comment:
_base_ = [
'../_base_/models/fcn_litehrxv3_no-aggregator.py', '../_base_/datasets/hrf_extra.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_step_40k_ml.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='CascadeEncoderDecoder',
num_stages=2,
decode_head=[
dict(type='FCNHead',
in_channels=[18, 60, 80, 160, 320],
in_index=[0, 1, 2, 3, 4],
input_transform='multiple_select',
channels=60,
kernel_size=1,
num_convs=0,
concat_input=False,
dropout_ratio=-1,
num_classes=2,
norm_cfg=norm_cfg,
align_corners=False,
enable_aggregator=True,
aggregator_min_channels=60,
aggregator_merge_norm=None,
aggregator_use_concat=False,
enable_out_norm=False,
enable_loss_equalizer=True,
loss_decode=[
dict(type='CrossEntropyLoss',
use_sigmoid=False,
loss_jitter_prob=0.01,
sampler=dict(type='MaxPoolingPixelSampler', ratio=0.25, p=1.7),
loss_weight=4.0),
dict(type='GeneralizedDiceLoss',
smooth=1.0,
gamma=5.0,
alpha=0.5,
beta=0.5,
focal_gamma=1.0,
loss_jitter_prob=0.01,
loss_weight=4.0),
]),
dict(type='OCRHead',
in_channels=[18, 60, 80, 160, 320],
in_index=[0, 1, 2, 3, 4],
input_transform='multiple_select',
channels=60,
ocr_channels=60,
sep_conv=True,
dropout_ratio=-1,
num_classes=2,
norm_cfg=norm_cfg,
align_corners=False,
enable_aggregator=True,
aggregator_min_channels=60,
aggregator_merge_norm=None,
aggregator_use_concat=False,
enable_out_norm=True,
loss_decode=[
dict(type='AMSoftmaxLoss',
scale_cfg=dict(
type='PolyScalarScheduler',
start_scale=30,
end_scale=5,
num_iters=30000,
power=1.2
),
margin_type='cos',
margin=0.5,
gamma=2.0,
t=1.0,
target_loss='ce',
pr_product=False,
conf_penalty_weight=dict(
type='PolyScalarScheduler',
start_scale=0.2,
end_scale=0.15,
num_iters=20000,
power=1.2
),
loss_jitter_prob=0.01,
border_reweighting=False,
sampler=dict(type='MaxPoolingPixelSampler', ratio=0.25, p=1.7),
loss_weight=1.0),
]),
],
auxiliary_head=[
dict(type='AuxOCRHead',
in_channels=[18, 60, 80, 160, 320],
in_index=[0, 1, 2, 3, 4],
input_transform='multiple_select',
channels=60,
ocr_channels=60,
sep_conv=True,
dropout_ratio=-1,
num_classes=2,
norm_cfg=norm_cfg,
align_corners=False,
enable_aggregator=True,
aggregator_min_channels=60,
aggregator_merge_norm=None,
aggregator_use_concat=False,
enable_out_norm=True,
loss_decode=[
dict(type='AMSoftmaxLoss',
scale_cfg=dict(
type='PolyScalarScheduler',
start_scale=30,
end_scale=5,
num_iters=30000,
power=1.2
),
margin_type='cos',
margin=0.5,
gamma=2.0,
t=1.0,
target_loss='ce',
pr_product=False,
conf_penalty_weight=dict(
type='PolyScalarScheduler',
start_scale=0.2,
end_scale=0.15,
num_iters=20000,
power=1.2
),
loss_jitter_prob=0.01,
border_reweighting=False,
sampler=dict(type='MaxPoolingPixelSampler', ratio=0.25, p=1.7),
loss_weight=1.0),
]),
dict(type='FCNHead',
in_channels=[18, 60, 80, 160, 320],
in_index=[0, 1, 2, 3, 4],
input_transform='multiple_select',
channels=60,
kernel_size=1,
num_convs=0,
concat_input=False,
dropout_ratio=-1,
num_classes=2,
norm_cfg=norm_cfg,
align_corners=False,
enable_aggregator=True,
aggregator_min_channels=60,
aggregator_merge_norm=None,
aggregator_use_concat=False,
enable_out_norm=False,
enable_loss_equalizer=True,
loss_target='gt_class_borders',
loss_decode=[
dict(type='CrossEntropyLoss',
use_sigmoid=False,
loss_jitter_prob=0.01,
sampler=dict(type='MaxPoolingPixelSampler', ratio=0.1, p=1.7),
loss_weight=5.0),
dict(type='GeneralizedDiceLoss',
smooth=1.0,
gamma=5.0,
alpha=0.5,
beta=0.5,
focal_gamma=1.0,
loss_jitter_prob=0.01,
loss_weight=5.0),
]),
],
train_cfg=dict(
mix_loss=dict(
enable=False,
weight=0.1
),
mutual_loss=[
dict(type='MutualLoss',
head_a_name='decode_1',
head_b_name='aux_0',
sampler=dict(type='OHEMPixelSampler', kept_ratio=0.1),
loss_weight=2.0),
],
loss_reweighting=dict(
weights={'decode_0.loss_seg': 0.5,
'decode_1.loss_seg': 1.0,
'aux_0.loss_seg': 0.9,
'aux_1.loss_seg': 0.5,
'loss_mutual': 0.5},
momentum=0.1
),
),
test_cfg=dict(
mode='slide',
crop_size=(1024, 1024),
stride=(680, 680)
),
)
evaluation = dict(
metric='mDice',
)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790597103a8919f936e95edf5d4fa370cb587876 | size: 3,459 | ext: py | lang: Python
max_stars_repo: main.py @ nicksacco17/Quantum_Information (head c1ee8369f34a9b78a7360cd68b6f2da082266d4a, licenses ["MIT"]) | max_stars_count: null | stars events: null – null
max_issues_repo: main.py @ nicksacco17/Quantum_Information (head c1ee8369f34a9b78a7360cd68b6f2da082266d4a, licenses ["MIT"]) | max_issues_count: null | issues events: null – null
max_forks_repo: main.py @ nicksacco17/Quantum_Information (head c1ee8369f34a9b78a7360cd68b6f2da082266d4a, licenses ["MIT"]) | max_forks_count: 1 | forks events: 2022-03-17T00:45:04.000Z – 2022-03-17T00:45:04.000Z
content:
from PauliInteraction import PauliInteraction
from Ising import Ising
from CoefficientGenerator import CoefficientGenerator
from Evaluator import Evaluator
#from DataVisualizer import DataVisualizer
#from DataLogger import DataLogger
import Driver as Driver_H
from MasterEquation import MasterEquation
from QuantumAnnealer import QuantumAnnealer
from Test import test
from mpi4py import MPI  # required by main() and parallel_main() below
import qutip as qp
import numpy as np
import time as time
import sys
X_HAT = "X"
Y_HAT = "Y"
Z_HAT = "Z"
#NUM_TESTS = 1
#N = 7
T = 100
#NUM_INTERACTIONS = int((N * (N - 1)) / 2)
RAND_COEF = False
MASTER_RANK = 0
# MAIN TEST CASES FROM SAMPLE CODE
# THIS IS NOT USED!
def main():
print(sys.argv)
COMM = MPI.COMM_WORLD
NUM_PROC = COMM.Get_size()
M_RANK = COMM.Get_rank()
N = 100
m_N = (int) (N / NUM_PROC)
if M_RANK == MASTER_RANK:
A = np.arange(N, dtype = np.float64)
start_time = MPI.Wtime()
else:
A = np.empty(N, dtype = np.float64)
m_A = np.empty(m_N, dtype = np.float64)
# Scatter
COMM.Scatter([A, MPI.DOUBLE], [m_A, MPI.DOUBLE])
for i in range(m_N):
m_A[i] = M_RANK
COMM.Barrier()
COMM.Allgather([m_A, MPI.DOUBLE], [A, MPI.DOUBLE])
COMM.Barrier()
if M_RANK == MASTER_RANK:
print(A)
#for i in range(10):
#test(RAND_COEF, i)
# THIS IS USED!
def parallel_main(NUM_TESTS, NUM_QUBITS):
# MPI Initialization
COMM = MPI.COMM_WORLD
NUM_PROC = COMM.Get_size()
M_RANK = COMM.Get_rank()
# If process is master rank, allocate array of overlap probabilities
if M_RANK == MASTER_RANK:
overlap_probabilities = np.zeros(NUM_TESTS, dtype = np.float64)
start_time = MPI.Wtime()
else:
overlap_probabilities = np.empty(NUM_TESTS, dtype = np.float64)
# Calculate the local number of tests to perform
M_TESTS = (int) (NUM_TESTS / NUM_PROC)
# Allocate local overlap probablity arrays
m_overlap_probabilities = np.empty(M_TESTS, dtype = np.float64)
# Scatter the global overlap probabilities
COMM.Scatter([overlap_probabilities, MPI.DOUBLE], [m_overlap_probabilities, MPI.DOUBLE])
# And for each process, perform its local tests and save overlap probability
for i in range(M_TESTS):
m_overlap_probabilities[i] = test(RAND_COEF, 0, NUM_QUBITS)
# Enforce synchronization
COMM.Barrier()
# Gather the local overlap probabilities in master rank
COMM.Allgather([m_overlap_probabilities, MPI.DOUBLE], [overlap_probabilities, MPI.DOUBLE])
# Enforce synchronization
COMM.Barrier()
# When tests are done, master rank will process data and print
if M_RANK == MASTER_RANK:
stop_time = MPI.Wtime()
total_time = stop_time - start_time
# Print probabilities - TODO(Log this to a file, not just print to screen)
#for i in range(len(overlap_probabilities)):
#print("ITERATION %d, OVERLAP PROBABILITY = %f" % (i, overlap_probabilities[i]))
# Print run statistics
print("---------- NUMBER OF QUBITS = %d ----------" % NUM_QUBITS)
print("\tNUMBER OF PROCESSES = %d" % NUM_PROC)
print("\tNUMBER OF TESTS = %d" % NUM_TESTS)
print("\tTOTAL TIME = %f sec" % total_time)
print("------------------------------------------")
# Initial script
if __name__ == "__main__":
NUM_TESTS = int(sys.argv[1])
NUM_QUBITS = int(sys.argv[2])
parallel_main(NUM_TESTS, NUM_QUBITS)
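Because parallel_main() splits the work as M_TESTS = NUM_TESTS / NUM_PROC and gathers with Allgather, NUM_TESTS should be a multiple of the process count. An illustrative launch (process count and arguments are arbitrary):

# Illustrative launch, assuming an MPI runtime and mpi4py are installed:
#   mpiexec -n 4 python main.py 1000 7
# -> each of the 4 ranks runs 250 tests on 7-qubit instances; the master rank
#    prints the aggregate timing statistics.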
avg_line_length: 26.007519 | max_line_length: 94 | alphanum_fraction: 0.665799
content_no_comment:
from PauliInteraction import PauliInteraction
from Ising import Ising
from CoefficientGenerator import CoefficientGenerator
from Evaluator import Evaluator
import Driver as Driver_H
from MasterEquation import MasterEquation
from QuantumAnnealer import QuantumAnnealer
from Test import test
from mpi4py import MPI
import qutip as qp
import numpy as np
import time as time
import sys
X_HAT = "X"
Y_HAT = "Y"
Z_HAT = "Z"
T = 100
RAND_COEF = False
MASTER_RANK = 0
def main():
print(sys.argv)
COMM = MPI.COMM_WORLD
NUM_PROC = COMM.Get_size()
M_RANK = COMM.Get_rank()
N = 100
m_N = (int) (N / NUM_PROC)
if M_RANK == MASTER_RANK:
A = np.arange(N, dtype = np.float64)
start_time = MPI.Wtime()
else:
A = np.empty(N, dtype = np.float64)
m_A = np.empty(m_N, dtype = np.float64)
COMM.Scatter([A, MPI.DOUBLE], [m_A, MPI.DOUBLE])
for i in range(m_N):
m_A[i] = M_RANK
COMM.Barrier()
COMM.Allgather([m_A, MPI.DOUBLE], [A, MPI.DOUBLE])
COMM.Barrier()
if M_RANK == MASTER_RANK:
print(A)
def parallel_main(NUM_TESTS, NUM_QUBITS):
COMM = MPI.COMM_WORLD
NUM_PROC = COMM.Get_size()
M_RANK = COMM.Get_rank()
if M_RANK == MASTER_RANK:
overlap_probabilities = np.zeros(NUM_TESTS, dtype = np.float64)
start_time = MPI.Wtime()
else:
overlap_probabilities = np.empty(NUM_TESTS, dtype = np.float64)
M_TESTS = (int) (NUM_TESTS / NUM_PROC)
m_overlap_probabilities = np.empty(M_TESTS, dtype = np.float64)
COMM.Scatter([overlap_probabilities, MPI.DOUBLE], [m_overlap_probabilities, MPI.DOUBLE])
for i in range(M_TESTS):
m_overlap_probabilities[i] = test(RAND_COEF, 0, NUM_QUBITS)
COMM.Barrier()
COMM.Allgather([m_overlap_probabilities, MPI.DOUBLE], [overlap_probabilities, MPI.DOUBLE])
COMM.Barrier()
if M_RANK == MASTER_RANK:
stop_time = MPI.Wtime()
total_time = stop_time - start_time
print("---------- NUMBER OF QUBITS = %d ----------" % NUM_QUBITS)
print("\tNUMBER OF PROCESSES = %d" % NUM_PROC)
print("\tNUMBER OF TESTS = %d" % NUM_TESTS)
print("\tTOTAL TIME = %f sec" % total_time)
print("------------------------------------------")
if __name__ == "__main__":
NUM_TESTS = int(sys.argv[1])
NUM_QUBITS = int(sys.argv[2])
parallel_main(NUM_TESTS, NUM_QUBITS)
is_comment_constant_removed: true | is_sharp_comment_removed: true
790597b07fa152bf367fe83a10426b36ad3170a5
| 5,922
|
py
|
Python
|
ServidorPython/python32_web/Lib/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
mak213k/Servidor_automatizado_python
|
4403ef8027a2f814220baacc95856cf5fbf01d21
|
[
"MIT"
] | 25
|
2019-03-08T01:03:03.000Z
|
2022-02-14T17:38:32.000Z
|
ServidorPython/python32_web/Lib/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
mak213k/Servidor_automatizado_python
|
4403ef8027a2f814220baacc95856cf5fbf01d21
|
[
"MIT"
] | 9
|
2020-09-25T22:32:02.000Z
|
2022-02-09T23:45:10.000Z
|
ServidorPython/python32_web/Lib/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
mak213k/Servidor_automatizado_python
|
4403ef8027a2f814220baacc95856cf5fbf01d21
|
[
"MIT"
] | 31
|
2019-01-15T20:16:50.000Z
|
2022-03-01T05:47:38.000Z
|
import numpy as np
import pytest
from sklearn.datasets import make_classification, make_regression
# To use this experimental feature, we need to explicitly ask for it:
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.ensemble import HistGradientBoostingClassifier
X_classification, y_classification = make_classification(random_state=0)
X_regression, y_regression = make_regression(random_state=0)
@pytest.mark.parametrize('GradientBoosting, X, y', [
(HistGradientBoostingClassifier, X_classification, y_classification),
(HistGradientBoostingRegressor, X_regression, y_regression)
])
@pytest.mark.parametrize(
'params, err_msg',
[({'loss': 'blah'}, 'Loss blah is not supported for'),
({'learning_rate': 0}, 'learning_rate=0 must be strictly positive'),
({'learning_rate': -1}, 'learning_rate=-1 must be strictly positive'),
({'max_iter': 0}, 'max_iter=0 must not be smaller than 1'),
({'max_leaf_nodes': 0}, 'max_leaf_nodes=0 should not be smaller than 2'),
({'max_leaf_nodes': 1}, 'max_leaf_nodes=1 should not be smaller than 2'),
({'max_depth': 0}, 'max_depth=0 should not be smaller than 2'),
({'max_depth': 1}, 'max_depth=1 should not be smaller than 2'),
({'min_samples_leaf': 0}, 'min_samples_leaf=0 should not be smaller'),
({'l2_regularization': -1}, 'l2_regularization=-1 must be positive'),
({'max_bins': 1}, 'max_bins=1 should be no smaller than 2 and no larger'),
({'max_bins': 257}, 'max_bins=257 should be no smaller than 2 and no'),
({'n_iter_no_change': -1}, 'n_iter_no_change=-1 must be positive'),
({'validation_fraction': -1}, 'validation_fraction=-1 must be strictly'),
({'validation_fraction': 0}, 'validation_fraction=0 must be strictly'),
({'tol': -1}, 'tol=-1 must not be smaller than 0')]
)
def test_init_parameters_validation(GradientBoosting, X, y, params, err_msg):
with pytest.raises(ValueError, match=err_msg):
GradientBoosting(**params).fit(X, y)
def test_invalid_classification_loss():
binary_clf = HistGradientBoostingClassifier(loss="binary_crossentropy")
err_msg = ("loss='binary_crossentropy' is not defined for multiclass "
"classification with n_classes=3, use "
"loss='categorical_crossentropy' instead")
with pytest.raises(ValueError, match=err_msg):
binary_clf.fit(np.zeros(shape=(3, 2)), np.arange(3))
@pytest.mark.parametrize(
'scoring, validation_fraction, n_iter_no_change, tol', [
('neg_mean_squared_error', .1, 5, 1e-7), # use scorer
('neg_mean_squared_error', None, 5, 1e-1), # use scorer on train data
(None, .1, 5, 1e-7), # same with default scorer
(None, None, 5, 1e-1),
('loss', .1, 5, 1e-7), # use loss
('loss', None, 5, 1e-1), # use loss on training data
(None, None, None, None), # no early stopping
])
def test_early_stopping_regression(scoring, validation_fraction,
n_iter_no_change, tol):
max_iter = 200
X, y = make_regression(random_state=0)
gb = HistGradientBoostingRegressor(
verbose=1, # just for coverage
min_samples_leaf=5, # easier to overfit fast
scoring=scoring,
tol=tol,
validation_fraction=validation_fraction,
max_iter=max_iter,
n_iter_no_change=n_iter_no_change,
random_state=0
)
gb.fit(X, y)
if n_iter_no_change is not None:
assert n_iter_no_change <= gb.n_iter_ < max_iter
else:
assert gb.n_iter_ == max_iter
@pytest.mark.parametrize('data', (
make_classification(random_state=0),
make_classification(n_classes=3, n_clusters_per_class=1, random_state=0)
))
@pytest.mark.parametrize(
'scoring, validation_fraction, n_iter_no_change, tol', [
('accuracy', .1, 5, 1e-7), # use scorer
('accuracy', None, 5, 1e-1), # use scorer on training data
        (None, .1, 5, 1e-7),  # same with default scorer
(None, None, 5, 1e-1),
('loss', .1, 5, 1e-7), # use loss
('loss', None, 5, 1e-1), # use loss on training data
(None, None, None, None), # no early stopping
])
def test_early_stopping_classification(data, scoring, validation_fraction,
n_iter_no_change, tol):
max_iter = 50
X, y = data
gb = HistGradientBoostingClassifier(
verbose=1, # just for coverage
min_samples_leaf=5, # easier to overfit fast
scoring=scoring,
tol=tol,
validation_fraction=validation_fraction,
max_iter=max_iter,
n_iter_no_change=n_iter_no_change,
random_state=0
)
gb.fit(X, y)
if n_iter_no_change is not None:
assert n_iter_no_change <= gb.n_iter_ < max_iter
else:
assert gb.n_iter_ == max_iter
@pytest.mark.parametrize(
'scores, n_iter_no_change, tol, stopping',
[
([], 1, 0.001, False), # not enough iterations
([1, 1, 1], 5, 0.001, False), # not enough iterations
([1, 1, 1, 1, 1], 5, 0.001, False), # not enough iterations
([1, 2, 3, 4, 5, 6], 5, 0.001, False), # significant improvement
([1, 2, 3, 4, 5, 6], 5, 0., False), # significant improvement
([1, 2, 3, 4, 5, 6], 5, 0.999, False), # significant improvement
([1, 2, 3, 4, 5, 6], 5, 5 - 1e-5, False), # significant improvement
([1] * 6, 5, 0., True), # no significant improvement
([1] * 6, 5, 0.001, True), # no significant improvement
([1] * 6, 5, 5, True), # no significant improvement
]
)
def test_should_stop(scores, n_iter_no_change, tol, stopping):
gbdt = HistGradientBoostingClassifier(
n_iter_no_change=n_iter_no_change, tol=tol
)
assert gbdt._should_stop(scores) == stopping
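For context on what the early-stopping tests above assert, here is a standalone sketch (parameter values hypothetical, assuming a scikit-learn version where the histogram-based estimators still require the experimental opt-in import):

from sklearn.datasets import make_regression
from sklearn.experimental import enable_hist_gradient_boosting  # noqa
from sklearn.ensemble import HistGradientBoostingRegressor

X, y = make_regression(random_state=0)
gb = HistGradientBoostingRegressor(
    max_iter=200,
    n_iter_no_change=5,        # stop once the last 5 scores show no tol-sized gain
    validation_fraction=0.1,   # score on a held-out split, as in the tests above
    tol=1e-7,
    random_state=0,
)
gb.fit(X, y)
print(gb.n_iter_)              # expected: 5 <= n_iter_ < 200 when stopping triggers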
| 40.013514
| 79
| 0.647248
|
import numpy as np
import pytest
from sklearn.datasets import make_classification, make_regression
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.ensemble import HistGradientBoostingClassifier
X_classification, y_classification = make_classification(random_state=0)
X_regression, y_regression = make_regression(random_state=0)
@pytest.mark.parametrize('GradientBoosting, X, y', [
(HistGradientBoostingClassifier, X_classification, y_classification),
(HistGradientBoostingRegressor, X_regression, y_regression)
])
@pytest.mark.parametrize(
'params, err_msg',
[({'loss': 'blah'}, 'Loss blah is not supported for'),
({'learning_rate': 0}, 'learning_rate=0 must be strictly positive'),
({'learning_rate': -1}, 'learning_rate=-1 must be strictly positive'),
({'max_iter': 0}, 'max_iter=0 must not be smaller than 1'),
({'max_leaf_nodes': 0}, 'max_leaf_nodes=0 should not be smaller than 2'),
({'max_leaf_nodes': 1}, 'max_leaf_nodes=1 should not be smaller than 2'),
({'max_depth': 0}, 'max_depth=0 should not be smaller than 2'),
({'max_depth': 1}, 'max_depth=1 should not be smaller than 2'),
({'min_samples_leaf': 0}, 'min_samples_leaf=0 should not be smaller'),
({'l2_regularization': -1}, 'l2_regularization=-1 must be positive'),
({'max_bins': 1}, 'max_bins=1 should be no smaller than 2 and no larger'),
({'max_bins': 257}, 'max_bins=257 should be no smaller than 2 and no'),
({'n_iter_no_change': -1}, 'n_iter_no_change=-1 must be positive'),
({'validation_fraction': -1}, 'validation_fraction=-1 must be strictly'),
({'validation_fraction': 0}, 'validation_fraction=0 must be strictly'),
({'tol': -1}, 'tol=-1 must not be smaller than 0')]
)
def test_init_parameters_validation(GradientBoosting, X, y, params, err_msg):
with pytest.raises(ValueError, match=err_msg):
GradientBoosting(**params).fit(X, y)
def test_invalid_classification_loss():
binary_clf = HistGradientBoostingClassifier(loss="binary_crossentropy")
err_msg = ("loss='binary_crossentropy' is not defined for multiclass "
"classification with n_classes=3, use "
"loss='categorical_crossentropy' instead")
with pytest.raises(ValueError, match=err_msg):
binary_clf.fit(np.zeros(shape=(3, 2)), np.arange(3))
@pytest.mark.parametrize(
'scoring, validation_fraction, n_iter_no_change, tol', [
('neg_mean_squared_error', .1, 5, 1e-7),
('neg_mean_squared_error', None, 5, 1e-1),
(None, .1, 5, 1e-7),
(None, None, 5, 1e-1),
('loss', .1, 5, 1e-7),
('loss', None, 5, 1e-1),
(None, None, None, None),
])
def test_early_stopping_regression(scoring, validation_fraction,
n_iter_no_change, tol):
max_iter = 200
X, y = make_regression(random_state=0)
gb = HistGradientBoostingRegressor(
verbose=1,
min_samples_leaf=5,
scoring=scoring,
tol=tol,
validation_fraction=validation_fraction,
max_iter=max_iter,
n_iter_no_change=n_iter_no_change,
random_state=0
)
gb.fit(X, y)
if n_iter_no_change is not None:
assert n_iter_no_change <= gb.n_iter_ < max_iter
else:
assert gb.n_iter_ == max_iter
@pytest.mark.parametrize('data', (
make_classification(random_state=0),
make_classification(n_classes=3, n_clusters_per_class=1, random_state=0)
))
@pytest.mark.parametrize(
'scoring, validation_fraction, n_iter_no_change, tol', [
('accuracy', .1, 5, 1e-7),
('accuracy', None, 5, 1e-1),
(None, .1, 5, 1e-7),
(None, None, 5, 1e-1),
('loss', .1, 5, 1e-7),
('loss', None, 5, 1e-1),
(None, None, None, None),
])
def test_early_stopping_classification(data, scoring, validation_fraction,
n_iter_no_change, tol):
max_iter = 50
X, y = data
gb = HistGradientBoostingClassifier(
verbose=1,
min_samples_leaf=5,
scoring=scoring,
tol=tol,
validation_fraction=validation_fraction,
max_iter=max_iter,
n_iter_no_change=n_iter_no_change,
random_state=0
)
gb.fit(X, y)
if n_iter_no_change is not None:
assert n_iter_no_change <= gb.n_iter_ < max_iter
else:
assert gb.n_iter_ == max_iter
@pytest.mark.parametrize(
'scores, n_iter_no_change, tol, stopping',
[
([], 1, 0.001, False),
([1, 1, 1], 5, 0.001, False),
([1, 1, 1, 1, 1], 5, 0.001, False),
([1, 2, 3, 4, 5, 6], 5, 0.001, False),
([1, 2, 3, 4, 5, 6], 5, 0., False),
([1, 2, 3, 4, 5, 6], 5, 0.999, False),
([1, 2, 3, 4, 5, 6], 5, 5 - 1e-5, False),
([1] * 6, 5, 0., True),
([1] * 6, 5, 0.001, True),
([1] * 6, 5, 5, True),
]
)
def test_should_stop(scores, n_iter_no_change, tol, stopping):
gbdt = HistGradientBoostingClassifier(
n_iter_no_change=n_iter_no_change, tol=tol
)
assert gbdt._should_stop(scores) == stopping
| true
| true
|
790598542ab2923ee70838942a0a01cb700db6ad
| 1,688
|
py
|
Python
|
env/lib/python3.8/site-packages/flask_cli/ext.py
|
thenack/sneaky
|
40bd9ae5004f49d7e3614ad9ec025a8f6bf2bced
|
[
"MIT"
] | null | null | null |
env/lib/python3.8/site-packages/flask_cli/ext.py
|
thenack/sneaky
|
40bd9ae5004f49d7e3614ad9ec025a8f6bf2bced
|
[
"MIT"
] | null | null | null |
env/lib/python3.8/site-packages/flask_cli/ext.py
|
thenack/sneaky
|
40bd9ae5004f49d7e3614ad9ec025a8f6bf2bced
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# This file is part of Flask-CLI
# Copyright (C) 2015 CERN.
#
# Flask-CLI is free software; you can redistribute it and/or
# modify it under the terms of the Revised BSD License; see LICENSE
# file for more details.
"""Flask extension to enable CLI."""
import types
from . import AppGroup
class FlaskCLI(object):
"""Flask-CLI extension.
Initialization of the extension:
>>> from flask import Flask
>>> from flask_cli import FlaskCLI
>>> app = Flask('myapp')
>>> FlaskCLI(app)
or alternatively using the factory pattern:
>>> app = Flask('myapp')
>>> ext = FlaskCLI()
>>> ext.init_app(app)
"""
def __init__(self, app=None):
"""Initialize the Flask-CLI."""
if app is not None:
self.init_app(app)
def init_app(self, app):
"""Initialize a Flask application."""
# Follow the Flask guidelines on usage of app.extensions
if not hasattr(app, 'extensions'):
app.extensions = {}
if 'flask-cli' in app.extensions:
raise RuntimeError("Flask-CLI application already initialized")
app.extensions['flask-cli'] = self
self.setup_pre10(app)
def setup_pre10(self, app):
"""Setup Flask pre-1.0 application object."""
if hasattr(app, 'cli'):
return
from flask_cli.app import make_shell_context, shell_context_processor
app.cli = AppGroup(app.name)
app.shell_context_processors = []
app.make_shell_context = types.MethodType(make_shell_context, app)
app.shell_context_processor = types.MethodType(
shell_context_processor, app)
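A short usage sketch of the extension above inside an application factory (names hypothetical; assumes Flask and flask_cli are installed):

from flask import Flask
from flask_cli import FlaskCLI

def create_app():
    app = Flask('myapp')
    FlaskCLI(app)              # attaches app.cli on pre-1.0 Flask via setup_pre10()

    @app.cli.command()
    def hello():
        """A custom CLI command registered on the app's AppGroup."""
        print('hello')

    return app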
| 28.133333
| 77
| 0.635664
|
import types
from . import AppGroup
class FlaskCLI(object):
def __init__(self, app=None):
if app is not None:
self.init_app(app)
def init_app(self, app):
if not hasattr(app, 'extensions'):
app.extensions = {}
if 'flask-cli' in app.extensions:
raise RuntimeError("Flask-CLI application already initialized")
app.extensions['flask-cli'] = self
self.setup_pre10(app)
def setup_pre10(self, app):
if hasattr(app, 'cli'):
return
from flask_cli.app import make_shell_context, shell_context_processor
app.cli = AppGroup(app.name)
app.shell_context_processors = []
app.make_shell_context = types.MethodType(make_shell_context, app)
app.shell_context_processor = types.MethodType(
shell_context_processor, app)
| true
| true
|
7905992b8e0da560c43dd071c06f5397e8beae9c
| 157
|
py
|
Python
|
Curso de Cisco/Actividades/py/packages/extra/good/best/tau.py
|
tomasfriz/Curso-de-Cisco
|
a50ee5fa96bd86d468403e29ccdc3565a181af60
|
[
"MIT"
] | null | null | null |
Curso de Cisco/Actividades/py/packages/extra/good/best/tau.py
|
tomasfriz/Curso-de-Cisco
|
a50ee5fa96bd86d468403e29ccdc3565a181af60
|
[
"MIT"
] | null | null | null |
Curso de Cisco/Actividades/py/packages/extra/good/best/tau.py
|
tomasfriz/Curso-de-Cisco
|
a50ee5fa96bd86d468403e29ccdc3565a181af60
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
""" example module: extra.good.best.tau """
def FunT():
return "Tau"
if __name__ == "__main__":
print("I prefer to be a module")
| 17.444444
| 43
| 0.649682
|
def FunT():
return "Tau"
if __name__ == "__main__":
print("I prefer to be a module")
| true
| true
|
790599540b04918bead3ef7fe0b70e4fe69c4eea
| 833
|
py
|
Python
|
increment_build.py
|
EpicSalvation/FlutterStep
|
02fa2a2b90a9cbd7ced909f35d102162b61878dd
|
[
"MIT"
] | null | null | null |
increment_build.py
|
EpicSalvation/FlutterStep
|
02fa2a2b90a9cbd7ced909f35d102162b61878dd
|
[
"MIT"
] | null | null | null |
increment_build.py
|
EpicSalvation/FlutterStep
|
02fa2a2b90a9cbd7ced909f35d102162b61878dd
|
[
"MIT"
] | null | null | null |
# Simple script for updating the build number in pubspec.yaml
import re
# Regex pattern used to identify the correct line in pubspec.yaml
version_line_pattern = r"version:\s+\d+\.\d+\.\d+\+\d+"
# Open pubspec.yaml and read lines into memory
with open("pubspec.yaml", "r") as current_pubspec:
contents = current_pubspec.readlines()
# Reopen pubspec.yaml for writing and update
with open("pubspec.yaml", "w") as updated_pubspec:
# Find and bump build number
counter = 0
for line in contents:
if re.match(pattern=version_line_pattern, string=line):
line_array = line.split("+")
contents[counter] = line_array[0] + "+" + str(int(line_array[1]) + 1) + "\n"
break
counter += 1
# Write updated contents back to disk
updated_pubspec.writelines(contents)
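A worked example of the bump arithmetic on a single matching line (sample value hypothetical); note that int() tolerates the trailing newline kept by readlines():

line = "version: 1.2.3+41\n"
line_array = line.split("+")                 # ["version: 1.2.3", "41\n"]
bumped = line_array[0] + "+" + str(int(line_array[1]) + 1) + "\n"
print(repr(bumped))                          # 'version: 1.2.3+42\n'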
| 33.32
| 88
| 0.662665
|
import re
version_line_pattern = r"version:\s+\d+\.\d+\.\d+\+\d+"
with open("pubspec.yaml", "r") as current_pubspec:
contents = current_pubspec.readlines()
with open("pubspec.yaml", "w") as updated_pubspec:
counter = 0
for line in contents:
if re.match(pattern=version_line_pattern, string=line):
line_array = line.split("+")
contents[counter] = line_array[0] + "+" + str(int(line_array[1]) + 1) + "\n"
break
counter += 1
updated_pubspec.writelines(contents)
| true
| true
|
79059b816d2f7a07a401a5eb9830e2585100cdd9
| 843
|
py
|
Python
|
projeto/urls.py
|
godah/s2b-python
|
e07aed1bec015ee13e33367bde827e6d7b66dd7d
|
[
"MIT"
] | null | null | null |
projeto/urls.py
|
godah/s2b-python
|
e07aed1bec015ee13e33367bde827e6d7b66dd7d
|
[
"MIT"
] | null | null | null |
projeto/urls.py
|
godah/s2b-python
|
e07aed1bec015ee13e33367bde827e6d7b66dd7d
|
[
"MIT"
] | null | null | null |
"""projeto URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from blog import views
"Nome do App blog + views"
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', views.index),
]
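Following the docstring's third recipe, routing the blog app through its own URLconf would look like this (sketch only; assumes a blog/urls.py module, which this project may not ship):

from django.conf.urls import url, include
from django.contrib import admin

urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^blog/', include('blog.urls')),   # hypothetical blog/urls.py
]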
| 33.72
| 79
| 0.698695
|
from django.conf.urls import url
from django.contrib import admin
from blog import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', views.index),
]
| true
| true
|
79059bc7c9641a5ddc0475c06968ca053e13e8a2
| 30,329
|
py
|
Python
|
Lib/test/test_capi.py
|
kmoza/cpython
|
fd93ae0c03a087e73dfcfb9995678223f6f16c1c
|
[
"CNRI-Python-GPL-Compatible"
] | 1
|
2020-10-09T12:23:54.000Z
|
2020-10-09T12:23:54.000Z
|
Lib/test/test_capi.py
|
kmoza/cpython
|
fd93ae0c03a087e73dfcfb9995678223f6f16c1c
|
[
"CNRI-Python-GPL-Compatible"
] | null | null | null |
Lib/test/test_capi.py
|
kmoza/cpython
|
fd93ae0c03a087e73dfcfb9995678223f6f16c1c
|
[
"CNRI-Python-GPL-Compatible"
] | 1
|
2020-10-09T12:23:55.000Z
|
2020-10-09T12:23:55.000Z
|
# Run the _testcapi module tests (tests for the Python/C API): by defn,
# these are all functions _testcapi exports whose name begins with 'test_'.
from collections import OrderedDict
import os
import pickle
import random
import re
import subprocess
import sys
import textwrap
import threading
import time
import unittest
import weakref
from test import support
from test.support import MISSING_C_DOCSTRINGS
from test.support.script_helper import assert_python_failure, assert_python_ok
try:
import _posixsubprocess
except ImportError:
_posixsubprocess = None
# Skip this test if the _testcapi module isn't available.
_testcapi = support.import_module('_testcapi')
# Were we compiled --with-pydebug or with #define Py_DEBUG?
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
def testfunction(self):
"""some doc"""
return self
class InstanceMethod:
id = _testcapi.instancemethod(id)
testfunction = _testcapi.instancemethod(testfunction)
class CAPITest(unittest.TestCase):
def test_instancemethod(self):
inst = InstanceMethod()
self.assertEqual(id(inst), inst.id())
self.assertTrue(inst.testfunction() is inst)
self.assertEqual(inst.testfunction.__doc__, testfunction.__doc__)
self.assertEqual(InstanceMethod.testfunction.__doc__, testfunction.__doc__)
InstanceMethod.testfunction.attribute = "test"
self.assertEqual(testfunction.attribute, "test")
self.assertRaises(AttributeError, setattr, inst.testfunction, "attribute", "test")
def test_no_FatalError_infinite_loop(self):
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import _testcapi;'
'_testcapi.crash_no_current_thread()'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
self.assertEqual(out, b'')
# This used to cause an infinite loop.
self.assertTrue(err.rstrip().startswith(
b'Fatal Python error:'
b' PyThreadState_Get: no current thread'))
def test_memoryview_from_NULL_pointer(self):
self.assertRaises(ValueError, _testcapi.make_memoryview_from_NULL_pointer)
def test_exc_info(self):
raised_exception = ValueError("5")
new_exc = TypeError("TEST")
try:
raise raised_exception
except ValueError as e:
tb = e.__traceback__
orig_sys_exc_info = sys.exc_info()
orig_exc_info = _testcapi.set_exc_info(new_exc.__class__, new_exc, None)
new_sys_exc_info = sys.exc_info()
new_exc_info = _testcapi.set_exc_info(*orig_exc_info)
reset_sys_exc_info = sys.exc_info()
self.assertEqual(orig_exc_info[1], e)
self.assertSequenceEqual(orig_exc_info, (raised_exception.__class__, raised_exception, tb))
self.assertSequenceEqual(orig_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(reset_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(new_exc_info, (new_exc.__class__, new_exc, None))
self.assertSequenceEqual(new_sys_exc_info, new_exc_info)
else:
self.assertTrue(False)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_seq_bytes_to_charp_array(self):
# Issue #15732: crash in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return 1
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20)
# Issue #15736: overflow in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return sys.maxsize
def __getitem__(self, i):
return b'x'
self.assertRaises(MemoryError, _posixsubprocess.fork_exec,
1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_subprocess_fork_exec(self):
class Z(object):
def __len__(self):
return 1
# Issue #15738: crash in subprocess_fork_exec()
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
Z(),[b'1'],3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20)
@unittest.skipIf(MISSING_C_DOCSTRINGS,
"Signature information for builtins requires docstrings")
def test_docstring_signature_parsing(self):
self.assertEqual(_testcapi.no_docstring.__doc__, None)
self.assertEqual(_testcapi.no_docstring.__text_signature__, None)
self.assertEqual(_testcapi.docstring_empty.__doc__, None)
self.assertEqual(_testcapi.docstring_empty.__text_signature__, None)
self.assertEqual(_testcapi.docstring_no_signature.__doc__,
"This docstring has no signature.")
self.assertEqual(_testcapi.docstring_no_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__doc__,
"docstring_with_invalid_signature($module, /, boo)\n"
"\n"
"This docstring has an invalid signature."
)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.__doc__,
"docstring_with_invalid_signature2($module, /, boo)\n"
"\n"
"--\n"
"\n"
"This docstring also has an invalid signature."
)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_signature.__doc__,
"This docstring has a valid signature.")
self.assertEqual(_testcapi.docstring_with_signature.__text_signature__, "($module, /, sig)")
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__doc__, None)
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__text_signature__,
"($module, /, sig)")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__doc__,
"\nThis docstring has a valid signature and some extra newlines.")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__text_signature__,
"($module, /, parameter)")
def test_c_type_with_matrix_multiplication(self):
M = _testcapi.matmulType
m1 = M()
m2 = M()
self.assertEqual(m1 @ m2, ("matmul", m1, m2))
self.assertEqual(m1 @ 42, ("matmul", m1, 42))
self.assertEqual(42 @ m1, ("matmul", 42, m1))
o = m1
o @= m2
self.assertEqual(o, ("imatmul", m1, m2))
o = m1
o @= 42
self.assertEqual(o, ("imatmul", m1, 42))
o = 42
o @= m1
self.assertEqual(o, ("matmul", 42, m1))
def test_c_type_with_ipow(self):
# When the __ipow__ method of a type was implemented in C, using the
# modulo param would cause segfaults.
o = _testcapi.ipowType()
self.assertEqual(o.__ipow__(1), (1, None))
self.assertEqual(o.__ipow__(2, 2), (2, 2))
def test_return_null_without_error(self):
# Issue #23571: A function must not return NULL without setting an
# error
if Py_DEBUG:
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.return_null_without_error()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err.replace(b'\r', b''),
br'Fatal Python error: a function returned NULL '
br'without setting an error\n'
br'Python runtime state: initialized\n'
br'SystemError: <built-in function '
br'return_null_without_error> returned NULL '
br'without setting an error\n'
br'\n'
br'Current thread.*:\n'
br' File .*", line 6 in <module>')
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_null_without_error()
self.assertRegex(str(cm.exception),
'return_null_without_error.* '
'returned NULL without setting an error')
def test_return_result_with_error(self):
# Issue #23571: A function must not return a result with an error set
if Py_DEBUG:
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.return_result_with_error()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err.replace(b'\r', b''),
br'Fatal Python error: a function returned a '
br'result with an error set\n'
br'Python runtime state: initialized\n'
br'ValueError\n'
br'\n'
br'The above exception was the direct cause '
br'of the following exception:\n'
br'\n'
br'SystemError: <built-in '
br'function return_result_with_error> '
br'returned a result with an error set\n'
br'\n'
br'Current thread.*:\n'
br' File .*, line 6 in <module>')
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_result_with_error()
self.assertRegex(str(cm.exception),
'return_result_with_error.* '
'returned a result with an error set')
def test_buildvalue_N(self):
_testcapi.test_buildvalue_N()
def test_set_nomemory(self):
code = """if 1:
import _testcapi
class C(): pass
# The first loop tests both functions and that remove_mem_hooks()
# can be called twice in a row. The second loop checks a call to
# set_nomemory() after a call to remove_mem_hooks(). The third
# loop checks the start and stop arguments of set_nomemory().
for outer_cnt in range(1, 4):
start = 10 * outer_cnt
for j in range(100):
if j == 0:
if outer_cnt != 3:
_testcapi.set_nomemory(start)
else:
_testcapi.set_nomemory(start, start + 1)
try:
C()
except MemoryError as e:
if outer_cnt != 3:
_testcapi.remove_mem_hooks()
print('MemoryError', outer_cnt, j)
_testcapi.remove_mem_hooks()
break
"""
rc, out, err = assert_python_ok('-c', code)
self.assertIn(b'MemoryError 1 10', out)
self.assertIn(b'MemoryError 2 20', out)
self.assertIn(b'MemoryError 3 30', out)
def test_mapping_keys_values_items(self):
class Mapping1(dict):
def keys(self):
return list(super().keys())
def values(self):
return list(super().values())
def items(self):
return list(super().items())
class Mapping2(dict):
def keys(self):
return tuple(super().keys())
def values(self):
return tuple(super().values())
def items(self):
return tuple(super().items())
dict_obj = {'foo': 1, 'bar': 2, 'spam': 3}
for mapping in [{}, OrderedDict(), Mapping1(), Mapping2(),
dict_obj, OrderedDict(dict_obj),
Mapping1(dict_obj), Mapping2(dict_obj)]:
self.assertListEqual(_testcapi.get_mapping_keys(mapping),
list(mapping.keys()))
self.assertListEqual(_testcapi.get_mapping_values(mapping),
list(mapping.values()))
self.assertListEqual(_testcapi.get_mapping_items(mapping),
list(mapping.items()))
def test_mapping_keys_values_items_bad_arg(self):
self.assertRaises(AttributeError, _testcapi.get_mapping_keys, None)
self.assertRaises(AttributeError, _testcapi.get_mapping_values, None)
self.assertRaises(AttributeError, _testcapi.get_mapping_items, None)
class BadMapping:
def keys(self):
return None
def values(self):
return None
def items(self):
return None
bad_mapping = BadMapping()
self.assertRaises(TypeError, _testcapi.get_mapping_keys, bad_mapping)
self.assertRaises(TypeError, _testcapi.get_mapping_values, bad_mapping)
self.assertRaises(TypeError, _testcapi.get_mapping_items, bad_mapping)
@unittest.skipUnless(hasattr(_testcapi, 'negative_refcount'),
'need _testcapi.negative_refcount')
def test_negative_refcount(self):
# bpo-35059: Check that Py_DECREF() reports the correct filename
# when calling _Py_NegativeRefcount() to abort Python.
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.negative_refcount()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err,
br'_testcapimodule\.c:[0-9]+: '
br'_Py_NegativeRefcount: Assertion failed: '
br'object has negative ref count')
def test_trashcan_subclass(self):
# bpo-35983: Check that the trashcan mechanism for "list" is NOT
# activated when its tp_dealloc is being called by a subclass
from _testcapi import MyList
L = None
for i in range(1000):
L = MyList((L,))
def test_trashcan_python_class1(self):
self.do_test_trashcan_python_class(list)
def test_trashcan_python_class2(self):
from _testcapi import MyList
self.do_test_trashcan_python_class(MyList)
def do_test_trashcan_python_class(self, base):
# Check that the trashcan mechanism works properly for a Python
# subclass of a class using the trashcan (this specific test assumes
# that the base class "base" behaves like list)
class PyList(base):
# Count the number of PyList instances to verify that there is
# no memory leak
num = 0
def __init__(self, *args):
__class__.num += 1
super().__init__(*args)
def __del__(self):
__class__.num -= 1
for parity in (0, 1):
L = None
# We need in the order of 2**20 iterations here such that a
# typical 8MB stack would overflow without the trashcan.
for i in range(2**20):
L = PyList((L,))
L.attr = i
if parity:
# Add one additional nesting layer
L = (L,)
self.assertGreater(PyList.num, 0)
del L
self.assertEqual(PyList.num, 0)
def test_subclass_of_heap_gc_ctype_with_tpdealloc_decrefs_once(self):
class HeapGcCTypeSubclass(_testcapi.HeapGcCType):
def __init__(self):
self.value2 = 20
super().__init__()
subclass_instance = HeapGcCTypeSubclass()
type_refcnt = sys.getrefcount(HeapGcCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# Test that the type reference count is only decremented once
del subclass_instance
self.assertEqual(type_refcnt - 1, sys.getrefcount(HeapGcCTypeSubclass))
def test_subclass_of_heap_gc_ctype_with_del_modifying_dunder_class_only_decrefs_once(self):
class A(_testcapi.HeapGcCType):
def __init__(self):
self.value2 = 20
super().__init__()
class B(A):
def __init__(self):
super().__init__()
def __del__(self):
self.__class__ = A
A.refcnt_in_del = sys.getrefcount(A)
B.refcnt_in_del = sys.getrefcount(B)
subclass_instance = B()
type_refcnt = sys.getrefcount(B)
new_type_refcnt = sys.getrefcount(A)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
del subclass_instance
# Test that setting __class__ modified the reference counts of the types
self.assertEqual(type_refcnt - 1, B.refcnt_in_del)
self.assertEqual(new_type_refcnt + 1, A.refcnt_in_del)
# Test that the original type already has decreased its refcnt
self.assertEqual(type_refcnt - 1, sys.getrefcount(B))
        # Test that subtype_dealloc decrefs the newly assigned __class__ only once
self.assertEqual(new_type_refcnt, sys.getrefcount(A))
def test_heaptype_with_dict(self):
inst = _testcapi.HeapCTypeWithDict()
inst.foo = 42
self.assertEqual(inst.foo, 42)
self.assertEqual(inst.dictobj, inst.__dict__)
self.assertEqual(inst.dictobj, {"foo": 42})
inst = _testcapi.HeapCTypeWithDict()
self.assertEqual({}, inst.__dict__)
def test_heaptype_with_negative_dict(self):
inst = _testcapi.HeapCTypeWithNegativeDict()
inst.foo = 42
self.assertEqual(inst.foo, 42)
self.assertEqual(inst.dictobj, inst.__dict__)
self.assertEqual(inst.dictobj, {"foo": 42})
inst = _testcapi.HeapCTypeWithNegativeDict()
self.assertEqual({}, inst.__dict__)
def test_heaptype_with_weakref(self):
inst = _testcapi.HeapCTypeWithWeakref()
ref = weakref.ref(inst)
self.assertEqual(ref(), inst)
self.assertEqual(inst.weakreflist, ref)
def test_c_subclass_of_heap_ctype_with_tpdealloc_decrefs_once(self):
subclass_instance = _testcapi.HeapCTypeSubclass()
type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# Test that the type reference count is only decremented once
del subclass_instance
self.assertEqual(type_refcnt - 1, sys.getrefcount(_testcapi.HeapCTypeSubclass))
def test_c_subclass_of_heap_ctype_with_del_modifying_dunder_class_only_decrefs_once(self):
subclass_instance = _testcapi.HeapCTypeSubclassWithFinalizer()
type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclassWithFinalizer)
new_type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# The tp_finalize slot will set __class__ to HeapCTypeSubclass
del subclass_instance
# Test that setting __class__ modified the reference counts of the types
self.assertEqual(type_refcnt - 1, _testcapi.HeapCTypeSubclassWithFinalizer.refcnt_in_del)
self.assertEqual(new_type_refcnt + 1, _testcapi.HeapCTypeSubclass.refcnt_in_del)
# Test that the original type already has decreased its refcnt
self.assertEqual(type_refcnt - 1, sys.getrefcount(_testcapi.HeapCTypeSubclassWithFinalizer))
        # Test that subtype_dealloc decrefs the newly assigned __class__ only once
self.assertEqual(new_type_refcnt, sys.getrefcount(_testcapi.HeapCTypeSubclass))
class TestPendingCalls(unittest.TestCase):
def pendingcalls_submit(self, l, n):
def callback():
#this function can be interrupted by thread switching so let's
#use an atomic operation
l.append(None)
for i in range(n):
time.sleep(random.random()*0.02) #0.01 secs on average
#try submitting callback until successful.
#rely on regular interrupt to flush queue if we are
#unsuccessful.
while True:
if _testcapi._pending_threadfunc(callback):
                    break
def pendingcalls_wait(self, l, n, context = None):
        # now, stick around until len(l) has grown to n
        count = 0
while len(l) != n:
#this busy loop is where we expect to be interrupted to
#run our callbacks. Note that callbacks are only run on the
#main thread
if False and support.verbose:
print("(%i)"%(len(l),),)
for i in range(1000):
a = i*i
if context and not context.event.is_set():
continue
count += 1
self.assertTrue(count < 10000,
"timeout waiting for %i callbacks, got %i"%(n, len(l)))
if False and support.verbose:
print("(%i)"%(len(l),))
def test_pendingcalls_threaded(self):
#do every callback on a separate thread
n = 32 #total callbacks
threads = []
class foo(object):pass
context = foo()
context.l = []
context.n = 2 #submits per thread
context.nThreads = n // context.n
context.nFinished = 0
context.lock = threading.Lock()
context.event = threading.Event()
threads = [threading.Thread(target=self.pendingcalls_thread,
args=(context,))
for i in range(context.nThreads)]
with support.start_threads(threads):
self.pendingcalls_wait(context.l, n, context)
def pendingcalls_thread(self, context):
try:
self.pendingcalls_submit(context.l, context.n)
finally:
with context.lock:
context.nFinished += 1
nFinished = context.nFinished
if False and support.verbose:
print("finished threads: ", nFinished)
if nFinished == context.nThreads:
context.event.set()
def test_pendingcalls_non_threaded(self):
#again, just using the main thread, likely they will all be dispatched at
#once. It is ok to ask for too many, because we loop until we find a slot.
#the loop can be interrupted to dispatch.
#there are only 32 dispatch slots, so we go for twice that!
l = []
n = 64
self.pendingcalls_submit(l, n)
self.pendingcalls_wait(l, n)
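# (Context for the two tests above: _testcapi._pending_threadfunc schedules a
#  callback through CPython's Py_AddPendingCall machinery. Pending calls are
#  only ever executed on the main thread, which is why pendingcalls_wait
#  busy-loops on the main thread waiting for the callbacks to land in l.)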
class SubinterpreterTest(unittest.TestCase):
def test_subinterps(self):
import builtins
r, w = os.pipe()
code = """if 1:
import sys, builtins, pickle
with open({:d}, "wb") as f:
pickle.dump(id(sys.modules), f)
pickle.dump(id(builtins), f)
""".format(w)
with open(r, "rb") as f:
ret = support.run_in_subinterp(code)
self.assertEqual(ret, 0)
self.assertNotEqual(pickle.load(f), id(sys.modules))
self.assertNotEqual(pickle.load(f), id(builtins))
def test_mutate_exception(self):
"""
Exceptions saved in global module state get shared between
individual module instances. This test checks whether or not
a change in one interpreter's module gets reflected into the
other ones.
"""
import binascii
support.run_in_subinterp("import binascii; binascii.Error.foobar = 'foobar'")
self.assertFalse(hasattr(binascii.Error, "foobar"))
class TestThreadState(unittest.TestCase):
@support.reap_threads
def test_thread_state(self):
# some extra thread-state tests driven via _testcapi
def target():
idents = []
def callback():
idents.append(threading.get_ident())
_testcapi._test_thread_state(callback)
a = b = callback
time.sleep(1)
# Check our main thread is in the list exactly 3 times.
self.assertEqual(idents.count(threading.get_ident()), 3,
"Couldn't find main thread correctly in the list")
target()
t = threading.Thread(target=target)
t.start()
t.join()
class Test_testcapi(unittest.TestCase):
locals().update((name, getattr(_testcapi, name))
for name in dir(_testcapi)
if name.startswith('test_') and not name.endswith('_code'))
class PyMemDebugTests(unittest.TestCase):
PYTHONMALLOC = 'debug'
# '0x04c06e0' or '04C06E0'
PTR_REGEX = r'(?:0x)?[0-9a-fA-F]+'
def check(self, code):
with support.SuppressCrashReport():
out = assert_python_failure('-c', code,
PYTHONMALLOC=self.PYTHONMALLOC)
stderr = out.err
return stderr.decode('ascii', 'replace')
def test_buffer_overflow(self):
out = self.check('import _testcapi; _testcapi.pymem_buffer_overflow()')
regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
r" 16 bytes originally requested\n"
r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
r" The [0-9] pad bytes at tail={ptr} are not all FORBIDDENBYTE \(0x[0-9a-f]{{2}}\):\n"
r" at tail\+0: 0x78 \*\*\* OUCH\n"
r" at tail\+1: 0xfd\n"
r" at tail\+2: 0xfd\n"
r" .*\n"
r"( The block was made by call #[0-9]+ to debug malloc/realloc.\n)?"
r" Data at p: cd cd cd .*\n"
r"\n"
r"Enable tracemalloc to get the memory block allocation traceback\n"
r"\n"
r"Fatal Python error: bad trailing pad byte")
regex = regex.format(ptr=self.PTR_REGEX)
regex = re.compile(regex, flags=re.DOTALL)
self.assertRegex(out, regex)
def test_api_misuse(self):
out = self.check('import _testcapi; _testcapi.pymem_api_misuse()')
regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
r" 16 bytes originally requested\n"
r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
r" The [0-9] pad bytes at tail={ptr} are FORBIDDENBYTE, as expected.\n"
r"( The block was made by call #[0-9]+ to debug malloc/realloc.\n)?"
r" Data at p: cd cd cd .*\n"
r"\n"
r"Enable tracemalloc to get the memory block allocation traceback\n"
r"\n"
r"Fatal Python error: bad ID: Allocated using API 'm', verified using API 'r'\n")
regex = regex.format(ptr=self.PTR_REGEX)
self.assertRegex(out, regex)
def check_malloc_without_gil(self, code):
out = self.check(code)
expected = ('Fatal Python error: Python memory allocator called '
'without holding the GIL')
self.assertIn(expected, out)
def test_pymem_malloc_without_gil(self):
# Debug hooks must raise an error if PyMem_Malloc() is called
# without holding the GIL
code = 'import _testcapi; _testcapi.pymem_malloc_without_gil()'
self.check_malloc_without_gil(code)
def test_pyobject_malloc_without_gil(self):
# Debug hooks must raise an error if PyObject_Malloc() is called
# without holding the GIL
code = 'import _testcapi; _testcapi.pyobject_malloc_without_gil()'
self.check_malloc_without_gil(code)
def check_pyobject_is_freed(self, func_name):
code = textwrap.dedent(f'''
import gc, os, sys, _testcapi
# Disable the GC to avoid crash on GC collection
gc.disable()
try:
_testcapi.{func_name}()
# Exit immediately to avoid a crash while deallocating
# the invalid object
os._exit(0)
except _testcapi.error:
os._exit(1)
''')
assert_python_ok('-c', code, PYTHONMALLOC=self.PYTHONMALLOC)
def test_pyobject_uninitialized_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_uninitialized_is_freed')
def test_pyobject_forbidden_bytes_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_forbidden_bytes_is_freed')
def test_pyobject_freed_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_freed_is_freed')
class PyMemMallocDebugTests(PyMemDebugTests):
PYTHONMALLOC = 'malloc_debug'
@unittest.skipUnless(support.with_pymalloc(), 'need pymalloc')
class PyMemPymallocDebugTests(PyMemDebugTests):
PYTHONMALLOC = 'pymalloc_debug'
@unittest.skipUnless(Py_DEBUG, 'need Py_DEBUG')
class PyMemDefaultTests(PyMemDebugTests):
# test default allocator of Python compiled in debug mode
PYTHONMALLOC = ''
if __name__ == "__main__":
unittest.main()
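An illustrative aside on the trashcan tests above: without the trashcan, deallocating a deeply nested container chain would recurse once per nesting level in C and could overflow the stack; with it, teardown stays bounded. A tiny sketch of the shape of data those tests build (safe depth here, purely illustrative):

chain = None
for i in range(1000):
    chain = [chain]      # each level wraps the previous one
del chain                # CPython's trashcan keeps this from recursing deeply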
| 40.492657
| 106
| 0.601141
|
from collections import OrderedDict
import os
import pickle
import random
import re
import subprocess
import sys
import textwrap
import threading
import time
import unittest
import weakref
from test import support
from test.support import MISSING_C_DOCSTRINGS
from test.support.script_helper import assert_python_failure, assert_python_ok
try:
import _posixsubprocess
except ImportError:
_posixsubprocess = None
_testcapi = support.import_module('_testcapi')
# Were we compiled --with-pydebug or with #define Py_DEBUG?
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
def testfunction(self):
return self
class InstanceMethod:
id = _testcapi.instancemethod(id)
testfunction = _testcapi.instancemethod(testfunction)
class CAPITest(unittest.TestCase):
def test_instancemethod(self):
inst = InstanceMethod()
self.assertEqual(id(inst), inst.id())
self.assertTrue(inst.testfunction() is inst)
self.assertEqual(inst.testfunction.__doc__, testfunction.__doc__)
self.assertEqual(InstanceMethod.testfunction.__doc__, testfunction.__doc__)
InstanceMethod.testfunction.attribute = "test"
self.assertEqual(testfunction.attribute, "test")
self.assertRaises(AttributeError, setattr, inst.testfunction, "attribute", "test")
def test_no_FatalError_infinite_loop(self):
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import _testcapi;'
'_testcapi.crash_no_current_thread()'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
self.assertEqual(out, b'')
# This used to cause an infinite loop.
self.assertTrue(err.rstrip().startswith(
b'Fatal Python error:'
b' PyThreadState_Get: no current thread'))
def test_memoryview_from_NULL_pointer(self):
self.assertRaises(ValueError, _testcapi.make_memoryview_from_NULL_pointer)
def test_exc_info(self):
raised_exception = ValueError("5")
new_exc = TypeError("TEST")
try:
raise raised_exception
except ValueError as e:
tb = e.__traceback__
orig_sys_exc_info = sys.exc_info()
orig_exc_info = _testcapi.set_exc_info(new_exc.__class__, new_exc, None)
new_sys_exc_info = sys.exc_info()
new_exc_info = _testcapi.set_exc_info(*orig_exc_info)
reset_sys_exc_info = sys.exc_info()
self.assertEqual(orig_exc_info[1], e)
self.assertSequenceEqual(orig_exc_info, (raised_exception.__class__, raised_exception, tb))
self.assertSequenceEqual(orig_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(reset_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(new_exc_info, (new_exc.__class__, new_exc, None))
self.assertSequenceEqual(new_sys_exc_info, new_exc_info)
else:
self.assertTrue(False)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_seq_bytes_to_charp_array(self):
# Issue #15732: crash in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return 1
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20)
# Issue #15736: overflow in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return sys.maxsize
def __getitem__(self, i):
return b'x'
self.assertRaises(MemoryError, _posixsubprocess.fork_exec,
1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_subprocess_fork_exec(self):
class Z(object):
def __len__(self):
return 1
# Issue #15738: crash in subprocess_fork_exec()
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
Z(),[b'1'],3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20)
@unittest.skipIf(MISSING_C_DOCSTRINGS,
"Signature information for builtins requires docstrings")
def test_docstring_signature_parsing(self):
self.assertEqual(_testcapi.no_docstring.__doc__, None)
self.assertEqual(_testcapi.no_docstring.__text_signature__, None)
self.assertEqual(_testcapi.docstring_empty.__doc__, None)
self.assertEqual(_testcapi.docstring_empty.__text_signature__, None)
self.assertEqual(_testcapi.docstring_no_signature.__doc__,
"This docstring has no signature.")
self.assertEqual(_testcapi.docstring_no_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__doc__,
"docstring_with_invalid_signature($module, /, boo)\n"
"\n"
"This docstring has an invalid signature."
)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.__doc__,
"docstring_with_invalid_signature2($module, /, boo)\n"
"\n"
"--\n"
"\n"
"This docstring also has an invalid signature."
)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_signature.__doc__,
"This docstring has a valid signature.")
self.assertEqual(_testcapi.docstring_with_signature.__text_signature__, "($module, /, sig)")
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__doc__, None)
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__text_signature__,
"($module, /, sig)")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__doc__,
"\nThis docstring has a valid signature and some extra newlines.")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__text_signature__,
"($module, /, parameter)")
def test_c_type_with_matrix_multiplication(self):
M = _testcapi.matmulType
m1 = M()
m2 = M()
self.assertEqual(m1 @ m2, ("matmul", m1, m2))
self.assertEqual(m1 @ 42, ("matmul", m1, 42))
self.assertEqual(42 @ m1, ("matmul", 42, m1))
o = m1
o @= m2
self.assertEqual(o, ("imatmul", m1, m2))
o = m1
o @= 42
self.assertEqual(o, ("imatmul", m1, 42))
o = 42
o @= m1
self.assertEqual(o, ("matmul", 42, m1))
def test_c_type_with_ipow(self):
# When the __ipow__ method of a type was implemented in C, using the
# modulo param would cause segfaults.
o = _testcapi.ipowType()
self.assertEqual(o.__ipow__(1), (1, None))
self.assertEqual(o.__ipow__(2, 2), (2, 2))
def test_return_null_without_error(self):
# Issue #23571: A function must not return NULL without setting an
# error
if Py_DEBUG:
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.return_null_without_error()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err.replace(b'\r', b''),
br'Fatal Python error: a function returned NULL '
br'without setting an error\n'
br'Python runtime state: initialized\n'
br'SystemError: <built-in function '
br'return_null_without_error> returned NULL '
br'without setting an error\n'
br'\n'
br'Current thread.*:\n'
br' File .*", line 6 in <module>')
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_null_without_error()
self.assertRegex(str(cm.exception),
'return_null_without_error.* '
'returned NULL without setting an error')
def test_return_result_with_error(self):
# Issue #23571: A function must not return a result with an error set
if Py_DEBUG:
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.return_result_with_error()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err.replace(b'\r', b''),
br'Fatal Python error: a function returned a '
br'result with an error set\n'
br'Python runtime state: initialized\n'
br'ValueError\n'
br'\n'
br'The above exception was the direct cause '
br'of the following exception:\n'
br'\n'
br'SystemError: <built-in '
br'function return_result_with_error> '
br'returned a result with an error set\n'
br'\n'
br'Current thread.*:\n'
br' File .*, line 6 in <module>')
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_result_with_error()
self.assertRegex(str(cm.exception),
'return_result_with_error.* '
'returned a result with an error set')
def test_buildvalue_N(self):
_testcapi.test_buildvalue_N()
def test_set_nomemory(self):
code = """if 1:
import _testcapi
class C(): pass
# The first loop tests both functions and that remove_mem_hooks()
# can be called twice in a row. The second loop checks a call to
# set_nomemory() after a call to remove_mem_hooks(). The third
# loop checks the start and stop arguments of set_nomemory().
for outer_cnt in range(1, 4):
start = 10 * outer_cnt
for j in range(100):
if j == 0:
if outer_cnt != 3:
_testcapi.set_nomemory(start)
else:
_testcapi.set_nomemory(start, start + 1)
try:
C()
except MemoryError as e:
if outer_cnt != 3:
_testcapi.remove_mem_hooks()
print('MemoryError', outer_cnt, j)
_testcapi.remove_mem_hooks()
break
"""
rc, out, err = assert_python_ok('-c', code)
self.assertIn(b'MemoryError 1 10', out)
self.assertIn(b'MemoryError 2 20', out)
self.assertIn(b'MemoryError 3 30', out)
def test_mapping_keys_values_items(self):
class Mapping1(dict):
def keys(self):
return list(super().keys())
def values(self):
return list(super().values())
def items(self):
return list(super().items())
class Mapping2(dict):
def keys(self):
return tuple(super().keys())
def values(self):
return tuple(super().values())
def items(self):
return tuple(super().items())
dict_obj = {'foo': 1, 'bar': 2, 'spam': 3}
for mapping in [{}, OrderedDict(), Mapping1(), Mapping2(),
dict_obj, OrderedDict(dict_obj),
Mapping1(dict_obj), Mapping2(dict_obj)]:
self.assertListEqual(_testcapi.get_mapping_keys(mapping),
list(mapping.keys()))
self.assertListEqual(_testcapi.get_mapping_values(mapping),
list(mapping.values()))
self.assertListEqual(_testcapi.get_mapping_items(mapping),
list(mapping.items()))
def test_mapping_keys_values_items_bad_arg(self):
self.assertRaises(AttributeError, _testcapi.get_mapping_keys, None)
self.assertRaises(AttributeError, _testcapi.get_mapping_values, None)
self.assertRaises(AttributeError, _testcapi.get_mapping_items, None)
class BadMapping:
def keys(self):
return None
def values(self):
return None
def items(self):
return None
bad_mapping = BadMapping()
self.assertRaises(TypeError, _testcapi.get_mapping_keys, bad_mapping)
self.assertRaises(TypeError, _testcapi.get_mapping_values, bad_mapping)
self.assertRaises(TypeError, _testcapi.get_mapping_items, bad_mapping)
@unittest.skipUnless(hasattr(_testcapi, 'negative_refcount'),
'need _testcapi.negative_refcount')
def test_negative_refcount(self):
# bpo-35059: Check that Py_DECREF() reports the correct filename
# when calling _Py_NegativeRefcount() to abort Python.
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.negative_refcount()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err,
br'_testcapimodule\.c:[0-9]+: '
br'_Py_NegativeRefcount: Assertion failed: '
br'object has negative ref count')
def test_trashcan_subclass(self):
# bpo-35983: Check that the trashcan mechanism for "list" is NOT
# activated when its tp_dealloc is being called by a subclass
from _testcapi import MyList
L = None
for i in range(1000):
L = MyList((L,))
def test_trashcan_python_class1(self):
self.do_test_trashcan_python_class(list)
def test_trashcan_python_class2(self):
from _testcapi import MyList
self.do_test_trashcan_python_class(MyList)
def do_test_trashcan_python_class(self, base):
# Check that the trashcan mechanism works properly for a Python
# subclass of a class using the trashcan (this specific test assumes
# that the base class "base" behaves like list)
class PyList(base):
# Count the number of PyList instances to verify that there is
# no memory leak
num = 0
def __init__(self, *args):
__class__.num += 1
super().__init__(*args)
def __del__(self):
__class__.num -= 1
for parity in (0, 1):
L = None
# We need in the order of 2**20 iterations here such that a
# typical 8MB stack would overflow without the trashcan.
for i in range(2**20):
L = PyList((L,))
L.attr = i
if parity:
# Add one additional nesting layer
L = (L,)
self.assertGreater(PyList.num, 0)
del L
self.assertEqual(PyList.num, 0)
def test_subclass_of_heap_gc_ctype_with_tpdealloc_decrefs_once(self):
class HeapGcCTypeSubclass(_testcapi.HeapGcCType):
def __init__(self):
self.value2 = 20
super().__init__()
subclass_instance = HeapGcCTypeSubclass()
type_refcnt = sys.getrefcount(HeapGcCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# Test that the type reference count is only decremented once
del subclass_instance
self.assertEqual(type_refcnt - 1, sys.getrefcount(HeapGcCTypeSubclass))
def test_subclass_of_heap_gc_ctype_with_del_modifying_dunder_class_only_decrefs_once(self):
class A(_testcapi.HeapGcCType):
def __init__(self):
self.value2 = 20
super().__init__()
class B(A):
def __init__(self):
super().__init__()
def __del__(self):
self.__class__ = A
A.refcnt_in_del = sys.getrefcount(A)
B.refcnt_in_del = sys.getrefcount(B)
subclass_instance = B()
type_refcnt = sys.getrefcount(B)
new_type_refcnt = sys.getrefcount(A)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
del subclass_instance
# Test that setting __class__ modified the reference counts of the types
self.assertEqual(type_refcnt - 1, B.refcnt_in_del)
self.assertEqual(new_type_refcnt + 1, A.refcnt_in_del)
# Test that the original type already has decreased its refcnt
self.assertEqual(type_refcnt - 1, sys.getrefcount(B))
        # Test that subtype_dealloc decrefs the newly assigned __class__ only once
self.assertEqual(new_type_refcnt, sys.getrefcount(A))
def test_heaptype_with_dict(self):
inst = _testcapi.HeapCTypeWithDict()
inst.foo = 42
self.assertEqual(inst.foo, 42)
self.assertEqual(inst.dictobj, inst.__dict__)
self.assertEqual(inst.dictobj, {"foo": 42})
inst = _testcapi.HeapCTypeWithDict()
self.assertEqual({}, inst.__dict__)
def test_heaptype_with_negative_dict(self):
inst = _testcapi.HeapCTypeWithNegativeDict()
inst.foo = 42
self.assertEqual(inst.foo, 42)
self.assertEqual(inst.dictobj, inst.__dict__)
self.assertEqual(inst.dictobj, {"foo": 42})
inst = _testcapi.HeapCTypeWithNegativeDict()
self.assertEqual({}, inst.__dict__)
def test_heaptype_with_weakref(self):
inst = _testcapi.HeapCTypeWithWeakref()
ref = weakref.ref(inst)
self.assertEqual(ref(), inst)
self.assertEqual(inst.weakreflist, ref)
def test_c_subclass_of_heap_ctype_with_tpdealloc_decrefs_once(self):
subclass_instance = _testcapi.HeapCTypeSubclass()
type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# Test that the type reference count is only decremented once
del subclass_instance
self.assertEqual(type_refcnt - 1, sys.getrefcount(_testcapi.HeapCTypeSubclass))
def test_c_subclass_of_heap_ctype_with_del_modifying_dunder_class_only_decrefs_once(self):
subclass_instance = _testcapi.HeapCTypeSubclassWithFinalizer()
type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclassWithFinalizer)
new_type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# The tp_finalize slot will set __class__ to HeapCTypeSubclass
del subclass_instance
# Test that setting __class__ modified the reference counts of the types
self.assertEqual(type_refcnt - 1, _testcapi.HeapCTypeSubclassWithFinalizer.refcnt_in_del)
self.assertEqual(new_type_refcnt + 1, _testcapi.HeapCTypeSubclass.refcnt_in_del)
# Test that the original type already has decreased its refcnt
self.assertEqual(type_refcnt - 1, sys.getrefcount(_testcapi.HeapCTypeSubclassWithFinalizer))
        # Test that subtype_dealloc decrefs the newly assigned __class__ only once
self.assertEqual(new_type_refcnt, sys.getrefcount(_testcapi.HeapCTypeSubclass))
class TestPendingCalls(unittest.TestCase):
def pendingcalls_submit(self, l, n):
def callback():
#this function can be interrupted by thread switching so let's
#use an atomic operation
l.append(None)
for i in range(n):
time.sleep(random.random()*0.02) #0.01 secs on average
#try submitting callback until successful.
#rely on regular interrupt to flush queue if we are
#unsuccessful.
while True:
if _testcapi._pending_threadfunc(callback):
                    break
def pendingcalls_wait(self, l, n, context = None):
        #now, stick around until the list l has grown to n entries
        count = 0
while len(l) != n:
#this busy loop is where we expect to be interrupted to
#run our callbacks. Note that callbacks are only run on the
#main thread
if False and support.verbose:
print("(%i)"%(len(l),),)
for i in range(1000):
a = i*i
if context and not context.event.is_set():
continue
count += 1
self.assertTrue(count < 10000,
"timeout waiting for %i callbacks, got %i"%(n, len(l)))
if False and support.verbose:
print("(%i)"%(len(l),))
def test_pendingcalls_threaded(self):
#do every callback on a separate thread
n = 32 #total callbacks
threads = []
class foo(object):pass
context = foo()
context.l = []
context.n = 2 #submits per thread
context.nThreads = n // context.n
context.nFinished = 0
context.lock = threading.Lock()
context.event = threading.Event()
threads = [threading.Thread(target=self.pendingcalls_thread,
args=(context,))
for i in range(context.nThreads)]
with support.start_threads(threads):
self.pendingcalls_wait(context.l, n, context)
def pendingcalls_thread(self, context):
try:
self.pendingcalls_submit(context.l, context.n)
finally:
with context.lock:
context.nFinished += 1
nFinished = context.nFinished
if False and support.verbose:
print("finished threads: ", nFinished)
if nFinished == context.nThreads:
context.event.set()
def test_pendingcalls_non_threaded(self):
#again, just using the main thread, likely they will all be dispatched at
#once. It is ok to ask for too many, because we loop until we find a slot.
#the loop can be interrupted to dispatch.
#there are only 32 dispatch slots, so we go for twice that!
l = []
n = 64
self.pendingcalls_submit(l, n)
self.pendingcalls_wait(l, n)
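# Note on the mechanism exercised above: _testcapi._pending_threadfunc() routes
# through Py_AddPendingCall(), whose queue has a fixed number of slots (32 when
# this test was written), so a submission can fail and has to be retried --
# which is exactly why pendingcalls_submit() loops until it succeeds.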
class SubinterpreterTest(unittest.TestCase):
def test_subinterps(self):
import builtins
r, w = os.pipe()
code = """if 1:
import sys, builtins, pickle
with open({:d}, "wb") as f:
pickle.dump(id(sys.modules), f)
pickle.dump(id(builtins), f)
""".format(w)
with open(r, "rb") as f:
ret = support.run_in_subinterp(code)
self.assertEqual(ret, 0)
self.assertNotEqual(pickle.load(f), id(sys.modules))
self.assertNotEqual(pickle.load(f), id(builtins))
def test_mutate_exception(self):
import binascii
support.run_in_subinterp("import binascii; binascii.Error.foobar = 'foobar'")
self.assertFalse(hasattr(binascii.Error, "foobar"))
class TestThreadState(unittest.TestCase):
@support.reap_threads
def test_thread_state(self):
# some extra thread-state tests driven via _testcapi
def target():
idents = []
def callback():
idents.append(threading.get_ident())
_testcapi._test_thread_state(callback)
a = b = callback
time.sleep(1)
# Check our main thread is in the list exactly 3 times.
self.assertEqual(idents.count(threading.get_ident()), 3,
"Couldn't find main thread correctly in the list")
target()
t = threading.Thread(target=target)
t.start()
t.join()
class Test_testcapi(unittest.TestCase):
locals().update((name, getattr(_testcapi, name))
for name in dir(_testcapi)
if name.startswith('test_') and not name.endswith('_code'))
class PyMemDebugTests(unittest.TestCase):
PYTHONMALLOC = 'debug'
# '0x04c06e0' or '04C06E0'
PTR_REGEX = r'(?:0x)?[0-9a-fA-F]+'
def check(self, code):
with support.SuppressCrashReport():
out = assert_python_failure('-c', code,
PYTHONMALLOC=self.PYTHONMALLOC)
stderr = out.err
return stderr.decode('ascii', 'replace')
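    # The reports matched below can be reproduced by hand by running the
    # offending snippet under the debug allocator (sketch, POSIX shell assumed):
    #   PYTHONMALLOC=debug python -c "import _testcapi; _testcapi.pymem_buffer_overflow()"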
def test_buffer_overflow(self):
out = self.check('import _testcapi; _testcapi.pymem_buffer_overflow()')
regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
r" 16 bytes originally requested\n"
r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
r" The [0-9] pad bytes at tail={ptr} are not all FORBIDDENBYTE \(0x[0-9a-f]{{2}}\):\n"
r" at tail\+0: 0x78 \*\*\* OUCH\n"
r" at tail\+1: 0xfd\n"
r" at tail\+2: 0xfd\n"
r" .*\n"
r"( The block was made by call #[0-9]+ to debug malloc/realloc.\n)?"
r" Data at p: cd cd cd .*\n"
r"\n"
r"Enable tracemalloc to get the memory block allocation traceback\n"
r"\n"
r"Fatal Python error: bad trailing pad byte")
regex = regex.format(ptr=self.PTR_REGEX)
regex = re.compile(regex, flags=re.DOTALL)
self.assertRegex(out, regex)
def test_api_misuse(self):
out = self.check('import _testcapi; _testcapi.pymem_api_misuse()')
regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
r" 16 bytes originally requested\n"
r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
r" The [0-9] pad bytes at tail={ptr} are FORBIDDENBYTE, as expected.\n"
r"( The block was made by call #[0-9]+ to debug malloc/realloc.\n)?"
r" Data at p: cd cd cd .*\n"
r"\n"
r"Enable tracemalloc to get the memory block allocation traceback\n"
r"\n"
r"Fatal Python error: bad ID: Allocated using API 'm', verified using API 'r'\n")
regex = regex.format(ptr=self.PTR_REGEX)
self.assertRegex(out, regex)
def check_malloc_without_gil(self, code):
out = self.check(code)
expected = ('Fatal Python error: Python memory allocator called '
'without holding the GIL')
self.assertIn(expected, out)
def test_pymem_malloc_without_gil(self):
# Debug hooks must raise an error if PyMem_Malloc() is called
# without holding the GIL
code = 'import _testcapi; _testcapi.pymem_malloc_without_gil()'
self.check_malloc_without_gil(code)
def test_pyobject_malloc_without_gil(self):
# Debug hooks must raise an error if PyObject_Malloc() is called
# without holding the GIL
code = 'import _testcapi; _testcapi.pyobject_malloc_without_gil()'
self.check_malloc_without_gil(code)
def check_pyobject_is_freed(self, func_name):
code = textwrap.dedent(f'''
import gc, os, sys, _testcapi
# Disable the GC to avoid crash on GC collection
gc.disable()
try:
_testcapi.{func_name}()
# Exit immediately to avoid a crash while deallocating
# the invalid object
os._exit(0)
except _testcapi.error:
os._exit(1)
''')
assert_python_ok('-c', code, PYTHONMALLOC=self.PYTHONMALLOC)
def test_pyobject_uninitialized_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_uninitialized_is_freed')
def test_pyobject_forbidden_bytes_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_forbidden_bytes_is_freed')
def test_pyobject_freed_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_freed_is_freed')
class PyMemMallocDebugTests(PyMemDebugTests):
PYTHONMALLOC = 'malloc_debug'
@unittest.skipUnless(support.with_pymalloc(), 'need pymalloc')
class PyMemPymallocDebugTests(PyMemDebugTests):
PYTHONMALLOC = 'pymalloc_debug'
@unittest.skipUnless(Py_DEBUG, 'need Py_DEBUG')
class PyMemDefaultTests(PyMemDebugTests):
# test default allocator of Python compiled in debug mode
PYTHONMALLOC = ''
if __name__ == "__main__":
unittest.main()
| true
| true
|
79059cac0c226a2ed8528429f8d6cceb48ac6300
| 128
|
py
|
Python
|
run.py
|
siketh/TRBlog
|
39855f23cee5c5639360374fa2c6ec5325321450
|
[
"MIT"
] | null | null | null |
run.py
|
siketh/TRBlog
|
39855f23cee5c5639360374fa2c6ec5325321450
|
[
"MIT"
] | 18
|
2016-06-21T04:30:14.000Z
|
2016-09-27T23:07:41.000Z
|
run.py
|
siketh/trevorroman-dot-com
|
39855f23cee5c5639360374fa2c6ec5325321450
|
[
"MIT"
] | null | null | null |
#!flask/bin/python
from app import app
from config import DEBUG_MODE
if __name__ == '__main__':
app.run(debug=DEBUG_MODE)
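# A sketch of the config module imported above (hypothetical contents -- the
# real config.py is not part of this file):
#   DEBUG_MODE = True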
| 16
| 29
| 0.742188
|
from app import app
from config import DEBUG_MODE
if __name__ == '__main__':
app.run(debug=DEBUG_MODE)
| true
| true
|
79059fdd31f7d9e39fa492d14cec49f11c28129d
| 4,490
|
py
|
Python
|
howtolens/simulators/chapter_2/mass_sis__source_exp_x2.py
|
rakaar/PyAutoLens
|
bc140c5d196c426092c1178b8abfa492c6fab859
|
[
"MIT"
] | null | null | null |
howtolens/simulators/chapter_2/mass_sis__source_exp_x2.py
|
rakaar/PyAutoLens
|
bc140c5d196c426092c1178b8abfa492c6fab859
|
[
"MIT"
] | null | null | null |
howtolens/simulators/chapter_2/mass_sis__source_exp_x2.py
|
rakaar/PyAutoLens
|
bc140c5d196c426092c1178b8abfa492c6fab859
|
[
"MIT"
] | null | null | null |
from os import path
import autolens as al
"""
This script simulates `Imaging` of a strong lens where:
- The lens `Galaxy`'s total mass distribution is a *SphericalIsothermal*.
 - The two source galaxies each have a `LightProfile` that is a *SphericalExponential*.
This dataset is used in chapter 2, tutorials 1-3.
"""
"""
The `dataset_type` describes the type of data being simulated (in this case, `Imaging` data) and `dataset_name`
gives it a descriptive name. They define the folder the dataset is output to on your hard-disk:
- The image will be output to `/autolens_workspace/dataset/dataset_type/dataset_name/image.fits`.
- The noise-map will be output to `/autolens_workspace/dataset/dataset_type/dataset_name/lens_name/noise_map.fits`.
- The psf will be output to `/autolens_workspace/dataset/dataset_type/dataset_name/psf.fits`.
"""
dataset_type = "chapter_2"
dataset_name = "mass_sis__source_exp_x2"
"""
The path where the dataset will be output, which in this case is:
`/autolens_workspace/howtolens/dataset/chapter_2/mass_sis__source_exp_x2/`
"""
dataset_path = path.join("dataset", "howtolens", dataset_type, dataset_name)
"""
For simulating an image of a strong lens, we recommend using a GridIterate object. This represents a grid of $(y,x)$
coordinates like an ordinary Grid, but when the light-profile`s image is evaluated below (using the Tracer) the
sub-size of the grid is iteratively increased (in steps of 2, 4, 8, 16, 24) until the input fractional accuracy of
99.99% is met.
This ensures that the divergent and bright central regions of the source galaxy are fully resolved when determining the
total flux emitted within a pixel.
"""
grid = al.GridIterate.uniform(
shape_2d=(100, 100),
pixel_scales=0.1,
fractional_accuracy=0.9999,
sub_steps=[2, 4, 8, 16, 24],
)
"""Simulate a simple Gaussian PSF for the image."""
psf = al.Kernel.from_gaussian(
shape_2d=(11, 11), sigma=0.1, pixel_scales=grid.pixel_scales
)
"""
To simulate the `Imaging` dataset we first create a simulator, which defines the exposure time, background sky,
noise levels and psf of the dataset that is simulated.
"""
simulator = al.SimulatorImaging(
exposure_time=300.0, psf=psf, background_sky_level=0.1, add_poisson_noise=True
)
"""
Set up the lens `Galaxy`'s mass (SIS) and the two source galaxies' light (spherical Exponential profiles) for this simulated lens.
For lens modeling, defining ellipticity in terms of the `elliptical_comps` improves the model-fitting procedure.
However, for simulating a strong lens you may find it more intuitive to define the elliptical geometry using the
axis-ratio of the profile (axis_ratio = semi-major axis / semi-minor axis = b/a) and position angle phi, where phi is
in degrees and defined counter clockwise from the positive x-axis.
We can use the **PyAutoLens** `convert` module to determine the elliptical components from the axis-ratio and phi.
"""
lens_galaxy = al.Galaxy(
redshift=0.5, mass=al.mp.SphericalIsothermal(centre=(0.0, 0.0), einstein_radius=1.6)
)
source_galaxy_0 = al.Galaxy(
redshift=1.0,
bulge=al.lp.SphericalExponential(
centre=(0.2, 0.0), intensity=0.2, effective_radius=0.2
),
)
source_galaxy_1 = al.Galaxy(
redshift=1.0,
bulge=al.lp.SphericalExponential(
centre=(-0.2, 0.0), intensity=0.2, effective_radius=0.2
),
)
"""Use these galaxies to setup a tracer, which will generate the image for the simulated `Imaging` dataset."""
tracer = al.Tracer.from_galaxies(
galaxies=[lens_galaxy, source_galaxy_0, source_galaxy_1]
)
"""
We can now pass this simulator a tracer, which creates the ray-traced image and simulates it as an
imaging dataset.
"""
imaging = simulator.from_tracer_and_grid(tracer=tracer, grid=grid)
"""Output our simulated dataset to the dataset path as .fits files"""
imaging.output_to_fits(
image_path=path.join(dataset_path, "image.fits"),
psf_path=path.join(dataset_path, "psf.fits"),
noise_map_path=path.join(dataset_path, "noise_map.fits"),
overwrite=True,
)
"""
Pickle the `Tracer` in the dataset folder, ensuring the true `Tracer` is safely stored and available if we need to
check how the dataset was simulated in the future.
This will also be accessible via the `Aggregator` if a model-fit is performed using the dataset.
"""
tracer.save(file_path=dataset_path, filename="true_tracer")
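"""
Finally, a sketch of how the simulated dataset could be loaded back for a quick
check. This assumes the `al.Imaging.from_fits` loader of this PyAutoLens
version; it is illustrative and not part of the original script.
"""
imaging_check = al.Imaging.from_fits(
    image_path=path.join(dataset_path, "image.fits"),
    psf_path=path.join(dataset_path, "psf.fits"),
    noise_map_path=path.join(dataset_path, "noise_map.fits"),
    pixel_scales=0.1,  # must match the pixel_scales of the grid used above
)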
| 37.107438
| 120
| 0.73029
|
from os import path
import autolens as al
dataset_type = "chapter_2"
dataset_name = "mass_sis__source_exp_x2"
dataset_path = path.join("dataset", "howtolens", dataset_type, dataset_name)
grid = al.GridIterate.uniform(
shape_2d=(100, 100),
pixel_scales=0.1,
fractional_accuracy=0.9999,
sub_steps=[2, 4, 8, 16, 24],
)
psf = al.Kernel.from_gaussian(
shape_2d=(11, 11), sigma=0.1, pixel_scales=grid.pixel_scales
)
simulator = al.SimulatorImaging(
exposure_time=300.0, psf=psf, background_sky_level=0.1, add_poisson_noise=True
)
lens_galaxy = al.Galaxy(
redshift=0.5, mass=al.mp.SphericalIsothermal(centre=(0.0, 0.0), einstein_radius=1.6)
)
source_galaxy_0 = al.Galaxy(
redshift=1.0,
bulge=al.lp.SphericalExponential(
centre=(0.2, 0.0), intensity=0.2, effective_radius=0.2
),
)
source_galaxy_1 = al.Galaxy(
redshift=1.0,
bulge=al.lp.SphericalExponential(
centre=(-0.2, 0.0), intensity=0.2, effective_radius=0.2
),
)
tracer = al.Tracer.from_galaxies(
galaxies=[lens_galaxy, source_galaxy_0, source_galaxy_1]
)
imaging = simulator.from_tracer_and_grid(tracer=tracer, grid=grid)
imaging.output_to_fits(
image_path=path.join(dataset_path, "image.fits"),
psf_path=path.join(dataset_path, "psf.fits"),
noise_map_path=path.join(dataset_path, "noise_map.fits"),
overwrite=True,
)
tracer.save(file_path=dataset_path, filename="true_tracer")
| true
| true
|
7905a02a3cf0e9f09ead09edf2fa29945dc8cdb3
| 1,260
|
py
|
Python
|
apps/api/serializers.py
|
azengard/reseller-api
|
6f08be4a63bb4e267d49f7368d2d8404c5b84230
|
[
"MIT"
] | null | null | null |
apps/api/serializers.py
|
azengard/reseller-api
|
6f08be4a63bb4e267d49f7368d2d8404c5b84230
|
[
"MIT"
] | null | null | null |
apps/api/serializers.py
|
azengard/reseller-api
|
6f08be4a63bb4e267d49f7368d2d8404c5b84230
|
[
"MIT"
] | null | null | null |
from django.core.exceptions import ObjectDoesNotExist
from rest_framework.serializers import PrimaryKeyRelatedField, RelatedField
class UniqueRelatedField(RelatedField):
"""
Like rest_framework's PrimaryKeyRelatedField, but selecting by any unique
field instead of the primary key.
"""
default_error_messages = PrimaryKeyRelatedField.default_error_messages.copy()
def __init__(self, field_name, serializer_field=None, **kwargs):
super().__init__(**kwargs)
self.related_field_name = field_name
self.serializer_field = serializer_field
def to_internal_value(self, data):
if self.serializer_field is not None:
data = self.serializer_field.to_internal_value(data)
try:
return self.get_queryset().get(**{self.related_field_name: data})
except ObjectDoesNotExist:
self.fail('does_not_exist', pk_value=data)
except (TypeError, ValueError):
self.fail('incorrect_type', data_type=type(data).__name__)
def to_representation(self, value):
value = getattr(value, self.related_field_name)
if self.serializer_field is not None:
value = self.serializer_field.to_representation(value)
return value
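# A hypothetical usage sketch (the serializer, model and field names below are
# invented for illustration; they are not taken from this project):
#
# from rest_framework import serializers
#
# class CustomerSerializer(serializers.ModelSerializer):
#     country = UniqueRelatedField(
#         field_name='iso_code',               # look up by a unique field, not pk
#         queryset=Country.objects.all(),
#     )
#
#     class Meta:
#         model = Customer
#         fields = ('name', 'country')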
| 38.181818
| 81
| 0.711111
|
from django.core.exceptions import ObjectDoesNotExist
from rest_framework.serializers import PrimaryKeyRelatedField, RelatedField
class UniqueRelatedField(RelatedField):
default_error_messages = PrimaryKeyRelatedField.default_error_messages.copy()
def __init__(self, field_name, serializer_field=None, **kwargs):
super().__init__(**kwargs)
self.related_field_name = field_name
self.serializer_field = serializer_field
def to_internal_value(self, data):
if self.serializer_field is not None:
data = self.serializer_field.to_internal_value(data)
try:
return self.get_queryset().get(**{self.related_field_name: data})
except ObjectDoesNotExist:
self.fail('does_not_exist', pk_value=data)
except (TypeError, ValueError):
self.fail('incorrect_type', data_type=type(data).__name__)
def to_representation(self, value):
value = getattr(value, self.related_field_name)
if self.serializer_field is not None:
value = self.serializer_field.to_representation(value)
return value
| true
| true
|
7905a0da55cd59454211170ef0459873b054418e
| 107
|
py
|
Python
|
jgsnippets/strings/__init__.py
|
jgontrum/snippets
|
a23bd196cc24b8163d83d9daca3fb60bc67eabcf
|
[
"MIT"
] | 1
|
2017-06-05T08:41:24.000Z
|
2017-06-05T08:41:24.000Z
|
jgsnippets/strings/__init__.py
|
jgontrum/snippets
|
a23bd196cc24b8163d83d9daca3fb60bc67eabcf
|
[
"MIT"
] | 1
|
2021-06-01T21:53:53.000Z
|
2021-06-01T21:53:53.000Z
|
jgsnippets/strings/__init__.py
|
jgontrum/snippets
|
a23bd196cc24b8163d83d9daca3fb60bc67eabcf
|
[
"MIT"
] | null | null | null |
from jgsnippets.strings.encoding import clean_encoding
from jgsnippets.strings.format import jprint, pprint
| 53.5
| 54
| 0.878505
|
from jgsnippets.strings.encoding import clean_encoding
from jgsnippets.strings.format import jprint, pprint
| true
| true
|
7905a0e03ce59fc1400be1265522fcc3a438b37d
| 19,122
|
py
|
Python
|
Unsupervised-Learning/rbm.py
|
gajeraj/MLSA-workshops-2020-student
|
cafbf5ac8750dd2b962174ad71dabf35ac90e2f4
|
[
"MIT"
] | 5
|
2020-02-27T07:04:44.000Z
|
2021-06-03T17:20:55.000Z
|
Unsupervised-Learning/rbm.py
|
Phoebe0222/MLSA-workshops-2019-student
|
cafbf5ac8750dd2b962174ad71dabf35ac90e2f4
|
[
"MIT"
] | null | null | null |
Unsupervised-Learning/rbm.py
|
Phoebe0222/MLSA-workshops-2019-student
|
cafbf5ac8750dd2b962174ad71dabf35ac90e2f4
|
[
"MIT"
] | 9
|
2019-08-09T12:08:28.000Z
|
2019-10-16T06:35:22.000Z
|
import tensorflow as tf
import numpy as np
import os
import matplotlib.pyplot as plt
from tqdm import tqdm
class RBM(object):
def __init__(self,num_visible,num_hidden,visible_unit_type='bin',main_dir='/Users/chamalgomes/Documents/Python/GitLab/DeepLearning/KAI PROJECT/rbm/models',
model_name='rbm_model',gibbs_sampling_steps=1,learning_rate=0.01,momentum=0.9,l2=0.001,batch_size=10,
num_epochs=10,stddev=0.1,verbose=0,plot_training_loss=True):
""""
INPUT PARAMETER 1) num_visible: number of visible units in the RBM
INPUT PARAMETER 2) num_hidden: number of hidden units in the RBM
INPUT PARAMETER 3) main_dir: main directory to put the models, data and summary directories
        INPUT PARAMETER 4) model_name: name of the model file the data is saved under
INPUT PARAMETER 5) gibbs_sampling_steps: Default 1 (Hence Optional)
INPUT PARAMETER 6) learning_rate: Default 0.01 (Hence Optional)
INPUT PARAMETER 7) momentum: Default 0.9(Hence Optional) for Gradient Descent
INPUT PARAMETER 8) l2: l2 regularization lambda value for weight decay Default 0.001(Hence Optional)
INPUT PARAMETER 9) batch_size: Default 10 (Hence Optional)
INPUT PARAMETER 10) num_epochs: Default 10 (Hence Optional)
INPUT PARAMETER 11) stddev: optional, default 0.1. Ignored if visible_unit_type is not 'gauss'
        INPUT PARAMETER 12) verbose: level of verbosity, optional, default 0
INPUT PARAMETER 13) plot_training_loss: whether or not to plot training loss, default True
INPUT PARAMETER 14) visible_units_type: Binary or Gaussian (Default Binary)
"""
        #Defining main parameters
self.num_visible = num_visible #1
self.num_hidden = num_hidden #2
self.main_dir = main_dir #3
self.model_name = model_name #4
self.gibbs_sampling_steps = gibbs_sampling_steps #5
self.learning_rate = learning_rate #6
self.momentum = momentum #7
self.l2 = l2 #8
self.batch_size = batch_size #9
self.num_epochs = num_epochs #10
self.stddev = stddev #11
self.verbose = verbose #12
self.plot_training_loss = plot_training_loss #13
self.visible_unit_type = visible_unit_type #14
self._create_model_directory()
self.model_path = os.path.join(self.main_dir, self.model_name)
self.W = None
self.bh_ = None
self.bv_ = None
self.dw = None
self.dbh_ = None
self.dbv_ = None
self.w_upd8 = None
self.bh_upd8 = None
self.bv_upd8 = None
self.encode = None
        self.reconstruct = None
self.loss_function = None
self.batch_cost = None
self.batch_free_energy = None
self.training_losses = []
        self.input_data = None  # set in _build_model
        self.hrand = None  # set in _build_model
        self.validation_size = None  # set in fit
        self.tf_session = None  # set in fit
        self.tf_saver = None  # set in _initialize_tf_utilities_and_ops
def sample_prob(self,probs,rand):
""" takes a tensor of probabilitiesas from a sigmoidal activation and sample from all
the distributions.
probs INPUT parameter: tensor of probabilities
rand INPUT parameter :tensor (of same shape as probabilities) of random values
:RETURN binary sample of probabilities
"""
return tf.nn.relu(tf.sign(probs-rand))
def gen_batches(self,data,batch_size):
""" Divide input data into batches
data INPUT parameter: input data( like a data frame)
batch_size INPUT parameter: desired size of each batch
:RETURN data divided in batches
"""
data = np.array(data)
for i in range(0,data.shape[0],batch_size):
yield data[i:i+batch_size]
def fit(self,train_set,validation_set = None,restore_previous_model=False):
""""
fit the model to the training data
INPUT PARAMETER train_set: training set
INPUT PARAMETER validation set.default None (Hence Optional)
INPUT PARAMETER restore_previous_model:
if true, a previous trained model
with the same name of this model is restored from disk to continue training.
OUTPUT: self
"""
if validation_set is not None:
self.validation_size = validation_set.shape[0]
tf.reset_default_graph()
        self._build_model()  # defined below
with tf.Session() as self.tf_session:
self._initialize_tf_utilities_and_ops(restore_previous_model)
self._train_model(train_set, validation_set)
self.tf_saver.save(self.tf_session, self.model_path)
if self.plot_training_loss:
#plot editing should be done here as you wish
plt.plot(self.training_losses)
plt.title("Training batch losses v.s. iteractions")
plt.xlabel("Num of training iteractions")
plt.ylabel("Reconstruction error")
plt.show()
def _initialize_tf_utilities_and_ops(self, restore_previous_model):
""""
Initialize TensorFlow operations: summaries, init operations, saver, summary_writer.
Restore a previously trained model if the flag restore_previous_model is true.
"""
init_op = tf.global_variables_initializer()
self.tf_saver = tf.train.Saver()
self.tf_session.run(init_op)
if restore_previous_model:
self.tf_saver.restore(self.tf_session, self.model_path)
def _train_model(self, train_set, validation_set):
"""" Train the Model
INPUT PARAMETER train set: Training set
INPUT PARAMETER validation_set: Validation set
OUTPUT self
"""
for i in range(self.num_epochs):
self._run_train_step(train_set)
if validation_set is not None:
self._run_validation_error(i, validation_set)
def _run_train_step(self,train_set):
""""
Run a training step. A training step is made by randomly shuffling the training set,
divide into batches and run the variable update nodes for each batch. If self.plot_training_loss
is true, will record training loss after each batch.
INPUT PARAMETER train_set: training set
OUTPUT self
"""
np.random.shuffle(train_set)
batches = [_ for _ in self.gen_batches(train_set, self.batch_size)]
updates = [self.w_upd8, self.bh_upd8, self.bv_upd8]
for batch in batches:
if self.plot_training_loss:
_,loss = self.tf_session.run([updates,self.loss_function],feed_dict = self._create_feed_dict(batch))
self.training_losses.append(loss)
else:
self.tf_session.run(updates, feed_dict=self._create_feed_dict(batch))
def _run_validation_error(self, epoch, validation_set):
"""
Run the error computation on the validation set and print it out for each epoch.
INPUT PARAMETER: current epoch
INPUT PARAMETER validation_set: validation data
OUTPUT: self
"""
loss = self.tf_session.run(self.loss_function,
feed_dict=self._create_feed_dict(validation_set))
if self.verbose == 1:
tqdm.write("Validation cost at step %s: %s" % (epoch, loss))
def _create_feed_dict(self, data):
""" Create the dictionary of data to feed to TensorFlow's session during training.
:param data: training/validation set batch
:return: dictionary(self.input_data: data, self.hrand: random_uniform)
"""
return {
self.input_data: data,
self.hrand: np.random.rand(data.shape[0], self.num_hidden),
}
def _build_model(self):
"""
        Building the Restricted Boltzmann Machine in TensorFlow
"""
self.input_data, self.hrand = self._create_placeholders() #check the function below
self.W, self.bh_, self.bv_, self.dw, self.dbh_, self.dbv_ = self._create_variables()#check the function below
hprobs0, hstates0, vprobs, hprobs1, hstates1 = self.gibbs_sampling_step(self.input_data)
positive = self.compute_positive_association(self.input_data, hprobs0, hstates0)
nn_input = vprobs
for step in range(self.gibbs_sampling_steps - 1):
hprobs, hstates, vprobs, hprobs1, hstates1 = self.gibbs_sampling_step(nn_input)
nn_input = vprobs
self.reconstruct = vprobs
negative = tf.matmul(tf.transpose(vprobs), hprobs1)
self.encode = hprobs1
#exact formula in my paper
dw = positive - negative
self.dw = self.momentum*self.dw + (1-self.momentum)*dw
self.w_upd8 = self.W.assign_add(self.learning_rate*self.dw - self.learning_rate*self.l2*self.W)
dbh_ = tf.reduce_mean(hprobs0 - hprobs1, 0)
self.dbh_ = self.momentum*self.dbh_ + self.learning_rate*dbh_
self.bh_upd8 = self.bh_.assign_add(self.dbh_)
dbv_ = tf.reduce_mean(self.input_data - vprobs, 0)
self.dbv_ = self.momentum*self.dbv_ + self.learning_rate*dbv_
self.bv_upd8 = self.bv_.assign_add(self.dbv_)
self.loss_function = tf.sqrt(tf.reduce_mean(tf.square(self.input_data - vprobs)))
self.batch_cost = tf.sqrt(tf.reduce_mean(tf.square(self.input_data - vprobs), 1))
self._create_free_energy_for_batch()
def _create_free_energy_for_batch(self):
""" Create free energy ops to batch input data
:return: self
"""
if self.visible_unit_type == 'bin':
self._create_free_energy_for_bin()
elif self.visible_unit_type == 'gauss':
self._create_free_energy_for_gauss()
else:
self.batch_free_energy = None
def _create_free_energy_for_bin(self):
""" Create free energy for mdoel with Bin visible layer
:return: self
"""
#Refer to the Binary Free Energy Equation
self.batch_free_energy = - (tf.matmul(self.input_data, tf.reshape(self.bv_, [-1, 1])) + tf.reshape(tf.reduce_sum(tf.log(tf.exp(tf.matmul(self.input_data, self.W) + self.bh_) + 1), 1), [-1, 1]))
def _create_free_energy_for_gauss(self):
""" Create free energy for model with Gauss visible layer
:return: self
"""
#Refer to the Gaussian Free Energy Equation
self.batch_free_energy = - (tf.matmul(self.input_data, tf.reshape(self.bv_, [-1, 1])) - tf.reshape(tf.reduce_sum(0.5 * self.input_data * self.input_data, 1), [-1, 1]) + tf.reshape(tf.reduce_sum(tf.log(tf.exp(tf.matmul(self.input_data, self.W) + self.bh_) + 1), 1), [-1, 1]))
def _create_placeholders(self):
""" Create the TensorFlow placeholders for the model.
:return: tuple(input(shape(None, num_visible)),
hrand(shape(None, num_hidden)))
"""
x = tf.placeholder('float', [None, self.num_visible], name='x-input')
hrand = tf.placeholder('float', [None, self.num_hidden], name='hrand')
return x, hrand
def _create_variables(self):
""" Create the TensorFlow variables for the model.
:return: tuple(weights(shape(num_visible, num_hidden),
hidden bias(shape(num_hidden)),
visible bias(shape(num_visible)))
"""
W = tf.Variable(tf.random_normal((self.num_visible, self.num_hidden), mean=0.0, stddev=0.01), name='weights')
dw = tf.Variable(tf.zeros([self.num_visible, self.num_hidden]), name = 'derivative-weights')
bh_ = tf.Variable(tf.zeros([self.num_hidden]), name='hidden-bias')
dbh_ = tf.Variable(tf.zeros([self.num_hidden]), name='derivative-hidden-bias')
bv_ = tf.Variable(tf.zeros([self.num_visible]), name='visible-bias')
dbv_ = tf.Variable(tf.zeros([self.num_visible]), name='derivative-visible-bias')
return W, bh_, bv_, dw, dbh_, dbv_
def gibbs_sampling_step(self, visible):
""" Performs one step of gibbs sampling.
:param visible: activations of the visible units
:return: tuple(hidden probs, hidden states, visible probs,
new hidden probs, new hidden states)
"""
hprobs, hstates = self.sample_hidden_from_visible(visible)
vprobs = self.sample_visible_from_hidden(hprobs)
hprobs1, hstates1 = self.sample_hidden_from_visible(vprobs)
return hprobs, hstates, vprobs, hprobs1, hstates1
def sample_hidden_from_visible(self, visible):
""" Sample the hidden units from the visible units.
This is the Positive phase of the Contrastive Divergence algorithm.
:param visible: activations of the visible units
:return: tuple(hidden probabilities, hidden binary states)
"""
hprobs = tf.nn.sigmoid(tf.matmul(visible, self.W) + self.bh_)
hstates = self.sample_prob(hprobs, self.hrand)
return hprobs, hstates
def sample_visible_from_hidden(self, hidden):
""" Sample the visible units from the hidden units.
This is the Negative phase of the Contrastive Divergence algorithm.
:param hidden: activations of the hidden units
:return: visible probabilities
"""
visible_activation = tf.matmul(hidden, tf.transpose(self.W)) + self.bv_
if self.visible_unit_type == 'bin':
vprobs = tf.nn.sigmoid(visible_activation)
elif self.visible_unit_type == 'gauss':
vprobs = tf.truncated_normal((1, self.num_visible), mean=visible_activation, stddev=self.stddev)
else:
vprobs = None
return vprobs
def compute_positive_association(self, visible, hidden_probs, hidden_states):
""" Compute positive associations between visible and hidden units.
:param visible: visible units
:param hidden_probs: hidden units probabilities
:param hidden_states: hidden units states
:return: positive association = dot(visible.T, hidden)
"""
if self.visible_unit_type == 'bin':
positive = tf.matmul(tf.transpose(visible), hidden_states)
elif self.visible_unit_type == 'gauss':
positive = tf.matmul(tf.transpose(visible), hidden_probs)
else:
positive = None
return positive
def _create_model_directory(self):
""" Create the directory for storing the model
:return: self
"""
if not os.path.isdir(self.main_dir):
print("Created dir: ", self.main_dir)
os.mkdir(self.main_dir)
def getRecontructError(self, data):
""" return Reconstruction Error (loss) from data in batch.
:param data: input data of shape num_samples x visible_size
:return: Reconstruction cost for each sample in the batch
"""
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
batch_loss = self.tf_session.run(self.batch_cost,
feed_dict=self._create_feed_dict(data))
return batch_loss
def getFreeEnergy(self, data):
""" return Free Energy from data.
:param data: input data of shape num_samples x visible_size
        :return: Free Energy for each sample (p(x) is proportional to exp(-F(x)))
"""
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
batch_FE = self.tf_session.run(self.batch_free_energy,
feed_dict=self._create_feed_dict(data))
return batch_FE
def getRecontruction(self, data):
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
            batch_reconstruct = self.tf_session.run(self.reconstruct,
feed_dict=self._create_feed_dict(data))
return batch_reconstruct
def load_model(self, shape, gibbs_sampling_steps, model_path):
""" Load a trained model from disk. The shape of the model
(num_visible, num_hidden) and the number of gibbs sampling steps
must be known in order to restore the model.
:param shape: tuple(num_visible, num_hidden)
:param gibbs_sampling_steps:
:param model_path:
:return: self
"""
self.num_visible, self.num_hidden = shape[0], shape[1]
self.gibbs_sampling_steps = gibbs_sampling_steps
tf.reset_default_graph()
self._build_model()
init_op = tf.global_variables_initializer()
self.tf_saver = tf.train.Saver()
with tf.Session() as self.tf_session:
self.tf_session.run(init_op)
self.tf_saver.restore(self.tf_session, model_path)
def get_model_parameters(self):
""" Return the model parameters in the form of numpy arrays.
:return: model parameters
"""
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
return {
'W': self.W.eval(),
'bh_': self.bh_.eval(),
'bv_': self.bv_.eval()
}
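# A minimal usage sketch (fake random binary data and small hyper-parameters are
# invented here; TF1-style graph mode, matching the class above):
if __name__ == '__main__':
    demo_data = (np.random.rand(200, 64) > 0.5).astype(np.float32)
    rbm = RBM(num_visible=64, num_hidden=16, num_epochs=2, batch_size=20,
              main_dir='./rbm_models', verbose=1, plot_training_loss=False)
    rbm.fit(demo_data)
    print(rbm.getRecontructError(demo_data[:5]))  # per-sample reconstruction cost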
#The MIT License (MIT)
#Copyright (c) 2016 Gabriele Angeletti
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
| 38.474849
| 354
| 0.633459
|
import tensorflow as tf
import numpy as np
import os
import matplotlib.pyplot as plt
from tqdm import tqdm
class RBM(object):
def __init__(self,num_visible,num_hidden,visible_unit_type='bin',main_dir='/Users/chamalgomes/Documents/Python/GitLab/DeepLearning/KAI PROJECT/rbm/models',
model_name='rbm_model',gibbs_sampling_steps=1,learning_rate=0.01,momentum=0.9,l2=0.001,batch_size=10,
num_epochs=10,stddev=0.1,verbose=0,plot_training_loss=True):
self.num_visible = num_visible
self.num_hidden = num_hidden
self.main_dir = main_dir
self.model_name = model_name
self.gibbs_sampling_steps = gibbs_sampling_steps
self.learning_rate = learning_rate
self.momentum = momentum
self.l2 = l2
self.batch_size = batch_size
self.num_epochs = num_epochs
self.stddev = stddev
self.verbose = verbose
self.plot_training_loss = plot_training_loss
self.visible_unit_type = visible_unit_type
self._create_model_directory()
self.model_path = os.path.join(self.main_dir, self.model_name)
self.W = None
self.bh_ = None
self.bv_ = None
self.dw = None
self.dbh_ = None
self.dbv_ = None
self.w_upd8 = None
self.bh_upd8 = None
self.bv_upd8 = None
self.encode = None
        self.reconstruct = None
self.loss_function = None
self.batch_cost = None
self.batch_free_energy = None
self.training_losses = []
self.input_data = None
self.hrand = None
self.validation_size = None
self.tf_session = None
self.tf_saver = None
def sample_prob(self,probs,rand):
return tf.nn.relu(tf.sign(probs-rand))
def gen_batches(self,data,batch_size):
data = np.array(data)
for i in range(0,data.shape[0],batch_size):
yield data[i:i+batch_size]
def fit(self,train_set,validation_set = None,restore_previous_model=False):
if validation_set is not None:
self.validation_size = validation_set.shape[0]
tf.reset_default_graph()
self._build_model()
with tf.Session() as self.tf_session:
self._initialize_tf_utilities_and_ops(restore_previous_model)
self._train_model(train_set, validation_set)
self.tf_saver.save(self.tf_session, self.model_path)
if self.plot_training_loss:
plt.plot(self.training_losses)
plt.title("Training batch losses v.s. iteractions")
plt.xlabel("Num of training iteractions")
plt.ylabel("Reconstruction error")
plt.show()
def _initialize_tf_utilities_and_ops(self, restore_previous_model):
init_op = tf.global_variables_initializer()
self.tf_saver = tf.train.Saver()
self.tf_session.run(init_op)
if restore_previous_model:
self.tf_saver.restore(self.tf_session, self.model_path)
def _train_model(self, train_set, validation_set):
for i in range(self.num_epochs):
self._run_train_step(train_set)
if validation_set is not None:
self._run_validation_error(i, validation_set)
def _run_train_step(self,train_set):
np.random.shuffle(train_set)
batches = [_ for _ in self.gen_batches(train_set, self.batch_size)]
updates = [self.w_upd8, self.bh_upd8, self.bv_upd8]
for batch in batches:
if self.plot_training_loss:
_,loss = self.tf_session.run([updates,self.loss_function],feed_dict = self._create_feed_dict(batch))
self.training_losses.append(loss)
else:
self.tf_session.run(updates, feed_dict=self._create_feed_dict(batch))
def _run_validation_error(self, epoch, validation_set):
loss = self.tf_session.run(self.loss_function,
feed_dict=self._create_feed_dict(validation_set))
if self.verbose == 1:
tqdm.write("Validation cost at step %s: %s" % (epoch, loss))
def _create_feed_dict(self, data):
return {
self.input_data: data,
self.hrand: np.random.rand(data.shape[0], self.num_hidden),
}
def _build_model(self):
self.input_data, self.hrand = self._create_placeholders()
self.W, self.bh_, self.bv_, self.dw, self.dbh_, self.dbv_ = self._create_variables()
hprobs0, hstates0, vprobs, hprobs1, hstates1 = self.gibbs_sampling_step(self.input_data)
positive = self.compute_positive_association(self.input_data, hprobs0, hstates0)
nn_input = vprobs
for step in range(self.gibbs_sampling_steps - 1):
hprobs, hstates, vprobs, hprobs1, hstates1 = self.gibbs_sampling_step(nn_input)
nn_input = vprobs
self.reconstruct = vprobs
negative = tf.matmul(tf.transpose(vprobs), hprobs1)
self.encode = hprobs1
dw = positive - negative
self.dw = self.momentum*self.dw + (1-self.momentum)*dw
self.w_upd8 = self.W.assign_add(self.learning_rate*self.dw - self.learning_rate*self.l2*self.W)
dbh_ = tf.reduce_mean(hprobs0 - hprobs1, 0)
self.dbh_ = self.momentum*self.dbh_ + self.learning_rate*dbh_
self.bh_upd8 = self.bh_.assign_add(self.dbh_)
dbv_ = tf.reduce_mean(self.input_data - vprobs, 0)
self.dbv_ = self.momentum*self.dbv_ + self.learning_rate*dbv_
self.bv_upd8 = self.bv_.assign_add(self.dbv_)
self.loss_function = tf.sqrt(tf.reduce_mean(tf.square(self.input_data - vprobs)))
self.batch_cost = tf.sqrt(tf.reduce_mean(tf.square(self.input_data - vprobs), 1))
self._create_free_energy_for_batch()
def _create_free_energy_for_batch(self):
if self.visible_unit_type == 'bin':
self._create_free_energy_for_bin()
elif self.visible_unit_type == 'gauss':
self._create_free_energy_for_gauss()
else:
self.batch_free_energy = None
def _create_free_energy_for_bin(self):
self.batch_free_energy = - (tf.matmul(self.input_data, tf.reshape(self.bv_, [-1, 1])) + tf.reshape(tf.reduce_sum(tf.log(tf.exp(tf.matmul(self.input_data, self.W) + self.bh_) + 1), 1), [-1, 1]))
def _create_free_energy_for_gauss(self):
self.batch_free_energy = - (tf.matmul(self.input_data, tf.reshape(self.bv_, [-1, 1])) - tf.reshape(tf.reduce_sum(0.5 * self.input_data * self.input_data, 1), [-1, 1]) + tf.reshape(tf.reduce_sum(tf.log(tf.exp(tf.matmul(self.input_data, self.W) + self.bh_) + 1), 1), [-1, 1]))
def _create_placeholders(self):
x = tf.placeholder('float', [None, self.num_visible], name='x-input')
hrand = tf.placeholder('float', [None, self.num_hidden], name='hrand')
return x, hrand
def _create_variables(self):
W = tf.Variable(tf.random_normal((self.num_visible, self.num_hidden), mean=0.0, stddev=0.01), name='weights')
dw = tf.Variable(tf.zeros([self.num_visible, self.num_hidden]), name = 'derivative-weights')
bh_ = tf.Variable(tf.zeros([self.num_hidden]), name='hidden-bias')
dbh_ = tf.Variable(tf.zeros([self.num_hidden]), name='derivative-hidden-bias')
bv_ = tf.Variable(tf.zeros([self.num_visible]), name='visible-bias')
dbv_ = tf.Variable(tf.zeros([self.num_visible]), name='derivative-visible-bias')
return W, bh_, bv_, dw, dbh_, dbv_
def gibbs_sampling_step(self, visible):
hprobs, hstates = self.sample_hidden_from_visible(visible)
vprobs = self.sample_visible_from_hidden(hprobs)
hprobs1, hstates1 = self.sample_hidden_from_visible(vprobs)
return hprobs, hstates, vprobs, hprobs1, hstates1
def sample_hidden_from_visible(self, visible):
hprobs = tf.nn.sigmoid(tf.matmul(visible, self.W) + self.bh_)
hstates = self.sample_prob(hprobs, self.hrand)
return hprobs, hstates
def sample_visible_from_hidden(self, hidden):
visible_activation = tf.matmul(hidden, tf.transpose(self.W)) + self.bv_
if self.visible_unit_type == 'bin':
vprobs = tf.nn.sigmoid(visible_activation)
elif self.visible_unit_type == 'gauss':
vprobs = tf.truncated_normal((1, self.num_visible), mean=visible_activation, stddev=self.stddev)
else:
vprobs = None
return vprobs
def compute_positive_association(self, visible, hidden_probs, hidden_states):
if self.visible_unit_type == 'bin':
positive = tf.matmul(tf.transpose(visible), hidden_states)
elif self.visible_unit_type == 'gauss':
positive = tf.matmul(tf.transpose(visible), hidden_probs)
else:
positive = None
return positive
def _create_model_directory(self):
if not os.path.isdir(self.main_dir):
print("Created dir: ", self.main_dir)
os.mkdir(self.main_dir)
def getRecontructError(self, data):
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
batch_loss = self.tf_session.run(self.batch_cost,
feed_dict=self._create_feed_dict(data))
return batch_loss
def getFreeEnergy(self, data):
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
batch_FE = self.tf_session.run(self.batch_free_energy,
feed_dict=self._create_feed_dict(data))
return batch_FE
def getRecontruction(self, data):
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
            batch_reconstruct = self.tf_session.run(self.reconstruct,
feed_dict=self._create_feed_dict(data))
return batch_reconstruct
def load_model(self, shape, gibbs_sampling_steps, model_path):
self.num_visible, self.num_hidden = shape[0], shape[1]
self.gibbs_sampling_steps = gibbs_sampling_steps
tf.reset_default_graph()
self._build_model()
init_op = tf.global_variables_initializer()
self.tf_saver = tf.train.Saver()
with tf.Session() as self.tf_session:
self.tf_session.run(init_op)
self.tf_saver.restore(self.tf_session, model_path)
def get_model_parameters(self):
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
return {
'W': self.W.eval(),
'bh_': self.bh_.eval(),
'bv_': self.bv_.eval()
}
| true
| true
|
7905a14b3082687027c806486229180e9f17e8c2
| 6,386
|
py
|
Python
|
tests/report/test_report_test.py
|
data-mermaid/golem
|
06bbb33275a9f79cd7cc30a0e264b9bafdad3073
|
[
"MIT"
] | null | null | null |
tests/report/test_report_test.py
|
data-mermaid/golem
|
06bbb33275a9f79cd7cc30a0e264b9bafdad3073
|
[
"MIT"
] | null | null | null |
tests/report/test_report_test.py
|
data-mermaid/golem
|
06bbb33275a9f79cd7cc30a0e264b9bafdad3073
|
[
"MIT"
] | null | null | null |
import json
import os
from golem.core import utils
from golem.test_runner import test_runner
from golem.report.execution_report import create_execution_directory
from golem.report.execution_report import create_execution_dir_single_test
from golem.report import test_report
from golem.report.test_report import get_test_case_data
from golem.report.test_report import get_test_debug_log
from golem.report.test_report import create_report_directory
from golem.report.test_report import generate_report
class TestGetTestCaseData:
def test_get_test_case_data(self, project_class, test_utils):
_, project = project_class.activate()
exc = test_utils.execute_random_suite(project)
test_name = exc['exec_data']['tests'][0]['name']
test_set = exc['exec_data']['tests'][0]['test_set']
test_data = get_test_case_data(project, test_name, exc['suite_name'],
exc['timestamp'], test_set)
assert test_data['name'] == exc['tests'][0]
assert isinstance(test_data['debug_log'], list) and len(test_data['debug_log'])
assert isinstance(test_data['info_log'], list) and len(test_data['info_log'])
assert test_data['has_finished'] is True
class TestTestReportDirectory:
def test_test_report_directory(self, project_session):
testdir, project = project_session.activate()
suite = 'suite1'
timestamp = '1.2.3.4'
test = 'test1'
test_set = 'test_set1'
path = test_report.test_report_directory(project, suite, timestamp, test, test_set)
expected = os.path.join(testdir, 'projects', project, 'reports', suite, timestamp,
test, test_set)
assert path == expected
class TestTestReportDirectorySingleTest:
def test_test_report_directory_single_test(self, project_session):
testdir, project = project_session.activate()
timestamp = '1.2.3.4'
test = 'test1'
test_set = 'test_set1'
path = test_report.test_report_directory_single_test(project, test, timestamp, test_set)
expected = os.path.join(testdir, 'projects', project, 'reports', 'single_tests',
test, timestamp, test_set)
assert path == expected
class TestGetTestLog:
def test_get_test_x_log(self, project_class, test_utils):
_, project = project_class.activate()
exc = test_utils.execute_random_suite(project)
test_name = exc['exec_data']['tests'][0]['name']
test_set = exc['exec_data']['tests'][0]['test_set']
log = get_test_debug_log(project, exc['timestamp'], test_name, test_set,
suite=exc['suite_name'])
assert 'root DEBUG test does not have setup function' in log
# inexistent test set
log = get_test_debug_log(project, exc['timestamp'], test_name,
'inexistent_test_set', suite=exc['suite_name'])
assert log is None
# inexistent test
log = get_test_debug_log(project, exc['timestamp'], 'inexistent_test_name',
test_set, suite=exc['suite_name'])
assert log is None
class TestCreateReportDirectory:
def test_create_report_directory_test(self, project_session):
testdir, project = project_session.activate()
timestamp = utils.get_timestamp()
test_name = 'testing_report_001'
exec_dir = create_execution_dir_single_test(project, test_name, timestamp)
directory = create_report_directory(exec_dir, test_name, is_suite=False)
assert os.path.isdir(directory)
def test_create_report_directory_suite(self, project_session):
testdir, project = project_session.activate()
timestamp = utils.get_timestamp()
suite_name = 'suite_foo_002'
test_name = 'testing_report_002'
exec_dir = create_execution_directory(project, suite_name, timestamp)
directory = create_report_directory(exec_dir, test_name, is_suite=True)
assert os.path.isdir(directory)
class TestGenerateReport:
def test_generate_report_with_env(self, project_session):
_, project = project_session.activate()
timestamp = utils.get_timestamp()
test_name = 'testing_report_003'
suite_name = 'suite_foo_003'
exec_dir = create_execution_directory(project, suite_name, timestamp)
report_dir = create_report_directory(exec_dir, test_name, is_suite=True)
test_data = {
'env': {
'name': 'env01',
'url': '1.1.1.1'
},
'var2': 'value2'
}
test_data = test_runner.Data(test_data)
result = {
'result': 'success',
'errors': [],
'description': 'description of the test',
'steps': [
{'message': 'step1', 'screenshot': None, 'error': None},
{'message': 'step2', 'screenshot': None, 'error': None}
],
'test_elapsed_time': 22.22,
'test_timestamp': '2018.02.04.02.16.42.729',
'browser': 'chrome',
'browser_full_name': '',
'set_name': 'set_001',
}
generate_report(report_dir, test_name, test_data, result)
path = os.path.join(report_dir, 'report.json')
with open(path) as report_file:
actual = json.load(report_file)
assert len(actual.items()) == 11
assert actual['test_case'] == test_name
assert actual['result'] == 'success'
assert actual['steps'][0]['message'] == 'step1'
assert actual['steps'][1]['message'] == 'step2'
assert actual['description'] == 'description of the test'
assert actual['errors'] == []
assert actual['test_elapsed_time'] == 22.22
assert actual['test_timestamp'] == '2018.02.04.02.16.42.729'
assert actual['browser'] == 'chrome'
assert actual['environment'] == 'env01'
assert actual['set_name'] == 'set_001'
test_data_a = "{'url': '1.1.1.1', 'name': 'env01'}"
test_data_b = "{'name': 'env01', 'url': '1.1.1.1'}"
assert actual['test_data']['env'] in [test_data_a, test_data_b]
assert actual['test_data']['var2'] == "'value2'"
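# For reference, the report.json shape implied by the assertions above (a
# sketch reconstructed from this test, not taken from golem's own docs):
EXPECTED_REPORT_SHAPE = {
    'test_case': 'testing_report_003',
    'result': 'success',
    'steps': [{'message': 'step1', 'screenshot': None, 'error': None}],
    'description': 'description of the test',
    'errors': [],
    'test_elapsed_time': 22.22,
    'test_timestamp': '2018.02.04.02.16.42.729',
    'browser': 'chrome',
    'environment': 'env01',
    'set_name': 'set_001',
    'test_data': {'env': "{'name': 'env01', 'url': '1.1.1.1'}", 'var2': "'value2'"},
}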
| 41.2
| 96
| 0.628406
|
import json
import os
from golem.core import utils
from golem.test_runner import test_runner
from golem.report.execution_report import create_execution_directory
from golem.report.execution_report import create_execution_dir_single_test
from golem.report import test_report
from golem.report.test_report import get_test_case_data
from golem.report.test_report import get_test_debug_log
from golem.report.test_report import create_report_directory
from golem.report.test_report import generate_report
class TestGetTestCaseData:
def test_get_test_case_data(self, project_class, test_utils):
_, project = project_class.activate()
exc = test_utils.execute_random_suite(project)
test_name = exc['exec_data']['tests'][0]['name']
test_set = exc['exec_data']['tests'][0]['test_set']
test_data = get_test_case_data(project, test_name, exc['suite_name'],
exc['timestamp'], test_set)
assert test_data['name'] == exc['tests'][0]
assert isinstance(test_data['debug_log'], list) and len(test_data['debug_log'])
assert isinstance(test_data['info_log'], list) and len(test_data['info_log'])
assert test_data['has_finished'] is True
class TestTestReportDirectory:
def test_test_report_directory(self, project_session):
testdir, project = project_session.activate()
suite = 'suite1'
timestamp = '1.2.3.4'
test = 'test1'
test_set = 'test_set1'
path = test_report.test_report_directory(project, suite, timestamp, test, test_set)
expected = os.path.join(testdir, 'projects', project, 'reports', suite, timestamp,
test, test_set)
assert path == expected
class TestTestReportDirectorySingleTest:
def test_test_report_directory_single_test(self, project_session):
testdir, project = project_session.activate()
timestamp = '1.2.3.4'
test = 'test1'
test_set = 'test_set1'
path = test_report.test_report_directory_single_test(project, test, timestamp, test_set)
expected = os.path.join(testdir, 'projects', project, 'reports', 'single_tests',
test, timestamp, test_set)
assert path == expected
class TestGetTestLog:
def test_get_test_x_log(self, project_class, test_utils):
_, project = project_class.activate()
exc = test_utils.execute_random_suite(project)
test_name = exc['exec_data']['tests'][0]['name']
test_set = exc['exec_data']['tests'][0]['test_set']
log = get_test_debug_log(project, exc['timestamp'], test_name, test_set,
suite=exc['suite_name'])
assert 'root DEBUG test does not have setup function' in log
log = get_test_debug_log(project, exc['timestamp'], test_name,
'inexistent_test_set', suite=exc['suite_name'])
assert log is None
log = get_test_debug_log(project, exc['timestamp'], 'inexistent_test_name',
test_set, suite=exc['suite_name'])
assert log is None
class TestCreateReportDirectory:
def test_create_report_directory_test(self, project_session):
testdir, project = project_session.activate()
timestamp = utils.get_timestamp()
test_name = 'testing_report_001'
exec_dir = create_execution_dir_single_test(project, test_name, timestamp)
directory = create_report_directory(exec_dir, test_name, is_suite=False)
assert os.path.isdir(directory)
def test_create_report_directory_suite(self, project_session):
testdir, project = project_session.activate()
timestamp = utils.get_timestamp()
suite_name = 'suite_foo_002'
test_name = 'testing_report_002'
exec_dir = create_execution_directory(project, suite_name, timestamp)
directory = create_report_directory(exec_dir, test_name, is_suite=True)
assert os.path.isdir(directory)
class TestGenerateReport:
def test_generate_report_with_env(self, project_session):
_, project = project_session.activate()
timestamp = utils.get_timestamp()
test_name = 'testing_report_003'
suite_name = 'suite_foo_003'
exec_dir = create_execution_directory(project, suite_name, timestamp)
report_dir = create_report_directory(exec_dir, test_name, is_suite=True)
test_data = {
'env': {
'name': 'env01',
'url': '1.1.1.1'
},
'var2': 'value2'
}
test_data = test_runner.Data(test_data)
result = {
'result': 'success',
'errors': [],
'description': 'description of the test',
'steps': [
{'message': 'step1', 'screenshot': None, 'error': None},
{'message': 'step2', 'screenshot': None, 'error': None}
],
'test_elapsed_time': 22.22,
'test_timestamp': '2018.02.04.02.16.42.729',
'browser': 'chrome',
'browser_full_name': '',
'set_name': 'set_001',
}
generate_report(report_dir, test_name, test_data, result)
path = os.path.join(report_dir, 'report.json')
with open(path) as report_file:
actual = json.load(report_file)
assert len(actual.items()) == 11
assert actual['test_case'] == test_name
assert actual['result'] == 'success'
assert actual['steps'][0]['message'] == 'step1'
assert actual['steps'][1]['message'] == 'step2'
assert actual['description'] == 'description of the test'
assert actual['errors'] == []
assert actual['test_elapsed_time'] == 22.22
assert actual['test_timestamp'] == '2018.02.04.02.16.42.729'
assert actual['browser'] == 'chrome'
assert actual['environment'] == 'env01'
assert actual['set_name'] == 'set_001'
test_data_a = "{'url': '1.1.1.1', 'name': 'env01'}"
test_data_b = "{'name': 'env01', 'url': '1.1.1.1'}"
assert actual['test_data']['env'] in [test_data_a, test_data_b]
assert actual['test_data']['var2'] == "'value2'"
| true
| true
|
7905a36d029e7a8f3c5f4b1ec7b16c66f9b20769
| 3,538
|
py
|
Python
|
src/ZServer/medusa/monitor_client.py
|
tseaver/Zope-RFA
|
08634f39b0f8b56403a2a9daaa6ee4479ef0c625
|
[
"ZPL-2.1"
] | 2
|
2015-12-21T10:34:56.000Z
|
2017-09-24T11:07:58.000Z
|
src/ZServer/medusa/monitor_client.py
|
MatthewWilkes/Zope
|
740f934fc9409ae0062e8f0cd6dcfd8b2df00376
|
[
"ZPL-2.1"
] | null | null | null |
src/ZServer/medusa/monitor_client.py
|
MatthewWilkes/Zope
|
740f934fc9409ae0062e8f0cd6dcfd8b2df00376
|
[
"ZPL-2.1"
] | null | null | null |
# -*- Mode: Python; tab-width: 4 -*-
# monitor client, unix version.
import asyncore
import asynchat
import regsub
import socket
import string
import sys
import os
import time
try:
from hashlib import md5
except:
from md5 import new as md5
class stdin_channel (asyncore.file_dispatcher):
def handle_read (self):
data = self.recv(512)
if not data:
print '\nclosed.'
self.sock_channel.close()
try:
self.close()
except:
pass
data = regsub.gsub ('\n', '\r\n', data)
self.sock_channel.push (data)
def writable (self):
return 0
def log (self, *ignore):
pass
class monitor_client (asynchat.async_chat):
def __init__ (self, password, addr=('',8023), socket_type=socket.AF_INET):
asynchat.async_chat.__init__ (self)
self.create_socket (socket_type, socket.SOCK_STREAM)
self.terminator = '\r\n'
self.connect (addr)
self.sent_auth = 0
self.timestamp = ''
self.password = password
def collect_incoming_data (self, data):
if not self.sent_auth:
self.timestamp = self.timestamp + data
else:
sys.stdout.write (data)
sys.stdout.flush()
def found_terminator (self):
if not self.sent_auth:
self.push (hex_digest (self.timestamp + self.password) + '\r\n')
self.sent_auth = 1
else:
print
def handle_close (self):
# close all the channels, which will make the standard main
# loop exit.
map (lambda x: x.close(), asyncore.socket_map.values())
def log (self, *ignore):
pass
class encrypted_monitor_client (monitor_client):
"Wrap push() and recv() with a stream cipher"
def init_cipher (self, cipher, key):
self.outgoing = cipher.new (key)
self.incoming = cipher.new (key)
def push (self, data):
# push the encrypted data instead
return monitor_client.push (self, self.outgoing.encrypt (data))
def recv (self, block_size):
data = monitor_client.recv (self, block_size)
if data:
return self.incoming.decrypt (data)
else:
return data
def hex_digest (s):
m = md5()
m.update (s)
return string.join (
map (lambda x: hex (ord (x))[2:], map (None, m.digest())),
'',
)
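# Note: hex_digest() omits zero-padding (hex(ord(x))[2:] drops the leading zero
# for bytes below 0x10). The monitor server is expected to compute the digest
# the same way, so both ends of the challenge-response still agree.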
if __name__ == '__main__':
if len(sys.argv) == 1:
print 'Usage: %s host port' % sys.argv[0]
sys.exit(0)
if ('-e' in sys.argv):
encrypt = 1
sys.argv.remove ('-e')
else:
encrypt = 0
sys.stderr.write ('Enter Password: ')
sys.stderr.flush()
import os
try:
os.system ('stty -echo')
p = raw_input()
print
finally:
os.system ('stty echo')
stdin = stdin_channel (0)
if len(sys.argv) > 1:
if encrypt:
client = encrypted_monitor_client (p, (sys.argv[1], string.atoi (sys.argv[2])))
import sapphire
client.init_cipher (sapphire, p)
else:
client = monitor_client (p, (sys.argv[1], string.atoi (sys.argv[2])))
else:
# default to local host, 'standard' port
client = monitor_client (p)
stdin.sock_channel = client
asyncore.loop()
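# Usage sketch (Python 2 only; the optional 'sapphire' stream-cipher module is
# assumed to be importable when -e is passed):
#   python monitor_client.py 127.0.0.1 8023
#   python monitor_client.py -e 127.0.0.1 8023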
| 27.215385
| 91
| 0.546354
|
import asyncore
import asynchat
import regsub
import socket
import string
import sys
import os
import time
try:
from hashlib import md5
except:
from md5 import new as md5
class stdin_channel (asyncore.file_dispatcher):
def handle_read (self):
data = self.recv(512)
if not data:
print '\nclosed.'
self.sock_channel.close()
try:
self.close()
except:
pass
data = regsub.gsub ('\n', '\r\n', data)
self.sock_channel.push (data)
def writable (self):
return 0
def log (self, *ignore):
pass
class monitor_client (asynchat.async_chat):
def __init__ (self, password, addr=('',8023), socket_type=socket.AF_INET):
asynchat.async_chat.__init__ (self)
self.create_socket (socket_type, socket.SOCK_STREAM)
self.terminator = '\r\n'
self.connect (addr)
self.sent_auth = 0
self.timestamp = ''
self.password = password
def collect_incoming_data (self, data):
if not self.sent_auth:
self.timestamp = self.timestamp + data
else:
sys.stdout.write (data)
sys.stdout.flush()
def found_terminator (self):
if not self.sent_auth:
self.push (hex_digest (self.timestamp + self.password) + '\r\n')
self.sent_auth = 1
else:
print
def handle_close (self):
map (lambda x: x.close(), asyncore.socket_map.values())
def log (self, *ignore):
pass
class encrypted_monitor_client (monitor_client):
"Wrap push() and recv() with a stream cipher"
def init_cipher (self, cipher, key):
self.outgoing = cipher.new (key)
self.incoming = cipher.new (key)
def push (self, data):
return monitor_client.push (self, self.outgoing.encrypt (data))
def recv (self, block_size):
data = monitor_client.recv (self, block_size)
if data:
return self.incoming.decrypt (data)
else:
return data
def hex_digest (s):
m = md5()
m.update (s)
return string.join (
map (lambda x: hex (ord (x))[2:], map (None, m.digest())),
'',
)
if __name__ == '__main__':
if len(sys.argv) == 1:
print 'Usage: %s host port' % sys.argv[0]
sys.exit(0)
if ('-e' in sys.argv):
encrypt = 1
sys.argv.remove ('-e')
else:
encrypt = 0
sys.stderr.write ('Enter Password: ')
sys.stderr.flush()
import os
try:
os.system ('stty -echo')
p = raw_input()
print
finally:
os.system ('stty echo')
stdin = stdin_channel (0)
if len(sys.argv) > 1:
if encrypt:
client = encrypted_monitor_client (p, (sys.argv[1], string.atoi (sys.argv[2])))
import sapphire
client.init_cipher (sapphire, p)
else:
client = monitor_client (p, (sys.argv[1], string.atoi (sys.argv[2])))
else:
client = monitor_client (p)
stdin.sock_channel = client
asyncore.loop()
| false
| true
|
7905a3b0bc4c122e96bdc46836294cf150d4da7d
| 4,995
|
py
|
Python
|
var/spack/repos/builtin/packages/cbtf-argonavis/package.py
|
adrianjhpc/spack
|
0a9e4fcee57911f2db586aa50c8873d9cca8de92
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2019-09-15T23:55:48.000Z
|
2019-09-15T23:55:48.000Z
|
var/spack/repos/builtin/packages/cbtf-argonavis/package.py
|
adrianjhpc/spack
|
0a9e4fcee57911f2db586aa50c8873d9cca8de92
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/cbtf-argonavis/package.py
|
adrianjhpc/spack
|
0a9e4fcee57911f2db586aa50c8873d9cca8de92
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2017-01-21T17:19:32.000Z
|
2017-01-21T17:19:32.000Z
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class CbtfArgonavis(CMakePackage):
"""CBTF Argo Navis project contains the CUDA collector and supporting
libraries that was done as a result of a DOE SBIR grant.
"""
homepage = "http://sourceforge.net/p/cbtf/wiki/Home/"
git = "https://github.com/OpenSpeedShop/cbtf-argonavis.git"
version('develop', branch='master')
version('1.9.3', branch='1.9.3')
version('1.9.2', branch='1.9.2')
version('1.9.1.2', branch='1.9.1.2')
version('1.9.1.1', branch='1.9.1.1')
version('1.9.1.0', branch='1.9.1.0')
variant('cti', default=False,
description="Build MRNet with the CTI startup option")
variant('crayfe', default=False,
description="build only the FE tool using the runtime_dir \
to point to target build.")
variant('runtime', default=False,
description="build only the runtime libraries and collectors.")
    variant('build_type', default='None', values=('None',),
description='CMake build type')
depends_on("cmake@3.0.2:", type='build')
    # To specify ^elfutils@0.170 on the command line, spack
    # apparently needs/wants this dependency explicitly here
    # even though it is referenced downstream
depends_on("elf", type="link")
# For boost
depends_on("boost@1.66.0:1.69.0")
# For MRNet
depends_on("mrnet@5.0.1-3:+cti", when='@develop+cti', type=('build', 'link', 'run'))
depends_on("mrnet@5.0.1-3:+lwthreads", when='@develop~cti', type=('build', 'link', 'run'))
depends_on("mrnet@5.0.1-3+cti", when='@1.9.1.0:9999+cti', type=('build', 'link', 'run'))
depends_on("mrnet@5.0.1-3+lwthreads", when='@1.9.1.0:9999~cti', type=('build', 'link', 'run'))
# For CBTF
depends_on("cbtf@develop", when='@develop', type=('build', 'link', 'run'))
depends_on("cbtf@1.9.1.0:9999", when='@1.9.1.0:9999', type=('build', 'link', 'run'))
# For CBTF with cti
depends_on("cbtf@develop+cti", when='@develop+cti', type=('build', 'link', 'run'))
depends_on("cbtf@1.9.1.0:9999+cti", when='@1.9.1.0:9999+cti', type=('build', 'link', 'run'))
# For CBTF with runtime
depends_on("cbtf@develop+runtime", when='@develop+runtime', type=('build', 'link', 'run'))
depends_on("cbtf@1.9.1.0:9999+runtime", when='@1.9.1.0:9999+runtime', type=('build', 'link', 'run'))
# For libmonitor
depends_on("libmonitor@2013.02.18+krellpatch", type=('build', 'link', 'run'))
# For PAPI
depends_on("papi@5.4.1:", type=('build', 'link', 'run'))
# For CBTF-KRELL
depends_on("cbtf-krell@develop", when='@develop', type=('build', 'link', 'run'))
depends_on("cbtf-krell@1.9.1.0:9999", when='@1.9.1.0:9999', type=('build', 'link', 'run'))
depends_on('cbtf-krell@develop+cti', when='@develop+cti', type=('build', 'link', 'run'))
depends_on('cbtf-krell@1.9.1.0:9999+cti', when='@1.9.1.0:9999+cti', type=('build', 'link', 'run'))
depends_on('cbtf-krell@develop+runtime', when='@develop+runtime', type=('build', 'link', 'run'))
depends_on('cbtf-krell@1.9.1.0:9999+runtime', when='@1.9.1.0:9999+runtime', type=('build', 'link', 'run'))
# For CUDA
depends_on("cuda")
parallel = False
build_directory = 'build_cbtf_argonavis'
def cmake_args(self):
spec = self.spec
compile_flags = "-O2 -g"
cmake_args = [
'-DCMAKE_CXX_FLAGS=%s' % compile_flags,
'-DCMAKE_C_FLAGS=%s' % compile_flags,
'-DCUDA_DIR=%s' % spec['cuda'].prefix,
'-DCUDA_INSTALL_PATH=%s' % spec['cuda'].prefix,
'-DCUDA_TOOLKIT_ROOT_DIR=%s' % spec['cuda'].prefix,
'-DCUPTI_DIR=%s' % spec['cuda'].prefix.extras.CUPTI,
'-DCUPTI_ROOT=%s' % spec['cuda'].prefix.extras.CUPTI,
'-DPAPI_ROOT=%s' % spec['papi'].prefix,
'-DCBTF_DIR=%s' % spec['cbtf'].prefix,
'-DCBTF_KRELL_DIR=%s' % spec['cbtf-krell'].prefix,
'-DBOOST_ROOT=%s' % spec['boost'].prefix,
'-DBoost_DIR=%s' % spec['boost'].prefix,
'-DBOOST_LIBRARYDIR=%s' % spec['boost'].prefix.lib,
'-DMRNET_DIR=%s' % spec['mrnet'].prefix,
'-DLIBMONITOR_DIR=%s' % spec['libmonitor'].prefix,
'-DBoost_NO_SYSTEM_PATHS=ON']
return cmake_args
def setup_run_environment(self, env):
"""Set up the compile and runtime environments for a package."""
env.prepend_path(
'LD_LIBRARY_PATH',
self.spec['cuda'].prefix + '/extras/CUPTI/lib64')
def setup_build_environment(self, env):
"""Set up the compile and runtime environments for a package."""
env.prepend_path(
'LD_LIBRARY_PATH',
self.spec['cuda'].prefix + '/extras/CUPTI/lib64')
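# Usage sketch (spec and versions are illustrative, not from this
# package's documentation):
#
#     spack install cbtf-argonavis@1.9.3+runtime
#
# Concretization resolves the depends_on() constraints above, and
# cmake_args() turns each dependency prefix into the -D... flags that
# are passed to CMake.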
| 41.280992
| 110
| 0.603003
|
from spack import *
class CbtfArgonavis(CMakePackage):
homepage = "http://sourceforge.net/p/cbtf/wiki/Home/"
git = "https://github.com/OpenSpeedShop/cbtf-argonavis.git"
version('develop', branch='master')
version('1.9.3', branch='1.9.3')
version('1.9.2', branch='1.9.2')
version('1.9.1.2', branch='1.9.1.2')
version('1.9.1.1', branch='1.9.1.1')
version('1.9.1.0', branch='1.9.1.0')
variant('cti', default=False,
description="Build MRNet with the CTI startup option")
variant('crayfe', default=False,
description="build only the FE tool using the runtime_dir \
to point to target build.")
variant('runtime', default=False,
description="build only the runtime libraries and collectors.")
    variant('build_type', default='None', values=('None',),
description='CMake build type')
depends_on("cmake@3.0.2:", type='build')
depends_on("elf", type="link")
depends_on("boost@1.66.0:1.69.0")
depends_on("mrnet@5.0.1-3:+cti", when='@develop+cti', type=('build', 'link', 'run'))
depends_on("mrnet@5.0.1-3:+lwthreads", when='@develop~cti', type=('build', 'link', 'run'))
depends_on("mrnet@5.0.1-3+cti", when='@1.9.1.0:9999+cti', type=('build', 'link', 'run'))
depends_on("mrnet@5.0.1-3+lwthreads", when='@1.9.1.0:9999~cti', type=('build', 'link', 'run'))
depends_on("cbtf@develop", when='@develop', type=('build', 'link', 'run'))
depends_on("cbtf@1.9.1.0:9999", when='@1.9.1.0:9999', type=('build', 'link', 'run'))
depends_on("cbtf@develop+cti", when='@develop+cti', type=('build', 'link', 'run'))
depends_on("cbtf@1.9.1.0:9999+cti", when='@1.9.1.0:9999+cti', type=('build', 'link', 'run'))
depends_on("cbtf@develop+runtime", when='@develop+runtime', type=('build', 'link', 'run'))
depends_on("cbtf@1.9.1.0:9999+runtime", when='@1.9.1.0:9999+runtime', type=('build', 'link', 'run'))
depends_on("libmonitor@2013.02.18+krellpatch", type=('build', 'link', 'run'))
depends_on("papi@5.4.1:", type=('build', 'link', 'run'))
depends_on("cbtf-krell@develop", when='@develop', type=('build', 'link', 'run'))
depends_on("cbtf-krell@1.9.1.0:9999", when='@1.9.1.0:9999', type=('build', 'link', 'run'))
depends_on('cbtf-krell@develop+cti', when='@develop+cti', type=('build', 'link', 'run'))
depends_on('cbtf-krell@1.9.1.0:9999+cti', when='@1.9.1.0:9999+cti', type=('build', 'link', 'run'))
depends_on('cbtf-krell@develop+runtime', when='@develop+runtime', type=('build', 'link', 'run'))
depends_on('cbtf-krell@1.9.1.0:9999+runtime', when='@1.9.1.0:9999+runtime', type=('build', 'link', 'run'))
depends_on("cuda")
parallel = False
build_directory = 'build_cbtf_argonavis'
def cmake_args(self):
spec = self.spec
compile_flags = "-O2 -g"
cmake_args = [
'-DCMAKE_CXX_FLAGS=%s' % compile_flags,
'-DCMAKE_C_FLAGS=%s' % compile_flags,
'-DCUDA_DIR=%s' % spec['cuda'].prefix,
'-DCUDA_INSTALL_PATH=%s' % spec['cuda'].prefix,
'-DCUDA_TOOLKIT_ROOT_DIR=%s' % spec['cuda'].prefix,
'-DCUPTI_DIR=%s' % spec['cuda'].prefix.extras.CUPTI,
'-DCUPTI_ROOT=%s' % spec['cuda'].prefix.extras.CUPTI,
'-DPAPI_ROOT=%s' % spec['papi'].prefix,
'-DCBTF_DIR=%s' % spec['cbtf'].prefix,
'-DCBTF_KRELL_DIR=%s' % spec['cbtf-krell'].prefix,
'-DBOOST_ROOT=%s' % spec['boost'].prefix,
'-DBoost_DIR=%s' % spec['boost'].prefix,
'-DBOOST_LIBRARYDIR=%s' % spec['boost'].prefix.lib,
'-DMRNET_DIR=%s' % spec['mrnet'].prefix,
'-DLIBMONITOR_DIR=%s' % spec['libmonitor'].prefix,
'-DBoost_NO_SYSTEM_PATHS=ON']
return cmake_args
def setup_run_environment(self, env):
env.prepend_path(
'LD_LIBRARY_PATH',
self.spec['cuda'].prefix + '/extras/CUPTI/lib64')
def setup_build_environment(self, env):
env.prepend_path(
'LD_LIBRARY_PATH',
self.spec['cuda'].prefix + '/extras/CUPTI/lib64')
| true
| true
|
7905a3ba74bd26df0decc455f3ac247fafe88384
| 1,551
|
py
|
Python
|
indico/core/settings/models/settings.py
|
UNOG-Indico/UNOG-Indico-v2
|
4fa4393cc1f3b453a69f5e0ea3b52c18337831a5
|
[
"MIT"
] | null | null | null |
indico/core/settings/models/settings.py
|
UNOG-Indico/UNOG-Indico-v2
|
4fa4393cc1f3b453a69f5e0ea3b52c18337831a5
|
[
"MIT"
] | null | null | null |
indico/core/settings/models/settings.py
|
UNOG-Indico/UNOG-Indico-v2
|
4fa4393cc1f3b453a69f5e0ea3b52c18337831a5
|
[
"MIT"
] | null | null | null |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from sqlalchemy.ext.declarative import declared_attr
from indico.core.db.sqlalchemy import db
from indico.core.db.sqlalchemy.util.models import auto_table_args
from indico.core.settings.models.base import JSONSettingsBase, PrincipalSettingsBase
from indico.util.decorators import strict_classproperty
from indico.util.string import return_ascii
class CoreSettingsMixin(object):
@strict_classproperty
@staticmethod
def __auto_table_args():
return (db.Index(None, 'module', 'name'),
{'schema': 'indico'})
class Setting(JSONSettingsBase, CoreSettingsMixin, db.Model):
@strict_classproperty
@staticmethod
def __auto_table_args():
return db.UniqueConstraint('module', 'name'),
@declared_attr
def __table_args__(cls):
return auto_table_args(cls)
@return_ascii
def __repr__(self):
return '<Setting({}, {}, {!r})>'.format(self.module, self.name, self.value)
class SettingPrincipal(PrincipalSettingsBase, CoreSettingsMixin, db.Model):
principal_backref_name = 'in_settings_acls'
@declared_attr
def __table_args__(cls):
return auto_table_args(cls)
@return_ascii
def __repr__(self):
return '<SettingPrincipal({}, {}, {!r})>'.format(self.module, self.name, self.principal)
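# A sketch of what auto_table_args is expected to assemble for Setting,
# assuming it simply merges each class's __auto_table_args (inferred
# from the helper's name, not verified against its implementation):
#
#     (db.UniqueConstraint('module', 'name'),
#      db.Index(None, 'module', 'name'),
#      {'schema': 'indico'})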
| 29.826923
| 96
| 0.727917
|
from __future__ import unicode_literals
from sqlalchemy.ext.declarative import declared_attr
from indico.core.db.sqlalchemy import db
from indico.core.db.sqlalchemy.util.models import auto_table_args
from indico.core.settings.models.base import JSONSettingsBase, PrincipalSettingsBase
from indico.util.decorators import strict_classproperty
from indico.util.string import return_ascii
class CoreSettingsMixin(object):
@strict_classproperty
@staticmethod
def __auto_table_args():
return (db.Index(None, 'module', 'name'),
{'schema': 'indico'})
class Setting(JSONSettingsBase, CoreSettingsMixin, db.Model):
@strict_classproperty
@staticmethod
def __auto_table_args():
return db.UniqueConstraint('module', 'name'),
@declared_attr
def __table_args__(cls):
return auto_table_args(cls)
@return_ascii
def __repr__(self):
return '<Setting({}, {}, {!r})>'.format(self.module, self.name, self.value)
class SettingPrincipal(PrincipalSettingsBase, CoreSettingsMixin, db.Model):
principal_backref_name = 'in_settings_acls'
@declared_attr
def __table_args__(cls):
return auto_table_args(cls)
@return_ascii
def __repr__(self):
return '<SettingPrincipal({}, {}, {!r})>'.format(self.module, self.name, self.principal)
| true
| true
|
7905a491fef8d48c70a33fdb2c698e4d60d37834
| 5,512
|
py
|
Python
|
tests/metrics/test_metrics.py
|
GueroudjiAmal/dask-ml
|
54a8913bfb22775c72ffd781bf29d6e53b5dd363
|
[
"BSD-3-Clause"
] | 803
|
2017-06-16T02:08:30.000Z
|
2022-03-28T14:02:25.000Z
|
tests/metrics/test_metrics.py
|
GueroudjiAmal/dask-ml
|
54a8913bfb22775c72ffd781bf29d6e53b5dd363
|
[
"BSD-3-Clause"
] | 748
|
2017-09-24T20:32:33.000Z
|
2022-03-28T18:49:27.000Z
|
tests/metrics/test_metrics.py
|
GueroudjiAmal/dask-ml
|
54a8913bfb22775c72ffd781bf29d6e53b5dd363
|
[
"BSD-3-Clause"
] | 250
|
2017-06-15T15:57:18.000Z
|
2022-03-25T08:31:02.000Z
|
import dask
import dask.array as da
import numpy as np
import numpy.testing as npt
import pytest
import sklearn
import sklearn.linear_model
import sklearn.metrics
from dask.array.utils import assert_eq
import dask_ml.metrics
import dask_ml.wrappers
def test_pairwise_distances(X_blobs):
centers = X_blobs[::100].compute()
result = dask_ml.metrics.pairwise_distances(X_blobs, centers)
expected = sklearn.metrics.pairwise_distances(X_blobs.compute(), centers)
assert_eq(result, expected, atol=1e-4)
def test_pairwise_distances_argmin_min(X_blobs):
centers = X_blobs[::100].compute()
    # X_blobs has 500 rows per block. Cap scikit-learn's working_memory
    # (in MiB) so that pairwise_distances_argmin_min also processes 500
    # rows per chunk, matching the dask block size.
working_memory = float(80 * 500) / 2 ** 20
ctx = sklearn.config_context(working_memory=working_memory)
with ctx:
a_, b_ = sklearn.metrics.pairwise_distances_argmin_min(
X_blobs.compute(), centers
)
a, b = dask_ml.metrics.pairwise_distances_argmin_min(X_blobs, centers)
a, b = dask.compute(a, b)
npt.assert_array_equal(a, a_)
npt.assert_array_equal(b, b_)
def test_euclidean_distances():
X = da.random.uniform(size=(100, 4), chunks=50)
Y = da.random.uniform(size=(100, 4), chunks=50)
a = dask_ml.metrics.euclidean_distances(X, Y)
b = sklearn.metrics.euclidean_distances(X, Y)
assert_eq(a, b)
x_norm_squared = (X ** 2).sum(axis=1).compute()[:, np.newaxis]
a = dask_ml.metrics.euclidean_distances(X, Y, X_norm_squared=x_norm_squared)
b = sklearn.metrics.euclidean_distances(X, Y, X_norm_squared=x_norm_squared)
assert_eq(a, b)
y_norm_squared = (Y ** 2).sum(axis=1).compute()[np.newaxis, :]
a = dask_ml.metrics.euclidean_distances(X, Y, Y_norm_squared=y_norm_squared)
b = sklearn.metrics.euclidean_distances(X, Y, Y_norm_squared=y_norm_squared)
assert_eq(a, b)
def test_euclidean_distances_same():
X = da.random.uniform(size=(100, 4), chunks=50)
a = dask_ml.metrics.euclidean_distances(X, X)
b = sklearn.metrics.euclidean_distances(X, X)
assert_eq(a, b, atol=1e-4)
a = dask_ml.metrics.euclidean_distances(X)
b = sklearn.metrics.euclidean_distances(X)
assert_eq(a, b, atol=1e-4)
x_norm_squared = (X ** 2).sum(axis=1).compute()[:, np.newaxis]
    a = dask_ml.metrics.euclidean_distances(X, X, Y_norm_squared=x_norm_squared)
    b = sklearn.metrics.euclidean_distances(X, X, Y_norm_squared=x_norm_squared)
    assert_eq(a, b, atol=1e-4)
@pytest.mark.parametrize("kernel", ["linear", "polynomial", "rbf", "sigmoid"])
def test_pairwise_kernels(kernel):
X = da.random.uniform(size=(100, 4), chunks=(50, 4))
a = dask_ml.metrics.pairwise.PAIRWISE_KERNEL_FUNCTIONS[kernel]
b = sklearn.metrics.pairwise.PAIRWISE_KERNEL_FUNCTIONS[kernel]
r1 = a(X)
r2 = b(X.compute())
assert isinstance(X, da.Array)
assert_eq(r1, r2)
@pytest.mark.parametrize("sample_weight", [True, False])
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("labels", [[0, 1], [0, 1, 3], [1, 0]])
@pytest.mark.parametrize("daskify", [True, False])
def test_log_loss(labels, normalize, sample_weight, daskify):
n = 100
c = 25
y_true = np.random.choice(labels, size=n)
y_pred = np.random.uniform(size=(n, len(labels)))
y_pred /= y_pred.sum(1, keepdims=True)
if sample_weight:
sample_weight = np.random.uniform(size=n)
sample_weight /= sample_weight.sum()
dsample_weight = da.from_array(sample_weight, chunks=c)
else:
sample_weight = None
dsample_weight = None
if daskify:
dy_true = da.from_array(y_true, chunks=c)
dy_pred = da.from_array(y_pred, chunks=c)
else:
dy_true = y_true
dy_pred = y_pred
(dsample_weight,) = dask.compute(dsample_weight)
a = sklearn.metrics.log_loss(
y_true, y_pred, normalize=normalize, sample_weight=sample_weight
)
b = dask_ml.metrics.log_loss(
dy_true,
dy_pred,
labels=labels,
normalize=normalize,
sample_weight=dsample_weight,
)
assert_eq(a, b)
@pytest.mark.parametrize(
"yhat",
[
da.from_array(np.array([0.25, 0.25, 0.75, 0.75]), chunks=2),
da.from_array(np.array([0, 0, 1, 1]), chunks=2),
da.from_array(
np.array([[0.75, 0.25], [0.75, 0.25], [0.25, 0.75], [0.25, 0.75]]), chunks=2
),
],
)
def test_log_loss_shape(yhat):
y = da.from_array(np.array([0, 0, 1, 1]), chunks=2)
labels = [0, 1]
a = sklearn.metrics.log_loss(y, yhat)
b = dask_ml.metrics.log_loss(y, yhat, labels=labels)
assert_eq(a, b)
@pytest.mark.parametrize("y", [[0, 1, 1, 0], [0, 1, 2, 0]])
def test_log_loss_scoring(y):
# a_scorer = sklearn.metrics.get_scorer('neg_log_loss')
# b_scorer = dask_ml.metrics.get_scorer('neg_log_loss')
X = da.random.uniform(size=(4, 2), chunks=2)
labels = np.unique(y)
y = da.from_array(np.array(y), chunks=2)
a_scorer = sklearn.metrics.make_scorer(
sklearn.metrics.log_loss,
greater_is_better=False,
needs_proba=True,
labels=labels,
)
b_scorer = sklearn.metrics.make_scorer(
dask_ml.metrics.log_loss,
greater_is_better=False,
needs_proba=True,
labels=labels,
)
clf = dask_ml.wrappers.ParallelPostFit(
sklearn.linear_model.LogisticRegression(
n_jobs=1, solver="lbfgs", multi_class="auto"
)
)
clf.fit(*dask.compute(X, y))
result = b_scorer(clf, X, y)
expected = a_scorer(clf, *dask.compute(X, y))
assert_eq(result, expected)
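# Usage sketch: these tests expect pytest plus a conftest-provided
# X_blobs fixture (assumed here; it is not defined in this file):
#
#     pytest tests/metrics/test_metrics.py -k "pairwise or log_loss"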
| 31.141243
| 88
| 0.662917
|
import dask
import dask.array as da
import numpy as np
import numpy.testing as npt
import pytest
import sklearn
import sklearn.linear_model
import sklearn.metrics
from dask.array.utils import assert_eq
import dask_ml.metrics
import dask_ml.wrappers
def test_pairwise_distances(X_blobs):
centers = X_blobs[::100].compute()
result = dask_ml.metrics.pairwise_distances(X_blobs, centers)
expected = sklearn.metrics.pairwise_distances(X_blobs.compute(), centers)
assert_eq(result, expected, atol=1e-4)
def test_pairwise_distances_argmin_min(X_blobs):
centers = X_blobs[::100].compute()
working_memory = float(80 * 500) / 2 ** 20
ctx = sklearn.config_context(working_memory=working_memory)
with ctx:
a_, b_ = sklearn.metrics.pairwise_distances_argmin_min(
X_blobs.compute(), centers
)
a, b = dask_ml.metrics.pairwise_distances_argmin_min(X_blobs, centers)
a, b = dask.compute(a, b)
npt.assert_array_equal(a, a_)
npt.assert_array_equal(b, b_)
def test_euclidean_distances():
X = da.random.uniform(size=(100, 4), chunks=50)
Y = da.random.uniform(size=(100, 4), chunks=50)
a = dask_ml.metrics.euclidean_distances(X, Y)
b = sklearn.metrics.euclidean_distances(X, Y)
assert_eq(a, b)
x_norm_squared = (X ** 2).sum(axis=1).compute()[:, np.newaxis]
a = dask_ml.metrics.euclidean_distances(X, Y, X_norm_squared=x_norm_squared)
b = sklearn.metrics.euclidean_distances(X, Y, X_norm_squared=x_norm_squared)
assert_eq(a, b)
y_norm_squared = (Y ** 2).sum(axis=1).compute()[np.newaxis, :]
a = dask_ml.metrics.euclidean_distances(X, Y, Y_norm_squared=y_norm_squared)
b = sklearn.metrics.euclidean_distances(X, Y, Y_norm_squared=y_norm_squared)
assert_eq(a, b)
def test_euclidean_distances_same():
X = da.random.uniform(size=(100, 4), chunks=50)
a = dask_ml.metrics.euclidean_distances(X, X)
b = sklearn.metrics.euclidean_distances(X, X)
assert_eq(a, b, atol=1e-4)
a = dask_ml.metrics.euclidean_distances(X)
b = sklearn.metrics.euclidean_distances(X)
assert_eq(a, b, atol=1e-4)
x_norm_squared = (X ** 2).sum(axis=1).compute()[:, np.newaxis]
    a = dask_ml.metrics.euclidean_distances(X, X, Y_norm_squared=x_norm_squared)
    b = sklearn.metrics.euclidean_distances(X, X, Y_norm_squared=x_norm_squared)
    assert_eq(a, b, atol=1e-4)
@pytest.mark.parametrize("kernel", ["linear", "polynomial", "rbf", "sigmoid"])
def test_pairwise_kernels(kernel):
X = da.random.uniform(size=(100, 4), chunks=(50, 4))
a = dask_ml.metrics.pairwise.PAIRWISE_KERNEL_FUNCTIONS[kernel]
b = sklearn.metrics.pairwise.PAIRWISE_KERNEL_FUNCTIONS[kernel]
r1 = a(X)
r2 = b(X.compute())
assert isinstance(X, da.Array)
assert_eq(r1, r2)
@pytest.mark.parametrize("sample_weight", [True, False])
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("labels", [[0, 1], [0, 1, 3], [1, 0]])
@pytest.mark.parametrize("daskify", [True, False])
def test_log_loss(labels, normalize, sample_weight, daskify):
n = 100
c = 25
y_true = np.random.choice(labels, size=n)
y_pred = np.random.uniform(size=(n, len(labels)))
y_pred /= y_pred.sum(1, keepdims=True)
if sample_weight:
sample_weight = np.random.uniform(size=n)
sample_weight /= sample_weight.sum()
dsample_weight = da.from_array(sample_weight, chunks=c)
else:
sample_weight = None
dsample_weight = None
if daskify:
dy_true = da.from_array(y_true, chunks=c)
dy_pred = da.from_array(y_pred, chunks=c)
else:
dy_true = y_true
dy_pred = y_pred
(dsample_weight,) = dask.compute(dsample_weight)
a = sklearn.metrics.log_loss(
y_true, y_pred, normalize=normalize, sample_weight=sample_weight
)
b = dask_ml.metrics.log_loss(
dy_true,
dy_pred,
labels=labels,
normalize=normalize,
sample_weight=dsample_weight,
)
assert_eq(a, b)
@pytest.mark.parametrize(
"yhat",
[
da.from_array(np.array([0.25, 0.25, 0.75, 0.75]), chunks=2),
da.from_array(np.array([0, 0, 1, 1]), chunks=2),
da.from_array(
np.array([[0.75, 0.25], [0.75, 0.25], [0.25, 0.75], [0.25, 0.75]]), chunks=2
),
],
)
def test_log_loss_shape(yhat):
y = da.from_array(np.array([0, 0, 1, 1]), chunks=2)
labels = [0, 1]
a = sklearn.metrics.log_loss(y, yhat)
b = dask_ml.metrics.log_loss(y, yhat, labels=labels)
assert_eq(a, b)
@pytest.mark.parametrize("y", [[0, 1, 1, 0], [0, 1, 2, 0]])
def test_log_loss_scoring(y):
X = da.random.uniform(size=(4, 2), chunks=2)
labels = np.unique(y)
y = da.from_array(np.array(y), chunks=2)
a_scorer = sklearn.metrics.make_scorer(
sklearn.metrics.log_loss,
greater_is_better=False,
needs_proba=True,
labels=labels,
)
b_scorer = sklearn.metrics.make_scorer(
dask_ml.metrics.log_loss,
greater_is_better=False,
needs_proba=True,
labels=labels,
)
clf = dask_ml.wrappers.ParallelPostFit(
sklearn.linear_model.LogisticRegression(
n_jobs=1, solver="lbfgs", multi_class="auto"
)
)
clf.fit(*dask.compute(X, y))
result = b_scorer(clf, X, y)
expected = a_scorer(clf, *dask.compute(X, y))
assert_eq(result, expected)
| true
| true
|
7905a5d753bcd672a340bd2a4c3eafdb6f4ef789
| 1,381
|
py
|
Python
|
setup.py
|
Roming22/python-k3x
|
ce127b22fd88f9edf98c0959ae5abfd579f8ce32
|
[
"MIT"
] | null | null | null |
setup.py
|
Roming22/python-k3x
|
ce127b22fd88f9edf98c0959ae5abfd579f8ce32
|
[
"MIT"
] | null | null | null |
setup.py
|
Roming22/python-k3x
|
ce127b22fd88f9edf98c0959ae5abfd579f8ce32
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="pyk3x",
author="Roming22",
author_email="roming22@gmail.com",
description="API to simplify k3d deployments",
keywords="kuberbetes, k3s, k3d, k3x, cluster",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Roming22/pyk3x",
project_urls={
"Documentation": "https://github.com/Roming22/pyk3x",
"Bug Reports": "https://github.com/Roming22/pyk3x/issues",
"Source Code": "https://github.com/Roming22/pyk3x",
# 'Funding': '',
# 'Say Thanks!': '',
},
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
classifiers=[
# see https://pypi.org/classifiers/
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"Topic :: Software Development :: Build Tools",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3 :: Only",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.7",
)
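# Usage sketch (standard setuptools workflow, not specific to pyk3x):
#
#     python -m build        # build sdist and wheel into dist/
#     pip install dist/pyk3x-*.whl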
| 35.410256
| 66
| 0.6126
|
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="pyk3x",
author="Roming22",
author_email="roming22@gmail.com",
description="API to simplify k3d deployments",
keywords="kuberbetes, k3s, k3d, k3x, cluster",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Roming22/pyk3x",
project_urls={
"Documentation": "https://github.com/Roming22/pyk3x",
"Bug Reports": "https://github.com/Roming22/pyk3x/issues",
"Source Code": "https://github.com/Roming22/pyk3x",
},
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
classifiers=[
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"Topic :: Software Development :: Build Tools",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3 :: Only",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.7",
)
| true
| true
|
7905a6207f544c242111412b4bbc16073524ac14
| 12,391
|
py
|
Python
|
multigrid/patch1d.py
|
python-hydro/hydro_examples
|
55b7750a7644f3e2187f7fe338b6bc1d6fb9c139
|
[
"BSD-3-Clause"
] | 66
|
2018-09-01T10:44:07.000Z
|
2022-03-26T23:50:57.000Z
|
multigrid/patch1d.py
|
python-hydro/hydro_examples
|
55b7750a7644f3e2187f7fe338b6bc1d6fb9c139
|
[
"BSD-3-Clause"
] | null | null | null |
multigrid/patch1d.py
|
python-hydro/hydro_examples
|
55b7750a7644f3e2187f7fe338b6bc1d6fb9c139
|
[
"BSD-3-Clause"
] | 39
|
2018-09-06T20:02:14.000Z
|
2022-02-27T17:05:24.000Z
|
"""
The patch module allows for a grid to be created and for data to be
defined on that grid.
Typical usage:
-- create the grid
grid = Grid1d(nx)
-- create the data that lives on that grid
data = CellCenterData1d(grid)
bcObj = bcObject(xlb="reflect", xrb="reflect"_
data.registerVar("density", bcObj)
...
data.create()
-- initialize some data
dens = data.get_var("density")
dens[:] = ...
-- fill the ghost cells
data.fil_lBC("density")
"""
from __future__ import print_function
import sys
import numpy
valid = ["outflow", "periodic",
"reflect", "reflect-even", "reflect-odd",
"dirichlet", "neumann"]
class BCObject(object):
"""
Boundary condition container -- hold the BCs on each boundary
for a single variable
"""
def __init__(self,
xlb="outflow", xrb="outflow",
odd_reflect_dir=""):
# note: "reflect" is ambiguous and will be converted into
# either reflect-even (the default) or reflect-odd
if xlb not in valid or xrb not in valid:
sys.exit("ERROR: invalid BC")
# -x boundary
self.xlb = xlb
if self.xlb == "reflect":
self.xlb = numpy.where(odd_reflect_dir == "x",
"reflect-odd", "reflect-even")
# +x boundary
self.xrb = xrb
if self.xrb == "reflect":
self.xrb = numpy.where(odd_reflect_dir == "x",
"reflect-odd", "reflect-even")
# periodic checks
if ((xlb == "periodic" and xrb != "periodic") or
(xrb == "periodic" and xlb != "periodic")):
sys.exit("ERROR: both xlb and xrb must be periodic")
def __str__(self):
""" print out some basic information about the BC object """
string = "BCs: -x: %s +x: %s " % \
(self.xlb, self.xrb)
return string
class Grid1d(object):
"""
the 1-d grid class. The grid object will contain the coordinate
information (at various centerings).
A basic (1-d) representation of the layout is:
| | | X | | | | X | | |
+--*--+- // -+--*--X--*--+--*--+- // -+--*--+--*--X--*--+- // -+--*--+
0 ng-1 ng ng+1 ... ng+nx-1 ng+nx 2ng+nx-1
ilo ihi
|<- ng ghostcells->|<---- nx interior zones ----->|<- ng ghostcells->|
The '*' marks the data locations.
"""
def __init__(self, nx, ng=1, xmin=0.0, xmax=1.0):
"""
The class constructor function.
The only data that we require is the number of points that
make up the mesh.
We optionally take the extrema of the domain, number of ghost
cells (assume 1)
"""
# size of grid
self.nx = nx
self.ng = ng
self.qx = 2*ng+nx
# domain extrema
self.xmin = xmin
self.xmax = xmax
# compute the indices of the block interior (excluding guardcells)
self.ilo = ng
self.ihi = ng+nx-1
# define the coordinate information at the left, center, and right
# zone coordinates
self.dx = (xmax - xmin)/nx
self.xl = (numpy.arange(nx+2*ng) - ng)*self.dx + xmin
self.xr = (numpy.arange(nx+2*ng) + 1.0 - ng)*self.dx + xmin
self.x = 0.5*(self.xl + self.xr)
def scratch_array(self):
return numpy.zeros((self.qx), dtype=numpy.float64)
def __str__(self):
""" print out some basic information about the grid object """
return "1-d grid: nx = {}, ng = {}".format(self.nx, self.ng)
class CellCenterData1d(object):
"""
the cell-centered data that lives on a grid.
a CellCenterData1d object is built in a multi-step process before it can
be used. We pass in a grid object to describe where the data
lives:
my_data = patch.CellCenterData1d(myGrid)
register any variables that we expect to live on this patch. Here
bcObject describes the boundary conditions for that variable.
my_data.register_var('density', bcObject)
my_data.register_var('x-momentum', bcObject)
...
finally, finish the initialization of the patch
my_data.create()
This last step actually allocates the storage for the state
variables. Once this is done, the patch is considered to be
locked. New variables cannot be added.
"""
def __init__(self, grid, dtype=numpy.float64):
self.grid = grid
self.dtype = dtype
self.data = None
self.vars = []
self.nvar = 0
self.BCs = {}
# time
self.t = -1
self.initialized = 0
def register_var(self, name, bc_object):
"""
register a variable with CellCenterData1d object. Here we pass in a
BCObject that describes the boundary conditions for that
variable.
"""
if self.initialized == 1:
sys.exit("ERROR: grid already initialized")
self.vars.append(name)
self.nvar += 1
self.BCs[name] = bc_object
def create(self):
"""
called after all the variables are registered and allocates
the storage for the state data
"""
if self.initialized == 1:
sys.exit("ERROR: grid already initialized")
self.data = numpy.zeros((self.nvar, self.grid.qx), dtype=self.dtype)
self.initialized = 1
def __str__(self):
""" print out some basic information about the ccData2d object """
if self.initialized == 0:
mystr = "CellCenterData1d object not yet initialized"
return mystr
mystr = "cc data: nx = {}, ng = {}\n".format(self.grid.nx, self.grid.ng) + \
" nvars = {}\n".format(self.nvar) + \
"variables: \n"
ilo = self.grid.ilo
ihi = self.grid.ihi
for n in range(self.nvar):
mystr += "%16s: min: %15.10f max: %15.10f\n" % \
(self.vars[n],
numpy.min(self.data[n, ilo:ihi+1]),
numpy.max(self.data[n, ilo:ihi+1]))
mystr += "%16s BCs: -x: %-12s +x: %-12s \n" %\
(" ", self.BCs[self.vars[n]].xlb,
self.BCs[self.vars[n]].xrb)
return mystr
def get_var(self, name):
"""
return a data array the variable described by name. Any changes
made to this are automatically reflected in the CellCenterData1d
object.
"""
n = self.vars.index(name)
return self.data[n, :]
def zero(self, name):
n = self.vars.index(name)
self.data[n, :] = 0.0
def fill_BC_all(self):
"""
fill boundary conditions on all variables
"""
for name in self.vars:
self.fill_BC(name)
def fill_BC(self, name):
"""
fill the boundary conditions. This operates on a single state
variable at a time, to allow for maximum flexibility
we do periodic, reflect-even, reflect-odd, and outflow
each variable name has a corresponding bc_object stored in the
ccData2d object -- we refer to this to figure out the action
to take at each boundary.
"""
# there is only a single grid, so every boundary is on
# a physical boundary (except if we are periodic)
# Note: we piggy-back on outflow and reflect-odd for
# Neumann and Dirichlet homogeneous BCs respectively, but
# this only works for a single ghost cell
n = self.vars.index(name)
# -x boundary
if self.BCs[name].xlb == "outflow" or self.BCs[name].xlb == "neumann":
for i in range(0, self.grid.ilo):
self.data[n, i] = self.data[n, self.grid.ilo]
elif self.BCs[name].xlb == "reflect-even":
for i in range(0, self.grid.ilo):
self.data[n, i] = self.data[n, 2*self.grid.ng-i-1]
elif self.BCs[name].xlb in ["reflect-odd", "dirichlet"]:
for i in range(0, self.grid.ilo):
self.data[n, i] = -self.data[n, 2*self.grid.ng-i-1]
elif self.BCs[name].xlb == "periodic":
for i in range(0, self.grid.ilo):
self.data[n, i] = self.data[n, self.grid.ihi-self.grid.ng+i+1]
# +x boundary
if self.BCs[name].xrb == "outflow" or self.BCs[name].xrb == "neumann":
for i in range(self.grid.ihi+1, self.grid.nx+2*self.grid.ng):
self.data[n, i] = self.data[n, self.grid.ihi]
elif self.BCs[name].xrb == "reflect-even":
for i in range(0, self.grid.ng):
i_bnd = self.grid.ihi+1+i
i_src = self.grid.ihi-i
self.data[n, i_bnd] = self.data[n, i_src]
elif self.BCs[name].xrb in ["reflect-odd", "dirichlet"]:
for i in range(0, self.grid.ng):
i_bnd = self.grid.ihi+1+i
i_src = self.grid.ihi-i
self.data[n, i_bnd] = -self.data[n, i_src]
elif self.BCs[name].xrb == "periodic":
for i in range(self.grid.ihi+1, 2*self.grid.ng + self.grid.nx):
self.data[n, i] = self.data[n, i-self.grid.ihi-1+self.grid.ng]
def restrict(self, varname):
"""
restrict the variable varname to a coarser grid (factor of 2
coarser) and return an array with the resulting data (and same
number of ghostcells)
"""
fG = self.grid
fData = self.get_var(varname)
# allocate an array for the coarsely gridded data
ng_c = fG.ng
nx_c = fG.nx//2
cData = numpy.zeros((2*ng_c+nx_c), dtype=self.dtype)
ilo_c = ng_c
ihi_c = ng_c+nx_c-1
# fill the coarse array with the restricted data -- just
# average the 2 fine cells into the corresponding coarse cell
# that encompasses them.
# This is done by shifting our view into the fData array and
# using a stride of 2 in the indexing.
cData[ilo_c:ihi_c+1] = \
0.5*(fData[fG.ilo :fG.ihi+1:2] + fData[fG.ilo+1:fG.ihi+1:2])
return cData
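    # Worked example for restrict (hypothetical values): with ng = 1,
    # fine interior data [1.0, 2.0, 3.0, 4.0] restricts to the coarse
    # interior [1.5, 3.5] -- each coarse zone is the average of the two
    # fine zones it covers.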
def prolong(self, varname):
"""
prolong the data in the current (coarse) grid to a finer
(factor of 2 finer) grid. Return an array with the resulting
data (and same number of ghostcells).
We will reconstruct the data in the zone from the
        zone-averaged variables using the centered-difference slope
        m_x (the x-slope):
            f(x) = m_x (x/dx) + <f>
When averaged over the parent cell, this reproduces <f>.
        Each zone's reconstruction will be averaged over 2 children.
| | | | |
| <f> | --> | | |
| | | 1 | 2 |
+-----------+ +-----+-----+
We will fill each of the finer resolution zones by filling all
the 1's together, using a stride 2 into the fine array. Then
the 2's, this allows us to operate in a vector
fashion. All operations will use the same slopes for their
respective parents.
"""
cG = self.grid
cData = self.get_var(varname)
# allocate an array for the coarsely gridded data
ng_f = cG.ng
nx_f = cG.nx*2
fData = numpy.zeros((2*ng_f+nx_f), dtype=self.dtype)
ilo_f = ng_f
ihi_f = ng_f+nx_f-1
# slopes for the coarse data
m_x = cG.scratch_array()
m_x[cG.ilo:cG.ihi+1] = \
0.5*(cData[cG.ilo+1:cG.ihi+2] - cData[cG.ilo-1:cG.ihi])
# fill the '1' children
fData[ilo_f:ihi_f+1:2] = \
cData[cG.ilo:cG.ihi+1] - 0.25*m_x[cG.ilo:cG.ihi+1]
# fill the '2' children
fData[ilo_f+1:ihi_f+1:2] = \
cData[cG.ilo:cG.ihi+1] + 0.25*m_x[cG.ilo:cG.ihi+1]
return fData
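    # Worked example for prolong (hypothetical values): a coarse zone
    # with average <f> = 2.0 and slope m_x = 1.0 fills its two children
    # with 2.0 - 0.25*1.0 = 1.75 and 2.0 + 0.25*1.0 = 2.25; their mean
    # recovers <f> = 2.0.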
if __name__ == "__main__":
# illustrate basic mesh operations
myg = Grid1d(16, xmax=1.0)
mydata = CellCenterData1d(myg)
bc = BCObject()
mydata.register_var("a", bc)
mydata.create()
a = mydata.get_var("a")
a[:] = numpy.exp(-(myg.x - 0.5)**2/0.1**2)
print(mydata)
| 28.161364
| 84
| 0.548463
|
from __future__ import print_function
import sys
import numpy
valid = ["outflow", "periodic",
"reflect", "reflect-even", "reflect-odd",
"dirichlet", "neumann"]
class BCObject(object):
def __init__(self,
xlb="outflow", xrb="outflow",
odd_reflect_dir=""):
if xlb not in valid or xrb not in valid:
sys.exit("ERROR: invalid BC")
self.xlb = xlb
if self.xlb == "reflect":
self.xlb = numpy.where(odd_reflect_dir == "x",
"reflect-odd", "reflect-even")
self.xrb = xrb
if self.xrb == "reflect":
self.xrb = numpy.where(odd_reflect_dir == "x",
"reflect-odd", "reflect-even")
if ((xlb == "periodic" and xrb != "periodic") or
(xrb == "periodic" and xlb != "periodic")):
sys.exit("ERROR: both xlb and xrb must be periodic")
def __str__(self):
string = "BCs: -x: %s +x: %s " % \
(self.xlb, self.xrb)
return string
class Grid1d(object):
def __init__(self, nx, ng=1, xmin=0.0, xmax=1.0):
self.nx = nx
self.ng = ng
self.qx = 2*ng+nx
self.xmin = xmin
self.xmax = xmax
self.ilo = ng
self.ihi = ng+nx-1
self.dx = (xmax - xmin)/nx
self.xl = (numpy.arange(nx+2*ng) - ng)*self.dx + xmin
self.xr = (numpy.arange(nx+2*ng) + 1.0 - ng)*self.dx + xmin
self.x = 0.5*(self.xl + self.xr)
def scratch_array(self):
return numpy.zeros((self.qx), dtype=numpy.float64)
def __str__(self):
return "1-d grid: nx = {}, ng = {}".format(self.nx, self.ng)
class CellCenterData1d(object):
def __init__(self, grid, dtype=numpy.float64):
self.grid = grid
self.dtype = dtype
self.data = None
self.vars = []
self.nvar = 0
self.BCs = {}
self.t = -1
self.initialized = 0
def register_var(self, name, bc_object):
if self.initialized == 1:
sys.exit("ERROR: grid already initialized")
self.vars.append(name)
self.nvar += 1
self.BCs[name] = bc_object
def create(self):
if self.initialized == 1:
sys.exit("ERROR: grid already initialized")
self.data = numpy.zeros((self.nvar, self.grid.qx), dtype=self.dtype)
self.initialized = 1
def __str__(self):
if self.initialized == 0:
mystr = "CellCenterData1d object not yet initialized"
return mystr
mystr = "cc data: nx = {}, ng = {}\n".format(self.grid.nx, self.grid.ng) + \
" nvars = {}\n".format(self.nvar) + \
"variables: \n"
ilo = self.grid.ilo
ihi = self.grid.ihi
for n in range(self.nvar):
mystr += "%16s: min: %15.10f max: %15.10f\n" % \
(self.vars[n],
numpy.min(self.data[n, ilo:ihi+1]),
numpy.max(self.data[n, ilo:ihi+1]))
mystr += "%16s BCs: -x: %-12s +x: %-12s \n" %\
(" ", self.BCs[self.vars[n]].xlb,
self.BCs[self.vars[n]].xrb)
return mystr
def get_var(self, name):
n = self.vars.index(name)
return self.data[n, :]
def zero(self, name):
n = self.vars.index(name)
self.data[n, :] = 0.0
def fill_BC_all(self):
for name in self.vars:
self.fill_BC(name)
def fill_BC(self, name):
n = self.vars.index(name)
if self.BCs[name].xlb == "outflow" or self.BCs[name].xlb == "neumann":
for i in range(0, self.grid.ilo):
self.data[n, i] = self.data[n, self.grid.ilo]
elif self.BCs[name].xlb == "reflect-even":
for i in range(0, self.grid.ilo):
self.data[n, i] = self.data[n, 2*self.grid.ng-i-1]
elif self.BCs[name].xlb in ["reflect-odd", "dirichlet"]:
for i in range(0, self.grid.ilo):
self.data[n, i] = -self.data[n, 2*self.grid.ng-i-1]
elif self.BCs[name].xlb == "periodic":
for i in range(0, self.grid.ilo):
self.data[n, i] = self.data[n, self.grid.ihi-self.grid.ng+i+1]
if self.BCs[name].xrb == "outflow" or self.BCs[name].xrb == "neumann":
for i in range(self.grid.ihi+1, self.grid.nx+2*self.grid.ng):
self.data[n, i] = self.data[n, self.grid.ihi]
elif self.BCs[name].xrb == "reflect-even":
for i in range(0, self.grid.ng):
i_bnd = self.grid.ihi+1+i
i_src = self.grid.ihi-i
self.data[n, i_bnd] = self.data[n, i_src]
elif self.BCs[name].xrb in ["reflect-odd", "dirichlet"]:
for i in range(0, self.grid.ng):
i_bnd = self.grid.ihi+1+i
i_src = self.grid.ihi-i
self.data[n, i_bnd] = -self.data[n, i_src]
elif self.BCs[name].xrb == "periodic":
for i in range(self.grid.ihi+1, 2*self.grid.ng + self.grid.nx):
self.data[n, i] = self.data[n, i-self.grid.ihi-1+self.grid.ng]
def restrict(self, varname):
fG = self.grid
fData = self.get_var(varname)
ng_c = fG.ng
nx_c = fG.nx//2
cData = numpy.zeros((2*ng_c+nx_c), dtype=self.dtype)
ilo_c = ng_c
ihi_c = ng_c+nx_c-1
cData[ilo_c:ihi_c+1] = \
0.5*(fData[fG.ilo :fG.ihi+1:2] + fData[fG.ilo+1:fG.ihi+1:2])
return cData
def prolong(self, varname):
cG = self.grid
cData = self.get_var(varname)
ng_f = cG.ng
nx_f = cG.nx*2
fData = numpy.zeros((2*ng_f+nx_f), dtype=self.dtype)
ilo_f = ng_f
ihi_f = ng_f+nx_f-1
m_x = cG.scratch_array()
m_x[cG.ilo:cG.ihi+1] = \
0.5*(cData[cG.ilo+1:cG.ihi+2] - cData[cG.ilo-1:cG.ihi])
fData[ilo_f:ihi_f+1:2] = \
cData[cG.ilo:cG.ihi+1] - 0.25*m_x[cG.ilo:cG.ihi+1]
fData[ilo_f+1:ihi_f+1:2] = \
cData[cG.ilo:cG.ihi+1] + 0.25*m_x[cG.ilo:cG.ihi+1]
return fData
if __name__ == "__main__":
myg = Grid1d(16, xmax=1.0)
mydata = CellCenterData1d(myg)
bc = BCObject()
mydata.register_var("a", bc)
mydata.create()
a = mydata.get_var("a")
a[:] = numpy.exp(-(myg.x - 0.5)**2/0.1**2)
print(mydata)
| true
| true
|
7905a7207409a36e542edd41a689eb3240d45b7e
| 432
|
py
|
Python
|
kyu_7/fun_with_lists_length/length.py
|
pedrocodacyorg2/codewars
|
ba3ea81125b6082d867f0ae34c6c9be15e153966
|
[
"Unlicense"
] | 1
|
2022-02-12T05:56:04.000Z
|
2022-02-12T05:56:04.000Z
|
kyu_7/fun_with_lists_length/length.py
|
pedrocodacyorg2/codewars
|
ba3ea81125b6082d867f0ae34c6c9be15e153966
|
[
"Unlicense"
] | 182
|
2020-04-30T00:51:36.000Z
|
2021-09-07T04:15:05.000Z
|
kyu_7/fun_with_lists_length/length.py
|
pedrocodacyorg2/codewars
|
ba3ea81125b6082d867f0ae34c6c9be15e153966
|
[
"Unlicense"
] | 4
|
2020-04-29T22:04:20.000Z
|
2021-07-13T20:04:14.000Z
|
# Created by Egor Kostan.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
def length(head) -> int:
"""
The method length, which accepts a linked list
(head), and returns the length of the list.
:param head:
:return:
"""
i = 0
if head is None:
return 0
while head.next is not None:
head = head.next
i += 1
return i + 1
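# A minimal sketch of the node type this solution assumes (hypothetical;
# the kata's test harness supplies its own node class):
class Node:
    def __init__(self, data, next=None):
        self.data = data
        self.next = next
if __name__ == '__main__':
    # length() counts nodes by walking the .next links:
    print(length(Node(1, Node(2, Node(3)))))  # -> 3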
| 18.782609
| 53
| 0.581019
|
def length(head) -> int:
i = 0
if head is None:
return 0
while head.next is not None:
head = head.next
i += 1
return i + 1
| true
| true
|
7905a72b145a810969c9606e314671b690aa0c55
| 1,741
|
py
|
Python
|
mf_localization_mapping/script/check_topic_size.py
|
kufusha/cabot
|
52a40a39a29f0bd79b6fdd8f961708e09fda9a51
|
[
"MIT"
] | 14
|
2020-12-05T17:15:57.000Z
|
2022-02-09T05:51:09.000Z
|
mf_localization_mapping/script/check_topic_size.py
|
kufusha/cabot
|
52a40a39a29f0bd79b6fdd8f961708e09fda9a51
|
[
"MIT"
] | 22
|
2021-02-25T06:56:23.000Z
|
2022-03-16T13:15:45.000Z
|
mf_localization_mapping/script/check_topic_size.py
|
kufusha/cabot
|
52a40a39a29f0bd79b6fdd8f961708e09fda9a51
|
[
"MIT"
] | 7
|
2021-01-05T16:07:59.000Z
|
2022-03-24T02:48:14.000Z
|
#!/usr/bin/env python
# Copyright (c) 2021 IBM Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import rosbag
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-i","--input_bag", required=True)
args = parser.parse_args()
input_bag = args.input_bag
topic_size_dict = {}
for topic, msg, time in rosbag.Bag(input_bag, 'r').read_messages(raw=True):
topic_size_dict[topic] = topic_size_dict.get(topic, 0) + len(msg[1])
topic_size = list(topic_size_dict.items())
topic_size.sort(key=lambda x: x[1])
print("topic", "size [GB]")
for topic, size in topic_size:
size_gb = size/(1024.0**3)
print(topic, size_gb)
if __name__ == "__main__":
main()
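# Usage sketch (bag name is illustrative):
#
#     ./check_topic_size.py -i recording.bag
#
# prints one line per topic with its accumulated serialized message
# size in GiB, smallest topics first.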
| 37.847826
| 80
| 0.746123
|
import argparse
import rosbag
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-i","--input_bag", required=True)
args = parser.parse_args()
input_bag = args.input_bag
topic_size_dict = {}
for topic, msg, time in rosbag.Bag(input_bag, 'r').read_messages(raw=True):
topic_size_dict[topic] = topic_size_dict.get(topic, 0) + len(msg[1])
topic_size = list(topic_size_dict.items())
topic_size.sort(key=lambda x: x[1])
print("topic", "size [GB]")
for topic, size in topic_size:
size_gb = size/(1024.0**3)
print(topic, size_gb)
if __name__ == "__main__":
main()
| true
| true
|
7905a8b87ee2f87d608a2f25985e31a7375aa98a
| 10,501
|
py
|
Python
|
baselines/deepq/simple.py
|
hyperdo/python2-baselines
|
ef2319bb18fa694ff8db34b667fb3702acebe608
|
[
"MIT"
] | 1
|
2018-04-02T12:48:29.000Z
|
2018-04-02T12:48:29.000Z
|
baselines/deepq/simple.py
|
hyperdo/python2-baselines
|
ef2319bb18fa694ff8db34b667fb3702acebe608
|
[
"MIT"
] | null | null | null |
baselines/deepq/simple.py
|
hyperdo/python2-baselines
|
ef2319bb18fa694ff8db34b667fb3702acebe608
|
[
"MIT"
] | null | null | null |
from backports import tempfile
import numpy as np
import os
import dill
import tensorflow as tf
import zipfile
import baselines.common.tf_util as U
from build_graph import build_act, build_train
from baselines import logger
from baselines.common.schedules import LinearSchedule
from baselines.deepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
class ActWrapper(object):
def __init__(self, act, act_params):
self._act = act
self._act_params = act_params
@staticmethod
def load(path, num_cpu=16):
with open(path, "rb") as f:
model_data, act_params = dill.load(f)
act = build_act(**act_params)
sess = U.make_session(num_cpu=num_cpu)
sess.__enter__()
with tempfile.TemporaryDirectory() as td:
arc_path = os.path.join(td, "packed.zip")
with open(arc_path, "wb") as f:
f.write(model_data)
zipfile.ZipFile(arc_path, 'r', zipfile.ZIP_DEFLATED).extractall(td)
U.load_state(os.path.join(td, "model"))
return ActWrapper(act, act_params)
def __call__(self, *args, **kwargs):
return self._act(*args, **kwargs)
def save(self, path):
"""Save model to a pickle located at `path`"""
with tempfile.TemporaryDirectory() as td:
U.save_state(os.path.join(td, "model"))
arc_name = os.path.join(td, "packed.zip")
with zipfile.ZipFile(arc_name, 'w') as zipf:
for root, dirs, files in os.walk(td):
for fname in files:
file_path = os.path.join(root, fname)
if file_path != arc_name:
zipf.write(file_path, os.path.relpath(file_path, td))
with open(arc_name, "rb") as f:
model_data = f.read()
with open(path, "wb") as f:
dill.dump((model_data, self._act_params), f)
def load(path, num_cpu=16):
"""Load act function that was returned by learn function.
Parameters
----------
path: str
path to the act function pickle
num_cpu: int
number of cpus to use for executing the policy
Returns
-------
act: ActWrapper
function that takes a batch of observations
and returns actions.
"""
return ActWrapper.load(path, num_cpu=num_cpu)
def learn(env,
q_func,
lr=5e-4,
max_timesteps=100000,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
train_freq=1,
batch_size=32,
print_freq=1,
checkpoint_freq=10000,
learning_starts=1000,
gamma=1.0,
target_network_update_freq=500,
prioritized_replay=False,
prioritized_replay_alpha=0.6,
prioritized_replay_beta0=0.4,
prioritized_replay_beta_iters=None,
prioritized_replay_eps=1e-6,
num_cpu=16,
callback=None):
"""Train a deepq model.
Parameters
-------
env : gym.Env
environment to train on
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
lr: float
learning rate for adam optimizer
max_timesteps: int
        number of env steps to run the optimizer for
buffer_size: int
size of the replay buffer
exploration_fraction: float
fraction of entire training period over which the exploration rate is annealed
exploration_final_eps: float
final value of random action probability
train_freq: int
update the model every `train_freq` steps.
batch_size: int
        size of a batch sampled from the replay buffer for training
print_freq: int
how often to print out training progress
set to None to disable printing
checkpoint_freq: int
how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version
        at the end of the training, set this variable to None.
learning_starts: int
how many steps of the model to collect transitions for before learning starts
gamma: float
discount factor
target_network_update_freq: int
update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
if True prioritized replay buffer will be used.
prioritized_replay_alpha: float
alpha parameter for prioritized replay buffer
prioritized_replay_beta0: float
initial value of beta for prioritized replay buffer
prioritized_replay_beta_iters: int
number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None, it defaults to max_timesteps.
prioritized_replay_eps: float
epsilon to add to the TD errors when updating priorities.
num_cpu: int
number of cpus to use for training
callback: (locals, globals) -> None
        function called at every step with the state of the algorithm.
        If the callback returns true, training stops.
Returns
-------
act: ActWrapper
Wrapper over act function. Adds ability to save it and load it.
See header of baselines/deepq/categorical.py for details on the act function.
"""
# Create all the functions necessary to train the model
sess = U.make_session(num_cpu=num_cpu)
sess.__enter__()
def make_obs_ph(name):
return U.BatchInput(env.observation_space.shape, name=name)
act, train, update_target, debug = build_train(
make_obs_ph=make_obs_ph,
q_func=q_func,
num_actions=env.action_space.n,
optimizer=tf.train.AdamOptimizer(learning_rate=lr),
gamma=gamma,
grad_norm_clipping=10
)
act_params = {
'make_obs_ph': make_obs_ph,
'q_func': q_func,
'num_actions': env.action_space.n,
}
# Create the replay buffer
if prioritized_replay:
replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
if prioritized_replay_beta_iters is None:
prioritized_replay_beta_iters = max_timesteps
beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
initial_p=prioritized_replay_beta0,
final_p=1.0)
else:
replay_buffer = ReplayBuffer(buffer_size)
beta_schedule = None
# Create the schedule for exploration starting from 1.
exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps),
initial_p=1.0,
final_p=exploration_final_eps)
# Initialize the parameters and copy them to the target network.
U.initialize()
update_target()
episode_rewards = [0.0]
saved_mean_reward = None
obs = env.reset()
with tempfile.TemporaryDirectory() as td:
model_saved = False
model_file = os.path.join(td, "model")
for t in range(max_timesteps):
if callback is not None:
if callback(locals(), globals()):
break
# Take action and update exploration to the newest value
action = act(np.array(obs)[None], update_eps=exploration.value(t))[0]
new_obs, rew, done, _ = env.step(action)
# Store transition in the replay buffer.
replay_buffer.add(obs, action, rew, new_obs, float(done))
obs = new_obs
episode_rewards[-1] += rew
if done:
obs = env.reset()
episode_rewards.append(0.0)
if t > learning_starts and t % train_freq == 0:
# Minimize the error in Bellman's equation on a batch sampled from replay buffer.
if prioritized_replay:
experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
(obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
else:
obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
weights, batch_idxes = np.ones_like(rewards), None
td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
if prioritized_replay:
new_priorities = np.abs(td_errors) + prioritized_replay_eps
replay_buffer.update_priorities(batch_idxes, new_priorities)
if t > learning_starts and t % target_network_update_freq == 0:
# Update target network periodically.
update_target()
mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
num_episodes = len(episode_rewards)
if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
logger.record_tabular("steps", t)
logger.record_tabular("episodes", num_episodes)
logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
logger.dump_tabular()
if (checkpoint_freq is not None and t > learning_starts and
num_episodes > 100 and t % checkpoint_freq == 0):
if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
if print_freq is not None:
logger.log("Saving model due to mean reward increase: {} -> {}".format(
saved_mean_reward, mean_100ep_reward))
U.save_state(model_file)
model_saved = True
saved_mean_reward = mean_100ep_reward
if model_saved:
if print_freq is not None:
logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
U.load_state(model_file)
return ActWrapper(act, act_params)
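# Usage sketch (environment and model are illustrative; see the
# baselines deepq examples for the canonical versions):
#
#     import gym
#     from baselines import deepq
#     env = gym.make("CartPole-v0")
#     act = learn(env, q_func=deepq.models.mlp([64]), max_timesteps=100000)
#     act.save("cartpole_model.pkl")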
| 39.182836
| 100
| 0.622322
|
from backports import tempfile
import numpy as np
import os
import dill
import tensorflow as tf
import zipfile
import baselines.common.tf_util as U
from build_graph import build_act, build_train
from baselines import logger
from baselines.common.schedules import LinearSchedule
from baselines.deepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
class ActWrapper(object):
def __init__(self, act, act_params):
self._act = act
self._act_params = act_params
@staticmethod
def load(path, num_cpu=16):
with open(path, "rb") as f:
model_data, act_params = dill.load(f)
act = build_act(**act_params)
sess = U.make_session(num_cpu=num_cpu)
sess.__enter__()
with tempfile.TemporaryDirectory() as td:
arc_path = os.path.join(td, "packed.zip")
with open(arc_path, "wb") as f:
f.write(model_data)
zipfile.ZipFile(arc_path, 'r', zipfile.ZIP_DEFLATED).extractall(td)
U.load_state(os.path.join(td, "model"))
return ActWrapper(act, act_params)
def __call__(self, *args, **kwargs):
return self._act(*args, **kwargs)
def save(self, path):
with tempfile.TemporaryDirectory() as td:
U.save_state(os.path.join(td, "model"))
arc_name = os.path.join(td, "packed.zip")
with zipfile.ZipFile(arc_name, 'w') as zipf:
for root, dirs, files in os.walk(td):
for fname in files:
file_path = os.path.join(root, fname)
if file_path != arc_name:
zipf.write(file_path, os.path.relpath(file_path, td))
with open(arc_name, "rb") as f:
model_data = f.read()
with open(path, "wb") as f:
dill.dump((model_data, self._act_params), f)
def load(path, num_cpu=16):
return ActWrapper.load(path, num_cpu=num_cpu)
def learn(env,
q_func,
lr=5e-4,
max_timesteps=100000,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
train_freq=1,
batch_size=32,
print_freq=1,
checkpoint_freq=10000,
learning_starts=1000,
gamma=1.0,
target_network_update_freq=500,
prioritized_replay=False,
prioritized_replay_alpha=0.6,
prioritized_replay_beta0=0.4,
prioritized_replay_beta_iters=None,
prioritized_replay_eps=1e-6,
num_cpu=16,
callback=None):
sess = U.make_session(num_cpu=num_cpu)
sess.__enter__()
def make_obs_ph(name):
return U.BatchInput(env.observation_space.shape, name=name)
act, train, update_target, debug = build_train(
make_obs_ph=make_obs_ph,
q_func=q_func,
num_actions=env.action_space.n,
optimizer=tf.train.AdamOptimizer(learning_rate=lr),
gamma=gamma,
grad_norm_clipping=10
)
act_params = {
'make_obs_ph': make_obs_ph,
'q_func': q_func,
'num_actions': env.action_space.n,
}
if prioritized_replay:
replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
if prioritized_replay_beta_iters is None:
prioritized_replay_beta_iters = max_timesteps
beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
initial_p=prioritized_replay_beta0,
final_p=1.0)
else:
replay_buffer = ReplayBuffer(buffer_size)
beta_schedule = None
exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps),
initial_p=1.0,
final_p=exploration_final_eps)
U.initialize()
update_target()
episode_rewards = [0.0]
saved_mean_reward = None
obs = env.reset()
with tempfile.TemporaryDirectory() as td:
model_saved = False
model_file = os.path.join(td, "model")
for t in range(max_timesteps):
if callback is not None:
if callback(locals(), globals()):
break
action = act(np.array(obs)[None], update_eps=exploration.value(t))[0]
new_obs, rew, done, _ = env.step(action)
replay_buffer.add(obs, action, rew, new_obs, float(done))
obs = new_obs
episode_rewards[-1] += rew
if done:
obs = env.reset()
episode_rewards.append(0.0)
if t > learning_starts and t % train_freq == 0:
if prioritized_replay:
experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
(obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
else:
obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
weights, batch_idxes = np.ones_like(rewards), None
td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
if prioritized_replay:
new_priorities = np.abs(td_errors) + prioritized_replay_eps
replay_buffer.update_priorities(batch_idxes, new_priorities)
if t > learning_starts and t % target_network_update_freq == 0:
# Update target network periodically.
update_target()
mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
num_episodes = len(episode_rewards)
if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
logger.record_tabular("steps", t)
logger.record_tabular("episodes", num_episodes)
logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
logger.dump_tabular()
if (checkpoint_freq is not None and t > learning_starts and
num_episodes > 100 and t % checkpoint_freq == 0):
if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
if print_freq is not None:
logger.log("Saving model due to mean reward increase: {} -> {}".format(
saved_mean_reward, mean_100ep_reward))
U.save_state(model_file)
model_saved = True
saved_mean_reward = mean_100ep_reward
if model_saved:
if print_freq is not None:
logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
U.load_state(model_file)
return ActWrapper(act, act_params)
| true
| true
|
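A minimal usage sketch for the learn() loop above; it assumes an OpenAI Gym environment and the mlp model builder that ships alongside this module in baselines.deepq.models, and the file name is illustrative:

import gym
from baselines.deepq import models

env = gym.make("CartPole-v0")
q_func = models.mlp([64])                        # small fully connected Q-network
act = learn(env, q_func=q_func, max_timesteps=100000, prioritized_replay=True)
act.save("cartpole_model.pkl")                   # ActWrapper.save: weights + act_params

learn() returns the trained policy wrapped in ActWrapper, so the saved file can later be restored with ActWrapper.load (or the module-level load helper defined above).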
7905a92b2cab45b06ea81128cfdb3ad4bc8d66c9
| 1,740
|
py
|
Python
|
jhu2json.py
|
florath/jhu2db
|
a20b75fff505cde3d2c3364943f7a535c962029f
|
[
"MIT"
] | 1
|
2020-03-21T12:34:18.000Z
|
2020-03-21T12:34:18.000Z
|
jhu2json.py
|
florath/jhu2db
|
a20b75fff505cde3d2c3364943f7a535c962029f
|
[
"MIT"
] | null | null | null |
jhu2json.py
|
florath/jhu2db
|
a20b75fff505cde3d2c3364943f7a535c962029f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import csv
import os
import argparse
import dateutil.parser
import json
from bson import ObjectId, json_util  # ObjectId for the 'source' field below; json_util serializes it (both ship with pymongo)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dir", type=str, required=True,
help="name of the data directory")
args = parser.parse_args()
return args.dir
def convert_ts(ts_str):
return dateutil.parser.parse(ts_str).timestamp()
def get_data(data, fname):
with open(fname, newline='') as csvfile:
content = csv.reader(csvfile, delimiter=',', quotechar='"')
for line in content:
            if len(line) < 6:  # indices 0-5 are read below (line[5] is 'recovered')
continue
try:
ts = convert_ts(line[2])
adm = [line[1]]
if line[0] != '':
adm.append(line[0])
data.append(
{
'date': ts,
'adm': adm,
'infected': int(line[3]),
'deaths': int(line[4]),
'recovered': int(line[5]),
'sex': 'NaN', # Not sure why this is needed????
# 'source': 'JHU',
'source': ObjectId("5e75f8d7745bde4a48972b42")
})
except ValueError as ve:
# If there is a problem e.g. converting the ts
# just go on.
pass
def convert2json(dir_name):
data = []
for fname in os.listdir(dir_name):
get_data(data, os.path.join(dir_name, fname))
return data
def main():
dir_name = parse_args()
data = convert2json(dir_name)
    print(json_util.dumps(data))  # json_util handles ObjectId; stdlib json would raise TypeError
if __name__ == '__main__':
main()
| 27.619048
| 71
| 0.486782
|
import csv
import os
import argparse
import dateutil.parser
import json
from bson import ObjectId, json_util  # ObjectId for the 'source' field below; json_util serializes it (both ship with pymongo)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dir", type=str, required=True,
help="name of the data directory")
args = parser.parse_args()
return args.dir
def convert_ts(ts_str):
return dateutil.parser.parse(ts_str).timestamp()
def get_data(data, fname):
with open(fname, newline='') as csvfile:
content = csv.reader(csvfile, delimiter=',', quotechar='"')
for line in content:
            if len(line) < 6:  # indices 0-5 are read below (line[5] is 'recovered')
continue
try:
ts = convert_ts(line[2])
adm = [line[1]]
if line[0] != '':
adm.append(line[0])
data.append(
{
'date': ts,
'adm': adm,
'infected': int(line[3]),
'deaths': int(line[4]),
'recovered': int(line[5]),
'sex': 'NaN', # Not sure why this is needed????
# 'source': 'JHU',
'source': ObjectId("5e75f8d7745bde4a48972b42")
})
except ValueError as ve:
# If there is a problem e.g. converting the ts
# just go on.
pass
def convert2json(dir_name):
data = []
for fname in os.listdir(dir_name):
get_data(data, os.path.join(dir_name, fname))
return data
def main():
dir_name = parse_args()
data = convert2json(dir_name)
    print(json_util.dumps(data))  # json_util handles ObjectId; stdlib json would raise TypeError
if __name__ == '__main__':
main()
| true
| true
|
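To make the column layout that get_data() assumes concrete, here is a sketch of the same conversion on one synthetic row; the order (province, country, timestamp, confirmed, deaths, recovered) is inferred from the indexing above:

import dateutil.parser

row = ["Hubei", "Mainland China", "2020-03-11T10:53:02", "67773", "3046", "49134"]
record = {
    "date": dateutil.parser.parse(row[2]).timestamp(),  # POSIX timestamp (float)
    "adm": [row[1], row[0]],                            # country first, then province
    "infected": int(row[3]),
    "deaths": int(row[4]),
    "recovered": int(row[5]),
}
print(record["adm"], record["infected"])  # ['Mainland China', 'Hubei'] 67773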
7905a96a80489b073db37932d98171c80d9ae352
| 9,983
|
py
|
Python
|
.env/lib/python2.7/site-packages/skimage/segmentation/boundaries.py
|
ViduraPrasangana/faster-rcnn-caffe
|
af6f5ee89c6e82d295bddd192d9dfcebd60d7c52
|
[
"MIT"
] | 1
|
2019-05-24T00:46:48.000Z
|
2019-05-24T00:46:48.000Z
|
.env/lib/python2.7/site-packages/skimage/segmentation/boundaries.py
|
ViduraPrasangana/faster-rcnn-caffe
|
af6f5ee89c6e82d295bddd192d9dfcebd60d7c52
|
[
"MIT"
] | 30
|
2020-04-15T19:37:40.000Z
|
2020-04-22T21:19:35.000Z
|
.env/lib/python2.7/site-packages/skimage/segmentation/boundaries.py
|
ViduraPrasangana/faster-rcnn-caffe
|
af6f5ee89c6e82d295bddd192d9dfcebd60d7c52
|
[
"MIT"
] | 2
|
2020-03-12T23:20:22.000Z
|
2021-02-15T21:54:02.000Z
|
from __future__ import division
import numpy as np
from scipy import ndimage as ndi
from ..morphology import dilation, erosion, square
from ..util import img_as_float, view_as_windows
from ..color import gray2rgb
def _find_boundaries_subpixel(label_img):
"""See ``find_boundaries(..., mode='subpixel')``.
Notes
-----
This function puts in an empty row and column between each *actual*
row and column of the image, for a corresponding shape of $2s - 1$
for every image dimension of size $s$. These "interstitial" rows
and columns are filled as ``True`` if they separate two labels in
`label_img`, ``False`` otherwise.
I used ``view_as_windows`` to get the neighborhood of each pixel.
Then I check whether there are two labels or more in that
neighborhood.
"""
ndim = label_img.ndim
max_label = np.iinfo(label_img.dtype).max
label_img_expanded = np.zeros([(2 * s - 1) for s in label_img.shape],
label_img.dtype)
pixels = (slice(None, None, 2), ) * ndim
label_img_expanded[pixels] = label_img
edges = np.ones(label_img_expanded.shape, dtype=bool)
edges[pixels] = False
label_img_expanded[edges] = max_label
windows = view_as_windows(np.pad(label_img_expanded, 1,
mode='constant', constant_values=0),
(3,) * ndim)
boundaries = np.zeros_like(edges)
for index in np.ndindex(label_img_expanded.shape):
if edges[index]:
values = np.unique(windows[index].ravel())
if len(values) > 2: # single value and max_label
boundaries[index] = True
return boundaries
def find_boundaries(label_img, connectivity=1, mode='thick', background=0):
"""Return bool array where boundaries between labeled regions are True.
Parameters
----------
label_img : array of int or bool
An array in which different regions are labeled with either different
integers or boolean values.
connectivity: int in {1, ..., `label_img.ndim`}, optional
A pixel is considered a boundary pixel if any of its neighbors
has a different label. `connectivity` controls which pixels are
considered neighbors. A connectivity of 1 (default) means
pixels sharing an edge (in 2D) or a face (in 3D) will be
considered neighbors. A connectivity of `label_img.ndim` means
pixels sharing a corner will be considered neighbors.
mode: string in {'thick', 'inner', 'outer', 'subpixel'}
How to mark the boundaries:
- thick: any pixel not completely surrounded by pixels of the
same label (defined by `connectivity`) is marked as a boundary.
This results in boundaries that are 2 pixels thick.
- inner: outline the pixels *just inside* of objects, leaving
background pixels untouched.
- outer: outline pixels in the background around object
boundaries. When two objects touch, their boundary is also
marked.
- subpixel: return a doubled image, with pixels *between* the
original pixels marked as boundary where appropriate.
background: int, optional
For modes 'inner' and 'outer', a definition of a background
label is required. See `mode` for descriptions of these two.
Returns
-------
boundaries : array of bool, same shape as `label_img`
A bool image where ``True`` represents a boundary pixel. For
`mode` equal to 'subpixel', ``boundaries.shape[i]`` is equal
to ``2 * label_img.shape[i] - 1`` for all ``i`` (a pixel is
inserted in between all other pairs of pixels).
Examples
--------
>>> labels = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 5, 5, 5, 0, 0],
... [0, 0, 1, 1, 1, 5, 5, 5, 0, 0],
... [0, 0, 1, 1, 1, 5, 5, 5, 0, 0],
... [0, 0, 1, 1, 1, 5, 5, 5, 0, 0],
... [0, 0, 0, 0, 0, 5, 5, 5, 0, 0],
... [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.uint8)
>>> find_boundaries(labels, mode='thick').astype(np.uint8)
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0, 1, 1, 0],
[0, 1, 1, 0, 1, 1, 0, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
>>> find_boundaries(labels, mode='inner').astype(np.uint8)
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 1, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
>>> find_boundaries(labels, mode='outer').astype(np.uint8)
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0, 1, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 1, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 1, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 1, 0],
[0, 0, 1, 1, 1, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
>>> labels_small = labels[::2, ::3]
>>> labels_small
array([[0, 0, 0, 0],
[0, 0, 5, 0],
[0, 1, 5, 0],
[0, 0, 5, 0],
[0, 0, 0, 0]], dtype=uint8)
>>> find_boundaries(labels_small, mode='subpixel').astype(np.uint8)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 0, 1, 0],
[0, 0, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
>>> bool_image = np.array([[False, False, False, False, False],
... [False, False, False, False, False],
... [False, False, True, True, True],
... [False, False, True, True, True],
... [False, False, True, True, True]], dtype=np.bool)
>>> find_boundaries(bool_image)
array([[False, False, False, False, False],
[False, False, True, True, True],
[False, True, True, True, True],
[False, True, True, False, False],
[False, True, True, False, False]], dtype=bool)
"""
if label_img.dtype == 'bool':
label_img = label_img.astype(np.uint8)
ndim = label_img.ndim
selem = ndi.generate_binary_structure(ndim, connectivity)
if mode != 'subpixel':
boundaries = dilation(label_img, selem) != erosion(label_img, selem)
if mode == 'inner':
foreground_image = (label_img != background)
boundaries &= foreground_image
elif mode == 'outer':
max_label = np.iinfo(label_img.dtype).max
background_image = (label_img == background)
selem = ndi.generate_binary_structure(ndim, ndim)
inverted_background = np.array(label_img, copy=True)
inverted_background[background_image] = max_label
adjacent_objects = ((dilation(label_img, selem) !=
erosion(inverted_background, selem)) &
~background_image)
boundaries &= (background_image | adjacent_objects)
return boundaries
else:
boundaries = _find_boundaries_subpixel(label_img)
return boundaries
def mark_boundaries(image, label_img, color=(1, 1, 0),
outline_color=None, mode='outer', background_label=0):
"""Return image with boundaries between labeled regions highlighted.
Parameters
----------
image : (M, N[, 3]) array
Grayscale or RGB image.
label_img : (M, N) array of int
Label array where regions are marked by different integer values.
color : length-3 sequence, optional
RGB color of boundaries in the output image.
outline_color : length-3 sequence, optional
RGB color surrounding boundaries in the output image. If None, no
outline is drawn.
mode : string in {'thick', 'inner', 'outer', 'subpixel'}, optional
The mode for finding boundaries.
background_label : int, optional
Which label to consider background (this is only useful for
modes ``inner`` and ``outer``).
Returns
-------
marked : (M, N, 3) array of float
An image in which the boundaries between labels are
superimposed on the original image.
See Also
--------
find_boundaries
"""
marked = img_as_float(image, force_copy=True)
if marked.ndim == 2:
marked = gray2rgb(marked)
if mode == 'subpixel':
# Here, we want to interpose an extra line of pixels between
# each original line - except for the last axis which holds
# the RGB information. ``ndi.zoom`` then performs the (cubic)
# interpolation, filling in the values of the interposed pixels
marked = ndi.zoom(marked, [2 - 1/s for s in marked.shape[:-1]] + [1],
mode='reflect')
boundaries = find_boundaries(label_img, mode=mode,
background=background_label)
if outline_color is not None:
outlines = dilation(boundaries, square(3))
marked[outlines] = outline_color
marked[boundaries] = color
return marked
| 43.030172
| 83
| 0.54102
|
from __future__ import division
import numpy as np
from scipy import ndimage as ndi
from ..morphology import dilation, erosion, square
from ..util import img_as_float, view_as_windows
from ..color import gray2rgb
def _find_boundaries_subpixel(label_img):
ndim = label_img.ndim
max_label = np.iinfo(label_img.dtype).max
label_img_expanded = np.zeros([(2 * s - 1) for s in label_img.shape],
label_img.dtype)
pixels = (slice(None, None, 2), ) * ndim
label_img_expanded[pixels] = label_img
edges = np.ones(label_img_expanded.shape, dtype=bool)
edges[pixels] = False
label_img_expanded[edges] = max_label
windows = view_as_windows(np.pad(label_img_expanded, 1,
mode='constant', constant_values=0),
(3,) * ndim)
boundaries = np.zeros_like(edges)
for index in np.ndindex(label_img_expanded.shape):
if edges[index]:
values = np.unique(windows[index].ravel())
if len(values) > 2:
boundaries[index] = True
return boundaries
def find_boundaries(label_img, connectivity=1, mode='thick', background=0):
if label_img.dtype == 'bool':
label_img = label_img.astype(np.uint8)
ndim = label_img.ndim
selem = ndi.generate_binary_structure(ndim, connectivity)
if mode != 'subpixel':
boundaries = dilation(label_img, selem) != erosion(label_img, selem)
if mode == 'inner':
foreground_image = (label_img != background)
boundaries &= foreground_image
elif mode == 'outer':
max_label = np.iinfo(label_img.dtype).max
background_image = (label_img == background)
selem = ndi.generate_binary_structure(ndim, ndim)
inverted_background = np.array(label_img, copy=True)
inverted_background[background_image] = max_label
adjacent_objects = ((dilation(label_img, selem) !=
erosion(inverted_background, selem)) &
~background_image)
boundaries &= (background_image | adjacent_objects)
return boundaries
else:
boundaries = _find_boundaries_subpixel(label_img)
return boundaries
def mark_boundaries(image, label_img, color=(1, 1, 0),
outline_color=None, mode='outer', background_label=0):
marked = img_as_float(image, force_copy=True)
if marked.ndim == 2:
marked = gray2rgb(marked)
if mode == 'subpixel':
marked = ndi.zoom(marked, [2 - 1/s for s in marked.shape[:-1]] + [1],
mode='reflect')
boundaries = find_boundaries(label_img, mode=mode,
background=background_label)
if outline_color is not None:
outlines = dilation(boundaries, square(3))
marked[outlines] = outline_color
marked[boundaries] = color
return marked
| true
| true
|
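A short, self-contained sketch of mark_boundaries() on synthetic data (assumes scikit-image is importable as skimage; shapes and colors are arbitrary):

import numpy as np
from skimage.segmentation import mark_boundaries

image = np.random.rand(64, 64)                  # grayscale input
labels = np.zeros((64, 64), dtype=int)
labels[16:48, 16:48] = 1                        # one square labeled region
marked = mark_boundaries(image, labels, color=(1, 0, 0), mode='outer')
print(marked.shape)                             # (64, 64, 3): RGB float image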
7905a9f05612e87150b5fddeb7b0bb5e45bc8397
| 1,882
|
py
|
Python
|
network/r2unet.py
|
nitsaick/pytorch-kit
|
ebbbc228e2dbae37a055de0d40580140d5a51613
|
[
"MIT"
] | null | null | null |
network/r2unet.py
|
nitsaick/pytorch-kit
|
ebbbc228e2dbae37a055de0d40580140d5a51613
|
[
"MIT"
] | null | null | null |
network/r2unet.py
|
nitsaick/pytorch-kit
|
ebbbc228e2dbae37a055de0d40580140d5a51613
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from .subnet import DoubleConv, UpConv, RRCU
class R2UNet(nn.Module):
def __init__(self, in_ch, out_ch, base_ch=64):
super(R2UNet, self).__init__()
self.inc = DoubleConv(in_ch, base_ch)
self.down = nn.MaxPool2d(kernel_size=2, stride=2)
self.down1 = self._Down(base_ch, base_ch * 2)
self.down2 = self._Down(base_ch * 2, base_ch * 4)
self.down3 = self._Down(base_ch * 4, base_ch * 8)
self.down4 = self._Down(base_ch * 8, base_ch * 16)
self.up1 = self._Up(base_ch * 16, base_ch * 8)
self.up2 = self._Up(base_ch * 8, base_ch * 4)
self.up3 = self._Up(base_ch * 4, base_ch * 2)
self.up4 = self._Up(base_ch * 2, base_ch)
self.outc = nn.Conv2d(base_ch, out_ch, kernel_size=1, stride=1, padding=0)
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
x = self.outc(x)
return x
    class _Down(nn.Module):  # nested block; referenced above as self._Down
        def __init__(self, in_ch, out_ch):
            super(R2UNet._Down, self).__init__()
            self.down = nn.Sequential(
                nn.MaxPool2d(kernel_size=2),
                RRCU(in_ch, out_ch)
            )
        def forward(self, x):
            x = self.down(x)
            return x
    class _Up(nn.Module):  # nested block; referenced above as self._Up
        def __init__(self, in_ch, out_ch):
            super(R2UNet._Up, self).__init__()
            self.up = UpConv(in_ch, out_ch)
            self.conv = RRCU(in_ch, out_ch)
        def forward(self, x1, x2):
            x1 = self.up(x1)
            x = torch.cat([x2, x1], dim=1)
            x = self.conv(x)
            return x
| 31.898305
| 82
| 0.529756
|
import torch
import torch.nn as nn
from .subnet import DoubleConv, UpConv, RRCU
class R2UNet(nn.Module):
def __init__(self, in_ch, out_ch, base_ch=64):
super(R2UNet, self).__init__()
self.inc = DoubleConv(in_ch, base_ch)
self.down = nn.MaxPool2d(kernel_size=2, stride=2)
self.down1 = self._Down(base_ch, base_ch * 2)
self.down2 = self._Down(base_ch * 2, base_ch * 4)
self.down3 = self._Down(base_ch * 4, base_ch * 8)
self.down4 = self._Down(base_ch * 8, base_ch * 16)
self.up1 = self._Up(base_ch * 16, base_ch * 8)
self.up2 = self._Up(base_ch * 8, base_ch * 4)
self.up3 = self._Up(base_ch * 4, base_ch * 2)
self.up4 = self._Up(base_ch * 2, base_ch)
self.outc = nn.Conv2d(base_ch, out_ch, kernel_size=1, stride=1, padding=0)
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
x = self.outc(x)
return x
    class _Down(nn.Module):
        def __init__(self, in_ch, out_ch):
            super(R2UNet._Down, self).__init__()
            self.down = nn.Sequential(
                nn.MaxPool2d(kernel_size=2),
                RRCU(in_ch, out_ch)
            )
        def forward(self, x):
            x = self.down(x)
            return x
    class _Up(nn.Module):
        def __init__(self, in_ch, out_ch):
            super(R2UNet._Up, self).__init__()
            self.up = UpConv(in_ch, out_ch)
            self.conv = RRCU(in_ch, out_ch)
        def forward(self, x1, x2):
            x1 = self.up(x1)
            x = torch.cat([x2, x1], dim=1)
            x = self.conv(x)
            return x
| true
| true
|
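A hypothetical smoke test for R2UNet; it assumes the repo's network/subnet.py (providing DoubleConv, UpConv and RRCU) is importable and that those blocks preserve spatial size apart from the explicit pool/upsample steps:

import torch
from network.r2unet import R2UNet

net = R2UNet(in_ch=3, out_ch=2, base_ch=64)
x = torch.randn(1, 3, 64, 64)   # spatial size must be divisible by 2**4 (four poolings)
y = net(x)
print(y.shape)                  # expected: torch.Size([1, 2, 64, 64])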
7905aabd38f2235115baeb561706b6e995761b90
| 350
|
py
|
Python
|
tests/RST304/sphinx-roles.py
|
xmo-odoo/flake8-rst-docstrings
|
1a6296bbd384584a013e1782ccdc7b260ff9f392
|
[
"MIT"
] | 39
|
2017-06-16T16:16:51.000Z
|
2022-03-24T09:12:18.000Z
|
tests/RST304/sphinx-roles.py
|
xmo-odoo/flake8-rst-docstrings
|
1a6296bbd384584a013e1782ccdc7b260ff9f392
|
[
"MIT"
] | 47
|
2017-08-07T13:40:15.000Z
|
2021-12-09T15:43:49.000Z
|
tests/RST304/sphinx-roles.py
|
xmo-odoo/flake8-rst-docstrings
|
1a6296bbd384584a013e1782ccdc7b260ff9f392
|
[
"MIT"
] | 7
|
2019-04-22T21:06:02.000Z
|
2021-08-29T02:52:43.000Z
|
"""Example reStructuredText from Sphinx-Needs project.
From http://sphinxcontrib-needs.readthedocs.io/en/latest/
but will not work in isolation - cut down just to trigger
RST304.
**Some text**
Wohooo, we have created :need:`req_001`,
which is linked by :need_incoming:`req_001`.
"""
print("sphinx-needs defines its own reStructuredText roles.")
| 23.333333
| 61
| 0.76
|
print("sphinx-needs defines its own reStructuredText roles.")
| true
| true
|
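For context, RST304 flags unknown interpreted-text roles such as :need: above. flake8-rst-docstrings documents an rst-roles setting for whitelisting custom roles; a sketch of that configuration (placed in setup.cfg or tox.ini, values illustrative):

[flake8]
rst-roles = need, need_incoming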
7905ab6d01b3adadea9b58403c0e70acfcba205c
| 15,704
|
py
|
Python
|
src/mtl_trainer.py
|
Daupler/CA-MTL
|
d417b039dee973e32f42ba5c1c346738cd29ab3c
|
[
"MIT"
] | null | null | null |
src/mtl_trainer.py
|
Daupler/CA-MTL
|
d417b039dee973e32f42ba5c1c346738cd29ab3c
|
[
"MIT"
] | 1
|
2021-04-21T14:40:27.000Z
|
2021-04-21T14:40:27.000Z
|
src/mtl_trainer.py
|
Daupler/CA-MTL
|
d417b039dee973e32f42ba5c1c346738cd29ab3c
|
[
"MIT"
] | null | null | null |
import os
import json
import logging
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Callable
import torch
import wandb
import numpy as np
from tqdm.auto import tqdm
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler
from transformers import (
Trainer,
TrainingArguments,
EvalPrediction,
DataCollator,
DefaultDataCollator,
)
from transformers.trainer_utils import PredictionOutput
from transformers.training_args import is_tpu_available
from src.data.task_data_processors import task_output_modes
from src.data.data_utils import compute_task_metrics
if is_tpu_available():
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import torch_xla.distributed.parallel_loader as pl
logger = logging.getLogger(__name__)
@dataclass
class MultiTaskTrainingArguments(TrainingArguments):
use_mt_uncertainty: bool = field(
default=False,
metadata={"help": "Use MT-Uncertainty sampling method"},
)
uniform_mt_sampling: bool = field(
default=False,
metadata={"help": "Sample each task an equal amount to times per epoch."},
)
percent_of_max_data_size: float = field(
default=1.0,
metadata={
"help": "If uniform_mt_sampling=True, specify the samples per task per "
"epoch based on the maximum dataset length. If below 0.0 or above 1.0,"
"it will be set to the closest of 0.0 or 1.0."
},
)
class MultiTaskTrainer(Trainer):
def __init__(
self,
tokenizer,
data_args,
eval_datasets=None,
test_datasets=None,
*args,
**kwargs,
):
super(MultiTaskTrainer, self).__init__(*args, **kwargs)
self.tokenizer = tokenizer
self.data_args = data_args
self.eval_datasets = eval_datasets
self.test_datasets = test_datasets
# self.data_collator = DefaultDataCollator()
def get_train_dataloader(self) -> DataLoader:
if self.args.use_mt_uncertainty:
return self._create_custom_dataloader()
else:
return super().get_train_dataloader()
def _create_custom_dataloader(self):
class MtUcertaintyIterator:
"""Sample tasks using uncertainty measure."""
def __init__(self, my_loader):
self.my_loader = my_loader
self.loader_iters = [iter(loader) for loader in self.my_loader.loaders]
self.loader_iter_sizes = [len(i) for i in self.loader_iters]
self.max_count = len(self.my_loader)
self.batch_count = 0
def __iter__(self):
return self
def __next__(self):
if self.batch_count == self.max_count:
self.batch_count = 0
raise StopIteration()
test_batch = {}
for idx, loader_iter in enumerate(self.loader_iters):
try:
batch = loader_iter.__next__()
except StopIteration:
new_loader_iter = iter(self.my_loader.loaders[idx])
self.loader_iters[idx] = new_loader_iter
batch = new_loader_iter.__next__()
test_batch = self.batchify_data(batch, test_batch)
inputs = {}
for k, v in test_batch.items():
if k not in ["labels"]:
inputs[k] = v.detach().to(self.my_loader.args.device)
with torch.no_grad():
model.select_batch_mode = True
outputs = model(**inputs)
model.select_batch_mode = False
(
test_batch_entropy,
test_batch_entropy_mean,
max_mean_batch_entropy,
) = outputs[-3:]
for _, v in inputs.items():
del v # free GPU mem
del inputs
test_batch_entropy_mean = (
test_batch_entropy_mean / max_mean_batch_entropy
)
test_batch_entropy = test_batch_entropy * test_batch_entropy_mean
select_size = min(
self.my_loader.args.train_batch_size,
test_batch["input_ids"].shape[0],
                )  # handles a final batch smaller than the batch size
top_entropy = torch.topk(test_batch_entropy, select_size)
for k, v in test_batch.items():
test_batch[k] = torch.index_select(v, 0, top_entropy.indices)
self.batch_count += 1
return test_batch
@staticmethod
def batchify_data(data, curr_batch):
for k in data.keys():
if k in curr_batch.keys():
curr_batch[k] = torch.cat((curr_batch[k], data[k]), dim=0)
else:
curr_batch[k] = data[k]
return curr_batch
class CustomLoader:
def __init__(self, loaders, datasets, loader_args):
self.loaders = loaders
self.dataset = datasets
self.args = loader_args
self.current_epoch = 0
def __iter__(self):
iterator = MtUcertaintyIterator(self)
# for determinism across runs
# https://github.com/pytorch/examples/issues/501
for l in self.loaders:
if isinstance(l.sampler, DistributedSampler):
l.sampler.set_epoch(self.current_epoch)
self.current_epoch += 1
return iterator
def __len__(self):
loader_len = [len(loader) for loader in self.loaders]
if self.args.uniform_mt_sampling:
return int(
self.args.percent_of_max_data_size
* max(loader_len)
* len(self.loaders)
/ self.args.train_batch_size
)
                elif getattr(self.args, "uncert_batch", False):  # attr not declared on MultiTaskTrainingArguments; default False
return int(
max(loader_len)
* len(self.loaders)
* self.args.percent_of_max_data_size
)
else:
return sum(loader_len)
model = self.model
tasks = self.data_args.tasks
data_loaders = []
for dataset in self.train_dataset.datasets:
train_sampler = (
RandomSampler(dataset)
if self.args.local_rank == -1
else DistributedSampler(dataset)
)
data_loader = DataLoader(
dataset,
batch_size=self.args.train_batch_size,
sampler=train_sampler,
collate_fn=self.data_collator.collate_batch,
)
data_loaders.append(data_loader)
return CustomLoader(data_loaders, self.train_dataset, self.args)
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
prediction_loss_only: Optional[bool] = None,
context: str = None,
do_test_if_needed: bool = True,
):
datasets = eval_dataset or self.eval_datasets
logger.info("*** Evaluate on dev ***")
for task_name, eval_dataset in datasets.items():
logger.info(task_name)
self.compute_metrics = self.build_compute_metrics_fn(eval_dataset)
eval_dataloader = self.get_eval_dataloader(eval_dataset)
eval_result = self._prediction_loop(
eval_dataloader, description="Evaluation", task_name=task_name,
mode=eval_dataset.mode)
self._log(eval_result.metrics)
for key, value in eval_result.metrics.items():
logger.info(" %s = %s", key, value)
if self.args.tpu_metrics_debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
def predict(
self,
eval_dataset: Optional[Dataset] = None,
prediction_loss_only: Optional[bool] = None,
scoring_model: Optional[str] = None
):
logging.info("*** Test ***")
datasets = eval_dataset or self.test_datasets
for task_name, test_dataset in datasets.items():
logger.info(task_name)
test_dataloader = self.get_test_dataloader(test_dataset)
test_result = self._prediction_loop(
test_dataloader, description="Prediction", task_name=task_name,
mode=test_dataset.mode)
self._log(test_result.metrics)
for key, value in test_result.metrics.items():
logger.info(" %s = %s", key, value)
softmax = torch.nn.Softmax(dim=1)
probs = softmax(torch.Tensor(test_result.predictions)).numpy().astype('float64')
logits = test_result.predictions.astype('float64')
output_mode = task_output_modes[task_name]
            if output_mode == "classification":
                predictions = np.argmax(logits, axis=1)
            else:
                predictions = np.squeeze(logits)  # regression: raw scores (avoids NameError in the loop below)
self.run_name = wandb.run.name
output_test_file = os.path.join(
self.args.output_dir,
f"{task_name}_test_iter_{self.run_name}.tsv",
)
if scoring_model is None:
scoring_model = self.run_name
if self.is_world_master():
with open(output_test_file, "w") as writer:
logger.info("***** Test results {} *****".format(task_name))
logger.info("***** Writing as {} *****".format(self.run_name))
if output_mode == "regression":
writer.write("index\tprediction\n")
else:
writer.write("index\tscoring_model\tprediction\tprobability\tlogits\n")
for index, item in enumerate(predictions):
if output_mode == "regression":
writer.write("%d\t%3.3f\n" % (index, item))
else:
i_probs = probs[index,:]
i_logits = logits[index,:]
i_logits = json.dumps(dict(zip(test_dataset.get_labels(), i_logits)))
writer.write(
"%d\t%s\t%s\t%3.6f\t%s\n" % (
index, scoring_model, test_dataset.get_labels()[item],
i_probs[item], i_logits)
)
def _prediction_loop(
self, dataloader: DataLoader, description: str, task_name: str, mode: str,
prediction_loss_only: Optional[bool] = None,
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by `evaluate()` and `predict()`.
Works both with or without labels.
"""
prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else self.prediction_loss_only
model = self.model
# multi-gpu eval
if self.args.n_gpu > 1:
model = torch.nn.DataParallel(model)
else:
model = self.model
# Note: in torch.distributed mode, there's no point in wrapping the model
# inside a DistributedDataParallel as we'll be under `no_grad` anyways.
batch_size = dataloader.batch_size
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", self.num_examples(dataloader))
logger.info(" Batch size = %d", batch_size)
eval_losses: List[float] = []
preds: torch.Tensor = None
label_ids: torch.Tensor = None
model.eval()
if is_tpu_available():
dataloader = pl.ParallelLoader(dataloader,
[self.args.device]).per_device_loader(self.args.device)
for inputs in tqdm(dataloader, desc=description):
has_labels = any(
inputs.get(k) is not None for k in ["labels", "lm_labels", "masked_lm_labels"])
for k, v in inputs.items():
inputs[k] = v.to(self.args.device)
with torch.no_grad():
outputs = model(**inputs)
if has_labels:
step_eval_loss, logits = outputs[:2]
eval_losses += [step_eval_loss.mean().item()]
else:
logits = outputs[0]
if not prediction_loss_only:
if preds is None:
preds = logits.detach()
else:
preds = torch.cat((preds, logits.detach()), dim=0)
if inputs.get("labels") is not None:
if label_ids is None:
label_ids = inputs["labels"].detach()
else:
label_ids = torch.cat((label_ids, inputs["labels"].detach()), dim=0)
if self.args.local_rank != -1:
# In distributed mode, concatenate all results from all nodes:
if preds is not None:
preds = self.distributed_concat(preds,
num_total_examples=self.num_examples(dataloader))
if label_ids is not None:
label_ids = self.distributed_concat(label_ids,
num_total_examples=self.num_examples(dataloader))
elif is_tpu_available():
# tpu-comment: Get all predictions and labels from all worker shards of eval dataset
if preds is not None:
preds = xm.mesh_reduce("eval_preds", preds, torch.cat)
if label_ids is not None:
label_ids = xm.mesh_reduce("eval_label_ids", label_ids, torch.cat)
# Finally, turn the aggregated tensors into numpy arrays.
if preds is not None:
preds = preds.cpu().numpy()
if label_ids is not None:
label_ids = label_ids.cpu().numpy()
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
if len(eval_losses) > 0:
metrics[f"{task_name}_{mode}_loss"] = np.mean(eval_losses)
# Prefix all keys with {task_name}_{model}_
for key in list(metrics.keys()):
if not key.startswith(f"{task_name}_{mode}_"):
metrics[f"{task_name}_{mode}_{key}"] = metrics.pop(key)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
@staticmethod
def build_compute_metrics_fn(
eval_dataset
) -> Callable[[EvalPrediction], Dict]:
def compute_metrics_fn(p: EvalPrediction):
return compute_task_metrics(eval_dataset.task_name, p)
return compute_metrics_fn
| 38.967742
| 118
| 0.550369
|
import os
import json
import logging
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Callable
import torch
import wandb
import numpy as np
from tqdm.auto import tqdm
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler
from transformers import (
Trainer,
TrainingArguments,
EvalPrediction,
DataCollator,
DefaultDataCollator,
)
from transformers.trainer_utils import PredictionOutput
from transformers.training_args import is_tpu_available
from src.data.task_data_processors import task_output_modes
from src.data.data_utils import compute_task_metrics
if is_tpu_available():
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import torch_xla.distributed.parallel_loader as pl
logger = logging.getLogger(__name__)
@dataclass
class MultiTaskTrainingArguments(TrainingArguments):
use_mt_uncertainty: bool = field(
default=False,
metadata={"help": "Use MT-Uncertainty sampling method"},
)
uniform_mt_sampling: bool = field(
default=False,
metadata={"help": "Sample each task an equal amount to times per epoch."},
)
percent_of_max_data_size: float = field(
default=1.0,
metadata={
"help": "If uniform_mt_sampling=True, specify the samples per task per "
"epoch based on the maximum dataset length. If below 0.0 or above 1.0,"
"it will be set to the closest of 0.0 or 1.0."
},
)
class MultiTaskTrainer(Trainer):
def __init__(
self,
tokenizer,
data_args,
eval_datasets=None,
test_datasets=None,
*args,
**kwargs,
):
super(MultiTaskTrainer, self).__init__(*args, **kwargs)
self.tokenizer = tokenizer
self.data_args = data_args
self.eval_datasets = eval_datasets
self.test_datasets = test_datasets
def get_train_dataloader(self) -> DataLoader:
if self.args.use_mt_uncertainty:
return self._create_custom_dataloader()
else:
return super().get_train_dataloader()
def _create_custom_dataloader(self):
class MtUcertaintyIterator:
def __init__(self, my_loader):
self.my_loader = my_loader
self.loader_iters = [iter(loader) for loader in self.my_loader.loaders]
self.loader_iter_sizes = [len(i) for i in self.loader_iters]
self.max_count = len(self.my_loader)
self.batch_count = 0
def __iter__(self):
return self
def __next__(self):
if self.batch_count == self.max_count:
self.batch_count = 0
raise StopIteration()
test_batch = {}
for idx, loader_iter in enumerate(self.loader_iters):
try:
batch = loader_iter.__next__()
except StopIteration:
new_loader_iter = iter(self.my_loader.loaders[idx])
self.loader_iters[idx] = new_loader_iter
batch = new_loader_iter.__next__()
test_batch = self.batchify_data(batch, test_batch)
inputs = {}
for k, v in test_batch.items():
if k not in ["labels"]:
inputs[k] = v.detach().to(self.my_loader.args.device)
with torch.no_grad():
model.select_batch_mode = True
outputs = model(**inputs)
model.select_batch_mode = False
(
test_batch_entropy,
test_batch_entropy_mean,
max_mean_batch_entropy,
) = outputs[-3:]
for _, v in inputs.items():
del v
del inputs
test_batch_entropy_mean = (
test_batch_entropy_mean / max_mean_batch_entropy
)
test_batch_entropy = test_batch_entropy * test_batch_entropy_mean
select_size = min(
self.my_loader.args.train_batch_size,
test_batch["input_ids"].shape[0],
)
top_entropy = torch.topk(test_batch_entropy, select_size)
for k, v in test_batch.items():
test_batch[k] = torch.index_select(v, 0, top_entropy.indices)
self.batch_count += 1
return test_batch
@staticmethod
def batchify_data(data, curr_batch):
for k in data.keys():
if k in curr_batch.keys():
curr_batch[k] = torch.cat((curr_batch[k], data[k]), dim=0)
else:
curr_batch[k] = data[k]
return curr_batch
class CustomLoader:
def __init__(self, loaders, datasets, loader_args):
self.loaders = loaders
self.dataset = datasets
self.args = loader_args
self.current_epoch = 0
def __iter__(self):
iterator = MtUcertaintyIterator(self)
for l in self.loaders:
if isinstance(l.sampler, DistributedSampler):
l.sampler.set_epoch(self.current_epoch)
self.current_epoch += 1
return iterator
def __len__(self):
loader_len = [len(loader) for loader in self.loaders]
if self.args.uniform_mt_sampling:
return int(
self.args.percent_of_max_data_size
* max(loader_len)
* len(self.loaders)
/ self.args.train_batch_size
)
                elif getattr(self.args, "uncert_batch", False):
return int(
max(loader_len)
* len(self.loaders)
* self.args.percent_of_max_data_size
)
else:
return sum(loader_len)
model = self.model
tasks = self.data_args.tasks
data_loaders = []
for dataset in self.train_dataset.datasets:
train_sampler = (
RandomSampler(dataset)
if self.args.local_rank == -1
else DistributedSampler(dataset)
)
data_loader = DataLoader(
dataset,
batch_size=self.args.train_batch_size,
sampler=train_sampler,
collate_fn=self.data_collator.collate_batch,
)
data_loaders.append(data_loader)
return CustomLoader(data_loaders, self.train_dataset, self.args)
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
prediction_loss_only: Optional[bool] = None,
context: str = None,
do_test_if_needed: bool = True,
):
datasets = eval_dataset or self.eval_datasets
logger.info("*** Evaluate on dev ***")
for task_name, eval_dataset in datasets.items():
logger.info(task_name)
self.compute_metrics = self.build_compute_metrics_fn(eval_dataset)
eval_dataloader = self.get_eval_dataloader(eval_dataset)
eval_result = self._prediction_loop(
eval_dataloader, description="Evaluation", task_name=task_name,
mode=eval_dataset.mode)
self._log(eval_result.metrics)
for key, value in eval_result.metrics.items():
logger.info(" %s = %s", key, value)
if self.args.tpu_metrics_debug:
xm.master_print(met.metrics_report())
def predict(
self,
eval_dataset: Optional[Dataset] = None,
prediction_loss_only: Optional[bool] = None,
scoring_model: Optional[str] = None
):
logging.info("*** Test ***")
datasets = eval_dataset or self.test_datasets
for task_name, test_dataset in datasets.items():
logger.info(task_name)
test_dataloader = self.get_test_dataloader(test_dataset)
test_result = self._prediction_loop(
test_dataloader, description="Prediction", task_name=task_name,
mode=test_dataset.mode)
self._log(test_result.metrics)
for key, value in test_result.metrics.items():
logger.info(" %s = %s", key, value)
softmax = torch.nn.Softmax(dim=1)
probs = softmax(torch.Tensor(test_result.predictions)).numpy().astype('float64')
logits = test_result.predictions.astype('float64')
output_mode = task_output_modes[task_name]
            if output_mode == "classification":
                predictions = np.argmax(logits, axis=1)
            else:
                predictions = np.squeeze(logits)
self.run_name = wandb.run.name
output_test_file = os.path.join(
self.args.output_dir,
f"{task_name}_test_iter_{self.run_name}.tsv",
)
if scoring_model is None:
scoring_model = self.run_name
if self.is_world_master():
with open(output_test_file, "w") as writer:
logger.info("***** Test results {} *****".format(task_name))
logger.info("***** Writing as {} *****".format(self.run_name))
if output_mode == "regression":
writer.write("index\tprediction\n")
else:
writer.write("index\tscoring_model\tprediction\tprobability\tlogits\n")
for index, item in enumerate(predictions):
if output_mode == "regression":
writer.write("%d\t%3.3f\n" % (index, item))
else:
i_probs = probs[index,:]
i_logits = logits[index,:]
i_logits = json.dumps(dict(zip(test_dataset.get_labels(), i_logits)))
writer.write(
"%d\t%s\t%s\t%3.6f\t%s\n" % (
index, scoring_model, test_dataset.get_labels()[item],
i_probs[item], i_logits)
)
def _prediction_loop(
self, dataloader: DataLoader, description: str, task_name: str, mode: str,
prediction_loss_only: Optional[bool] = None,
) -> PredictionOutput:
prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else self.prediction_loss_only
model = self.model
if self.args.n_gpu > 1:
model = torch.nn.DataParallel(model)
else:
model = self.model
        # Note: in torch.distributed mode, there's no point in wrapping the model
        # inside a DistributedDataParallel as we'll be under `no_grad` anyways.
batch_size = dataloader.batch_size
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", self.num_examples(dataloader))
logger.info(" Batch size = %d", batch_size)
eval_losses: List[float] = []
preds: torch.Tensor = None
label_ids: torch.Tensor = None
model.eval()
if is_tpu_available():
dataloader = pl.ParallelLoader(dataloader,
[self.args.device]).per_device_loader(self.args.device)
for inputs in tqdm(dataloader, desc=description):
has_labels = any(
inputs.get(k) is not None for k in ["labels", "lm_labels", "masked_lm_labels"])
for k, v in inputs.items():
inputs[k] = v.to(self.args.device)
with torch.no_grad():
outputs = model(**inputs)
if has_labels:
step_eval_loss, logits = outputs[:2]
eval_losses += [step_eval_loss.mean().item()]
else:
logits = outputs[0]
if not prediction_loss_only:
if preds is None:
preds = logits.detach()
else:
preds = torch.cat((preds, logits.detach()), dim=0)
if inputs.get("labels") is not None:
if label_ids is None:
label_ids = inputs["labels"].detach()
else:
label_ids = torch.cat((label_ids, inputs["labels"].detach()), dim=0)
if self.args.local_rank != -1:
if preds is not None:
preds = self.distributed_concat(preds,
num_total_examples=self.num_examples(dataloader))
if label_ids is not None:
label_ids = self.distributed_concat(label_ids,
num_total_examples=self.num_examples(dataloader))
elif is_tpu_available():
if preds is not None:
preds = xm.mesh_reduce("eval_preds", preds, torch.cat)
if label_ids is not None:
label_ids = xm.mesh_reduce("eval_label_ids", label_ids, torch.cat)
if preds is not None:
preds = preds.cpu().numpy()
if label_ids is not None:
label_ids = label_ids.cpu().numpy()
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
if len(eval_losses) > 0:
metrics[f"{task_name}_{mode}_loss"] = np.mean(eval_losses)
for key in list(metrics.keys()):
if not key.startswith(f"{task_name}_{mode}_"):
metrics[f"{task_name}_{mode}_{key}"] = metrics.pop(key)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
@staticmethod
def build_compute_metrics_fn(
eval_dataset
) -> Callable[[EvalPrediction], Dict]:
def compute_metrics_fn(p: EvalPrediction):
return compute_task_metrics(eval_dataset.task_name, p)
return compute_metrics_fn
| true
| true
|
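An illustrative construction of the training arguments defined above. Values are arbitrary; output_dir and per_device_train_batch_size belong to the transformers TrainingArguments base class, so exact field names depend on the transformers version this code targets:

args = MultiTaskTrainingArguments(
    output_dir="./out",
    per_device_train_batch_size=8,
    use_mt_uncertainty=True,        # enables the entropy-based custom dataloader
    uniform_mt_sampling=False,
    percent_of_max_data_size=0.5,   # halves the per-epoch sampling budget
)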
7905abd81c34c0bde58188ec1e551bd22db1d430
| 4,454
|
py
|
Python
|
indicator/config.py
|
amitkumarj441/Cryptocoin-Price-Indicator
|
01ee6d91eb49537aca06f16cc5faad755947332d
|
[
"Apache-2.0"
] | null | null | null |
indicator/config.py
|
amitkumarj441/Cryptocoin-Price-Indicator
|
01ee6d91eb49537aca06f16cc5faad755947332d
|
[
"Apache-2.0"
] | null | null | null |
indicator/config.py
|
amitkumarj441/Cryptocoin-Price-Indicator
|
01ee6d91eb49537aca06f16cc5faad755947332d
|
[
"Apache-2.0"
] | null | null | null |
import os
import json
CONFIG_FILE_PATH = os.path.expanduser("~/.coinbase-indicator")
GENERAL_OPTION_KEY = 'general'
OPTION_KEY_LARGE_LABEL = 'show_crypto_currency_in_the_label'
OPTION_KEY_NOTIFICATION = 'show_notifications'
OPTION_KEY_THEME_MONOCHROME = 'theme_monochrome'
CRYPTO_CURRENCY_OPTION_KEY = 'crypto_currency'
OPTION_KEY_CRYPTO_CURRENCY_SHOW = 'show_exchange_price'
class Option(object):
def __init__(self, status, label):
self.status = status
self.label = label
def get_label(self):
return self.label
def get_status(self):
return self.status
def set_status(self, status):
self.status = status
class Config(object):
def __init__(self):
self.general_options = {
OPTION_KEY_LARGE_LABEL: Option(False, self.__get_label(OPTION_KEY_LARGE_LABEL)),
OPTION_KEY_NOTIFICATION: Option(True, self.__get_label(OPTION_KEY_NOTIFICATION)),
OPTION_KEY_THEME_MONOCHROME: Option(True, self.__get_label(OPTION_KEY_THEME_MONOCHROME)),
}
self.crypto_currency_options = {}
def set_crypto_currencies_options(self, crypto_currencies):
for crypto_currency in crypto_currencies:
if crypto_currency not in self.crypto_currency_options:
self.crypto_currency_options[crypto_currency] = {
OPTION_KEY_CRYPTO_CURRENCY_SHOW: Option(False, self.__get_label(OPTION_KEY_CRYPTO_CURRENCY_SHOW)),
}
def load(self):
if not os.path.isfile(CONFIG_FILE_PATH):
return
with open(CONFIG_FILE_PATH, 'r') as config_file:
config_dict = json.load(config_file)
if GENERAL_OPTION_KEY in config_dict:
for option_key in config_dict[GENERAL_OPTION_KEY]:
self.general_options[option_key] = Option(config_dict[GENERAL_OPTION_KEY][option_key], self.__get_label(option_key))
if CRYPTO_CURRENCY_OPTION_KEY in config_dict:
for crypto_currency in config_dict[CRYPTO_CURRENCY_OPTION_KEY]:
if crypto_currency not in self.crypto_currency_options:
self.crypto_currency_options[crypto_currency] = {}
for option_key in config_dict[CRYPTO_CURRENCY_OPTION_KEY][crypto_currency]:
self.crypto_currency_options[crypto_currency][option_key] = Option(config_dict[CRYPTO_CURRENCY_OPTION_KEY][crypto_currency][option_key], self.__get_label(option_key))
def persist(self):
config_dict = {
GENERAL_OPTION_KEY: {},
CRYPTO_CURRENCY_OPTION_KEY: {},
}
for option_key in self.general_options:
config_dict[GENERAL_OPTION_KEY][option_key] = self.general_options[option_key].get_status()
for crypto_currency in self.crypto_currency_options:
config_dict[CRYPTO_CURRENCY_OPTION_KEY][crypto_currency] = {}
for option_key in self.crypto_currency_options[crypto_currency]:
config_dict[CRYPTO_CURRENCY_OPTION_KEY][crypto_currency][option_key] = self.crypto_currency_options[crypto_currency][option_key].get_status()
with open(CONFIG_FILE_PATH, 'w') as config_file:
json.dump(config_dict, config_file)
def get_crypto_currency_options(self):
return self.crypto_currency_options
def get_general_options(self):
return self.general_options
def is_crypto_currency_visible(self, crypto_currency):
return \
crypto_currency in self.crypto_currency_options \
and OPTION_KEY_CRYPTO_CURRENCY_SHOW in self.crypto_currency_options[crypto_currency] \
and self.crypto_currency_options[crypto_currency][OPTION_KEY_CRYPTO_CURRENCY_SHOW].get_status()
def is_theme_monochrome(self):
return \
OPTION_KEY_THEME_MONOCHROME in self.general_options \
and self.general_options[OPTION_KEY_THEME_MONOCHROME].get_status()
def is_notification_visible(self):
return \
OPTION_KEY_NOTIFICATION in self.general_options \
and self.general_options[OPTION_KEY_NOTIFICATION].get_status()
def is_large_label_visible(self):
return \
OPTION_KEY_LARGE_LABEL in self.general_options \
and self.general_options[OPTION_KEY_LARGE_LABEL].get_status()
@staticmethod
def __get_label(key):
label = key.replace('_', ' ')
return label[:1].upper() + label[1:]
| 39.415929
| 186
| 0.705658
|
import os
import json
CONFIG_FILE_PATH = os.path.expanduser("~/.coinbase-indicator")
GENERAL_OPTION_KEY = 'general'
OPTION_KEY_LARGE_LABEL = 'show_crypto_currency_in_the_label'
OPTION_KEY_NOTIFICATION = 'show_notifications'
OPTION_KEY_THEME_MONOCHROME = 'theme_monochrome'
CRYPTO_CURRENCY_OPTION_KEY = 'crypto_currency'
OPTION_KEY_CRYPTO_CURRENCY_SHOW = 'show_exchange_price'
class Option(object):
def __init__(self, status, label):
self.status = status
self.label = label
def get_label(self):
return self.label
def get_status(self):
return self.status
def set_status(self, status):
self.status = status
class Config(object):
def __init__(self):
self.general_options = {
OPTION_KEY_LARGE_LABEL: Option(False, self.__get_label(OPTION_KEY_LARGE_LABEL)),
OPTION_KEY_NOTIFICATION: Option(True, self.__get_label(OPTION_KEY_NOTIFICATION)),
OPTION_KEY_THEME_MONOCHROME: Option(True, self.__get_label(OPTION_KEY_THEME_MONOCHROME)),
}
self.crypto_currency_options = {}
def set_crypto_currencies_options(self, crypto_currencies):
for crypto_currency in crypto_currencies:
if crypto_currency not in self.crypto_currency_options:
self.crypto_currency_options[crypto_currency] = {
OPTION_KEY_CRYPTO_CURRENCY_SHOW: Option(False, self.__get_label(OPTION_KEY_CRYPTO_CURRENCY_SHOW)),
}
def load(self):
if not os.path.isfile(CONFIG_FILE_PATH):
return
with open(CONFIG_FILE_PATH, 'r') as config_file:
config_dict = json.load(config_file)
if GENERAL_OPTION_KEY in config_dict:
for option_key in config_dict[GENERAL_OPTION_KEY]:
self.general_options[option_key] = Option(config_dict[GENERAL_OPTION_KEY][option_key], self.__get_label(option_key))
if CRYPTO_CURRENCY_OPTION_KEY in config_dict:
for crypto_currency in config_dict[CRYPTO_CURRENCY_OPTION_KEY]:
if crypto_currency not in self.crypto_currency_options:
self.crypto_currency_options[crypto_currency] = {}
for option_key in config_dict[CRYPTO_CURRENCY_OPTION_KEY][crypto_currency]:
self.crypto_currency_options[crypto_currency][option_key] = Option(config_dict[CRYPTO_CURRENCY_OPTION_KEY][crypto_currency][option_key], self.__get_label(option_key))
def persist(self):
config_dict = {
GENERAL_OPTION_KEY: {},
CRYPTO_CURRENCY_OPTION_KEY: {},
}
for option_key in self.general_options:
config_dict[GENERAL_OPTION_KEY][option_key] = self.general_options[option_key].get_status()
for crypto_currency in self.crypto_currency_options:
config_dict[CRYPTO_CURRENCY_OPTION_KEY][crypto_currency] = {}
for option_key in self.crypto_currency_options[crypto_currency]:
config_dict[CRYPTO_CURRENCY_OPTION_KEY][crypto_currency][option_key] = self.crypto_currency_options[crypto_currency][option_key].get_status()
with open(CONFIG_FILE_PATH, 'w') as config_file:
json.dump(config_dict, config_file)
def get_crypto_currency_options(self):
return self.crypto_currency_options
def get_general_options(self):
return self.general_options
def is_crypto_currency_visible(self, crypto_currency):
return \
crypto_currency in self.crypto_currency_options \
and OPTION_KEY_CRYPTO_CURRENCY_SHOW in self.crypto_currency_options[crypto_currency] \
and self.crypto_currency_options[crypto_currency][OPTION_KEY_CRYPTO_CURRENCY_SHOW].get_status()
def is_theme_monochrome(self):
return \
OPTION_KEY_THEME_MONOCHROME in self.general_options \
and self.general_options[OPTION_KEY_THEME_MONOCHROME].get_status()
def is_notification_visible(self):
return \
OPTION_KEY_NOTIFICATION in self.general_options \
and self.general_options[OPTION_KEY_NOTIFICATION].get_status()
def is_large_label_visible(self):
return \
OPTION_KEY_LARGE_LABEL in self.general_options \
and self.general_options[OPTION_KEY_LARGE_LABEL].get_status()
@staticmethod
def __get_label(key):
label = key.replace('_', ' ')
return label[:1].upper() + label[1:]
| true
| true
|
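A minimal round-trip sketch for Config; note that persist() writes a real file to CONFIG_FILE_PATH (~/.coinbase-indicator):

cfg = Config()
cfg.set_crypto_currencies_options(["BTC", "ETH"])
cfg.get_crypto_currency_options()["BTC"]["show_exchange_price"].set_status(True)
cfg.persist()

cfg2 = Config()
cfg2.load()
print(cfg2.is_crypto_currency_visible("BTC"))   # True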
7905abd893d642df6c3947ec6a8c17887dc7f6bc
| 3,080
|
py
|
Python
|
data/p2DJ/New/program/qiskit/class/startQiskit_Class137.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p2DJ/New/program/qiskit/class/startQiskit_Class137.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p2DJ/New/program/qiskit/class/startQiskit_Class137.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=2
# total number=8
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(target)
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1]) # number=1
prog.h(target)
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
prog.swap(input_qubit[1],input_qubit[0]) # number=2
prog.swap(input_qubit[1],input_qubit[0]) # number=3
prog.x(input_qubit[1]) # number=5
prog.z(input_qubit[1]) # number=4
prog.swap(input_qubit[1],input_qubit[0]) # number=6
prog.swap(input_qubit[1],input_qubit[0]) # number=7
# circuit end
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
# f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
# f = lambda rep: "0"
prog = make_circuit(n, f)
    sample_shot = 2800
backend = BasicAer.get_backend('statevector_simulator')
circuit1 = transpile(prog,FakeVigo())
circuit1.x(qubit=3)
circuit1.x(qubit=3)
prog = circuit1
info = execute(prog, backend=backend).result().get_statevector()
qubits = round(log2(len(info)))
info = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
writefile = open("../data/startQiskit_Class137.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 28.256881
| 80
| 0.620779
|
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
prog.x(target)
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1])
prog.h(target)
prog.barrier()
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
prog.swap(input_qubit[1],input_qubit[0])
prog.swap(input_qubit[1],input_qubit[0])
prog.x(input_qubit[1])
prog.z(input_qubit[1])
prog.swap(input_qubit[1],input_qubit[0])
prog.swap(input_qubit[1],input_qubit[0])
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
prog = make_circuit(n, f)
    sample_shot = 2800
backend = BasicAer.get_backend('statevector_simulator')
circuit1 = transpile(prog,FakeVigo())
circuit1.x(qubit=3)
circuit1.x(qubit=3)
prog = circuit1
info = execute(prog, backend=backend).result().get_statevector()
qubits = round(log2(len(info)))
info = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
writefile = open("../data/startQiskit_Class137.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| true
| true
|
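The probability dictionary built above is just |amplitude|**2 per basis state; the same conversion in isolation, on a two-qubit Bell-like vector chosen for illustration:

import numpy as np

state = np.array([1, 0, 0, 1]) / np.sqrt(2)
probs = {np.binary_repr(i, 2): round(abs(a) ** 2, 3)
         for i, a in enumerate(state)}
print(probs)   # {'00': 0.5, '01': 0.0, '10': 0.0, '11': 0.5}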
7905ac76dfee129f62d3deb642e572b410d1f67a
| 1,518
|
py
|
Python
|
uni_ticket/migrations/0031_auto_20190521_1229.py
|
mspasiano/uniTicket
|
1e8e4c2274293e751deea5b8b1fb4116136c5641
|
[
"Apache-2.0"
] | null | null | null |
uni_ticket/migrations/0031_auto_20190521_1229.py
|
mspasiano/uniTicket
|
1e8e4c2274293e751deea5b8b1fb4116136c5641
|
[
"Apache-2.0"
] | null | null | null |
uni_ticket/migrations/0031_auto_20190521_1229.py
|
mspasiano/uniTicket
|
1e8e4c2274293e751deea5b8b1fb4116136c5641
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.2.1 on 2019-05-21 10:29
from django.db import migrations, models
import uni_ticket.models
class Migration(migrations.Migration):
dependencies = [
('uni_ticket', '0030_auto_20190520_1532'),
]
operations = [
migrations.AlterField(
model_name='ticketcategoryinputlist',
name='input_type',
field=models.CharField(choices=[('CustomSignedP7MField', 'Allegato P7M firmato'), ('CustomFileField', 'Allegato PDF'), ('CustomSignedFileField', 'Allegato PDF'), ('CustomSignedPdfField', 'Allegato PDF firmato'), ('CustomHiddenField', 'Campo nascosto'), ('CheckBoxField', 'Checkbox'), ('BaseDateField', 'Data'), ('BaseDateTimeField', 'Data e Ora'), ('DateStartEndComplexField', 'Data inizio e Data fine'), ('DurataComeInteroField', 'Durata come numero intero (anni,mesi,ore)'), ('CustomComplexTableField', 'Inserimenti multipli'), ('CustomRadioBoxField', 'Lista di opzioni (checkbox)'), ('CustomSelectBoxField', 'Lista di opzioni (tendina)'), ('PositiveFloatField', 'Numero con virgola positivo'), ('PositiveIntegerField', 'Numero intero positivo'), ('ProtocolloField', 'Protocollo (tipo/numero/data)'), ('CustomCharField', 'Testo'), ('TextAreaField', 'Testo lungo')], max_length=33),
),
migrations.AlterField(
model_name='ticketreply',
name='attachment',
field=models.FileField(blank=True, default=None, null=True, upload_to=uni_ticket.models._reply_attachment_upload),
),
]
| 60.72
| 895
| 0.690382
|
from django.db import migrations, models
import uni_ticket.models
class Migration(migrations.Migration):
dependencies = [
('uni_ticket', '0030_auto_20190520_1532'),
]
operations = [
migrations.AlterField(
model_name='ticketcategoryinputlist',
name='input_type',
field=models.CharField(choices=[('CustomSignedP7MField', 'Allegato P7M firmato'), ('CustomFileField', 'Allegato PDF'), ('CustomSignedFileField', 'Allegato PDF'), ('CustomSignedPdfField', 'Allegato PDF firmato'), ('CustomHiddenField', 'Campo nascosto'), ('CheckBoxField', 'Checkbox'), ('BaseDateField', 'Data'), ('BaseDateTimeField', 'Data e Ora'), ('DateStartEndComplexField', 'Data inizio e Data fine'), ('DurataComeInteroField', 'Durata come numero intero (anni,mesi,ore)'), ('CustomComplexTableField', 'Inserimenti multipli'), ('CustomRadioBoxField', 'Lista di opzioni (checkbox)'), ('CustomSelectBoxField', 'Lista di opzioni (tendina)'), ('PositiveFloatField', 'Numero con virgola positivo'), ('PositiveIntegerField', 'Numero intero positivo'), ('ProtocolloField', 'Protocollo (tipo/numero/data)'), ('CustomCharField', 'Testo'), ('TextAreaField', 'Testo lungo')], max_length=33),
),
migrations.AlterField(
model_name='ticketreply',
name='attachment',
field=models.FileField(blank=True, default=None, null=True, upload_to=uni_ticket.models._reply_attachment_upload),
),
]
| true
| true
|
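Aside: the AlterField above only rewrites the choices and max_length of an existing CharField; Django enforces choices at form/model validation time, not in the database schema. A hypothetical sketch (the constant name and truncated choice list are invented here) of the field definition that python manage.py makemigrations would diff into such an operation:

# Hypothetical sketch; the real field lives on uni_ticket's
# ticketcategoryinputlist model, and INPUT_TYPE_CHOICES is an invented name.
from django.db import models

INPUT_TYPE_CHOICES = [
    ("CustomCharField", "Testo"),
    ("TextAreaField", "Testo lungo"),
    # ... the remaining (value, label) pairs from the migration above ...
]

# max_length=33 must cover the longest stored value string.
input_type = models.CharField(choices=INPUT_TYPE_CHOICES, max_length=33)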
7905ae4352442629717b7290801d8937671ce02e
| 19,189
|
py
|
Python
|
breaker_audio/component_cmn/synthesizer/hparams.py
|
kozzion/breaker_audio
|
0f27b3ae581fbeb8f79d0b8755a139f7438ca02b
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
breaker_audio/component_cmn/synthesizer/hparams.py
|
kozzion/breaker_audio
|
0f27b3ae581fbeb8f79d0b8755a139f7438ca02b
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
breaker_audio/component_cmn/synthesizer/hparams.py
|
kozzion/breaker_audio
|
0f27b3ae581fbeb8f79d0b8755a139f7438ca02b
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
# from tensorflow.contrib.training import HParams
# from aukit.audio_io import Dict2Obj
from dotmap import DotMap
import json
class Dict2Obj(DotMap):
    """
    Fixes the bug where DotMap's get method creates DotMap objects for missing keys.
    Dict2Obj's get method behaves the same as dict's get.
    """
def __getitem__(self, k):
if k not in self._map:
return None
else:
return self._map[k]
def parse(self, json_string):
if json_string.strip():
_hp = json.loads(json_string)
for k, v in _hp.items():
self[k] = v
return self
one = 64
# Default hyperparameters
hparams = Dict2Obj(dict(
encoder_path=r"../models/encoder/saved_models/ge2e_pretrained.pt",
# Comma-separated list of cleaners to run on text prior to training and eval. For non-English
# text, you may want to use "basic_cleaners" or "transliteration_cleaners".
cleaners="chinese_cleaners",
center=True,
# If you only have 1 GPU or want to use only one GPU, please set num_gpus=0 and specify the
# GPU idx on run. example:
    # example 1 GPU of index 2 (train on "/gpu2" only): CUDA_VISIBLE_DEVICES=2 python train.py
# --model="Tacotron" --hparams="tacotron_gpu_start_idx=2"
# If you want to train on multiple GPUs, simply specify the number of GPUs available,
# and the idx of the first GPU to use. example:
# example 4 GPUs starting from index 0 (train on "/gpu0"->"/gpu3"): python train.py
# --model="Tacotron" --hparams="tacotron_num_gpus=4, tacotron_gpu_start_idx=0"
# The hparams arguments can be directly modified on this hparams.py file instead of being
# specified on run if preferred!
# If one wants to train both Tacotron and WaveNet in parallel (provided WaveNet will be
# trained on True mel spectrograms), one needs to specify different GPU idxes.
# example Tacotron+WaveNet on a machine with 4 or plus GPUs. Two GPUs for each model:
# CUDA_VISIBLE_DEVICES=0,1 python train.py --model="Tacotron"
# --hparams="tacotron_gpu_start_idx=0, tacotron_num_gpus=2"
    # CUDA_VISIBLE_DEVICES=2,3 python train.py --model="WaveNet"
# --hparams="wavenet_gpu_start_idx=2; wavenet_num_gpus=2"
# IMPORTANT NOTE: If using N GPUs, please multiply the tacotron_batch_size by N below in the
# hparams! (tacotron_batch_size = 32 * N)
# Never use lower batch size than 32 on a single GPU!
# Same applies for Wavenet: wavenet_batch_size = 8 * N (wavenet_batch_size can be smaller than
# 8 if GPU is having OOM, minimum 2)
# Please also apply the synthesis batch size modification likewise. (if N GPUs are used for
# synthesis, minimal batch size must be N, minimum of 1 sample per GPU)
# We did not add an automatic multi-GPU batch size computation to avoid confusion in the
    # user's mind and to provide more control to the user for
# resources related decisions.
# Acknowledgement:
# Many thanks to @MlWoo for his awesome work on multi-GPU Tacotron which showed to work a
# little faster than the original
# pipeline for a single GPU as well. Great work!
# Hardware setup: Default supposes user has only one GPU: "/gpu:0" (Tacotron only for now!
# WaveNet does not support multi GPU yet, WIP)
# Synthesis also uses the following hardware parameters for multi-GPU parallel synthesis.
tacotron_gpu_start_idx=0, # idx of the first GPU to be used for Tacotron training.
tacotron_num_gpus=1, # Determines the number of gpus in use for Tacotron training.
split_on_cpu=True,
# Determines whether to split data on CPU or on first GPU. This is automatically True when
# more than 1 GPU is used.
###########################################################################################################################################
# Audio
# Audio parameters are the most important parameters to tune when using this work on your
# personal data. Below are the beginner steps to adapt
# this work to your personal data:
    # 1- Determine your data sample rate: First you need to determine your audio sample_rate (how
# many samples are in a second of audio). This can be done using sox: "sox --i <filename>"
# (For this small tuto, I will consider 24kHz (24000 Hz), and defaults are 22050Hz,
# so there are plenty of examples to refer to)
# 2- set sample_rate parameter to your data correct sample rate
    # 3- Fix win_size and hop_size accordingly: (Supposing you will follow our advice: 50ms
# window_size, and 12.5ms frame_shift(hop_size))
# a- win_size = 0.05 * sample_rate. In the tuto example, 0.05 * 24000 = 1200
# b- hop_size = 0.25 * win_size. Also equal to 0.0125 * sample_rate. In the tuto
# example, 0.25 * 1200 = 0.0125 * 24000 = 300 (Can set frame_shift_ms=12.5 instead)
# 4- Fix n_fft, num_freq and upsample_scales parameters accordingly.
# a- n_fft can be either equal to win_size or the first power of 2 that comes after
# win_size. I usually recommend using the latter
# to be more consistent with signal processing friends. No big difference to be seen
# however. For the tuto example: n_fft = 2048 = 2**11
# b- num_freq = (n_fft / 2) + 1. For the tuto example: num_freq = 2048 / 2 + 1 = 1024 +
# 1 = 1025.
# c- For WaveNet, upsample_scales products must be equal to hop_size. For the tuto
# example: upsample_scales=[15, 20] where 15 * 20 = 300
# it is also possible to use upsample_scales=[3, 4, 5, 5] instead. One must only
# keep in mind that upsample_kernel_size[0] = 2*upsample_scales[0]
# so the training segments should be long enough (2.8~3x upsample_scales[0] *
# hop_size or longer) so that the first kernel size can see the middle
# of the samples efficiently. The length of WaveNet training segments is under the
# parameter "max_time_steps".
    # 5- Finally comes the silence trimming. This is very much data dependent, so I suggest trying
# preprocessing (or part of it, ctrl-C to stop), then use the
# .ipynb provided in the repo to listen to some inverted mel/linear spectrograms. That
# will first give you some idea about your above parameters, and
# it will also give you an idea about trimming. If silences persist, try reducing
# trim_top_db slowly. If samples are trimmed mid words, try increasing it.
# 6- If audio quality is too metallic or fragmented (or if linear spectrogram plots are
# showing black silent regions on top), then restart from step 2.
inv_mel_basis=None,
mel_basis=None,
num_mels=80, # Number of mel-spectrogram channels and local conditioning dimensionality
# network
rescale=True, # Whether to rescale audio prior to preprocessing
rescaling_max=0.9, # Rescaling value
# Whether to clip silence in Audio (at beginning and end of audio only, not the middle)
# train samples of lengths between 3sec and 14sec are more than enough to make a model capable
# of good parallelization.
clip_mels_length=True,
# For cases of OOM (Not really recommended, only use if facing unsolvable OOM errors,
# also consider clipping your samples to smaller chunks)
max_mel_frames=900,
# Only relevant when clip_mels_length = True, please only use after trying output_per_steps=3
# and still getting OOM errors.
# Use LWS (https://github.com/Jonathan-LeRoux/lws) for STFT and phase reconstruction
    # It's preferred to set True to use with https://github.com/r9y9/wavenet_vocoder
    # Does not work if n_fft is not a multiple of hop_size!!
use_lws=False,
# Only used to set as True if using WaveNet, no difference in performance is observed in
# either cases.
silence_threshold=2, # silence threshold used for sound trimming for wavenet preprocessing
# Mel spectrogram
n_fft=800, # Extra window size is filled with 0 paddings to match this parameter
hop_size=200, # For 16000Hz, 200 = 12.5 ms (0.0125 * sample_rate)
win_size=800, # For 16000Hz, 800 = 50 ms (If None, win_size = n_fft) (0.05 * sample_rate)
sample_rate=16000, # 16000Hz (corresponding to librispeech) (sox --i <filename>)
frame_shift_ms=None, # Can replace hop_size parameter. (Recommended: 12.5)
# M-AILABS (and other datasets) trim params (these parameters are usually correct for any
# data, but definitely must be tuned for specific speakers)
trim_fft_size=512,
trim_hop_size=128,
trim_top_db=23,
# Mel and Linear spectrograms normalization/scaling and clipping
signal_normalization=True,
# Whether to normalize mel spectrograms to some predefined range (following below parameters)
allow_clipping_in_normalization=True, # Only relevant if mel_normalization = True
symmetric_mels=True,
# Whether to scale the data to be symmetric around 0. (Also multiplies the output range by 2,
# faster and cleaner convergence)
max_abs_value=4.,
# max absolute value of data. If symmetric, data will be [-max, max] else [0, max] (Must not
# be too big to avoid gradient explosion,
# not too small for fast convergence)
normalize_for_wavenet=True,
# whether to rescale to [0, 1] for wavenet. (better audio quality)
clip_for_wavenet=True,
# whether to clip [-max, max] before training/synthesizing with wavenet (better audio quality)
# Contribution by @begeekmyfriend
# Spectrogram Pre-Emphasis (Lfilter: Reduce spectrogram noise and helps model certitude
# levels. Also allows for better G&L phase reconstruction)
preemphasize=True, # whether to apply filter
preemphasis=0.97, # filter coefficient.
# Limits
min_level_db=-100,
ref_level_db=20,
fmin=55,
# Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To
# test depending on dataset. Pitch info: male~[65, 260], female~[100, 525])
fmax=7600, # To be increased/reduced depending on data.
# Griffin Lim
power=1.5,
# Only used in G&L inversion, usually values between 1.2 and 1.5 are a good choice.
griffin_lim_iters=30, # 60,
# Number of G&L iterations, typically 30 is enough but we use 60 to ensure convergence.
###########################################################################################################################################
# Tacotron
outputs_per_step=2, # Was 1
# number of frames to generate at each decoding step (increase to speed up computation and
# allows for higher batch size, decreases G&L audio quality)
stop_at_any=True,
# Determines whether the decoder should stop when predicting <stop> to any frame or to all of
# them (True works pretty well)
embedding_dim=one * 4, # 512, # dimension of embedding space (these are NOT the speaker embeddings)
# Encoder parameters
enc_conv_num_layers=3, # number of encoder convolutional layers
enc_conv_kernel_size=(5,), # size of encoder convolution filters for each layer
enc_conv_channels=one * 4, # 512, # number of encoder convolutions filters for each layer
encoder_lstm_units=one * 2, # 256, # number of lstm units for each direction (forward and backward)
# Attention mechanism
smoothing=False, # Whether to smooth the attention normalization function
attention_dim=one * 1, # 128, # dimension of attention space
attention_filters=32, # number of attention convolution filters
attention_kernel=(31,), # kernel size of attention convolution
cumulative_weights=True,
# Whether to cumulate (sum) all previous attention weights or simply feed previous weights (
# Recommended: True)
# Decoder
prenet_layers=[one * 2, one * 2], # [256, 256], # number of layers and number of units of prenet
decoder_layers=2, # number of decoder lstm layers
decoder_lstm_units=one * 8, # 1024, # number of decoder lstm units on each layer
max_iters=2000,
# Max decoder steps during inference (Just for safety from infinite loop cases)
# Residual postnet
postnet_num_layers=5, # number of postnet convolutional layers
postnet_kernel_size=(5,), # size of postnet convolution filters for each layer
postnet_channels=one * 4, # 512, # number of postnet convolution filters for each layer
# CBHG mel->linear postnet
cbhg_kernels=8,
# All kernel sizes from 1 to cbhg_kernels will be used in the convolution bank of CBHG to act
# as "K-grams"
cbhg_conv_channels=one * 1, # 128, # Channels of the convolution bank
cbhg_pool_size=2, # pooling size of the CBHG
cbhg_projection=one * 2, # 256,
# projection channels of the CBHG (1st projection, 2nd is automatically set to num_mels)
cbhg_projection_kernel_size=3, # kernel_size of the CBHG projections
cbhg_highwaynet_layers=4, # Number of HighwayNet layers
cbhg_highway_units=one * 1, # 128, # Number of units used in HighwayNet fully connected layers
cbhg_rnn_units=one * 1, # 128,
# Number of GRU units used in bidirectional RNN of CBHG block. CBHG output is 2x rnn_units in
# shape
# Loss params
mask_encoder=True,
# whether to mask encoder padding while computing attention. Set to True for better prosody
# but slower convergence.
mask_decoder=False,
# Whether to use loss mask for padded sequences (if False, <stop_token> loss function will not
# be weighted, else recommended pos_weight = 20)
cross_entropy_pos_weight=20,
# Use class weights to reduce the stop token classes imbalance (by adding more penalty on
# False Negatives (FN)) (1 = disabled)
predict_linear=False,
# Whether to add a post-processing network to the Tacotron to predict linear spectrograms (
# True mode Not tested!!)
###########################################################################################################################################
# Tacotron Training
# Reproduction seeds
tacotron_random_seed=5339,
# Determines initial graph and operations (i.e: model) random state for reproducibility
tacotron_data_random_state=1234, # random state for train test split repeatability
# performance parameters
tacotron_swap_with_cpu=False,
# Whether to use cpu as support to gpu for decoder computation (Not recommended: may cause
# major slowdowns! Only use when critical!)
# train/test split ratios, mini-batches sizes
tacotron_batch_size=64, # number of training samples on each training steps (was 32)
# Tacotron Batch synthesis supports ~16x the training batch size (no gradients during
# testing).
# Training Tacotron with unmasked paddings makes it aware of them, which makes synthesis times
# different from training. We thus recommend masking the encoder.
tacotron_synthesis_batch_size=128,
    # DO NOT MAKE THIS BIGGER THAN 1 IF YOU DIDN'T TRAIN TACOTRON WITH "mask_encoder=True"!!
tacotron_test_size=None, # 0.05
# % of data to keep as test data, if None, tacotron_test_batches must be not None. (5% is
# enough to have a good idea about overfit)
tacotron_test_batches=2, # number of test batches.
# Learning rate schedule
tacotron_decay_learning_rate=True,
# boolean, determines if the learning rate will follow an exponential decay
tacotron_start_decay=10000, # 50000, # Step at which learning decay starts
tacotron_decay_steps=10000, # 50000, # Determines the learning rate decay slope (UNDER TEST)
tacotron_decay_rate=0.5, # learning rate decay rate (UNDER TEST)
tacotron_initial_learning_rate=1e-3, # starting learning rate
tacotron_final_learning_rate=1e-5, # minimal learning rate
# Optimization parameters
tacotron_adam_beta1=0.9, # AdamOptimizer beta1 parameter
tacotron_adam_beta2=0.999, # AdamOptimizer beta2 parameter
tacotron_adam_epsilon=1e-6, # AdamOptimizer Epsilon parameter
# Regularization parameters
tacotron_reg_weight=1e-7, # regularization weight (for L2 regularization)
tacotron_scale_regularization=False,
# Whether to rescale regularization weight to adapt for outputs range (used when reg_weight is
# high and biasing the model)
tacotron_zoneout_rate=0.1, # zoneout rate for all LSTM cells in the network
tacotron_dropout_rate=0.5, # dropout rate for all convolutional layers + prenet
tacotron_clip_gradients=True, # whether to clip gradients
# Evaluation parameters
natural_eval=False,
# Whether to use 100% natural eval (to evaluate Curriculum Learning performance) or with same
# teacher-forcing ratio as in training (just for overfit)
# Decoder RNN learning can take be done in one of two ways:
# Teacher Forcing: vanilla teacher forcing (usually with ratio = 1). mode="constant"
# Curriculum Learning Scheme: From Teacher-Forcing to sampling from previous outputs is
# function of global step. (teacher forcing ratio decay) mode="scheduled"
# The second approach is inspired by:
# Bengio et al. 2015: Scheduled Sampling for Sequence Prediction with Recurrent Neural Networks.
# Can be found under: https://arxiv.org/pdf/1506.03099.pdf
tacotron_teacher_forcing_mode="constant",
# Can be ("constant" or "scheduled"). "scheduled" mode applies a cosine teacher forcing ratio
# decay. (Preference: scheduled)
tacotron_teacher_forcing_ratio=1.,
# Value from [0., 1.], 0.=0%, 1.=100%, determines the % of times we force next decoder
# inputs, Only relevant if mode="constant"
tacotron_teacher_forcing_init_ratio=1.,
# initial teacher forcing ratio. Relevant if mode="scheduled"
tacotron_teacher_forcing_final_ratio=0.,
# final teacher forcing ratio. Relevant if mode="scheduled"
tacotron_teacher_forcing_start_decay=10000,
# starting point of teacher forcing ratio decay. Relevant if mode="scheduled"
tacotron_teacher_forcing_decay_steps=280000,
# Determines the teacher forcing ratio decay slope. Relevant if mode="scheduled"
tacotron_teacher_forcing_decay_alpha=0.,
# teacher forcing ratio decay rate. Relevant if mode="scheduled"
###########################################################################################################################################
# Tacotron-2 integration parameters
train_with_GTA=False,
# Whether to use GTA mels to train WaveNet instead of ground truth mels.
###########################################################################################################################################
# Eval sentences (if no eval text file was specified during synthesis, these sentences are
# used for eval)
    sentences=["你好语音克隆模型。"],  # Chinese eval sentence: "Hello, voice cloning model."
### SV2TTS ###
speaker_embedding_size=256,
silence_min_duration_split=0.4, # Duration in seconds of a silence for an utterance to be split
utterance_min_duration=1., # Duration in seconds below which utterances are discarded
))
def hparams_debug_string():
# values = hparams.values()
hp = [" %s: %s" % (key, value) for key, value in hparams.items()]
return "Hyperparameters:\n" + "\n".join(hp)
| 53.901685
| 143
| 0.691594
|
from dotmap import DotMap
import json
class Dict2Obj(DotMap):
def __getitem__(self, k):
if k not in self._map:
return None
else:
return self._map[k]
def parse(self, json_string):
if json_string.strip():
_hp = json.loads(json_string)
for k, v in _hp.items():
self[k] = v
return self
one = 64
hparams = Dict2Obj(dict(
encoder_path=r"../models/encoder/saved_models/ge2e_pretrained.pt",
cleaners="chinese_cleaners",
center=True,
# resources related decisions.
# Acknowledgement:
# Many thanks to @MlWoo for his awesome work on multi-GPU Tacotron which showed to work a
# little faster than the original
# pipeline for a single GPU as well. Great work!
# Hardware setup: Default supposes user has only one GPU: "/gpu:0" (Tacotron only for now!
# WaveNet does not support multi GPU yet, WIP)
# Synthesis also uses the following hardware parameters for multi-GPU parallel synthesis.
tacotron_gpu_start_idx=0, # idx of the first GPU to be used for Tacotron training.
tacotron_num_gpus=1, # Determines the number of gpus in use for Tacotron training.
split_on_cpu=True,
# Determines whether to split data on CPU or on first GPU. This is automatically True when
# more than 1 GPU is used.
###########################################################################################################################################
# Audio
# Audio parameters are the most important parameters to tune when using this work on your
# personal data. Below are the beginner steps to adapt
# this work to your personal data:
    # 1- Determine your data sample rate: First you need to determine your audio sample_rate (how
# many samples are in a second of audio). This can be done using sox: "sox --i <filename>"
# (For this small tuto, I will consider 24kHz (24000 Hz), and defaults are 22050Hz,
# so there are plenty of examples to refer to)
# 2- set sample_rate parameter to your data correct sample rate
    # 3- Fix win_size and hop_size accordingly: (Supposing you will follow our advice: 50ms
# window_size, and 12.5ms frame_shift(hop_size))
# a- win_size = 0.05 * sample_rate. In the tuto example, 0.05 * 24000 = 1200
# b- hop_size = 0.25 * win_size. Also equal to 0.0125 * sample_rate. In the tuto
# example, 0.25 * 1200 = 0.0125 * 24000 = 300 (Can set frame_shift_ms=12.5 instead)
# 4- Fix n_fft, num_freq and upsample_scales parameters accordingly.
# a- n_fft can be either equal to win_size or the first power of 2 that comes after
# win_size. I usually recommend using the latter
# to be more consistent with signal processing friends. No big difference to be seen
# however. For the tuto example: n_fft = 2048 = 2**11
# b- num_freq = (n_fft / 2) + 1. For the tuto example: num_freq = 2048 / 2 + 1 = 1024 +
# 1 = 1025.
# c- For WaveNet, upsample_scales products must be equal to hop_size. For the tuto
# example: upsample_scales=[15, 20] where 15 * 20 = 300
# it is also possible to use upsample_scales=[3, 4, 5, 5] instead. One must only
# keep in mind that upsample_kernel_size[0] = 2*upsample_scales[0]
# so the training segments should be long enough (2.8~3x upsample_scales[0] *
# hop_size or longer) so that the first kernel size can see the middle
# of the samples efficiently. The length of WaveNet training segments is under the
# parameter "max_time_steps".
    # 5- Finally comes the silence trimming. This is very much data dependent, so I suggest trying
# preprocessing (or part of it, ctrl-C to stop), then use the
# .ipynb provided in the repo to listen to some inverted mel/linear spectrograms. That
# will first give you some idea about your above parameters, and
# it will also give you an idea about trimming. If silences persist, try reducing
# trim_top_db slowly. If samples are trimmed mid words, try increasing it.
# 6- If audio quality is too metallic or fragmented (or if linear spectrogram plots are
# showing black silent regions on top), then restart from step 2.
inv_mel_basis=None,
mel_basis=None,
num_mels=80, # Number of mel-spectrogram channels and local conditioning dimensionality
# network
rescale=True, # Whether to rescale audio prior to preprocessing
rescaling_max=0.9, # Rescaling value
# Whether to clip silence in Audio (at beginning and end of audio only, not the middle)
# train samples of lengths between 3sec and 14sec are more than enough to make a model capable
# of good parallelization.
clip_mels_length=True,
# For cases of OOM (Not really recommended, only use if facing unsolvable OOM errors,
# also consider clipping your samples to smaller chunks)
max_mel_frames=900,
# Only relevant when clip_mels_length = True, please only use after trying output_per_steps=3
# and still getting OOM errors.
# Use LWS (https://github.com/Jonathan-LeRoux/lws) for STFT and phase reconstruction
    # It's preferred to set True to use with https://github.com/r9y9/wavenet_vocoder
use_lws=False,
silence_threshold=2,
n_fft=800,
hop_size=200,
win_size=800,
sample_rate=16000,
frame_shift_ms=None,
trim_fft_size=512,
trim_hop_size=128,
trim_top_db=23,
signal_normalization=True,
allow_clipping_in_normalization=True,
symmetric_mels=True,
max_abs_value=4.,
normalize_for_wavenet=True,
clip_for_wavenet=True,
preemphasize=True,
preemphasis=0.97,
min_level_db=-100,
ref_level_db=20,
fmin=55,
fmax=7600,
power=1.5,
griffin_lim_iters=30,
| true
| true
|
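Aside: step 3 of the audio tutorial embedded in the comments above derives the STFT geometry from the sample rate (50 ms windows, 12.5 ms hops, n_fft equal to win_size or the next power of two). A minimal sketch of that arithmetic at this file's 16 kHz default (the file itself picks n_fft = win_size = 800, which the comment says is equally valid):

# Sketch of the win_size/hop_size/n_fft arithmetic from the comments above.
import math

sample_rate = 16000                          # this file's default
win_size = int(0.05 * sample_rate)           # 50 ms window   -> 800
hop_size = int(0.25 * win_size)              # 12.5 ms shift  -> 200
n_fft = 2 ** math.ceil(math.log2(win_size))  # next power of two -> 1024
num_freq = n_fft // 2 + 1                    # -> 513
print(win_size, hop_size, n_fft, num_freq)

Dict2Obj.parse then lets a JSON string override such defaults at run time; a tiny usage sketch, assuming the Dict2Obj class from the record is in scope:

# Dict2Obj merges JSON overrides and returns None for missing keys.
d = Dict2Obj({"sample_rate": 16000})
d.parse('{"n_fft": 2048}')
print(d["sample_rate"], d["n_fft"], d["missing_key"])  # 16000 2048 None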
7905afac64760f89be7efd42c89d662fe8709fed
| 1,633
|
py
|
Python
|
examples/example-hpmc.py
|
glotzerlab/garne
|
f9cb7bad391299e28feb4010eb77447fdc4512cb
|
[
"BSD-3-Clause"
] | 4
|
2019-07-30T00:12:44.000Z
|
2020-03-03T19:58:34.000Z
|
examples/example-hpmc.py
|
glotzerlab/garne
|
f9cb7bad391299e28feb4010eb77447fdc4512cb
|
[
"BSD-3-Clause"
] | 62
|
2019-07-29T20:05:46.000Z
|
2022-02-16T15:22:01.000Z
|
examples/example-hpmc.py
|
glotzerlab/garne
|
f9cb7bad391299e28feb4010eb77447fdc4512cb
|
[
"BSD-3-Clause"
] | 2
|
2020-03-03T19:59:09.000Z
|
2021-03-22T14:48:56.000Z
|
# Copyright (c) 2020 The Regents of the University of Michigan
# All rights reserved.
# This software is licensed under the BSD 3-Clause License.
import garnett
import hoomd
import hoomd.hpmc
# Vertices of a cube
cube_verts = [[-1, -1, -1], [-1, -1, 1], [-1, 1, 1], [-1, 1, -1],
[1, -1, -1], [1, -1, 1], [1, 1, 1], [1, 1, -1]]
with hoomd.context.SimulationContext():
box = hoomd.data.boxdim(L=10, dimensions=3)
snapshot = hoomd.data.make_snapshot(N=4, box=box)
snapshot.particles.position[:] = [
[2, 0, 0],
[4, 0, 0],
[0, 4, 0],
[0, 0, 4],
]
system = hoomd.init.read_snapshot(snapshot)
mc = hoomd.hpmc.integrate.convex_polyhedron(seed=452784, d=0.2, a=0.4)
mc.shape_param.set('A', vertices=cube_verts)
gsd_dump = hoomd.dump.gsd('cube.gsd', period=10, group=hoomd.group.all())
gsd_dump.dump_state(mc)
gsd_dump.dump_shape(mc)
hoomd.run(1000)
# Restore a snapshot from saved data
with garnett.read('cube.gsd') as traj:
snapshot2 = system.take_snapshot()
traj[-1].to_hoomd_snapshot(snapshot2)
with hoomd.context.SimulationContext():
# Create a HOOMD snapshot from a garnett Trajectory frame
with garnett.read('cube.gsd') as traj:
snapshot = traj[-1].to_hoomd_snapshot()
system = hoomd.init.read_snapshot(snapshot)
mc = hoomd.hpmc.integrate.convex_polyhedron(seed=452784, d=0.2, a=0.4)
mc.shape_param.set('A', vertices=cube_verts)
gsd_dump = hoomd.dump.gsd('cube.gsd', period=10, group=hoomd.group.all())
gsd_dump.dump_state(mc)
gsd_dump.dump_shape(mc)
hoomd.run(1000)
| 34.020833
| 77
| 0.644825
|
import garnett
import hoomd
import hoomd.hpmc
cube_verts = [[-1, -1, -1], [-1, -1, 1], [-1, 1, 1], [-1, 1, -1],
[1, -1, -1], [1, -1, 1], [1, 1, 1], [1, 1, -1]]
with hoomd.context.SimulationContext():
box = hoomd.data.boxdim(L=10, dimensions=3)
snapshot = hoomd.data.make_snapshot(N=4, box=box)
snapshot.particles.position[:] = [
[2, 0, 0],
[4, 0, 0],
[0, 4, 0],
[0, 0, 4],
]
system = hoomd.init.read_snapshot(snapshot)
mc = hoomd.hpmc.integrate.convex_polyhedron(seed=452784, d=0.2, a=0.4)
mc.shape_param.set('A', vertices=cube_verts)
gsd_dump = hoomd.dump.gsd('cube.gsd', period=10, group=hoomd.group.all())
gsd_dump.dump_state(mc)
gsd_dump.dump_shape(mc)
hoomd.run(1000)
with garnett.read('cube.gsd') as traj:
snapshot2 = system.take_snapshot()
traj[-1].to_hoomd_snapshot(snapshot2)
with hoomd.context.SimulationContext():
with garnett.read('cube.gsd') as traj:
snapshot = traj[-1].to_hoomd_snapshot()
system = hoomd.init.read_snapshot(snapshot)
mc = hoomd.hpmc.integrate.convex_polyhedron(seed=452784, d=0.2, a=0.4)
mc.shape_param.set('A', vertices=cube_verts)
gsd_dump = hoomd.dump.gsd('cube.gsd', period=10, group=hoomd.group.all())
gsd_dump.dump_state(mc)
gsd_dump.dump_shape(mc)
hoomd.run(1000)
| true
| true
|
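Aside: the example above round-trips a HOOMD-blue run through a GSD file via garnett. A minimal inspection sketch; the frame attribute names below (box, positions) are an assumption about garnett's frame API and have varied across releases (some spell the accessor position), so check the installed version:

# Inspection sketch only; 'box' and 'positions' are assumed attribute names.
import garnett

with garnett.read('cube.gsd') as traj:
    print(len(traj), "frames")
    frame = traj[-1]
    print("box:", frame.box)
    print("first particle:", frame.positions[0])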
7905aff6a945c8869807ad92a59d9cfa69b6e527
| 968
|
py
|
Python
|
powernad/Object/Ad/sub/AdFieldObject.py
|
devkingsejong/python---PowerNad
|
c308bba4cb31126ccd318e4574071f4057f5d23f
|
[
"CNRI-Python"
] | 34
|
2017-03-16T14:32:49.000Z
|
2022-03-18T09:23:05.000Z
|
powernad/Object/Ad/sub/AdFieldObject.py
|
devkingsejong/python---PowerNad
|
c308bba4cb31126ccd318e4574071f4057f5d23f
|
[
"CNRI-Python"
] | 16
|
2018-02-08T02:37:56.000Z
|
2022-03-15T13:45:34.000Z
|
powernad/Object/Ad/sub/AdFieldObject.py
|
devkingsejong/python---PowerNad
|
c308bba4cb31126ccd318e4574071f4057f5d23f
|
[
"CNRI-Python"
] | 19
|
2017-03-28T21:48:18.000Z
|
2021-11-30T05:13:43.000Z
|
import json
class AdFieldObject:
    def __init__(self, json_def=None):
        if isinstance(json_def, str):
            json_def = json.loads(json_def)
        s = json_def or {}  # tolerate None instead of raising on the 'in' checks below
self.pc_display = None if 'pc' not in s else self.pc_display_easy(s['pc'])
self.pc_final = None if 'pc' not in s else self.pc_final_easy(s['pc'])
self.mobile_display = None if 'mobile' not in s else self.mobile_display_easy(s['mobile'])
self.mobile_final = None if 'mobile' not in s else self.mobile_final_easy(s['mobile'])
self.headline = None if 'headline' not in s else s['headline']
self.description = None if 'description' not in s else s['description']
def pc_final_easy(self, pc):
return pc['final']
def pc_display_easy(self, pc):
return pc['display']
def mobile_final_easy(self, mobile):
return mobile['final']
def mobile_display_easy(self, mobile):
return mobile['display']
| 35.851852
| 99
| 0.639463
|
import json
class AdFieldObject:
    def __init__(self, json_def=None):
        if isinstance(json_def, str):
            json_def = json.loads(json_def)
        s = json_def or {}  # tolerate None instead of raising on the 'in' checks below
self.pc_display = None if 'pc' not in s else self.pc_display_easy(s['pc'])
self.pc_final = None if 'pc' not in s else self.pc_final_easy(s['pc'])
self.mobile_display = None if 'mobile' not in s else self.mobile_display_easy(s['mobile'])
self.mobile_final = None if 'mobile' not in s else self.mobile_final_easy(s['mobile'])
self.headline = None if 'headline' not in s else s['headline']
self.description = None if 'description' not in s else s['description']
def pc_final_easy(self, pc):
return pc['final']
def pc_display_easy(self, pc):
return pc['display']
def mobile_final_easy(self, mobile):
return mobile['final']
def mobile_display_easy(self, mobile):
return mobile['display']
| true
| true
|
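Aside: AdFieldObject accepts either a dict or a JSON string and maps absent keys to None. A short usage sketch with made-up ad data, assuming the class above is in scope:

# Usage sketch for AdFieldObject with made-up ad data.
raw = '{"pc": {"display": "example.com", "final": "https://example.com/landing"}, "headline": "Example headline"}'
ad = AdFieldObject(raw)
print(ad.pc_display)    # example.com
print(ad.mobile_final)  # None: the "mobile" key is absent
print(ad.description)   # None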
7905b0f232e10628c0a9a48e27924a106d398bd1
| 1,936
|
py
|
Python
|
lintcode/724.1.py
|
jianershi/algorithm
|
c3c38723b9c5f1cc745550d89e228f92fd4abfb2
|
[
"MIT"
] | 1
|
2021-01-08T06:57:49.000Z
|
2021-01-08T06:57:49.000Z
|
lintcode/724.1.py
|
jianershi/algorithm
|
c3c38723b9c5f1cc745550d89e228f92fd4abfb2
|
[
"MIT"
] | null | null | null |
lintcode/724.1.py
|
jianershi/algorithm
|
c3c38723b9c5f1cc745550d89e228f92fd4abfb2
|
[
"MIT"
] | 1
|
2021-01-08T06:57:52.000Z
|
2021-01-08T06:57:52.000Z
|
"""
724. Minimum Partition
https://www.lintcode.com/problem/minimum-partition/description

0/1 knapsack (algorithm class 2020, chapter C27: a 0/1-knapsack variant).

First dp definition:
dp[i][j]: the maximum value achievable using the first i items within capacity <= j
dp[i][j] = max(dp[i - 1][j], dp[i - 1][j - nums[i - 1]] + nums[i - 1])
dp[0][0] = 0
dp[i][0] = 0
answer: total_sum - 2 * max(dp[n])

The plain 2D table exceeded the time limit, hence the two-row rolling array below.
"""
class Solution:
"""
@param nums: the given array
@return: the minimum difference between their sums
"""
def findMin(self, nums):
# write your code here
if not nums:
return 0
n = len(nums)
total_sum = sum(nums)
target = total_sum // 2
dp = [[0] * (target + 1) for _ in range(2)]
now, old = 0, 0
for i in range(1, n + 1):
old = now
now = 1 - now
for j in range(0, target + 1):
dp[now][j] = dp[old][j]
if j >= nums[i - 1]:
dp[now][j] = max(dp[now][j], dp[old][j - nums[i - 1]] + nums[i - 1])
return total_sum - 2 * max(dp[now])
s = Solution()
nums = [987,523,979,847,734,706,452,903,702,332,713,181,991,843,879,505,718,694,18,303,795,521,696,388,866,908,350,528,445,780,864,295,257,337,704,648,495,949,39,33,606,553,618,191,854,405,715,413,472,185,216,489,212,199,162,462,929,191,429,726,902,9,579,403,370,435,871,160,197,884,619,716,182,7,906,974,679,531,852,158,861,174,445,701,871,557,942,798,921,389,450,485,901,179,515,401,117,451,731,828,685,20,50,673,891,232,30,385,511,338,375,118,81,392,296,546,903,59,580,620,268,422,597,876,333,766,158,295,443,204,434,357,632,592,543,341,434,58,525,683,338,165,332,51,152,191,378,63,10,475,951,469,622,811,296,415,282,547,994,358,134,195,888,75,195,805,908,673,867,346,935,318,603,507,45,209,54,641,515,867,881,880,290,781,452,808,775,998,731,908,451,592,608,87,1000,812,30,673,393,380,241,135,421,144,954,64,747,502,633]
print(s.findMin(nums))
| 37.230769
| 823
| 0.61312
|
class Solution:
def findMin(self, nums):
if not nums:
return 0
n = len(nums)
total_sum = sum(nums)
target = total_sum // 2
dp = [[0] * (target + 1) for _ in range(2)]
now, old = 0, 0
for i in range(1, n + 1):
old = now
now = 1 - now
for j in range(0, target + 1):
dp[now][j] = dp[old][j]
if j >= nums[i - 1]:
dp[now][j] = max(dp[now][j], dp[old][j - nums[i - 1]] + nums[i - 1])
return total_sum - 2 * max(dp[now])
s = Solution()
nums = [987,523,979,847,734,706,452,903,702,332,713,181,991,843,879,505,718,694,18,303,795,521,696,388,866,908,350,528,445,780,864,295,257,337,704,648,495,949,39,33,606,553,618,191,854,405,715,413,472,185,216,489,212,199,162,462,929,191,429,726,902,9,579,403,370,435,871,160,197,884,619,716,182,7,906,974,679,531,852,158,861,174,445,701,871,557,942,798,921,389,450,485,901,179,515,401,117,451,731,828,685,20,50,673,891,232,30,385,511,338,375,118,81,392,296,546,903,59,580,620,268,422,597,876,333,766,158,295,443,204,434,357,632,592,543,341,434,58,525,683,338,165,332,51,152,191,378,63,10,475,951,469,622,811,296,415,282,547,994,358,134,195,888,75,195,805,908,673,867,346,935,318,603,507,45,209,54,641,515,867,881,880,290,781,452,808,775,998,731,908,451,592,608,87,1000,812,30,673,393,380,241,135,421,144,954,64,747,502,633]
print(s.findMin(nums))
| true
| true
|
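Aside: the two-row rolling table above keeps only dp[old] and dp[now], cutting memory from O(n * target) to O(target) without changing the recurrence. A quick sanity check, assuming the Solution class from the record is in scope:

# Hand-checkable cases for the rolling-array knapsack above.
s = Solution()
assert s.findMin([1, 6, 11, 5]) == 1  # {11} vs {1, 6, 5}: |11 - 12| = 1
assert s.findMin([]) == 0             # empty input short-circuits to 0
assert s.findMin([7]) == 7            # one side must stay empty
print("ok")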
7905b13417d53c5592a60a952e999d5d06d815d0
| 19,024
|
py
|
Python
|
cnn_phi_psi.py
|
Graveheart/ProteinSSPrediction
|
21bada89a592ff77e0d12063b7225b4f3da4fb1f
|
[
"MIT"
] | null | null | null |
cnn_phi_psi.py
|
Graveheart/ProteinSSPrediction
|
21bada89a592ff77e0d12063b7225b4f3da4fb1f
|
[
"MIT"
] | null | null | null |
cnn_phi_psi.py
|
Graveheart/ProteinSSPrediction
|
21bada89a592ff77e0d12063b7225b4f3da4fb1f
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
import os.path
import math
import tensorflow as tf
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
from sklearn.metrics import mean_absolute_error
LOGDIR = "/tmp/cnn_backbone_angles/"
# Parameters
batch_size = 5
training_epochs = 10
display_step = 1
internal_channels_1 = 100
internal_channels_2 = 100
internal_channels_3 = 100
internal_channels_4 = 50
window_size = 11
beta = 0.001
values_to_predict = 2
num_splits = 10
alpha = 0.2
dropout_keep_rate = 0.5
learning_rate = 1E-3
keep_prob = tf.placeholder_with_default(1.0, shape=(), name="keep_prob")
keep_prob_input = tf.placeholder_with_default(1.0, shape=(), name="keep_prob_input")
def fc_layer(input, size_in, size_out, name="fc"):
with tf.name_scope(name):
w = tf.Variable(tf.truncated_normal([window_size, size_in, size_out], stddev=0.1), name="W")
b = tf.Variable(tf.constant(0.1, shape=[size_out]), name="B")
act = conv1d(input, w) + b
tf.summary.histogram("weights", w)
tf.summary.histogram("biases", b)
tf.summary.histogram("activations", act)
return act, w
def convnn(x, channels_num, layers_num, window_size = 11):
W_arr = []
layers = []
# First convolutional layer
input_dimensions = x.get_shape().as_list()[1:]
filter_shape = [window_size, input_dimensions[-1], channels_num]
W_input = weight_variable(filter_shape)
W_arr.append(W_input)
b_input = bias_variable([input_dimensions[0], channels_num])
input_layer = tf.nn.relu(conv1d(x, W_input) + b_input)
dropout_input = tf.nn.dropout(input_layer, keep_prob_input)
layers.append(dropout_input)
# Hidden layers
filter_shape = [window_size, channels_num, channels_num]
W_hidden = tf.constant([], dtype=tf.float32)
for i in range(layers_num):
with tf.name_scope("conv"):
W_hidden = weight_variable(filter_shape)
W_arr.append(W_hidden)
b_hidden = bias_variable([input_dimensions[0], channels_num])
conv_layer = tf.nn.tanh(alpha*conv1d(layers[i], W_hidden) + b_hidden)
tf.summary.histogram("weights", W_hidden)
tf.summary.histogram("biases", b_hidden)
tf.summary.histogram("activations", conv_layer)
with tf.name_scope("dropout"):
dropout = tf.nn.dropout(conv_layer, keep_prob)
layers.append(dropout)
# Output convolutional layer
layer_out, W_out = fc_layer(layers[-1], channels_num, values_to_predict)
W_arr.append(W_out)
# layer_out = tf.atan2(tf.sin(layer_out), tf.cos(layer_out))
# Loss function with L2 Regularization with beta=0.001
regularizers = tf.nn.l2_loss(W_input) + tf.nn.l2_loss(W_hidden) * layers_num + tf.nn.l2_loss(W_out)
# regularizers = tf.constant(0, dtype=tf.float32)
# for W in W_arr:
# regularizers += tf.nn.l2_loss(W)
return layer_out, regularizers
def weight_variable(shape):
"""weight_variable generates a weight variable of a given shape."""
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial, name="W")
def bias_variable(shape):
"""bias_variable generates a bias variable of a given shape."""
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial, name="B")
def conv1d(x, W):
"""conv1d returns a 1d convolution layer."""
return tf.nn.conv1d(x, W, 1, 'SAME')
def avgpool2d(x, k=2):
    # AvgPool2D wrapper (this calls avg_pool, not max_pool)
return tf.nn.avg_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
padding='SAME')
def calculate_accuracy(predictions, labels):
num_proteins = predictions.shape[0]
protein_accuracy = np.zeros(num_proteins, dtype=np.float32)
label_accuracy = {1: {"total": 0, "correct": 0}, 2: {"total": 0, "correct": 0},
3: {"total": 0, "correct": 0}}
for i in range(num_proteins):
total_predictions = 0
correct_predictions = 0
for j in range(predictions.shape[1]):
phi = math.degrees(labels[i][j][0])
phi0 = math.degrees(predictions[i][j][0])
psi = math.degrees(labels[i][j][1])
psi0 = math.degrees(predictions[i][j][1])
if (phi != 0) or (psi != 0):
total_predictions += 1
expected_state = get_backbone_distribution(labels[i][j])
predicted_state = get_backbone_distribution(predictions[i][j])
label_accuracy[predicted_state]["total"] += 1
if (predicted_state == expected_state):
# correct_predictions += 1
label_accuracy[predicted_state]["correct"] += 1
# print("REAL PHI->>>>>"+str(labels[i][j][0]))
# print("PREDICTED PHI->>>>>" + str(predictions[i][j][0]))
diff = math.sqrt(math.pow(phi - phi0, 2)+math.pow(psi - psi0, 2))
                diff_phi = phi - phi0
diff_psi = psi - psi0
criteria_1 = (np.abs(diff_phi) < 60) & (np.abs(diff_psi) < 60)
criteria_2 = (np.abs(diff_phi+diff_psi) < 60) & (np.abs(diff_psi) < 90) & (np.abs(diff_phi) < 90)
if (diff < 60):
correct_predictions += 1
# print("CORRECT->>>>>"+str(correct_predictions))
# print("TOTAL->>>>>" + str(total_predictions))
if (total_predictions > 0):
protein_accuracy[i] = correct_predictions / float(total_predictions)
accuracy_dist = {}
total = 0
correct = 0
    for label, val in label_accuracy.items():
if (val["total"] > 0):
accuracy_dist[label] = val["correct"]/val["total"]
total += val["total"]
correct += val["correct"]
if (total > 0):
accuracy_dist["total"] = correct/total
return protein_accuracy, accuracy_dist
def get_backbone_distribution(angles):
phi = math.degrees(angles[0])
psi = math.degrees(angles[1])
    # A: -160 < phi < 0 and -70 < psi < 60
if (-160 < phi < 0) & (-70 < psi < 60):
return 1
# P: 0 < phi < 160 and -60 < psi < 95
elif (0 < phi < 160) & (-60 < psi < 95):
return 2
else:
return 3
def plot_ramachandran(predictions, title):
    phi_angles = predictions[:, :, 0].flatten()
    phi_angles = list(map(lambda x: math.degrees(x), phi_angles))
    psi_angles = predictions[:, :, 1].flatten()
    psi_angles = list(map(lambda x: math.degrees(x), psi_angles))
colors = np.random.rand(len(psi_angles))
fig = plt.figure()
plt.xlim([-180, 180])
plt.ylim([-180, 180])
plt.title(title)
plt.xlabel('phi')
plt.ylabel('psi')
plt.grid()
plt.scatter(phi_angles, psi_angles, alpha=0.5, c=colors)
fig.savefig("./plots/" + title + ".png", bbox_inches='tight')
# plt.show()
# fig.savefig("./plots/" + title + ".png", bbox_inches='tight')
plt.close()
def plot_loss(loss_arr):
l = plt.figure()
plt.plot(loss_arr)
plt.title('Loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(plot_legend, loc='upper left')
l.show()
def make_hparam_string(layers_num, channels_num, test_session):
return "nl_%s,nc_%s, session%s" % (layers_num, channels_num, test_session)
def convert_to_degrees(arr):
    """Convert all phi and psi angles to degrees"""
arr[0] = math.degrees(arr[0])
arr[1] = math.degrees(arr[1])
return arr
data = np.load('phipsi_features.npz')['features']
all_data = data.reshape(data.shape[0],700,69)
# all_data = all_data[0:300]
all_sets = all_data[:,:,0:21]
all_sets = np.concatenate([all_sets, all_data[:,:,21:42]], axis=-1)
all_sets = np.concatenate([all_sets, all_data[:,:,42:63]], axis=-1)
# all_labels = all_data[:,:,63:67]
all_angles = all_data[:,:,67:69]
where_are_NaNs = np.isnan(all_angles)
all_angles[where_are_NaNs] = 0.0
k_fold = KFold(n_splits=num_splits)
layers_channels = [(6, 100), (7, 100)]
# Build the convolutional network
for layers_num, channels_num in layers_channels:
for use_l2 in [False, True]:
for use_early_stopping in [True, False]:
crossvalidation_train_accuracy = 0
crossvalidation_test_accuracy = 0
crossvalidation_accuracy_distr = {'total': 0, 1: 0, 2: 0, 3: 0}
crossvalidation_test_mae = 0
executed_epochs = 0
train_session = 0
test_session = 0
learning_rate_type = 1
for train_index, test_index in k_fold.split(all_sets):
train_set, test_set = all_sets[train_index], all_sets[test_index]
train_labels, test_labels = all_angles[train_index], all_angles[test_index]
train_size = train_set.shape[0]
train_y = train_labels
test_y = test_labels
test_session += 1
# Create the model
x = tf.placeholder(tf.float32, [None, 700, train_set[0].shape[-1]], name="x")
# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 700, values_to_predict], name="labels")
y_nn, regularizers = convnn(x, channels_num, layers_num, window_size)
prediction = y_nn
with tf.name_scope("loss"):
deviations = tf.subtract(prediction, y_)
ae = tf.abs(deviations)
mae = tf.reduce_mean(ae)
atan2 = tf.atan2(tf.sin(deviations), tf.cos(deviations))
loss = tf.square(atan2, name="loss")
mean_loss = tf.reduce_mean(loss)
loss_summary = tf.summary.scalar("loss", mean_loss)
with tf.name_scope("loss2"):
# print(tf.shape(prediction))
# print(tf.shape(y_))
phi = prediction[:, :, 0]
phi0 = y_[:, :, 0]
psi = prediction[:, :, 1]
psi0 = y_[:,:, 1]
# cos_phi_diff = tf.square(tf.subtract(tf.cos(phi), tf.cos(phi0)))
# sin_phi_diff = tf.square(tf.subtract(tf.sin(phi), tf.sin(phi0)))
# cos_psi_diff = tf.square(tf.subtract(tf.cos(psi), tf.cos(psi0)))
# sin_psi_diff = tf.square(tf.subtract(tf.sin(psi), tf.sin(psi0)))
# phi_squared_sum = tf.add(cos_phi_diff, sin_phi_diff)
# psi_squared_sum = tf.add(cos_psi_diff, sin_psi_diff)
phi_diff = tf.reduce_sum(tf.squared_difference(phi, phi0))/2
psi_diff = tf.reduce_sum(tf.squared_difference(psi, psi0))/2
loss2 = tf.add(phi_diff, psi_diff)
with tf.name_scope("mse"):
                    mse = tf.reduce_mean(tf.squared_difference(prediction, y_))  # reduce to a scalar for tf.summary.scalar
                    mse_summary = tf.summary.scalar("mse", mse)
with tf.name_scope("l2_loss"):
l2_loss = beta * regularizers
if (use_l2):
loss = loss + l2_loss
loss = tf.reduce_mean(loss)
l2_summary = tf.summary.scalar("l2_loss", l2_loss)
with tf.name_scope("train"):
# Use Adam optimizer
optimization = tf.train.AdamOptimizer(learning_rate).minimize(loss)
# with tf.name_scope("accuracy"):
# correct_prediction = tf.equal(prediction, y)
# accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# tf.summary.scalar("accuracy", accuracy)
summ = tf.summary.merge_all()
print("Window size: " + str(window_size))
print("Layers: " + str(layers_num))
print("Channels: " + str(channels_num))
print("Beta: " + str(beta))
print("Use L2: " + str(use_l2))
print("Use Early stopping: " + str(use_early_stopping))
sess = tf.InteractiveSession()
init = tf.global_variables_initializer()
sess.run(init)
saver = tf.train.Saver()
min_delta = 0.01
plot_legend = []
previous_epoch_min = 100
min_validation_loss = 100
for epoch in range(training_epochs):
train_session += 1
loss_arr = []
previous_batch_loss = 0.0
patience = 6
patience_cnt = 0
hparam = make_hparam_string(layers_num, channels_num, train_session)
writer = tf.summary.FileWriter(LOGDIR + hparam)
writer.add_graph(sess.graph)
total_batches = int(train_size/batch_size)
# Loop over all batches
for i in range(total_batches):
start_index = i * batch_size
stop_index = (i+1) * batch_size
batch_x = train_set[start_index:stop_index]
batch_y = train_y[start_index:stop_index]
# Run optimization op
# backprop and cost op (to get loss value)
if i % 5 == 0:
batch_predictions, l_summ, batch_loss = sess.run([prediction, loss_summary, loss], feed_dict={x: batch_x, y_: batch_y, keep_prob: dropout_keep_rate, keep_prob_input: 0.8})
writer.add_summary(l_summ, i+1)
loss_arr.append(batch_loss)
saver.save(sess, os.path.join(LOGDIR, "model.ckpt"), i)
# batch_predictions = np.apply_along_axis(convert_to_degrees, 2, batch_predictions)
batch_accuracy, batch_distr = calculate_accuracy(batch_predictions, batch_y)
# print('step %d, training accuracy %g' % (i, np.average(batch_accuracy)))
# early stopping
if(use_early_stopping):
if (epoch > 2 and i > total_batches / 2 and batch_loss < previous_epoch_min):
previous_epoch_min = min(loss_arr)
print("Early stopping!!")
break
optimization.run(feed_dict={x: batch_x, y_: batch_y})
previous_epoch_min = min(loss_arr)
# Display logs per epoch step
if epoch % display_step == 0:
predictions, train_loss = sess.run([prediction,loss], feed_dict={x: train_set, y_: train_y, keep_prob: dropout_keep_rate, keep_prob_input: 0.8})
# predictions = np.apply_along_axis(convert_to_degrees, 2, predictions)
# plot_ramachandran(train_y, "Real values_"+str(epoch))
# raw_input()
train_accuracy, train_acc_distr = calculate_accuracy(predictions, train_y)
train_accuracy = np.average(train_accuracy)
crossvalidation_train_accuracy += train_accuracy
plot_legend.append('train_' + str(epoch))
# plot_loss(loss_arr)
# print("Training accuracy: ", \
# "{:.6f}".format(train_accuracy))
if (epoch > training_epochs / 2):
valid_predictions, valid_loss, valid_mae = sess.run([prediction, loss, mae], feed_dict={x: test_set, y_: test_y})
# valid_predictions = np.apply_along_axis(convert_to_degrees, 2, valid_predictions)
valid_accuracy, valid_acc_distr = calculate_accuracy(valid_predictions, test_y)
valid_accuracy = np.average(valid_accuracy)
if (epoch >= training_epochs - 1):
if (valid_loss < min_validation_loss):
training_epochs += 1
print("INCREASING EPOCHS")
else:
crossvalidation_test_accuracy += valid_accuracy
crossvalidation_test_mae += valid_mae
for label in valid_acc_distr:
crossvalidation_accuracy_distr[label] += valid_acc_distr[label]
print(crossvalidation_accuracy_distr)
if (epoch >= training_epochs - 2):
min_validation_loss = valid_loss
print(valid_acc_distr)
print("Validation accuracy: ", \
"{:.6f}".format(valid_accuracy))
executed_epochs += 1
# Test trained model
test_predictions, test_summ, test_mae = sess.run([prediction, loss_summary, mae], feed_dict={x: test_set, y_: test_y})
writer.add_summary(test_summ, i + 1)
test_accuracy, test_acc_distr = calculate_accuracy(test_predictions, test_y)
plot_ramachandran(test_predictions, "Predictions Fold "+str(test_session))
plot_ramachandran(test_y, "Real values Fold "+str(test_session))
# plot_legend.append('validation')
print(test_acc_distr)
# test_accuracy = np.average(test_accuracy)
# crossvalidation_test_accuracy += test_accuracy
# crossvalidation_test_mae += test_mae
# print("Testing accuracy: ", \
# "{:.6f}".format(test_accuracy))
for label in crossvalidation_accuracy_distr:
crossvalidation_accuracy_distr[label] /= num_splits
print(crossvalidation_accuracy_distr)
# print("Final Testing DISTR: ", \
# "{:.6f}".format(crossvalidation_test_mae / num_splits))
print("Final Testing MAE: ", \
"{:.6f}".format(crossvalidation_test_mae / num_splits))
# print("Final Training accuracy: ", \
# "{:.6f}".format(crossvalidation_train_accuracy / (num_splits*training_epochs)))
print("Final Test accuracy: ", \
"{:.6f}".format(crossvalidation_test_accuracy / num_splits))
print('Run `tensorboard --logdir=%s` to see the results.' % LOGDIR)
# valid_predictions = sess.run(tf.argmax(prediction, 2), feed_dict={x: valid_x, y_: valid_y})
# valid_labels = np.argmax(valid_y, 2)
# valid_accuracy = calculate_accuracy(valid_predictions, valid_labels)
# print("Validation accuracy: ", \
# "{:.6f}".format(valid_accuracy))
| 46.062954
| 199
| 0.568545
|
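Aside: the "loss" scope above wraps the prediction error through atan2(sin(d), cos(d)) before squaring, so predicting 179 degrees against a label of -179 degrees costs roughly (2 deg)^2 instead of (358 deg)^2. A TensorFlow-free NumPy sketch of the same wrapping:

# NumPy sketch of the wrapped angular loss used in the "loss" scope above.
import numpy as np

def wrapped_sq_error(pred_rad, label_rad):
    delta = pred_rad - label_rad
    wrapped = np.arctan2(np.sin(delta), np.cos(delta))  # folds into (-pi, pi]
    return wrapped ** 2

phi_pred, phi_true = np.radians(179.0), np.radians(-179.0)
print(wrapped_sq_error(phi_pred, phi_true))  # ~0.0012 rad^2, i.e. (2 deg)^2
print((phi_pred - phi_true) ** 2)            # naive: ~39.0 rad^2, i.e. (358 deg)^2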
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
import os.path
import math
import tensorflow as tf
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
from sklearn.metrics import mean_absolute_error
LOGDIR = "/tmp/cnn_backbone_angles/"
batch_size = 5
training_epochs = 10
display_step = 1
internal_channels_1 = 100
internal_channels_2 = 100
internal_channels_3 = 100
internal_channels_4 = 50
window_size = 11
beta = 0.001
values_to_predict = 2
num_splits = 10
alpha = 0.2
dropout_keep_rate = 0.5
learning_rate = 1E-3
keep_prob = tf.placeholder_with_default(1.0, shape=(), name="keep_prob")
keep_prob_input = tf.placeholder_with_default(1.0, shape=(), name="keep_prob_input")
def fc_layer(input, size_in, size_out, name="fc"):
with tf.name_scope(name):
w = tf.Variable(tf.truncated_normal([window_size, size_in, size_out], stddev=0.1), name="W")
b = tf.Variable(tf.constant(0.1, shape=[size_out]), name="B")
act = conv1d(input, w) + b
tf.summary.histogram("weights", w)
tf.summary.histogram("biases", b)
tf.summary.histogram("activations", act)
return act, w
def convnn(x, channels_num, layers_num, window_size = 11):
W_arr = []
layers = []
input_dimensions = x.get_shape().as_list()[1:]
filter_shape = [window_size, input_dimensions[-1], channels_num]
W_input = weight_variable(filter_shape)
W_arr.append(W_input)
b_input = bias_variable([input_dimensions[0], channels_num])
input_layer = tf.nn.relu(conv1d(x, W_input) + b_input)
dropout_input = tf.nn.dropout(input_layer, keep_prob_input)
layers.append(dropout_input)
filter_shape = [window_size, channels_num, channels_num]
W_hidden = tf.constant([], dtype=tf.float32)
for i in range(layers_num):
with tf.name_scope("conv"):
W_hidden = weight_variable(filter_shape)
W_arr.append(W_hidden)
b_hidden = bias_variable([input_dimensions[0], channels_num])
conv_layer = tf.nn.tanh(alpha*conv1d(layers[i], W_hidden) + b_hidden)
tf.summary.histogram("weights", W_hidden)
tf.summary.histogram("biases", b_hidden)
tf.summary.histogram("activations", conv_layer)
with tf.name_scope("dropout"):
dropout = tf.nn.dropout(conv_layer, keep_prob)
layers.append(dropout)
layer_out, W_out = fc_layer(layers[-1], channels_num, values_to_predict)
W_arr.append(W_out)
regularizers = tf.nn.l2_loss(W_input) + tf.nn.l2_loss(W_hidden) * layers_num + tf.nn.l2_loss(W_out)
return layer_out, regularizers
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial, name="W")
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial, name="B")
def conv1d(x, W):
return tf.nn.conv1d(x, W, 1, 'SAME')
def avgpool2d(x, k=2):
return tf.nn.avg_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
padding='SAME')
def calculate_accuracy(predictions, labels):
num_proteins = predictions.shape[0]
protein_accuracy = np.zeros(num_proteins, dtype=np.float32)
label_accuracy = {1: {"total": 0, "correct": 0}, 2: {"total": 0, "correct": 0},
3: {"total": 0, "correct": 0}}
for i in range(num_proteins):
total_predictions = 0
correct_predictions = 0
for j in range(predictions.shape[1]):
phi = math.degrees(labels[i][j][0])
phi0 = math.degrees(predictions[i][j][0])
psi = math.degrees(labels[i][j][1])
psi0 = math.degrees(predictions[i][j][1])
if (phi != 0) or (psi != 0):
total_predictions += 1
expected_state = get_backbone_distribution(labels[i][j])
predicted_state = get_backbone_distribution(predictions[i][j])
label_accuracy[predicted_state]["total"] += 1
if (predicted_state == expected_state):
label_accuracy[predicted_state]["correct"] += 1
diff = math.sqrt(math.pow(phi - phi0, 2)+math.pow(psi - psi0, 2))
                diff_phi = phi - phi0
diff_psi = psi - psi0
criteria_1 = (np.abs(diff_phi) < 60) & (np.abs(diff_psi) < 60)
criteria_2 = (np.abs(diff_phi+diff_psi) < 60) & (np.abs(diff_psi) < 90) & (np.abs(diff_phi) < 90)
if (diff < 60):
correct_predictions += 1
if (total_predictions > 0):
protein_accuracy[i] = correct_predictions / float(total_predictions)
accuracy_dist = {}
total = 0
correct = 0
    for label, val in label_accuracy.items():
if (val["total"] > 0):
accuracy_dist[label] = val["correct"]/val["total"]
total += val["total"]
correct += val["correct"]
if (total > 0):
accuracy_dist["total"] = correct/total
return protein_accuracy, accuracy_dist
def get_backbone_distribution(angles):
phi = math.degrees(angles[0])
psi = math.degrees(angles[1])
if (-160 < phi < 0) & (-70 < psi < 60):
return 1
elif (0 < phi < 160) & (-60 < psi < 95):
return 2
else:
return 3
def plot_ramachandran(predictions, title):
    phi_angles = predictions[:, :, 0].flatten()
    phi_angles = list(map(lambda x: math.degrees(x), phi_angles))
    psi_angles = predictions[:, :, 1].flatten()
    psi_angles = list(map(lambda x: math.degrees(x), psi_angles))
colors = np.random.rand(len(psi_angles))
fig = plt.figure()
plt.xlim([-180, 180])
plt.ylim([-180, 180])
plt.title(title)
plt.xlabel('phi')
plt.ylabel('psi')
plt.grid()
plt.scatter(phi_angles, psi_angles, alpha=0.5, c=colors)
fig.savefig("./plots/" + title + ".png", bbox_inches='tight')
plt.close()
def plot_loss(loss_arr):
l = plt.figure()
plt.plot(loss_arr)
plt.title('Loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(plot_legend, loc='upper left')
l.show()
def make_hparam_string(layers_num, channels_num, test_session):
return "nl_%s,nc_%s, session%s" % (layers_num, channels_num, test_session)
def convert_to_degrees(arr):
arr[0] = math.degrees(arr[0])
arr[1] = math.degrees(arr[1])
return arr
data = np.load('phipsi_features.npz')['features']
all_data = data.reshape(data.shape[0],700,69)
all_sets = all_data[:,:,0:21]
all_sets = np.concatenate([all_sets, all_data[:,:,21:42]], axis=-1)
all_sets = np.concatenate([all_sets, all_data[:,:,42:63]], axis=-1)
all_angles = all_data[:,:,67:69]
where_are_NaNs = np.isnan(all_angles)
all_angles[where_are_NaNs] = 0.0
k_fold = KFold(n_splits=num_splits)
layers_channels = [(6, 100), (7, 100)]
for layers_num, channels_num in layers_channels:
for use_l2 in [False, True]:
for use_early_stopping in [True, False]:
crossvalidation_train_accuracy = 0
crossvalidation_test_accuracy = 0
crossvalidation_accuracy_distr = {'total': 0, 1: 0, 2: 0, 3: 0}
crossvalidation_test_mae = 0
executed_epochs = 0
train_session = 0
test_session = 0
learning_rate_type = 1
for train_index, test_index in k_fold.split(all_sets):
train_set, test_set = all_sets[train_index], all_sets[test_index]
train_labels, test_labels = all_angles[train_index], all_angles[test_index]
train_size = train_set.shape[0]
train_y = train_labels
test_y = test_labels
test_session += 1
x = tf.placeholder(tf.float32, [None, 700, train_set[0].shape[-1]], name="x")
y_ = tf.placeholder(tf.float32, [None, 700, values_to_predict], name="labels")
y_nn, regularizers = convnn(x, channels_num, layers_num, window_size)
prediction = y_nn
with tf.name_scope("loss"):
deviations = tf.subtract(prediction, y_)
ae = tf.abs(deviations)
mae = tf.reduce_mean(ae)
atan2 = tf.atan2(tf.sin(deviations), tf.cos(deviations))
loss = tf.square(atan2, name="loss")
mean_loss = tf.reduce_mean(loss)
loss_summary = tf.summary.scalar("loss", mean_loss)
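      # atan2(sin(d), cos(d)) wraps each angular error d into (-pi, pi], so a
      # prediction of 179 degrees against a label of -179 degrees costs about
      # 2 degrees rather than 358 before squaring.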
with tf.name_scope("loss2"):
phi = prediction[:, :, 0]
phi0 = y_[:, :, 0]
psi = prediction[:, :, 1]
psi0 = y_[:,:, 1]
phi_diff = tf.reduce_sum(tf.squared_difference(phi, phi0))/2
psi_diff = tf.reduce_sum(tf.squared_difference(psi, psi0))/2
loss2 = tf.add(phi_diff, psi_diff)
with tf.name_scope("mse"):
      mse = tf.reduce_mean(tf.squared_difference(prediction, y_))
      mse_summary = tf.summary.scalar("mse", mse)
with tf.name_scope("l2_loss"):
l2_loss = beta * regularizers
if (use_l2):
loss = loss + l2_loss
loss = tf.reduce_mean(loss)
l2_summary = tf.summary.scalar("l2_loss", l2_loss)
with tf.name_scope("train"):
optimization = tf.train.AdamOptimizer(learning_rate).minimize(loss)
summ = tf.summary.merge_all()
print("Window size: " + str(window_size))
print("Layers: " + str(layers_num))
print("Channels: " + str(channels_num))
print("Beta: " + str(beta))
print("Use L2: " + str(use_l2))
print("Use Early stopping: " + str(use_early_stopping))
sess = tf.InteractiveSession()
init = tf.global_variables_initializer()
sess.run(init)
saver = tf.train.Saver()
min_delta = 0.01
plot_legend = []
previous_epoch_min = 100
min_validation_loss = 100
for epoch in range(training_epochs):
train_session += 1
loss_arr = []
previous_batch_loss = 0.0
patience = 6
patience_cnt = 0
hparam = make_hparam_string(layers_num, channels_num, train_session)
writer = tf.summary.FileWriter(LOGDIR + hparam)
writer.add_graph(sess.graph)
total_batches = int(train_size/batch_size)
for i in range(total_batches):
start_index = i * batch_size
stop_index = (i+1) * batch_size
batch_x = train_set[start_index:stop_index]
batch_y = train_y[start_index:stop_index]
if i % 5 == 0:
batch_predictions, l_summ, batch_loss = sess.run([prediction, loss_summary, loss], feed_dict={x: batch_x, y_: batch_y, keep_prob: dropout_keep_rate, keep_prob_input: 0.8})
writer.add_summary(l_summ, i+1)
loss_arr.append(batch_loss)
saver.save(sess, os.path.join(LOGDIR, "model.ckpt"), i)
batch_accuracy, batch_distr = calculate_accuracy(batch_predictions, batch_y)
          if use_early_stopping:
            # Cut the current epoch short once a late batch already beats the
            # previous epoch's best loss; only the remaining batches of this
            # epoch are skipped, not the whole run.
            if (epoch > 2 and i > total_batches / 2 and batch_loss < previous_epoch_min):
              previous_epoch_min = min(loss_arr)
              print("Early stopping!!")
              break
optimization.run(feed_dict={x: batch_x, y_: batch_y})
previous_epoch_min = min(loss_arr)
if epoch % display_step == 0:
predictions, train_loss = sess.run([prediction,loss], feed_dict={x: train_set, y_: train_y, keep_prob: dropout_keep_rate, keep_prob_input: 0.8})
train_accuracy, train_acc_distr = calculate_accuracy(predictions, train_y)
train_accuracy = np.average(train_accuracy)
crossvalidation_train_accuracy += train_accuracy
plot_legend.append('train_' + str(epoch))
if (epoch > training_epochs / 2):
valid_predictions, valid_loss, valid_mae = sess.run([prediction, loss, mae], feed_dict={x: test_set, y_: test_y})
valid_accuracy, valid_acc_distr = calculate_accuracy(valid_predictions, test_y)
valid_accuracy = np.average(valid_accuracy)
if (epoch >= training_epochs - 1):
              if (valid_loss < min_validation_loss):
                # NOTE: the loop bound was fixed when range(training_epochs)
                # was evaluated, so this increment only shifts the epoch
                # threshold checks above; it does not add extra epochs.
                training_epochs += 1
                print("INCREASING EPOCHS")
else:
crossvalidation_test_accuracy += valid_accuracy
crossvalidation_test_mae += valid_mae
for label in valid_acc_distr:
crossvalidation_accuracy_distr[label] += valid_acc_distr[label]
print(crossvalidation_accuracy_distr)
if (epoch >= training_epochs - 2):
min_validation_loss = valid_loss
print(valid_acc_distr)
print("Validation accuracy: ", \
"{:.6f}".format(valid_accuracy))
executed_epochs += 1
test_predictions, test_summ, test_mae = sess.run([prediction, loss_summary, mae], feed_dict={x: test_set, y_: test_y})
writer.add_summary(test_summ, i + 1)
test_accuracy, test_acc_distr = calculate_accuracy(test_predictions, test_y)
plot_ramachandran(test_predictions, "Predictions Fold "+str(test_session))
plot_ramachandran(test_y, "Real values Fold "+str(test_session))
        print(test_acc_distr)
        # Release the per-fold session before the next fold's graph is built.
        sess.close()
for label in crossvalidation_accuracy_distr:
crossvalidation_accuracy_distr[label] /= num_splits
print(crossvalidation_accuracy_distr)
print("Final Testing MAE: ", \
"{:.6f}".format(crossvalidation_test_mae / num_splits))
print("Final Test accuracy: ", \
"{:.6f}".format(crossvalidation_test_accuracy / num_splits))
print('Run `tensorboard --logdir=%s` to see the results.' % LOGDIR)
| true
| true
|
7905b1451ce0807588a7e88fb4ac003ba7e67b26
| 2,802
|
py
|
Python
|
extra_code/transformers-gpt2-finetune.py
|
cipher982/Wine-o-matic
|
a8000bf5ec86554e9c3c746aae51ba509ab59162
|
[
"Apache-2.0"
] | 4
|
2019-04-18T20:34:53.000Z
|
2022-01-18T10:09:55.000Z
|
extra_code/transformers-gpt2-finetune.py
|
cipher982/Wine-o-matic
|
a8000bf5ec86554e9c3c746aae51ba509ab59162
|
[
"Apache-2.0"
] | 11
|
2020-01-28T22:09:12.000Z
|
2021-04-01T19:57:29.000Z
|
extra_code/transformers-gpt2-finetune.py
|
cipher982/Wine-o-matic
|
a8000bf5ec86554e9c3c746aae51ba509ab59162
|
[
"Apache-2.0"
] | null | null | null |
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# import deepspeed
# import mpi4py
# import pandas
import torch
import transformers
import wandb
#%env WANDB_PROJECT=wine_gpt2_Trainer_42
MODEL_NAME = "gpt2-medium"
# wandb.login(anonymous='never', key="222a37baaf0c1b0d1499ec003e5c2fe49f97b107")
wandb.init()
# wandb.watch(log='all')
print(torch.cuda.is_available())
print(f"transformers version: {transformers.__version__}")
print(f"PyTorch version: {torch.__version__}")
# Tokenizers
tokenizer = transformers.AutoTokenizer.from_pretrained(MODEL_NAME)
print(len(tokenizer))
tokenizer.add_special_tokens(
{"eos_token": "<|startoftext|>", "bos_token": "<|startoftext|>"}
)
tokenizer.add_tokens(
[
"[prompt]",
"[response]",
"[category_1]",
"[category_2]",
"[origin]",
"[description]",
"<|endoftext|>",
]
)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.save_pretrained("data/modeling/trainer_42/")
print(len(tokenizer))
print("Created tokenizer")
class wineDataset(torch.utils.data.Dataset):
def __init__(self, encodings):
self.encodings = encodings
def __len__(self):
return len(self.encodings["input_ids"])
def __getitem__(self, idx):
item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
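        # For causal-LM fine-tuning the labels are simply the input ids;
        # transformers shifts them right internally so each position is
        # trained to predict the next token.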
item["labels"] = item["input_ids"]
return item
with open("data/scraped/name_desc_nlp_ready_train.txt", "r", encoding="utf8") as file:
wines_raw_train = file.read().splitlines()
with open("data/scraped/name_desc_nlp_ready_test.txt", "r", encoding="utf8") as file:
wines_raw_test = file.read().splitlines()
print("Loaded dataset")
# wines_raw_train, wines_raw_test = train_test_split(wines_raw,test_size=0.2)
# wine_encodings_train = tokenizer(wines_raw_train, max_length=200, truncation=True, padding=True)
wine_encodings_test = tokenizer(
wines_raw_test, max_length=200, truncation=True, padding=True
)
print("Encoded dataset")
# wine_dataset_train = wineDataset(wine_encodings_train)
wine_dataset_test = wineDataset(wine_encodings_test)
print("Created PyTorch DataSet")
# train_loader = torch.utils.data.DataLoader(wine_dataset_train)
model = transformers.AutoModelForCausalLM.from_pretrained(MODEL_NAME)
# model.to('cuda')
model.resize_token_embeddings(len(tokenizer))
print(f"model parameters: {model.num_parameters():,}")
training_args = transformers.TrainingArguments(
output_dir="data/modeling/trainer_42/",
overwrite_output_dir=True,
num_train_epochs=1,
per_device_train_batch_size=2,
save_steps=100,
save_total_limit=2,
fp16=True,
# deepspeed='data/ds_config.json'
)
trainer = transformers.Trainer(
model=model, args=training_args, train_dataset=wine_dataset_test,
)
trainer.train()
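# A minimal generation sketch (not part of the original script; assumes the
# fine-tuned weights were saved under data/modeling/trainer_42/ by save_steps):
# ft = transformers.AutoModelForCausalLM.from_pretrained("data/modeling/trainer_42/checkpoint-100")
# ids = tokenizer("<|startoftext|> [prompt]", return_tensors="pt").input_ids
# print(tokenizer.decode(ft.generate(ids, max_length=50)[0]))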
| 26.433962
| 98
| 0.732691
|
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import torch
import transformers
import wandb
MODEL_NAME = "gpt2-medium"
wandb.init()
print(torch.cuda.is_available())
print(f"transformers version: {transformers.__version__}")
print(f"PyTorch version: {torch.__version__}")
tokenizer = transformers.AutoTokenizer.from_pretrained(MODEL_NAME)
print(len(tokenizer))
tokenizer.add_special_tokens(
{"eos_token": "<|startoftext|>", "bos_token": "<|startoftext|>"}
)
tokenizer.add_tokens(
[
"[prompt]",
"[response]",
"[category_1]",
"[category_2]",
"[origin]",
"[description]",
"<|endoftext|>",
]
)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.save_pretrained("data/modeling/trainer_42/")
print(len(tokenizer))
print("Created tokenizer")
class wineDataset(torch.utils.data.Dataset):
def __init__(self, encodings):
self.encodings = encodings
def __len__(self):
return len(self.encodings["input_ids"])
def __getitem__(self, idx):
item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
item["labels"] = item["input_ids"]
return item
with open("data/scraped/name_desc_nlp_ready_train.txt", "r", encoding="utf8") as file:
wines_raw_train = file.read().splitlines()
with open("data/scraped/name_desc_nlp_ready_test.txt", "r", encoding="utf8") as file:
wines_raw_test = file.read().splitlines()
print("Loaded dataset")
wine_encodings_test = tokenizer(
wines_raw_test, max_length=200, truncation=True, padding=True
)
print("Encoded dataset")
wine_dataset_test = wineDataset(wine_encodings_test)
print("Created PyTorch DataSet")
model = transformers.AutoModelForCausalLM.from_pretrained(MODEL_NAME)
model.resize_token_embeddings(len(tokenizer))
print(f"model parameters: {model.num_parameters():,}")
training_args = transformers.TrainingArguments(
output_dir="data/modeling/trainer_42/",
overwrite_output_dir=True,
num_train_epochs=1,
per_device_train_batch_size=2,
save_steps=100,
save_total_limit=2,
fp16=True,
)
trainer = transformers.Trainer(
model=model, args=training_args, train_dataset=wine_dataset_test,
)
trainer.train()
| true
| true
|
7905b1f1ad67b3664126b65439ec381c3749ddb9
| 438
|
py
|
Python
|
insta/admin.py
|
MachariaMark/fakeinsta
|
df997f5bb9e6ba4e5573d132bd260bba1046cdbd
|
[
"MIT"
] | null | null | null |
insta/admin.py
|
MachariaMark/fakeinsta
|
df997f5bb9e6ba4e5573d132bd260bba1046cdbd
|
[
"MIT"
] | null | null | null |
insta/admin.py
|
MachariaMark/fakeinsta
|
df997f5bb9e6ba4e5573d132bd260bba1046cdbd
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import *
from django.contrib.auth.models import User
class ImageAdmin(admin.ModelAdmin):
fields = ( 'image','name','caption','profile','post_date', 'user', )
readonly_fields = ('profile', 'post_date', 'user',)
#registering the models
# admin.site.register(Image, ImageAdmin)
admin.site.register(Profile)
admin.site.register(Image)
admin.site.register(Like)
admin.site.register(Comment)
| 29.2
| 70
| 0.755708
|
from django.contrib import admin
from .models import *
from django.contrib.auth.models import User
class ImageAdmin(admin.ModelAdmin):
fields = ( 'image','name','caption','profile','post_date', 'user', )
readonly_fields = ('profile', 'post_date', 'user',)
admin.site.register(Profile)
admin.site.register(Image)
admin.site.register(Like)
admin.site.register(Comment)
| true
| true
|
7905b27af6d2ff50a968e6c1314399485b1e289d
| 4,387
|
py
|
Python
|
test/test_tree.py
|
virtualms/DomainTree
|
07921b8894b87fd26928cad4ddd30c31be8a5101
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_tree.py
|
virtualms/DomainTree
|
07921b8894b87fd26928cad4ddd30c31be8a5101
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_tree.py
|
virtualms/DomainTree
|
07921b8894b87fd26928cad4ddd30c31be8a5101
|
[
"BSD-3-Clause"
] | null | null | null |
import random
import unittest
from domain_tree.tree import DomainTree, DomainNode, NodeNotFoundException
from domain_tree.domain import RealDomain, RealInterval
class TestDomainTree(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
pass
@classmethod
def tearDownClass(cls) -> None:
pass
def setUp(self):
# self.d0 = {"x0": (0, 1)}
self.d0 = RealDomain({"x0": RealInterval((0, 1), (True, False))})
def tearDown(self) -> None:
pass
def test_npartition(self):
tree = DomainTree(domains=self.d0, min_split=0.5)
self.assertEqual(len(tree.leaves), 2)
tree = DomainTree(domains=self.d0, min_split=0.3)
self.assertIn(len(tree.leaves), [2, 3])
tree = DomainTree(domains=self.d0, min_split=0.2)
self.assertIn(len(tree.leaves), [3, 4, 5])
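        # With min_split=0 every node keeps splitting until depth_max, and a
        # complete binary tree capped at depth 5 ends with 2**5 / 2 = 16
        # leaves (the root apparently counting as depth 1).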
tree = DomainTree(domains=self.d0, min_split=0, depth_max=5)
n = (2 ** 5) / 2
self.assertEqual(len(tree.leaves), n)
def test_stress_functions(self):
for _ in range(10000):
tree = DomainTree(domains=self.d0, min_split=0, depth_max=5)
tree = DomainTree(domains=self.d0, min_split=0, depth_max=10)
for _ in range(10000):
tree.compute_f({"x0": random.random()})
with self.assertRaises(NodeNotFoundException):
for _ in range(10000):
tree.compute_f({"x0": random.random() + 1})
def test_contains(self):
tree = DomainTree(domains=self.d0, min_split=0.5)
x = {"x0": 0}
self.assertTrue(tree.contains(x))
x = {"x0": 1}
self.assertFalse(tree.contains(x))
x = {"x0": 0.5}
self.assertTrue(tree.contains(x))
#d = {"x0": (0, 1), "x1": (2, 3)}
d = RealDomain({"x0": RealInterval((0, 1), (True, False)), "x1": RealInterval((2, 3), (True, False))})
tree = DomainTree(domains=d, min_split=0.5)
x = {"x0": 0, "x1": 2}
self.assertTrue(tree.contains(x))
x = {"x0": 1, "x1": 2}
self.assertFalse(tree.contains(x))
x = {"x0": 0.5, "x1": 2.99}
self.assertTrue(tree.contains(x))
d = RealDomain({"x0": RealInterval((0, 1), (True, True)), "x1": RealInterval((2, 3), (False, False))})
tree = DomainTree(domains=d, min_split=0.5)
#tree.print_tree()
x = {"x0": 0, "x1": 2}
self.assertFalse(tree.contains(x))
x = {"x0": 0, "x1": 2.5}
self.assertTrue(tree.contains(x))
def test_compute_f(self):
tree = DomainTree(domains=self.d0, min_split=0.5)
with self.assertRaises(NodeNotFoundException):
tree.node_which_contains({"x0": -12})
x = {"x0": 0}
node = tree.node_which_contains(x)
self.assertIsNotNone(node.regression)
b = node.regression.coef_[0]
c = node.regression.intercept_
self.assertEqual(node.regression.predict([list(x.values())]), b * x[list(x.keys())[0]] + c)
self.assertEqual(tree.compute_f(x), node.regression.predict([list(x.values())]))
class TestDomainNode(unittest.TestCase):
def setUp(self):
self.val = 10
d = RealDomain({"x0": RealInterval((0, 1), (True, False)), "x1": RealInterval((2, 3), (True, False))})
self.node = DomainNode(name="nome", domains=d, val=self.val)
def tearDown(self) -> None:
pass
def test_generate_regression(self):
self.node.generate_regression()
self.assertIsNotNone(self.node.regression)
self.assertIsNotNone(self.node.regression.coef_)
self.assertIsNotNone(self.node.regression.intercept_)
def test_contains(self):
self.assertTrue(self.node.contains({"x0": 0, "x1": 2}))
self.assertTrue(self.node.contains({"x0": 0.5, "x1": 2.5}))
self.assertFalse(self.node.contains({"x0": 1, "x1": 2}))
self.assertFalse(self.node.contains({"x0": 1, "x1": 3}))
self.assertFalse(self.node.contains({"x0": 0.2, "x1": 3}))
def test_kill(self):
self.node.dostuff(random=0.5)
self.assertIn(self.node.val, [self.val - 2, self.val - 1])
self.node.kill()
self.assertEqual(self.node.val, 0)
if __name__ == "__main__":
unittest.main()
| 33.48855
| 111
| 0.57488
|
import random
import unittest
from domain_tree.tree import DomainTree, DomainNode, NodeNotFoundException
from domain_tree.domain import RealDomain, RealInterval
class TestDomainTree(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
pass
@classmethod
def tearDownClass(cls) -> None:
pass
def setUp(self):
self.d0 = RealDomain({"x0": RealInterval((0, 1), (True, False))})
def tearDown(self) -> None:
pass
def test_npartition(self):
tree = DomainTree(domains=self.d0, min_split=0.5)
self.assertEqual(len(tree.leaves), 2)
tree = DomainTree(domains=self.d0, min_split=0.3)
self.assertIn(len(tree.leaves), [2, 3])
tree = DomainTree(domains=self.d0, min_split=0.2)
self.assertIn(len(tree.leaves), [3, 4, 5])
tree = DomainTree(domains=self.d0, min_split=0, depth_max=5)
n = (2 ** 5) / 2
self.assertEqual(len(tree.leaves), n)
def test_stress_functions(self):
for _ in range(10000):
tree = DomainTree(domains=self.d0, min_split=0, depth_max=5)
tree = DomainTree(domains=self.d0, min_split=0, depth_max=10)
for _ in range(10000):
tree.compute_f({"x0": random.random()})
with self.assertRaises(NodeNotFoundException):
for _ in range(10000):
tree.compute_f({"x0": random.random() + 1})
def test_contains(self):
tree = DomainTree(domains=self.d0, min_split=0.5)
x = {"x0": 0}
self.assertTrue(tree.contains(x))
x = {"x0": 1}
self.assertFalse(tree.contains(x))
x = {"x0": 0.5}
self.assertTrue(tree.contains(x))
d = RealDomain({"x0": RealInterval((0, 1), (True, False)), "x1": RealInterval((2, 3), (True, False))})
tree = DomainTree(domains=d, min_split=0.5)
x = {"x0": 0, "x1": 2}
self.assertTrue(tree.contains(x))
x = {"x0": 1, "x1": 2}
self.assertFalse(tree.contains(x))
x = {"x0": 0.5, "x1": 2.99}
self.assertTrue(tree.contains(x))
d = RealDomain({"x0": RealInterval((0, 1), (True, True)), "x1": RealInterval((2, 3), (False, False))})
tree = DomainTree(domains=d, min_split=0.5)
x = {"x0": 0, "x1": 2}
self.assertFalse(tree.contains(x))
x = {"x0": 0, "x1": 2.5}
self.assertTrue(tree.contains(x))
def test_compute_f(self):
tree = DomainTree(domains=self.d0, min_split=0.5)
with self.assertRaises(NodeNotFoundException):
tree.node_which_contains({"x0": -12})
x = {"x0": 0}
node = tree.node_which_contains(x)
self.assertIsNotNone(node.regression)
b = node.regression.coef_[0]
c = node.regression.intercept_
self.assertEqual(node.regression.predict([list(x.values())]), b * x[list(x.keys())[0]] + c)
self.assertEqual(tree.compute_f(x), node.regression.predict([list(x.values())]))
class TestDomainNode(unittest.TestCase):
def setUp(self):
self.val = 10
d = RealDomain({"x0": RealInterval((0, 1), (True, False)), "x1": RealInterval((2, 3), (True, False))})
self.node = DomainNode(name="nome", domains=d, val=self.val)
def tearDown(self) -> None:
pass
def test_generate_regression(self):
self.node.generate_regression()
self.assertIsNotNone(self.node.regression)
self.assertIsNotNone(self.node.regression.coef_)
self.assertIsNotNone(self.node.regression.intercept_)
def test_contains(self):
self.assertTrue(self.node.contains({"x0": 0, "x1": 2}))
self.assertTrue(self.node.contains({"x0": 0.5, "x1": 2.5}))
self.assertFalse(self.node.contains({"x0": 1, "x1": 2}))
self.assertFalse(self.node.contains({"x0": 1, "x1": 3}))
self.assertFalse(self.node.contains({"x0": 0.2, "x1": 3}))
def test_kill(self):
self.node.dostuff(random=0.5)
self.assertIn(self.node.val, [self.val - 2, self.val - 1])
self.node.kill()
self.assertEqual(self.node.val, 0)
if __name__ == "__main__":
unittest.main()
| true
| true
|
7905b2b53e838ed24998c54cecb9396767c49bf0
| 14,062
|
py
|
Python
|
utils/train_eval_test.py
|
IronOnet/tensor2robot
|
351cecbf76b71d09b56a766b981e1a15f85d9528
|
[
"Apache-2.0"
] | 2
|
2021-10-31T01:06:00.000Z
|
2021-11-08T09:43:25.000Z
|
utils/train_eval_test.py
|
IronOnet/tensor2robot
|
351cecbf76b71d09b56a766b981e1a15f85d9528
|
[
"Apache-2.0"
] | null | null | null |
utils/train_eval_test.py
|
IronOnet/tensor2robot
|
351cecbf76b71d09b56a766b981e1a15f85d9528
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2019 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Tests for tensor2robot.train_eval."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
from absl import flags
import gin
import mock
import numpy as np
from six.moves import zip
from tensor2robot.hooks import hook_builder
from tensor2robot.models import abstract_model
from tensor2robot.preprocessors import noop_preprocessor
from tensor2robot.utils import mocks
from tensor2robot.utils import train_eval
import tensorflow.compat.v1 as tf
from tensorflow.contrib import predictor as contrib_predictor
FLAGS = flags.FLAGS
_MAX_TRAIN_STEPS = 400
_EVAL_STEPS = 40
_BATCH_SIZE = 4
_EVAL_THROTTLE_SECS = 0.0
class FakeHook(tf.train.SessionRunHook):
def __init__(self):
self._mock = mock.MagicMock()
def begin(self):
self._mock.begin()
return
@property
def mock(self):
return self._mock
class FakeHookBuilder(hook_builder.HookBuilder):
def __init__(self):
self._hook = FakeHook()
def create_hooks(self, *args, **kwargs):
del args, kwargs
return [self._hook]
@property
def hook_mock(self):
return self._hook.mock
class TrainEvalTest(tf.test.TestCase):
def _compute_total_loss(self, labels, logits):
"""Summation of the categorical hinge loss for labels and logits."""
error = 0.
for label, logit in zip(labels, logits):
# Reference tensorflow implementation can be found in keras.losses.
positive = (label * logit)
negative = ((1 - label) * logit)
error += np.maximum(0., negative - positive + 1.)
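      # e.g. label=1, logit=2.0 -> max(0, 0 - 2.0 + 1) = 0 (no loss);
      # label=1, logit=0.5 -> max(0, 0 - 0.5 + 1) = 0.5 (inside the margin).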
return error
def test_train_eval_model(self):
"""Tests that a simple model trains and exported models are valid."""
gin.bind_parameter('tf.estimator.RunConfig.save_checkpoints_steps', 100)
model_dir = self.create_tempdir().full_path
mock_t2r_model = mocks.MockT2RModel(
preprocessor_cls=noop_preprocessor.NoOpPreprocessor)
mock_input_generator_train = mocks.MockInputGenerator(
batch_size=_BATCH_SIZE)
mock_input_generator_eval = mocks.MockInputGenerator(batch_size=1)
fake_hook_builder = FakeHookBuilder()
train_eval.train_eval_model(
t2r_model=mock_t2r_model,
input_generator_train=mock_input_generator_train,
input_generator_eval=mock_input_generator_eval,
max_train_steps=_MAX_TRAIN_STEPS,
model_dir=model_dir,
train_hook_builders=[fake_hook_builder],
eval_hook_builders=[fake_hook_builder],
eval_steps=_EVAL_STEPS,
eval_throttle_secs=_EVAL_THROTTLE_SECS,
create_exporters_fn=train_eval.create_default_exporters)
self.assertTrue(fake_hook_builder.hook_mock.begin.called)
# We ensure that both numpy and tf_example inference models are exported.
best_exporter_numpy_path = os.path.join(model_dir, 'export',
'best_exporter_numpy', '*')
numpy_model_paths = sorted(tf.io.gfile.glob(best_exporter_numpy_path))
# There should be at least 1 exported model.
self.assertGreater(len(numpy_model_paths), 0)
# This mock network converges nicely which is why we have several best
# models, by default we keep the best 5 and the latest one is always the
# best.
self.assertLessEqual(len(numpy_model_paths), 5)
best_exporter_tf_example_path = os.path.join(
model_dir, 'export', 'best_exporter_tf_example', '*')
tf_example_model_paths = sorted(
tf.io.gfile.glob(best_exporter_tf_example_path))
# There should be at least 1 exported model.
self.assertGreater(len(tf_example_model_paths), 0)
# This mock network converges nicely which is why we have several best
# models, by default we keep the best 5 and the latest one is always the
# best.
self.assertLessEqual(len(tf_example_model_paths), 5)
# We test both saved models within one test since the bulk of the time
    # is spent training the model in the first place.
# Verify that the serving estimator does exactly the same as the normal
# estimator with all the parameters.
estimator_predict = tf.estimator.Estimator(
model_fn=mock_t2r_model.model_fn,
config=tf.estimator.RunConfig(model_dir=model_dir))
prediction_ref = estimator_predict.predict(
input_fn=mock_input_generator_eval.create_dataset_input_fn(
mode=tf.estimator.ModeKeys.EVAL))
# Now we can load our exported estimator graph with the numpy feed_dict
# interface, there are no dependencies on the model_fn or preprocessor
# anymore.
# We load the latest model since it had the best eval performance.
numpy_predictor_fn = contrib_predictor.from_saved_model(
numpy_model_paths[-1])
features, labels = mock_input_generator_eval.create_numpy_data()
ref_error = self._compute_total_loss(
labels, [val['logit'].flatten() for val in prediction_ref])
numpy_predictions = []
for feature, label in zip(features, labels):
predicted = numpy_predictor_fn({'x': feature.reshape(
1, -1)})['logit'].flatten()
numpy_predictions.append(predicted)
# This ensures that we actually achieve near-perfect classification.
if label > 0:
self.assertGreater(predicted[0], 0)
else:
self.assertLess(predicted[0], 0)
numpy_error = self._compute_total_loss(labels, numpy_predictions)
# Now we can load our exported estimator graph with the tf_example feed_dict
# interface, there are no dependencies on the model_fn or preprocessor
# anymore.
# We load the latest model since it had the best eval performance.
tf_example_predictor_fn = contrib_predictor.from_saved_model(
tf_example_model_paths[-1])
tf_example_predictions = []
for feature, label in zip(features, labels):
# We have to create our serialized tf.Example proto.
example = tf.train.Example()
example.features.feature['measured_position'].float_list.value.extend(
feature)
feed_dict = {
'input_example_tensor':
np.array(example.SerializeToString()).reshape(1,)
}
predicted = tf_example_predictor_fn(feed_dict)['logit'].flatten()
tf_example_predictions.append(predicted)
# This ensures that we actually achieve perfect classification.
if label > 0:
self.assertGreater(predicted[0], 0)
else:
self.assertLess(predicted[0], 0)
tf_example_error = self._compute_total_loss(labels, tf_example_predictions)
np.testing.assert_almost_equal(tf_example_error, numpy_error)
# The exported saved models both have to have the same performance and since
# we train on eval on the same fixed dataset the latest and greatest
# model error should also be the best.
np.testing.assert_almost_equal(ref_error, tf_example_error, decimal=3)
def test_init_from_checkpoint_global_step(self):
"""Tests that a simple model trains and exported models are valid."""
gin.bind_parameter('tf.estimator.RunConfig.save_checkpoints_steps', 100)
gin.bind_parameter('tf.estimator.RunConfig.keep_checkpoint_max', 3)
model_dir = self.create_tempdir().full_path
mock_t2r_model = mocks.MockT2RModel(
preprocessor_cls=noop_preprocessor.NoOpPreprocessor)
mock_input_generator_train = mocks.MockInputGenerator(
batch_size=_BATCH_SIZE)
train_eval.train_eval_model(
t2r_model=mock_t2r_model,
input_generator_train=mock_input_generator_train,
max_train_steps=_MAX_TRAIN_STEPS,
model_dir=model_dir,
eval_steps=_EVAL_STEPS,
eval_throttle_secs=_EVAL_THROTTLE_SECS,
create_exporters_fn=train_eval.create_default_exporters)
    # The model trains for 400 steps, saves a checkpoint every 100 steps, and
    # keeps the 3 most recent -> len == 3.
self.assertLen(tf.io.gfile.glob(os.path.join(model_dir, 'model*.meta')), 3)
# The continuous training has its own directory.
continue_model_dir = self.create_tempdir().full_path
init_from_checkpoint_fn = functools.partial(
abstract_model.default_init_from_checkpoint_fn, checkpoint=model_dir)
continue_mock_t2r_model = mocks.MockT2RModel(
preprocessor_cls=noop_preprocessor.NoOpPreprocessor,
init_from_checkpoint_fn=init_from_checkpoint_fn)
continue_mock_input_generator_train = mocks.MockInputGenerator(
batch_size=_BATCH_SIZE)
train_eval.train_eval_model(
t2r_model=continue_mock_t2r_model,
input_generator_train=continue_mock_input_generator_train,
model_dir=continue_model_dir,
max_train_steps=_MAX_TRAIN_STEPS + 100,
eval_steps=_EVAL_STEPS,
eval_throttle_secs=_EVAL_THROTTLE_SECS,
create_exporters_fn=train_eval.create_default_exporters)
    # If the model was successfully restored, including the global step, only 1
# additional checkpoint to the init one should be created -> len == 2.
self.assertLen(
tf.io.gfile.glob(os.path.join(continue_model_dir, 'model*.meta')), 2)
def test_init_from_checkpoint_use_avg_model_params_and_weights(self):
"""Tests that a simple model trains and exported models are valid."""
gin.bind_parameter('tf.estimator.RunConfig.save_checkpoints_steps', 100)
gin.bind_parameter('tf.estimator.RunConfig.keep_checkpoint_max', 3)
model_dir = self.create_tempdir().full_path
mock_t2r_model = mocks.MockT2RModel(
preprocessor_cls=noop_preprocessor.NoOpPreprocessor,
use_avg_model_params=True)
mock_input_generator_train = mocks.MockInputGenerator(
batch_size=_BATCH_SIZE)
mock_input_generator = mocks.MockInputGenerator(batch_size=1)
mock_input_generator.set_specification_from_model(
mock_t2r_model, tf.estimator.ModeKeys.TRAIN)
train_eval.train_eval_model(
t2r_model=mock_t2r_model,
input_generator_train=mock_input_generator_train,
max_train_steps=_MAX_TRAIN_STEPS,
model_dir=model_dir)
init_checkpoint = tf.train.NewCheckpointReader(
tf.train.latest_checkpoint(model_dir))
# Verify that the serving estimator does exactly the same as the normal
# estimator with all the parameters.
initial_estimator_predict = tf.estimator.Estimator(
model_fn=mock_t2r_model.model_fn,
config=tf.estimator.RunConfig(model_dir=model_dir))
# pylint: disable=g-complex-comprehension
initial_predictions = [
prediction['logit'] for prediction in list(
initial_estimator_predict.predict(
input_fn=mock_input_generator.create_dataset_input_fn(
mode=tf.estimator.ModeKeys.EVAL)))
]
# The continuous training has its own directory.
continue_model_dir = self.create_tempdir().full_path
init_from_checkpoint_fn = functools.partial(
abstract_model.default_init_from_checkpoint_fn, checkpoint=model_dir)
continue_mock_t2r_model = mocks.MockT2RModel(
preprocessor_cls=noop_preprocessor.NoOpPreprocessor,
init_from_checkpoint_fn=init_from_checkpoint_fn)
continue_mock_input_generator_train = mocks.MockInputGenerator(
batch_size=_BATCH_SIZE)
# Re-initialize the model and train for one step, basically the same
# performance as the original model.
train_eval.train_eval_model(
t2r_model=continue_mock_t2r_model,
input_generator_train=continue_mock_input_generator_train,
model_dir=continue_model_dir,
max_train_steps=_MAX_TRAIN_STEPS)
continue_checkpoint = tf.train.NewCheckpointReader(
tf.train.latest_checkpoint(continue_model_dir))
for tensor_name, _ in tf.train.list_variables(model_dir):
if 'ExponentialMovingAverage' in tensor_name:
# These values are replaced by the swapping saver when using the
# use_avg_model_params.
continue
if 'Adam' in tensor_name:
# The adam optimizer values are not required.
continue
if 'global_step' in tensor_name:
# The global step will be incremented by 1.
continue
self.assertAllClose(
init_checkpoint.get_tensor(tensor_name),
continue_checkpoint.get_tensor(tensor_name),
atol=1e-3)
# Verify that the serving estimator does exactly the same as the normal
# estimator with all the parameters.
continue_estimator_predict = tf.estimator.Estimator(
model_fn=mock_t2r_model.model_fn,
config=tf.estimator.RunConfig(model_dir=continue_model_dir))
continue_predictions = [
prediction['logit'] for prediction in list(
continue_estimator_predict.predict(
input_fn=mock_input_generator.create_dataset_input_fn(
mode=tf.estimator.ModeKeys.EVAL)))
]
self.assertTrue(
np.allclose(initial_predictions, continue_predictions, atol=1e-1))
# A randomly initialized model estimator with all the parameters.
random_estimator_predict = tf.estimator.Estimator(
model_fn=mock_t2r_model.model_fn)
random_predictions = [
prediction['logit'] for prediction in list(
random_estimator_predict.predict(
input_fn=mock_input_generator.create_dataset_input_fn(
mode=tf.estimator.ModeKeys.EVAL)))
]
self.assertFalse(
np.allclose(initial_predictions, random_predictions, atol=1e-2))
if __name__ == '__main__':
tf.test.main()
| 39.611268
| 80
| 0.732399
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
from absl import flags
import gin
import mock
import numpy as np
from six.moves import zip
from tensor2robot.hooks import hook_builder
from tensor2robot.models import abstract_model
from tensor2robot.preprocessors import noop_preprocessor
from tensor2robot.utils import mocks
from tensor2robot.utils import train_eval
import tensorflow.compat.v1 as tf
from tensorflow.contrib import predictor as contrib_predictor
FLAGS = flags.FLAGS
_MAX_TRAIN_STEPS = 400
_EVAL_STEPS = 40
_BATCH_SIZE = 4
_EVAL_THROTTLE_SECS = 0.0
class FakeHook(tf.train.SessionRunHook):
def __init__(self):
self._mock = mock.MagicMock()
def begin(self):
self._mock.begin()
return
@property
def mock(self):
return self._mock
class FakeHookBuilder(hook_builder.HookBuilder):
def __init__(self):
self._hook = FakeHook()
def create_hooks(self, *args, **kwargs):
del args, kwargs
return [self._hook]
@property
def hook_mock(self):
return self._hook.mock
class TrainEvalTest(tf.test.TestCase):
def _compute_total_loss(self, labels, logits):
error = 0.
for label, logit in zip(labels, logits):
positive = (label * logit)
negative = ((1 - label) * logit)
error += np.maximum(0., negative - positive + 1.)
return error
def test_train_eval_model(self):
gin.bind_parameter('tf.estimator.RunConfig.save_checkpoints_steps', 100)
model_dir = self.create_tempdir().full_path
mock_t2r_model = mocks.MockT2RModel(
preprocessor_cls=noop_preprocessor.NoOpPreprocessor)
mock_input_generator_train = mocks.MockInputGenerator(
batch_size=_BATCH_SIZE)
mock_input_generator_eval = mocks.MockInputGenerator(batch_size=1)
fake_hook_builder = FakeHookBuilder()
train_eval.train_eval_model(
t2r_model=mock_t2r_model,
input_generator_train=mock_input_generator_train,
input_generator_eval=mock_input_generator_eval,
max_train_steps=_MAX_TRAIN_STEPS,
model_dir=model_dir,
train_hook_builders=[fake_hook_builder],
eval_hook_builders=[fake_hook_builder],
eval_steps=_EVAL_STEPS,
eval_throttle_secs=_EVAL_THROTTLE_SECS,
create_exporters_fn=train_eval.create_default_exporters)
self.assertTrue(fake_hook_builder.hook_mock.begin.called)
best_exporter_numpy_path = os.path.join(model_dir, 'export',
'best_exporter_numpy', '*')
numpy_model_paths = sorted(tf.io.gfile.glob(best_exporter_numpy_path))
self.assertGreater(len(numpy_model_paths), 0)
self.assertLessEqual(len(numpy_model_paths), 5)
best_exporter_tf_example_path = os.path.join(
model_dir, 'export', 'best_exporter_tf_example', '*')
tf_example_model_paths = sorted(
tf.io.gfile.glob(best_exporter_tf_example_path))
self.assertGreater(len(tf_example_model_paths), 0)
self.assertLessEqual(len(tf_example_model_paths), 5)
estimator_predict = tf.estimator.Estimator(
model_fn=mock_t2r_model.model_fn,
config=tf.estimator.RunConfig(model_dir=model_dir))
prediction_ref = estimator_predict.predict(
input_fn=mock_input_generator_eval.create_dataset_input_fn(
mode=tf.estimator.ModeKeys.EVAL))
numpy_predictor_fn = contrib_predictor.from_saved_model(
numpy_model_paths[-1])
features, labels = mock_input_generator_eval.create_numpy_data()
ref_error = self._compute_total_loss(
labels, [val['logit'].flatten() for val in prediction_ref])
numpy_predictions = []
for feature, label in zip(features, labels):
predicted = numpy_predictor_fn({'x': feature.reshape(
1, -1)})['logit'].flatten()
numpy_predictions.append(predicted)
if label > 0:
self.assertGreater(predicted[0], 0)
else:
self.assertLess(predicted[0], 0)
numpy_error = self._compute_total_loss(labels, numpy_predictions)
tf_example_predictor_fn = contrib_predictor.from_saved_model(
tf_example_model_paths[-1])
tf_example_predictions = []
for feature, label in zip(features, labels):
example = tf.train.Example()
example.features.feature['measured_position'].float_list.value.extend(
feature)
feed_dict = {
'input_example_tensor':
np.array(example.SerializeToString()).reshape(1,)
}
predicted = tf_example_predictor_fn(feed_dict)['logit'].flatten()
tf_example_predictions.append(predicted)
if label > 0:
self.assertGreater(predicted[0], 0)
else:
self.assertLess(predicted[0], 0)
tf_example_error = self._compute_total_loss(labels, tf_example_predictions)
np.testing.assert_almost_equal(tf_example_error, numpy_error)
np.testing.assert_almost_equal(ref_error, tf_example_error, decimal=3)
def test_init_from_checkpoint_global_step(self):
gin.bind_parameter('tf.estimator.RunConfig.save_checkpoints_steps', 100)
gin.bind_parameter('tf.estimator.RunConfig.keep_checkpoint_max', 3)
model_dir = self.create_tempdir().full_path
mock_t2r_model = mocks.MockT2RModel(
preprocessor_cls=noop_preprocessor.NoOpPreprocessor)
mock_input_generator_train = mocks.MockInputGenerator(
batch_size=_BATCH_SIZE)
train_eval.train_eval_model(
t2r_model=mock_t2r_model,
input_generator_train=mock_input_generator_train,
max_train_steps=_MAX_TRAIN_STEPS,
model_dir=model_dir,
eval_steps=_EVAL_STEPS,
eval_throttle_secs=_EVAL_THROTTLE_SECS,
create_exporters_fn=train_eval.create_default_exporters)
self.assertLen(tf.io.gfile.glob(os.path.join(model_dir, 'model*.meta')), 3)
continue_model_dir = self.create_tempdir().full_path
init_from_checkpoint_fn = functools.partial(
abstract_model.default_init_from_checkpoint_fn, checkpoint=model_dir)
continue_mock_t2r_model = mocks.MockT2RModel(
preprocessor_cls=noop_preprocessor.NoOpPreprocessor,
init_from_checkpoint_fn=init_from_checkpoint_fn)
continue_mock_input_generator_train = mocks.MockInputGenerator(
batch_size=_BATCH_SIZE)
train_eval.train_eval_model(
t2r_model=continue_mock_t2r_model,
input_generator_train=continue_mock_input_generator_train,
model_dir=continue_model_dir,
max_train_steps=_MAX_TRAIN_STEPS + 100,
eval_steps=_EVAL_STEPS,
eval_throttle_secs=_EVAL_THROTTLE_SECS,
create_exporters_fn=train_eval.create_default_exporters)
self.assertLen(
tf.io.gfile.glob(os.path.join(continue_model_dir, 'model*.meta')), 2)
def test_init_from_checkpoint_use_avg_model_params_and_weights(self):
gin.bind_parameter('tf.estimator.RunConfig.save_checkpoints_steps', 100)
gin.bind_parameter('tf.estimator.RunConfig.keep_checkpoint_max', 3)
model_dir = self.create_tempdir().full_path
mock_t2r_model = mocks.MockT2RModel(
preprocessor_cls=noop_preprocessor.NoOpPreprocessor,
use_avg_model_params=True)
mock_input_generator_train = mocks.MockInputGenerator(
batch_size=_BATCH_SIZE)
mock_input_generator = mocks.MockInputGenerator(batch_size=1)
mock_input_generator.set_specification_from_model(
mock_t2r_model, tf.estimator.ModeKeys.TRAIN)
train_eval.train_eval_model(
t2r_model=mock_t2r_model,
input_generator_train=mock_input_generator_train,
max_train_steps=_MAX_TRAIN_STEPS,
model_dir=model_dir)
init_checkpoint = tf.train.NewCheckpointReader(
tf.train.latest_checkpoint(model_dir))
initial_estimator_predict = tf.estimator.Estimator(
model_fn=mock_t2r_model.model_fn,
config=tf.estimator.RunConfig(model_dir=model_dir))
initial_predictions = [
prediction['logit'] for prediction in list(
initial_estimator_predict.predict(
input_fn=mock_input_generator.create_dataset_input_fn(
mode=tf.estimator.ModeKeys.EVAL)))
]
continue_model_dir = self.create_tempdir().full_path
init_from_checkpoint_fn = functools.partial(
abstract_model.default_init_from_checkpoint_fn, checkpoint=model_dir)
continue_mock_t2r_model = mocks.MockT2RModel(
preprocessor_cls=noop_preprocessor.NoOpPreprocessor,
init_from_checkpoint_fn=init_from_checkpoint_fn)
continue_mock_input_generator_train = mocks.MockInputGenerator(
batch_size=_BATCH_SIZE)
train_eval.train_eval_model(
t2r_model=continue_mock_t2r_model,
input_generator_train=continue_mock_input_generator_train,
model_dir=continue_model_dir,
max_train_steps=_MAX_TRAIN_STEPS)
continue_checkpoint = tf.train.NewCheckpointReader(
tf.train.latest_checkpoint(continue_model_dir))
for tensor_name, _ in tf.train.list_variables(model_dir):
if 'ExponentialMovingAverage' in tensor_name:
continue
if 'Adam' in tensor_name:
continue
if 'global_step' in tensor_name:
continue
self.assertAllClose(
init_checkpoint.get_tensor(tensor_name),
continue_checkpoint.get_tensor(tensor_name),
atol=1e-3)
continue_estimator_predict = tf.estimator.Estimator(
model_fn=mock_t2r_model.model_fn,
config=tf.estimator.RunConfig(model_dir=continue_model_dir))
continue_predictions = [
prediction['logit'] for prediction in list(
continue_estimator_predict.predict(
input_fn=mock_input_generator.create_dataset_input_fn(
mode=tf.estimator.ModeKeys.EVAL)))
]
self.assertTrue(
np.allclose(initial_predictions, continue_predictions, atol=1e-1))
random_estimator_predict = tf.estimator.Estimator(
model_fn=mock_t2r_model.model_fn)
random_predictions = [
prediction['logit'] for prediction in list(
random_estimator_predict.predict(
input_fn=mock_input_generator.create_dataset_input_fn(
mode=tf.estimator.ModeKeys.EVAL)))
]
self.assertFalse(
np.allclose(initial_predictions, random_predictions, atol=1e-2))
if __name__ == '__main__':
tf.test.main()
| true
| true
|
7905b2c7a8fd6c7658c3edec7d5b982f80dcb577
| 698
|
py
|
Python
|
selenium/filling form/form-2.py
|
araj29011998/Complete-Selenium-Automation
|
62da634fcf53028f820fcf8b6ec8cfe1fd29851c
|
[
"MIT"
] | null | null | null |
selenium/filling form/form-2.py
|
araj29011998/Complete-Selenium-Automation
|
62da634fcf53028f820fcf8b6ec8cfe1fd29851c
|
[
"MIT"
] | null | null | null |
selenium/filling form/form-2.py
|
araj29011998/Complete-Selenium-Automation
|
62da634fcf53028f820fcf8b6ec8cfe1fd29851c
|
[
"MIT"
] | null | null | null |
#filling 2nd form and validating ans
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
driver=webdriver.Chrome("../chromedriver.exe")
driver.get("https://www.seleniumeasy.com/test/basic-first-form-demo.html")
num1=2
num2=3
element1=driver.find_element(By.ID,"sum1").send_keys(num1)
element2=driver.find_element(By.ID,"sum2").send_keys(num2)
button=driver.find_element(By.XPATH,"/html/body/div[2]/div/div[2]/div[2]/div[2]/form/button").click()
displayed_sum=driver.find_element(By.ID,"displayvalue").text
if (num1+num2) == int(displayed_sum):
print("same")
else:
print("different")
time.sleep(5)
driver.quit()
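# A more robust variant would replace the fixed sleep with an explicit wait
# (sketch, assuming the same page and element IDs):
# from selenium.webdriver.support.ui import WebDriverWait
# from selenium.webdriver.support import expected_conditions as EC
# WebDriverWait(driver, 5).until(
#     EC.text_to_be_present_in_element((By.ID, "displayvalue"), str(num1 + num2)))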
| 23.266667
| 102
| 0.7149
|
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
driver=webdriver.Chrome("../chromedriver.exe")
driver.get("https://www.seleniumeasy.com/test/basic-first-form-demo.html")
num1=2
num2=3
element1=driver.find_element(By.ID,"sum1").send_keys(num1)
element2=driver.find_element(By.ID,"sum2").send_keys(num2)
button=driver.find_element(By.XPATH,"/html/body/div[2]/div/div[2]/div[2]/div[2]/form/button").click()
displayed_sum=driver.find_element(By.ID,"displayvalue").text
if (num1+num2) == int(displayed_sum):
print("same")
else:
print("different")
time.sleep(5)
driver.quit()
| true
| true
|
7905b3003ddac0cedd33e02ab11c07e836d1fc0e
| 6,582
|
py
|
Python
|
train_cifar.py
|
diceroll/metric_learning
|
272c7ba13208e14b91d294456d1f7fe762fe80d5
|
[
"MIT"
] | 1
|
2019-04-21T14:41:07.000Z
|
2019-04-21T14:41:07.000Z
|
train_cifar.py
|
diceroll/metric_learning
|
272c7ba13208e14b91d294456d1f7fe762fe80d5
|
[
"MIT"
] | null | null | null |
train_cifar.py
|
diceroll/metric_learning
|
272c7ba13208e14b91d294456d1f7fe762fe80d5
|
[
"MIT"
] | null | null | null |
import argparse
import multiprocessing
import random
import shutil
from datetime import datetime
from functools import partial
from pathlib import Path
import chainer
import chainer.functions as F
import chainer.links as L
import cupy
import numpy as np
from chainer import iterators, optimizers, serializers
from chainer.datasets import TransformDataset, get_cifar10
from chainer.training import StandardUpdater, Trainer, extensions
import augmentation
from metric_learning import MetricLearnClassifier
from modified_evaluator import ModifiedEvaluator
from modified_updater import ModifiedUpdater
from resnet import ResNet50
def apply_augmentation(inputs, mean, std, angle=(-5, 5), scale=(1, 1.2),
crop_size=None, train=True):
img, label = inputs
img = img.copy()
img = img.transpose(1, 2, 0)
if train:
img, _ = augmentation.gamma_correction(img)
img -= mean[None, None, :]
img /= std[None, None, :]
if train:
img, _ = augmentation.random_rotate(img, angle=angle)
if np.random.rand() < 0.5:
img, _ = augmentation.mirror(img)
if np.random.rand() < 0.5:
img, _ = augmentation.flip(img)
img, _ = augmentation.random_resize(img, scale=scale)
if crop_size is not None:
rnd1 = np.random.randint(img.shape[0] - crop_size)
rnd2 = np.random.randint(img.shape[1] - crop_size)
img = img[rnd1:rnd1 + crop_size, rnd2:rnd2 + crop_size, :]
img = img.transpose(2, 0, 1)
return img, label
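# TransformDataset calls apply_augmentation with one (img, label) example at a
# time, so normalization and (when train=True) augmentation happen per image.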
def main():
parser = argparse.ArgumentParser(description='training mnist')
parser.add_argument('--gpu', '-g', default=-1, type=int,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--epoch', '-e', type=int, default=100,
help='Number of sweeps over the dataset to train')
parser.add_argument('--batchsize', '-b', type=int, default=8,
help='Number of images in each mini-batch')
parser.add_argument('--seed', '-s', type=int, default=0,
help='Random seed')
parser.add_argument('--report_trigger', '-rt', type=str, default='1e',
help='Interval for reporting(Ex.100i, default:1e)')
parser.add_argument('--save_trigger', '-st', type=str, default='1e',
help='Interval for saving the model(Ex.100i, default:1e)')
parser.add_argument('--load_model', '-lm', type=str, default=None,
help='Path of the model object to load')
parser.add_argument('--load_optimizer', '-lo', type=str, default=None,
help='Path of the optimizer object to load')
args = parser.parse_args()
start_time = datetime.now()
save_dir = Path('output/{}'.format(start_time.strftime('%Y%m%d_%H%M')))
random.seed(args.seed)
np.random.seed(args.seed)
cupy.random.seed(args.seed)
model = MetricLearnClassifier(ResNet50(), 512, 10,
method='arcface', final_margin=0.5,
final_scale=64, target_epoch=100)
if args.load_model is not None:
serializers.load_npz(args.load_model, model)
if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu()
optimizer = optimizers.Adam(alpha=1e-3, weight_decay_rate=5e-4, amsgrad=True)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(5e-4))
if args.load_optimizer is not None:
serializers.load_npz(args.load_optimizer, optimizer)
train_data, valid_data = get_cifar10(scale=255.)
mean = np.mean([x for x, _ in train_data], axis=(0, 2, 3))
std = np.std([x for x, _ in train_data], axis=(0, 2, 3))
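    # Reducing over axes (0, 2, 3) of the NCHW training batch leaves one mean
    # and one std per colour channel.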
train_transform = partial(apply_augmentation, mean=mean, std=std, crop_size=28, train=True)
    # Evaluate without the random train-time augmentations.
    valid_transform = partial(apply_augmentation, mean=mean, std=std, crop_size=28, train=False)
train_data = TransformDataset(train_data, train_transform)
valid_data = TransformDataset(valid_data, valid_transform)
train_iter = iterators.SerialIterator(train_data, args.batchsize)
valid_iter = iterators.SerialIterator(valid_data, args.batchsize, repeat=False, shuffle=False)
updater = ModifiedUpdater(train_iter, optimizer, device=args.gpu)
trainer = Trainer(updater, (args.epoch, 'epoch'), out=save_dir)
report_trigger = (int(args.report_trigger[:-1]), 'iteration' if args.report_trigger[-1] == 'i' else 'epoch')
trainer.extend(extensions.LogReport(trigger=report_trigger))
trainer.extend(ModifiedEvaluator(valid_iter, model, device=args.gpu), name='val', trigger=report_trigger)
trainer.extend(extensions.PrintReport(['epoch', 'iteration', 'main/loss', 'main/accuracy', 'val/main/loss',
'val/main/accuracy', 'elapsed_time']), trigger=report_trigger)
trainer.extend(extensions.PlotReport(['main/loss', 'val/main/loss'], x_key=report_trigger[1],
marker='.', file_name='loss.png', trigger=report_trigger))
trainer.extend(extensions.PlotReport(['main/accuracy', 'val/main/accuracy'], x_key=report_trigger[1],
marker='.', file_name='accuracy.png', trigger=report_trigger))
save_trigger = (int(args.save_trigger[:-1]), 'iteration' if args.save_trigger[-1] == 'i' else 'epoch')
trainer.extend(extensions.snapshot_object(model, filename='model_{0}-{{.updater.{0}}}.npz'
.format(save_trigger[1])), trigger=save_trigger)
trainer.extend(extensions.snapshot_object(optimizer, filename='optimizer_{0}-{{.updater.{0}}}.npz'
.format(save_trigger[1])), trigger=save_trigger)
trainer.extend(extensions.ProgressBar())
    # Chainer's Adam exposes its step size as 'alpha' (its 'lr' is a derived,
    # read-only property), so the shift must target 'alpha'.
    trainer.extend(extensions.ExponentialShift('alpha', 0.5), trigger=(30, 'epoch'))
if save_dir.exists():
shutil.rmtree(save_dir)
save_dir.mkdir()
(save_dir / 'training_details').mkdir()
# Write parameters text
with open(save_dir / 'training_details/train_params.txt', 'w') as f:
f.write('model: {}\n'.format(model.predictor.__class__.__name__))
f.write('n_epoch: {}\n'.format(args.epoch))
f.write('batch_size: {}\n'.format(args.batchsize))
f.write('n_data_train: {}\n'.format(len(train_data)))
f.write('n_data_val: {}\n'.format(len(valid_data)))
f.write('seed: {}\n'.format(args.seed))
trainer.run()
if __name__ == '__main__':
main()
| 43.589404
| 112
| 0.648891
|
import argparse
import multiprocessing
import random
import shutil
from datetime import datetime
from functools import partial
from pathlib import Path
import chainer
import chainer.functions as F
import chainer.links as L
import cupy
import numpy as np
from chainer import iterators, optimizers, serializers
from chainer.datasets import TransformDataset, get_cifar10
from chainer.training import StandardUpdater, Trainer, extensions
import augmentation
from metric_learning import MetricLearnClassifier
from modified_evaluator import ModifiedEvaluator
from modified_updater import ModifiedUpdater
from resnet import ResNet50
def apply_augmentation(inputs, mean, std, angle=(-5, 5), scale=(1, 1.2),
crop_size=None, train=True):
img, label = inputs
img = img.copy()
img = img.transpose(1, 2, 0)
if train:
img, _ = augmentation.gamma_correction(img)
img -= mean[None, None, :]
img /= std[None, None, :]
if train:
img, _ = augmentation.random_rotate(img, angle=angle)
if np.random.rand() < 0.5:
img, _ = augmentation.mirror(img)
if np.random.rand() < 0.5:
img, _ = augmentation.flip(img)
img, _ = augmentation.random_resize(img, scale=scale)
if crop_size is not None:
rnd1 = np.random.randint(img.shape[0] - crop_size)
rnd2 = np.random.randint(img.shape[1] - crop_size)
img = img[rnd1:rnd1 + crop_size, rnd2:rnd2 + crop_size, :]
img = img.transpose(2, 0, 1)
return img, label
def main():
parser = argparse.ArgumentParser(description='training mnist')
parser.add_argument('--gpu', '-g', default=-1, type=int,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--epoch', '-e', type=int, default=100,
help='Number of sweeps over the dataset to train')
parser.add_argument('--batchsize', '-b', type=int, default=8,
help='Number of images in each mini-batch')
parser.add_argument('--seed', '-s', type=int, default=0,
help='Random seed')
parser.add_argument('--report_trigger', '-rt', type=str, default='1e',
help='Interval for reporting(Ex.100i, default:1e)')
parser.add_argument('--save_trigger', '-st', type=str, default='1e',
help='Interval for saving the model(Ex.100i, default:1e)')
parser.add_argument('--load_model', '-lm', type=str, default=None,
help='Path of the model object to load')
parser.add_argument('--load_optimizer', '-lo', type=str, default=None,
help='Path of the optimizer object to load')
args = parser.parse_args()
start_time = datetime.now()
save_dir = Path('output/{}'.format(start_time.strftime('%Y%m%d_%H%M')))
random.seed(args.seed)
np.random.seed(args.seed)
cupy.random.seed(args.seed)
model = MetricLearnClassifier(ResNet50(), 512, 10,
method='arcface', final_margin=0.5,
final_scale=64, target_epoch=100)
if args.load_model is not None:
serializers.load_npz(args.load_model, model)
if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu()
optimizer = optimizers.Adam(alpha=1e-3, weight_decay_rate=5e-4, amsgrad=True)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(5e-4))
if args.load_optimizer is not None:
serializers.load_npz(args.load_optimizer, optimizer)
train_data, valid_data = get_cifar10(scale=255.)
mean = np.mean([x for x, _ in train_data], axis=(0, 2, 3))
std = np.std([x for x, _ in train_data], axis=(0, 2, 3))
train_transform = partial(apply_augmentation, mean=mean, std=std, crop_size=28, train=True)
    valid_transform = partial(apply_augmentation, mean=mean, std=std, crop_size=28, train=False)
train_data = TransformDataset(train_data, train_transform)
valid_data = TransformDataset(valid_data, valid_transform)
train_iter = iterators.SerialIterator(train_data, args.batchsize)
valid_iter = iterators.SerialIterator(valid_data, args.batchsize, repeat=False, shuffle=False)
updater = ModifiedUpdater(train_iter, optimizer, device=args.gpu)
trainer = Trainer(updater, (args.epoch, 'epoch'), out=save_dir)
report_trigger = (int(args.report_trigger[:-1]), 'iteration' if args.report_trigger[-1] == 'i' else 'epoch')
trainer.extend(extensions.LogReport(trigger=report_trigger))
trainer.extend(ModifiedEvaluator(valid_iter, model, device=args.gpu), name='val', trigger=report_trigger)
trainer.extend(extensions.PrintReport(['epoch', 'iteration', 'main/loss', 'main/accuracy', 'val/main/loss',
'val/main/accuracy', 'elapsed_time']), trigger=report_trigger)
trainer.extend(extensions.PlotReport(['main/loss', 'val/main/loss'], x_key=report_trigger[1],
marker='.', file_name='loss.png', trigger=report_trigger))
trainer.extend(extensions.PlotReport(['main/accuracy', 'val/main/accuracy'], x_key=report_trigger[1],
marker='.', file_name='accuracy.png', trigger=report_trigger))
save_trigger = (int(args.save_trigger[:-1]), 'iteration' if args.save_trigger[-1] == 'i' else 'epoch')
trainer.extend(extensions.snapshot_object(model, filename='model_{0}-{{.updater.{0}}}.npz'
.format(save_trigger[1])), trigger=save_trigger)
trainer.extend(extensions.snapshot_object(optimizer, filename='optimizer_{0}-{{.updater.{0}}}.npz'
.format(save_trigger[1])), trigger=save_trigger)
trainer.extend(extensions.ProgressBar())
    trainer.extend(extensions.ExponentialShift('alpha', 0.5), trigger=(30, 'epoch'))
if save_dir.exists():
shutil.rmtree(save_dir)
save_dir.mkdir()
(save_dir / 'training_details').mkdir()
with open(save_dir / 'training_details/train_params.txt', 'w') as f:
f.write('model: {}\n'.format(model.predictor.__class__.__name__))
f.write('n_epoch: {}\n'.format(args.epoch))
f.write('batch_size: {}\n'.format(args.batchsize))
f.write('n_data_train: {}\n'.format(len(train_data)))
f.write('n_data_val: {}\n'.format(len(valid_data)))
f.write('seed: {}\n'.format(args.seed))
trainer.run()
if __name__ == '__main__':
main()
| true
| true
|
7905b45415505e4df0c245fcd752d849b93e1a4f
| 673
|
py
|
Python
|
indigo-web/archive/tests.py
|
pericles-project/ERMR
|
99e19c476c813632d0508cdef65b4683e36f8e43
|
[
"Apache-2.0"
] | null | null | null |
indigo-web/archive/tests.py
|
pericles-project/ERMR
|
99e19c476c813632d0508cdef65b4683e36f8e43
|
[
"Apache-2.0"
] | null | null | null |
indigo-web/archive/tests.py
|
pericles-project/ERMR
|
99e19c476c813632d0508cdef65b4683e36f8e43
|
[
"Apache-2.0"
] | null | null | null |
"""Archive Tests
Copyright 2015 Archive Analytics Solutions - University of Liverpool
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.test import TestCase
# Create your tests here.
| 32.047619
| 72
| 0.793462
|
from django.test import TestCase
| true
| true
|
7905b57f028b3f1ab6052b8fb9970a2c88f4f26c
| 403
|
py
|
Python
|
fme/tests/test_deltree_copy.py
|
artemkin/sandbox
|
0287e95d81c070f1399c5b0ceacdc37867075aec
|
[
"BSD-2-Clause"
] | null | null | null |
fme/tests/test_deltree_copy.py
|
artemkin/sandbox
|
0287e95d81c070f1399c5b0ceacdc37867075aec
|
[
"BSD-2-Clause"
] | null | null | null |
fme/tests/test_deltree_copy.py
|
artemkin/sandbox
|
0287e95d81c070f1399c5b0ceacdc37867075aec
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/python
script = r"""
MD Dir1
MD Dir1\Dir2
CD Dir1\Dir2
MF file2.dat
MD Dir3
CD Dir3
MF file3.dat
MD Dir4
CD Dir4
MF file4.dat
MD Dir5
CD Dir5
MF file5.dat
CD C:
DELTREE Dir1
MD Dir2
CD Dir2
MF a.txt
MF b.txt
CD C:
MD Dir3
COPY Dir2 Dir3
"""
expected = r"""
C:
|_DIR2
| |_a.txt
| |_b.txt
|
|_DIR3
|_DIR2
|_a.txt
|_b.txt
"""
import test
test.run(script, expected)
| 8.955556
| 26
| 0.635236
|
script = r"""
MD Dir1
MD Dir1\Dir2
CD Dir1\Dir2
MF file2.dat
MD Dir3
CD Dir3
MF file3.dat
MD Dir4
CD Dir4
MF file4.dat
MD Dir5
CD Dir5
MF file5.dat
CD C:
DELTREE Dir1
MD Dir2
CD Dir2
MF a.txt
MF b.txt
CD C:
MD Dir3
COPY Dir2 Dir3
"""
expected = r"""
C:
|_DIR2
| |_a.txt
| |_b.txt
|
|_DIR3
|_DIR2
|_a.txt
|_b.txt
"""
import test
test.run(script, expected)
| true
| true
|
7905b5d4d13729de04fb57050314dad2dd488470
| 115
|
py
|
Python
|
bridge/bridge/deploy/registry.py
|
jfdesroches/domino-research
|
a67b22f1aaff70e82bdb4a966b7fa960ae8843b2
|
[
"Apache-2.0"
] | null | null | null |
bridge/bridge/deploy/registry.py
|
jfdesroches/domino-research
|
a67b22f1aaff70e82bdb4a966b7fa960ae8843b2
|
[
"Apache-2.0"
] | null | null | null |
bridge/bridge/deploy/registry.py
|
jfdesroches/domino-research
|
a67b22f1aaff70e82bdb4a966b7fa960ae8843b2
|
[
"Apache-2.0"
] | null | null | null |
from bridge.deploy.sagemaker import SageMakerDeployTarget
DEPLOY_REGISTRY = {"sagemaker": SageMakerDeployTarget}
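# --- Editor's usage sketch (hypothetical: the constructor signature of
# SageMakerDeployTarget is not shown in this file, so instantiation is
# illustrative only):
#     target_cls = DEPLOY_REGISTRY["sagemaker"]   # look up by config key
#     deploy_target = target_cls()                # assumed no-arg construction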
| 23
| 57
| 0.843478
|
from bridge.deploy.sagemaker import SageMakerDeployTarget
DEPLOY_REGISTRY = {"sagemaker": SageMakerDeployTarget}
| true
| true
|
7905b88a2f1d5904a06f7aaf99c619e3a743a4dc
| 8,108
|
py
|
Python
|
console_gateway_sdk/model/tuna_service/requirement_instance_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | 5
|
2019-07-31T04:11:05.000Z
|
2021-01-07T03:23:20.000Z
|
console_gateway_sdk/model/tuna_service/requirement_instance_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
console_gateway_sdk/model/tuna_service/requirement_instance_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: requirement_instance.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from console_gateway_sdk.model.topboard import issue_pb2 as console__gateway__sdk_dot_model_dot_topboard_dot_issue__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='requirement_instance.proto',
package='tuna_service',
syntax='proto3',
serialized_options=_b('ZFgo.easyops.local/contracts/protorepo-models/easyops/model/tuna_service'),
serialized_pb=_b('\n\x1arequirement_instance.proto\x12\x0ctuna_service\x1a.console_gateway_sdk/model/topboard/issue.proto\x1a\x1cgoogle/protobuf/struct.proto\"\x99\x02\n\x13RequirementInstance\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x10\n\x08sequence\x18\x03 \x01(\t\x12\r\n\x05given\x18\x04 \x01(\t\x12\x0c\n\x04when\x18\x05 \x01(\t\x12\x0c\n\x04then\x18\x06 \x01(\t\x12\x0c\n\x04type\x18\x07 \x01(\t\x12\x17\n\x0f\x64\x61taDescription\x18\x08 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\t \x01(\t\x12\x0b\n\x03tag\x18\n \x01(\t\x12\x15\n\rinterfaceName\x18\x0b \x01(\t\x12*\n\tcontracts\x18\x0c \x03(\x0b\x32\x17.google.protobuf.Struct\x12\x1e\n\x05ISSUE\x18\r \x03(\x0b\x32\x0f.topboard.IssueBHZFgo.easyops.local/contracts/protorepo-models/easyops/model/tuna_serviceb\x06proto3')
,
dependencies=[console__gateway__sdk_dot_model_dot_topboard_dot_issue__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_REQUIREMENTINSTANCE = _descriptor.Descriptor(
name='RequirementInstance',
full_name='tuna_service.RequirementInstance',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instanceId', full_name='tuna_service.RequirementInstance.instanceId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='tuna_service.RequirementInstance.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sequence', full_name='tuna_service.RequirementInstance.sequence', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='given', full_name='tuna_service.RequirementInstance.given', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='when', full_name='tuna_service.RequirementInstance.when', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='then', full_name='tuna_service.RequirementInstance.then', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='tuna_service.RequirementInstance.type', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dataDescription', full_name='tuna_service.RequirementInstance.dataDescription', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='tuna_service.RequirementInstance.data', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tag', full_name='tuna_service.RequirementInstance.tag', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='interfaceName', full_name='tuna_service.RequirementInstance.interfaceName', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='contracts', full_name='tuna_service.RequirementInstance.contracts', index=11,
number=12, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ISSUE', full_name='tuna_service.RequirementInstance.ISSUE', index=12,
number=13, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=123,
serialized_end=404,
)
_REQUIREMENTINSTANCE.fields_by_name['contracts'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_REQUIREMENTINSTANCE.fields_by_name['ISSUE'].message_type = console__gateway__sdk_dot_model_dot_topboard_dot_issue__pb2._ISSUE
DESCRIPTOR.message_types_by_name['RequirementInstance'] = _REQUIREMENTINSTANCE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RequirementInstance = _reflection.GeneratedProtocolMessageType('RequirementInstance', (_message.Message,), {
'DESCRIPTOR' : _REQUIREMENTINSTANCE,
'__module__' : 'requirement_instance_pb2'
# @@protoc_insertion_point(class_scope:tuna_service.RequirementInstance)
})
_sym_db.RegisterMessage(RequirementInstance)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
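# --- Editor's usage sketch (standard protobuf-generated message API, not
# specific to this file):
#     msg = RequirementInstance(instanceId="req-1", name="demo")
#     payload = msg.SerializeToString()
#     clone = RequirementInstance()
#     clone.ParseFromString(payload)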
| 50.360248
| 812
| 0.75555
|
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from console_gateway_sdk.model.topboard import issue_pb2 as console__gateway__sdk_dot_model_dot_topboard_dot_issue__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='requirement_instance.proto',
package='tuna_service',
syntax='proto3',
serialized_options=_b('ZFgo.easyops.local/contracts/protorepo-models/easyops/model/tuna_service'),
serialized_pb=_b('\n\x1arequirement_instance.proto\x12\x0ctuna_service\x1a.console_gateway_sdk/model/topboard/issue.proto\x1a\x1cgoogle/protobuf/struct.proto\"\x99\x02\n\x13RequirementInstance\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x10\n\x08sequence\x18\x03 \x01(\t\x12\r\n\x05given\x18\x04 \x01(\t\x12\x0c\n\x04when\x18\x05 \x01(\t\x12\x0c\n\x04then\x18\x06 \x01(\t\x12\x0c\n\x04type\x18\x07 \x01(\t\x12\x17\n\x0f\x64\x61taDescription\x18\x08 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\t \x01(\t\x12\x0b\n\x03tag\x18\n \x01(\t\x12\x15\n\rinterfaceName\x18\x0b \x01(\t\x12*\n\tcontracts\x18\x0c \x03(\x0b\x32\x17.google.protobuf.Struct\x12\x1e\n\x05ISSUE\x18\r \x03(\x0b\x32\x0f.topboard.IssueBHZFgo.easyops.local/contracts/protorepo-models/easyops/model/tuna_serviceb\x06proto3')
,
dependencies=[console__gateway__sdk_dot_model_dot_topboard_dot_issue__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_REQUIREMENTINSTANCE = _descriptor.Descriptor(
name='RequirementInstance',
full_name='tuna_service.RequirementInstance',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instanceId', full_name='tuna_service.RequirementInstance.instanceId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='tuna_service.RequirementInstance.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sequence', full_name='tuna_service.RequirementInstance.sequence', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='given', full_name='tuna_service.RequirementInstance.given', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='when', full_name='tuna_service.RequirementInstance.when', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='then', full_name='tuna_service.RequirementInstance.then', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='tuna_service.RequirementInstance.type', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dataDescription', full_name='tuna_service.RequirementInstance.dataDescription', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='tuna_service.RequirementInstance.data', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tag', full_name='tuna_service.RequirementInstance.tag', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='interfaceName', full_name='tuna_service.RequirementInstance.interfaceName', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='contracts', full_name='tuna_service.RequirementInstance.contracts', index=11,
number=12, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ISSUE', full_name='tuna_service.RequirementInstance.ISSUE', index=12,
number=13, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=123,
serialized_end=404,
)
_REQUIREMENTINSTANCE.fields_by_name['contracts'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_REQUIREMENTINSTANCE.fields_by_name['ISSUE'].message_type = console__gateway__sdk_dot_model_dot_topboard_dot_issue__pb2._ISSUE
DESCRIPTOR.message_types_by_name['RequirementInstance'] = _REQUIREMENTINSTANCE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RequirementInstance = _reflection.GeneratedProtocolMessageType('RequirementInstance', (_message.Message,), {
'DESCRIPTOR' : _REQUIREMENTINSTANCE,
'__module__' : 'requirement_instance_pb2'
# @@protoc_insertion_point(class_scope:tuna_service.RequirementInstance)
})
_sym_db.RegisterMessage(RequirementInstance)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| true
| true
|
7905b8ca2d4609edd3f15b2822a370b7b0c89611
| 9,525
|
py
|
Python
|
timed_structures.py
|
gavalle94/pyQueue
|
7d5ae9d732883a200e1a79dcf0330142ed112cd4
|
[
"MIT"
] | 1
|
2018-09-19T11:43:16.000Z
|
2018-09-19T11:43:16.000Z
|
timed_structures.py
|
gavalle94/P2P-Sim
|
f6ed633ca1cfec51bd8f8e7b9f042cdd55472648
|
[
"MIT"
] | null | null | null |
timed_structures.py
|
gavalle94/P2P-Sim
|
f6ed633ca1cfec51bd8f8e7b9f042cdd55472648
|
[
"MIT"
] | null | null | null |
class TimedData(object):
"""
    Data structure for events carrying a discrete time annotation (a timestamp or a time interval)
"""
def __init__(self, data, time, timestamp=True):
"""
        The input parameters are:
        - "data": the piece of data to store (of any kind)
        - "time": the time information attached to the data (an integer)
        - "timestamp": boolean flag. If True, the "time" field is a timestamp;
          otherwise, it is a time interval
        """
        # Input check: "time" parameter
try:
time = int(time)
        except (TypeError, ValueError):
raise TypeError('"time" parameter is invalid. It must be an integer number')
        # Build the data structure
self.data = data
self.time = time
self.timestamp = True if timestamp else False
def __eq__(self, other):
c1 = self.data == other.data
c2 = self.time == other.time
c3 = self.timestamp == other.timestamp
return c1 and c2 and c3
def __str__(self):
return '(data=%s, time=%s, timestamp=%s)' % (self.data, self.time, self.timestamp)
def get_data(self):
"""
Ritorna il campo "data"
"""
return self.data
def get_time(self):
"""
Ritorna il campo "time"
"""
return self.time
class TimedArray(object):
"""
    Array of TimedData objects
"""
def __init__(self, timestamp=True, empty=True):
"""
La flag "timestamp" serve per specificare se la lista contiene dati con un timestamp (True) oppure un
intervallo temporale (False) associato: la flag "empty" permette invece di creare, se settata a False,
un TimedArray contenente al suo interno un nodo di partenza (d = 0, t = 0)
"""
self._list = []
self.timestamp = (timestamp is True)
if not empty:
            # Create the starting node
self.append(TimedData(0, 0, self.timestamp))
def __str__(self):
x = ''
first = True
for i in self._list:
if first:
x += str(i)
first = False
else:
x += ', ' + str(i)
        return '(timestamp=%s, [%s])' % (self.timestamp, x)
def get_list(self):
"""
        Return the "TimedData" objects, stored as a list
"""
return self._list
def get_data_list(self):
"""
Ritorna gli attributi "data" di ogni elemento del vettore, sottoforma di lista
"""
return map(lambda x: x.get_data(), self._list)
def get_time_list(self):
"""
Ritorna gli attributi "time" di ogni elemento del vettore, sottoforma di lista
"""
return map(lambda x: x.get_time(), self._list)
def has_time_intervals(self):
"""
        Return True if the elements of the array carry a time interval
"""
return self.timestamp is False
def append(self, item):
"""
        Append an element to the list
        """
        # Input check: "item" parameter
if not isinstance(item, TimedData):
raise TypeError('cannot add a non-"TimedData" object to a "TimedArray" list')
if item.timestamp != self.timestamp:
raise ValueError(
'"item" parameter is invalid: its "timestamp" attribute must be equal to %s' % self.timestamp)
        # Append the element to the list
self._list.append(item)
def remove(self, item):
"""
        This function removes "item" (if present) from the array
        """
        # Input check: "item" parameter
        if not isinstance(item, TimedData):
            raise TypeError('the item to remove must be a "TimedData" object')
        # Remove the object, if present
if item in self._list:
self._list.remove(item)
def remove_all(self, items):
"""
        This function removes a list of "TimedData" objects
        """
        # Input check: "items" parameter
        if not isinstance(items, (list, tuple)):
            raise TypeError('"items" parameter must be an array')
        # Remove one object at a time
try:
for x in items:
self.remove(x)
except TypeError:
raise TypeError('the items list must contain only "TimedData" objects')
def filter(self, f):
"""
        Apply the function f to filter the contents of the array
"""
res = TimedArray(self.timestamp, empty=True)
res._list = filter(
f,
self._list
)
return res
def filter_data_range(self, start, end):
"""
        Filter the array by a range of "data" values
"""
return self.filter(
lambda x: start <= x.get_data() <= end
)
def filter_time_range(self, start, end):
"""
        Filter the array by a range of "time" values
"""
return self.filter(
lambda x: start <= x.get_time() <= end
)
def search(self, to_search):
"""
        Search within the contents of the array.
        If "timestamp" is True, the search key is the timestamp; otherwise,
        the key is the content the time interval is attached to.
        """
        if self.timestamp:
            # The search key is "time", an integer
            res = self.search_by_time(to_search)
        else:
            # The search key is "data", a value of any type
            res = self.search_by_data(to_search)
        # Search results
return res
def search_by_data(self, to_search):
"""
        Search the array by the "data" field
"""
research = (lambda x: x.data == to_search)
return filter(research, self._list)
def search_by_datas(self, search_params):
"""
        Search the array by the "data" field: the search parameter is an array
        """
        # Input check: "search_params" parameter
        if not isinstance(search_params, (list, tuple)):
            raise TypeError('"searchParams" parameter is invalid. It must be an array')
        # Run one search per specified parameter
        result = []
        for x in search_params:
            # Search by data with parameter "x"
            tmp = self.search_by_data(x)
            # Append the partial results to the overall search result
            for t in tmp:
                result.append(t)
        # Results of the multiple search
return result
def search_by_time(self, to_search):
"""
        Search the array by the "time" field
        The "to_search" parameter must be an integer
"""
if not isinstance(to_search, (int, long)):
raise TypeError('the research parameter must be an integer number (timestamp)')
research = (lambda x: x.time == to_search)
return filter(research, self._list)
def search_by_times(self, search_params):
"""
        Search the array by the "time" field: the search parameter is an array
        """
        # Input check: "search_params" parameter
        if not isinstance(search_params, (list, tuple)):
            raise TypeError('"searchParams" parameter is invalid. It must be an array')
        # Run one search per specified parameter
        result = []
        for x in search_params:
            # Search by time with parameter "x"
            tmp = self.search_by_time(x)
            # Append the partial results to the overall search result
            for t in tmp:
                result.append(t)
        # Results of the multiple search
return result
def contains(self, to_search):
"""
        Tell whether searching the array with the given "to_search"
        key produces any result
"""
return len(self.search(to_search)) > 0
def update(self, to_search, new_value):
"""
        This function updates the contents of the array elements that
        match the given search criterion
        - "to_search" is the search key
        - "new_value" is the updated value to store
        """
        # Run a search
        items = self.search(to_search)
        # Define the update rule
        if self.timestamp:
            # The search key is "time": update "data"
            # update_function = (lambda x: x.data = new_value)
            def update_function(x):
                x.data = new_value
        else:
            # The search key is "data": update "time"
            # update_function = (lambda x: x.time = new_value)
            def update_function(x):
                x.time = new_value
        # Update the elements
map(update_function, items)
def insert_or_update(self, time_to_search, data_value):
if self.contains(time_to_search):
self.update(time_to_search, data_value)
else:
self.append(
TimedData(data_value, time_to_search, self.timestamp)
)
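# --- Editor's usage sketch (illustration, not part of the original module;
# assumes Python 2, consistent with the file's use of `long` and of
# list-returning filter/map) ---
if __name__ == '__main__':
    arr = TimedArray(timestamp=True)
    arr.append(TimedData('boot', 10))
    arr.append(TimedData('login', 25))
    print(arr.search_by_time(10)[0].get_data())           # -> boot
    print(arr.filter_time_range(0, 20).get_data_list())   # -> ['boot']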
| 35.541045
| 111
| 0.586247
|
class TimedData(object):
def __init__(self, data, time, timestamp=True):
try:
time = int(time)
        except (TypeError, ValueError):
raise TypeError('"time" parameter is invalid. It must be an integer number')
        # Build the data structure
self.data = data
self.time = time
self.timestamp = True if timestamp else False
def __eq__(self, other):
c1 = self.data == other.data
c2 = self.time == other.time
c3 = self.timestamp == other.timestamp
return c1 and c2 and c3
def __str__(self):
return '(data=%s, time=%s, timestamp=%s)' % (self.data, self.time, self.timestamp)
def get_data(self):
return self.data
def get_time(self):
return self.time
class TimedArray(object):
def __init__(self, timestamp=True, empty=True):
self._list = []
self.timestamp = (timestamp is True)
if not empty:
            # Create the starting node
self.append(TimedData(0, 0, self.timestamp))
def __str__(self):
x = ''
first = True
for i in self._list:
if first:
x += str(i)
first = False
else:
x += ', ' + str(i)
        return '(timestamp=%s, [%s])' % (self.timestamp, x)
def get_list(self):
return self._list
def get_data_list(self):
return map(lambda x: x.get_data(), self._list)
def get_time_list(self):
return map(lambda x: x.get_time(), self._list)
def has_time_intervals(self):
return self.timestamp is False
def append(self, item):
# Controllo dei parametri di input: "item"
if not isinstance(item, TimedData):
raise TypeError('cannot add a non-"TimedData" object to a "TimedArray" list')
if item.timestamp != self.timestamp:
raise ValueError(
'"item" parameter is invalid: its "timestamp" attribute must be equal to %s' % self.timestamp)
        # Append the element to the list
self._list.append(item)
def remove(self, item):
if not isinstance(item, TimedData):
raise TypeError('the item to remove must be a "TimedData" object')
if item in self._list:
self._list.remove(item)
def remove_all(self, items):
# Controllo dei parametri di input: "items"
if not isinstance(items, (list, tuple)):
raise TypeError('"items" parameter must be an array')
# Elimino un oggetto per volta
try:
for x in items:
self.remove(x)
except TypeError:
raise TypeError('the items list must contain only "TimedData" objects')
def filter(self, f):
res = TimedArray(self.timestamp, empty=True)
res._list = filter(
f,
self._list
)
return res
def filter_data_range(self, start, end):
return self.filter(
lambda x: start <= x.get_data() <= end
)
def filter_time_range(self, start, end):
return self.filter(
lambda x: start <= x.get_time() <= end
)
def search(self, to_search):
if self.timestamp:
            # The search key is "time", an integer
            res = self.search_by_time(to_search)
        else:
            res = self.search_by_data(to_search)
        # Search results
return res
def search_by_data(self, to_search):
research = (lambda x: x.data == to_search)
return filter(research, self._list)
def search_by_datas(self, search_params):
        # Input check: "search_params" parameter
        if not isinstance(search_params, (list, tuple)):
            raise TypeError('"searchParams" parameter is invalid. It must be an array')
        # Run one search per specified parameter
        result = []
        for x in search_params:
            # Search by data with parameter "x"
            tmp = self.search_by_data(x)
            # Append the partial results to the overall search result
            for t in tmp:
                result.append(t)
        # Results of the multiple search
return result
def search_by_time(self, to_search):
if not isinstance(to_search, (int, long)):
raise TypeError('the research parameter must be an integer number (timestamp)')
research = (lambda x: x.time == to_search)
return filter(research, self._list)
def search_by_times(self, search_params):
        # Input check: "search_params" parameter
        if not isinstance(search_params, (list, tuple)):
            raise TypeError('"searchParams" parameter is invalid. It must be an array')
        # Run one search per specified parameter
        result = []
        for x in search_params:
            # Search by time with parameter "x"
            tmp = self.search_by_time(x)
            # Append the partial results to the overall search result
            for t in tmp:
                result.append(t)
        # Results of the multiple search
return result
def contains(self, to_search):
return len(self.search(to_search)) > 0
def update(self, to_search, new_value):
        # Run a search
        items = self.search(to_search)
        # Define the update rule
        if self.timestamp:
            # The search key is "time": update "data"
            def update_function(x):
                x.data = new_value
        else:
            # update_function = (lambda x: x.time = new_value)
            def update_function(x):
                x.time = new_value
        # Update the elements
map(update_function, items)
def insert_or_update(self, time_to_search, data_value):
if self.contains(time_to_search):
self.update(time_to_search, data_value)
else:
self.append(
TimedData(data_value, time_to_search, self.timestamp)
)
| true
| true
|
7905bb6532dc2a7620792f0293357acdc78323ad
| 2,684
|
py
|
Python
|
search_script.py
|
collabnix/openusm
|
73c0fea36b1fa5f469380b8b8cf79f393613e335
|
[
"MIT"
] | 4
|
2019-08-04T05:50:46.000Z
|
2020-04-16T19:24:11.000Z
|
search_script.py
|
collabnix/openusm
|
73c0fea36b1fa5f469380b8b8cf79f393613e335
|
[
"MIT"
] | null | null | null |
search_script.py
|
collabnix/openusm
|
73c0fea36b1fa5f469380b8b8cf79f393613e335
|
[
"MIT"
] | 6
|
2019-08-03T12:57:47.000Z
|
2020-06-08T01:50:43.000Z
|
from __future__ import division
import requests
import json
import sys
import os
from elasticsearch import Elasticsearch
from elasticsearch import exceptions
try:
# idrac_ip = os.environ['IDRAC_IP']
# idrac_username = os.environ['IDRAC_USERNAME']
# idrac_password = os.environ['IDRAC_PASSWORD']
# elastic_ip = os.environ['ELASTIC_IP']
# elastic_username = os.environ['ELASTIC_USERNAME']
# elastic_password = os.environ['ELASTIC_PASSWORD']
idrac_ip="100.98.26.49"
idrac_username="root"
idrac_password="calvin"
elastic_ip="100.98.26.172"
elastic_username="elastic"
elastic_password="changeme"
es = Elasticsearch([elastic_ip],
http_auth=(elastic_username, elastic_password),
scheme="http",
port=9200,
)
except Exception as e:
print("- FAIL: You must pass in script name along with iDRAC IP / iDRAC username / iDRAC password")
sys.exit(0)
def retrieve_logs():
index_name="lc"+idrac_ip
res=es.search(index=index_name, body={
"query":{
"range": {
"timestamp": {
"gte" : "now-5m",
"lt" : "now"
}
}
}
}
)
# print(data)
codes = {}
code_types={}
for i in res['hits']['hits']:
#print(i)
#print("\n")
for key,value in i['_source'].items():
if key=='MessageID':
code=value
code_type=value[0:3]
#print(code_type)
if code in codes:
codes[code]=codes[code]+1
else:
codes.update({code: 1})
if code_type in code_types:
code_types[code_type]=code_types[code_type]+1
else:
code_types.update({code_type: 1})
total_errors=sum(codes.values())
# print total_errors
error_percentage={}
print "\nFor Server: ",idrac_ip
# print "__________________________ \n\n\n"
print("\n\n\n")
print "Error Codes Occurrence Percentage "
print "____________________________________________ \n"
for key,value in codes.items():
error_percentage[key]= (value/total_errors)*100
print key," ",value," ",error_percentage[key],"%"
print "\n"
print "Error Types Occurrence "
print "__________________________ \n"
for key,value in code_types.items():
print key," ",value
# print(codes)
# print(code_types)
# print (total_errors)
# print error_percentage
retrieve_logs()
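# --- Editor's note (illustration only): each hit consumed above is assumed to
# look roughly like
#     {"_source": {"MessageID": "PDR0001", "timestamp": "..."}}
# so a single such hit would yield codes == {"PDR0001": 1} and
# code_types == {"PDR": 1}; error_percentage is then (count / total) * 100,
# which stays exact here thanks to "from __future__ import division".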
| 28.252632
| 103
| 0.564083
|
from __future__ import division
import requests
import json
import sys
import os
from elasticsearch import Elasticsearch
from elasticsearch import exceptions
try:
idrac_ip="100.98.26.49"
idrac_username="root"
idrac_password="calvin"
elastic_ip="100.98.26.172"
elastic_username="elastic"
elastic_password="changeme"
es = Elasticsearch([elastic_ip],
http_auth=(elastic_username, elastic_password),
scheme="http",
port=9200,
)
except Exception as e:
print("- FAIL: You must pass in script name along with iDRAC IP / iDRAC username / iDRAC password")
sys.exit(0)
def retrieve_logs():
index_name="lc"+idrac_ip
res=es.search(index=index_name, body={
"query":{
"range": {
"timestamp": {
"gte" : "now-5m",
"lt" : "now"
}
}
}
}
)
codes = {}
code_types={}
for i in res['hits']['hits']:
for key,value in i['_source'].items():
if key=='MessageID':
code=value
code_type=value[0:3]
if code in codes:
codes[code]=codes[code]+1
else:
codes.update({code: 1})
if code_type in code_types:
code_types[code_type]=code_types[code_type]+1
else:
code_types.update({code_type: 1})
total_errors=sum(codes.values())
error_percentage={}
print "\nFor Server: ",idrac_ip
print("\n\n\n")
print "Error Codes Occurrence Percentage "
print "____________________________________________ \n"
for key,value in codes.items():
error_percentage[key]= (value/total_errors)*100
print key," ",value," ",error_percentage[key],"%"
print "\n"
print "Error Types Occurrence "
print "__________________________ \n"
for key,value in code_types.items():
print key," ",value
retrieve_logs()
| false
| true
|
7905bc2147b61285b910192e1759b412fc9029ee
| 67
|
py
|
Python
|
webpie/Version.py
|
webpie/webpie
|
c7f1bc29a63c0be683c60756165a6c65260211f9
|
[
"BSD-3-Clause"
] | 2
|
2021-12-10T16:12:51.000Z
|
2022-01-06T17:29:12.000Z
|
webpie/Version.py
|
webpie/webpie
|
c7f1bc29a63c0be683c60756165a6c65260211f9
|
[
"BSD-3-Clause"
] | null | null | null |
webpie/Version.py
|
webpie/webpie
|
c7f1bc29a63c0be683c60756165a6c65260211f9
|
[
"BSD-3-Clause"
] | null | null | null |
Version = "5.6.5"
if __name__ == "__main__":
    print(Version)
| 11.166667
| 26
| 0.597015
|
Version = "5.6.5"
if __name__ == "__main__":
    print(Version)
| true
| true
|
7905bd612c27e69fb9a7c4aa6088735210d22dfe
| 26,362
|
py
|
Python
|
venv/lib/python3.8/site-packages/statsmodels/genmod/families/links.py
|
johncollinsai/post-high-frequency-data
|
88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4
|
[
"MIT"
] | 6,931
|
2015-01-01T11:41:55.000Z
|
2022-03-31T17:03:24.000Z
|
venv/lib/python3.8/site-packages/statsmodels/genmod/families/links.py
|
johncollinsai/post-high-frequency-data
|
88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4
|
[
"MIT"
] | 6,137
|
2015-01-01T00:33:45.000Z
|
2022-03-31T22:53:17.000Z
|
venv/lib/python3.8/site-packages/statsmodels/genmod/families/links.py
|
johncollinsai/post-high-frequency-data
|
88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4
|
[
"MIT"
] | 2,608
|
2015-01-02T21:32:31.000Z
|
2022-03-31T07:38:30.000Z
|
'''
Defines the link functions to be used with GLM and GEE families.
'''
import numpy as np
import scipy.stats
FLOAT_EPS = np.finfo(float).eps
class Link(object):
"""
A generic link function for one-parameter exponential family.
`Link` does nothing, but lays out the methods expected of any subclass.
"""
def __call__(self, p):
"""
Return the value of the link function. This is just a placeholder.
Parameters
----------
p : array_like
Probabilities
Returns
-------
g(p) : array_like
The value of the link function g(p) = z
"""
return NotImplementedError
def inverse(self, z):
"""
Inverse of the link function. Just a placeholder.
Parameters
----------
z : array_like
`z` is usually the linear predictor of the transformed variable
in the IRLS algorithm for GLM.
Returns
-------
g^(-1)(z) : ndarray
The value of the inverse of the link function g^(-1)(z) = p
"""
return NotImplementedError
def deriv(self, p):
"""
Derivative of the link function g'(p). Just a placeholder.
Parameters
----------
p : array_like
Returns
-------
g'(p) : ndarray
The value of the derivative of the link function g'(p)
"""
return NotImplementedError
def deriv2(self, p):
"""Second derivative of the link function g''(p)
implemented through numerical differentiation
"""
from statsmodels.tools.numdiff import _approx_fprime_cs_scalar
return _approx_fprime_cs_scalar(p, self.deriv)
def inverse_deriv(self, z):
"""
Derivative of the inverse link function g^(-1)(z).
Parameters
----------
z : array_like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g'^(-1)(z) : ndarray
The value of the derivative of the inverse of the link function
Notes
-----
This reference implementation gives the correct result but is
inefficient, so it can be overridden in subclasses.
"""
return 1 / self.deriv(self.inverse(z))
def inverse_deriv2(self, z):
"""
Second derivative of the inverse link function g^(-1)(z).
Parameters
----------
z : array_like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g'^(-1)(z) : ndarray
The value of the second derivative of the inverse of the link
function
Notes
-----
This reference implementation gives the correct result but is
inefficient, so it can be overridden in subclasses.
"""
iz = self.inverse(z)
return -self.deriv2(iz) / self.deriv(iz)**3
class Logit(Link):
"""
The logit transform
Notes
-----
    call and derivative use a private method _clean to trim p by
    machine epsilon so that p is in (0, 1)
Alias of Logit:
logit = Logit()
"""
def _clean(self, p):
"""
Clip logistic values to range (eps, 1-eps)
Parameters
----------
p : array_like
Probabilities
Returns
-------
pclip : ndarray
Clipped probabilities
"""
return np.clip(p, FLOAT_EPS, 1. - FLOAT_EPS)
def __call__(self, p):
"""
The logit transform
Parameters
----------
p : array_like
Probabilities
Returns
-------
z : ndarray
Logit transform of `p`
Notes
-----
g(p) = log(p / (1 - p))
"""
p = self._clean(p)
return np.log(p / (1. - p))
def inverse(self, z):
"""
Inverse of the logit transform
Parameters
----------
z : array_like
The value of the logit transform at `p`
Returns
-------
p : ndarray
Probabilities
Notes
-----
g^(-1)(z) = exp(z)/(1+exp(z))
"""
z = np.asarray(z)
t = np.exp(-z)
return 1. / (1. + t)
def deriv(self, p):
"""
Derivative of the logit transform
Parameters
----------
p : array_like
Probabilities
Returns
-------
g'(p) : ndarray
Value of the derivative of logit transform at `p`
Notes
-----
g'(p) = 1 / (p * (1 - p))
Alias for `Logit`:
logit = Logit()
"""
p = self._clean(p)
return 1. / (p * (1 - p))
def inverse_deriv(self, z):
"""
Derivative of the inverse of the logit transform
Parameters
----------
z : array_like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g'^(-1)(z) : ndarray
The value of the derivative of the inverse of the logit function
"""
t = np.exp(z)
return t/(1 + t)**2
def deriv2(self, p):
"""
Second derivative of the logit function.
Parameters
----------
p : array_like
probabilities
Returns
-------
g''(z) : ndarray
The value of the second derivative of the logit function
"""
v = p * (1 - p)
return (2*p - 1) / v**2
class logit(Logit):
pass
class Power(Link):
"""
The power transform
Parameters
----------
power : float
The exponent of the power transform
Notes
-----
Aliases of Power:
    inverse_power = Power(power=-1.)
sqrt = Power(power=.5)
inverse_squared = Power(power=-2.)
identity = Power(power=1.)
"""
def __init__(self, power=1.):
self.power = power
def __call__(self, p):
"""
Power transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
z : array_like
Power transform of x
Notes
-----
g(p) = x**self.power
"""
if self.power == 1:
return p
else:
return np.power(p, self.power)
def inverse(self, z):
"""
Inverse of the power transform link function
Parameters
----------
`z` : array_like
Value of the transformed mean parameters at `p`
Returns
-------
`p` : ndarray
Mean parameters
Notes
-----
        g^(-1)(`z`) = `z`**(1/`power`)
"""
if self.power == 1:
return z
else:
return np.power(z, 1. / self.power)
def deriv(self, p):
"""
Derivative of the power transform
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g'(p) : ndarray
Derivative of power transform of `p`
Notes
-----
g'(`p`) = `power` * `p`**(`power` - 1)
"""
if self.power == 1:
return np.ones_like(p)
else:
return self.power * np.power(p, self.power - 1)
def deriv2(self, p):
"""
Second derivative of the power transform
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g''(p) : ndarray
Second derivative of the power transform of `p`
Notes
-----
g''(`p`) = `power` * (`power` - 1) * `p`**(`power` - 2)
"""
if self.power == 1:
return np.zeros_like(p)
else:
return self.power * (self.power - 1) * np.power(p, self.power - 2)
def inverse_deriv(self, z):
"""
Derivative of the inverse of the power transform
Parameters
----------
z : array_like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g^(-1)'(z) : ndarray
The value of the derivative of the inverse of the power transform
function
"""
if self.power == 1:
return np.ones_like(z)
else:
return np.power(z, (1 - self.power)/self.power) / self.power
def inverse_deriv2(self, z):
"""
Second derivative of the inverse of the power transform
Parameters
----------
z : array_like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g^(-1)'(z) : ndarray
The value of the derivative of the inverse of the power transform
function
"""
if self.power == 1:
return np.zeros_like(z)
else:
return ((1 - self.power) *
np.power(z, (1 - 2*self.power)/self.power) / self.power**2)
class inverse_power(Power):
"""
The inverse transform
Notes
-----
g(p) = 1/p
Alias of statsmodels.family.links.Power(power=-1.)
"""
def __init__(self):
super(inverse_power, self).__init__(power=-1.)
class sqrt(Power):
"""
The square-root transform
Notes
-----
g(`p`) = sqrt(`p`)
Alias of statsmodels.family.links.Power(power=.5)
"""
def __init__(self):
super(sqrt, self).__init__(power=.5)
class inverse_squared(Power):
r"""
The inverse squared transform
Notes
-----
g(`p`) = 1/(`p`\*\*2)
    Alias of statsmodels.family.links.Power(power=-2.)
"""
def __init__(self):
super(inverse_squared, self).__init__(power=-2.)
class identity(Power):
"""
The identity transform
Notes
-----
g(`p`) = `p`
Alias of statsmodels.family.links.Power(power=1.)
"""
def __init__(self):
super(identity, self).__init__(power=1.)
class Log(Link):
"""
The log transform
Notes
-----
call and derivative call a private method _clean to trim the data by
machine epsilon so that p is in (0,1). log is an alias of Log.
"""
def _clean(self, x):
return np.clip(x, FLOAT_EPS, np.inf)
def __call__(self, p, **extra):
"""
Log transform link function
Parameters
----------
x : array_like
Mean parameters
Returns
-------
z : ndarray
log(x)
Notes
-----
g(p) = log(p)
"""
x = self._clean(p)
return np.log(x)
def inverse(self, z):
"""
Inverse of log transform link function
Parameters
----------
z : ndarray
The inverse of the link function at `p`
Returns
-------
p : ndarray
The mean probabilities given the value of the inverse `z`
Notes
-----
g^{-1}(z) = exp(z)
"""
return np.exp(z)
def deriv(self, p):
"""
Derivative of log transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g'(p) : ndarray
derivative of log transform of x
Notes
-----
g'(x) = 1/x
"""
p = self._clean(p)
return 1. / p
def deriv2(self, p):
"""
Second derivative of the log transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g''(p) : ndarray
Second derivative of log transform of x
Notes
-----
g''(x) = -1/x^2
"""
p = self._clean(p)
return -1. / p**2
def inverse_deriv(self, z):
"""
Derivative of the inverse of the log transform link function
Parameters
----------
z : ndarray
The inverse of the link function at `p`
Returns
-------
g^(-1)'(z) : ndarray
The value of the derivative of the inverse of the log function,
the exponential function
"""
return np.exp(z)
class log(Log):
"""
The log transform
Notes
-----
    log is an alias of Log.
"""
pass
# TODO: the CDFLink is untested
class CDFLink(Logit):
"""
The use the CDF of a scipy.stats distribution
CDFLink is a subclass of logit in order to use its _clean method
for the link and its derivative.
Parameters
----------
dbn : scipy.stats distribution
Default is dbn=scipy.stats.norm
Notes
-----
The CDF link is untested.
"""
def __init__(self, dbn=scipy.stats.norm):
self.dbn = dbn
def __call__(self, p):
"""
CDF link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
z : ndarray
(ppf) inverse of CDF transform of p
Notes
-----
g(`p`) = `dbn`.ppf(`p`)
"""
p = self._clean(p)
return self.dbn.ppf(p)
def inverse(self, z):
"""
The inverse of the CDF link
Parameters
----------
z : array_like
The value of the inverse of the link function at `p`
Returns
-------
p : ndarray
Mean probabilities. The value of the inverse of CDF link of `z`
Notes
-----
g^(-1)(`z`) = `dbn`.cdf(`z`)
"""
return self.dbn.cdf(z)
def deriv(self, p):
"""
Derivative of CDF link
Parameters
----------
p : array_like
mean parameters
Returns
-------
g'(p) : ndarray
The derivative of CDF transform at `p`
Notes
-----
g'(`p`) = 1./ `dbn`.pdf(`dbn`.ppf(`p`))
"""
p = self._clean(p)
return 1. / self.dbn.pdf(self.dbn.ppf(p))
def deriv2(self, p):
"""
        Second derivative of the link function g''(p),
        computed analytically from the inverse link
"""
p = self._clean(p)
linpred = self.dbn.ppf(p)
return - self.inverse_deriv2(linpred) / self.dbn.pdf(linpred)**3
def deriv2_numdiff(self, p):
"""
Second derivative of the link function g''(p)
implemented through numerical differentiation
"""
from statsmodels.tools.numdiff import _approx_fprime_scalar
p = np.atleast_1d(p)
# Note: special function for norm.ppf does not support complex
return _approx_fprime_scalar(p, self.deriv, centered=True)
def inverse_deriv(self, z):
"""
Derivative of the inverse link function
Parameters
----------
z : ndarray
The inverse of the link function at `p`
Returns
-------
g^(-1)'(z) : ndarray
The value of the derivative of the inverse of the logit function.
This is just the pdf in a CDFLink,
"""
return self.dbn.pdf(z)
def inverse_deriv2(self, z):
"""
Second derivative of the inverse link function g^(-1)(z).
Parameters
----------
z : array_like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g^(-1)''(z) : ndarray
The value of the second derivative of the inverse of the link
function
Notes
-----
This method should be overwritten by subclasses.
The inherited method is implemented through numerical differentiation.
"""
from statsmodels.tools.numdiff import _approx_fprime_scalar
z = np.atleast_1d(z)
# Note: special function for norm.ppf does not support complex
return _approx_fprime_scalar(z, self.inverse_deriv, centered=True)
class probit(CDFLink):
"""
The probit (standard normal CDF) transform
Notes
-----
g(p) = scipy.stats.norm.ppf(p)
probit is an alias of CDFLink.
"""
def inverse_deriv2(self, z):
"""
Second derivative of the inverse link function
This is the derivative of the pdf in a CDFLink
"""
return - z * self.dbn.pdf(z)
def deriv2(self, p):
"""
Second derivative of the link function g''(p)
"""
p = self._clean(p)
linpred = self.dbn.ppf(p)
return linpred / self.dbn.pdf(linpred)**2
class cauchy(CDFLink):
"""
The Cauchy (standard Cauchy CDF) transform
Notes
-----
g(p) = scipy.stats.cauchy.ppf(p)
cauchy is an alias of CDFLink with dbn=scipy.stats.cauchy
"""
def __init__(self):
super(cauchy, self).__init__(dbn=scipy.stats.cauchy)
def deriv2(self, p):
"""
Second derivative of the Cauchy link function.
Parameters
----------
p : array_like
Probabilities
Returns
-------
g''(p) : ndarray
Value of the second derivative of Cauchy link function at `p`
"""
p = self._clean(p)
a = np.pi * (p - 0.5)
d2 = 2 * np.pi**2 * np.sin(a) / np.cos(a)**3
return d2
def inverse_deriv2(self, z):
return - 2 * z / (np.pi * (z**2 + 1)**2)
class CLogLog(Logit):
"""
The complementary log-log transform
CLogLog inherits from Logit in order to have access to its _clean method
for the link and its derivative.
Notes
-----
CLogLog is untested.
"""
def __call__(self, p):
"""
C-Log-Log transform link function
Parameters
----------
p : ndarray
Mean parameters
Returns
-------
z : ndarray
The CLogLog transform of `p`
Notes
-----
g(p) = log(-log(1-p))
"""
p = self._clean(p)
return np.log(-np.log(1 - p))
def inverse(self, z):
"""
Inverse of C-Log-Log transform link function
Parameters
----------
z : array_like
The value of the inverse of the CLogLog link function at `p`
Returns
-------
p : ndarray
Mean parameters
Notes
-----
g^(-1)(`z`) = 1-exp(-exp(`z`))
"""
return 1 - np.exp(-np.exp(z))
def deriv(self, p):
"""
Derivative of C-Log-Log transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g'(p) : ndarray
The derivative of the CLogLog transform link function
Notes
-----
g'(p) = - 1 / ((p-1)*log(1-p))
"""
p = self._clean(p)
return 1. / ((p - 1) * (np.log(1 - p)))
def deriv2(self, p):
"""
        Second derivative of the C-Log-Log link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g''(p) : ndarray
The second derivative of the CLogLog link function
"""
p = self._clean(p)
fl = np.log(1 - p)
d2 = -1 / ((1 - p)**2 * fl)
d2 *= 1 + 1 / fl
return d2
def inverse_deriv(self, z):
"""
Derivative of the inverse of the C-Log-Log transform link function
Parameters
----------
z : array_like
The value of the inverse of the CLogLog link function at `p`
Returns
-------
g^(-1)'(z) : ndarray
The derivative of the inverse of the CLogLog link function
"""
return np.exp(z - np.exp(z))
class cloglog(CLogLog):
"""
The CLogLog transform link function.
Notes
-----
g(`p`) = log(-log(1-`p`))
cloglog is an alias for CLogLog
cloglog = CLogLog()
"""
pass
class LogLog(Logit):
"""
The log-log transform
LogLog inherits from Logit in order to have access to its _clean method
for the link and its derivative.
"""
def __call__(self, p):
"""
Log-Log transform link function
Parameters
----------
p : ndarray
Mean parameters
Returns
-------
z : ndarray
The LogLog transform of `p`
Notes
-----
g(p) = -log(-log(p))
"""
p = self._clean(p)
return -np.log(-np.log(p))
def inverse(self, z):
"""
Inverse of Log-Log transform link function
Parameters
----------
z : array_like
The value of the inverse of the LogLog link function at `p`
Returns
-------
p : ndarray
Mean parameters
Notes
-----
g^(-1)(`z`) = exp(-exp(-`z`))
"""
return np.exp(-np.exp(-z))
def deriv(self, p):
"""
Derivative of Log-Log transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g'(p) : ndarray
The derivative of the LogLog transform link function
Notes
-----
g'(p) = - 1 /(p * log(p))
"""
p = self._clean(p)
return -1. / (p * (np.log(p)))
def deriv2(self, p):
"""
Second derivative of the Log-Log link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g''(p) : ndarray
The second derivative of the LogLog link function
"""
p = self._clean(p)
d2 = (1 + np.log(p)) / (p * (np.log(p)))**2
return d2
def inverse_deriv(self, z):
"""
Derivative of the inverse of the Log-Log transform link function
Parameters
----------
z : array_like
The value of the inverse of the LogLog link function at `p`
Returns
-------
g^(-1)'(z) : ndarray
The derivative of the inverse of the LogLog link function
"""
return np.exp(-np.exp(-z) - z)
def inverse_deriv2(self, z):
"""
Second derivative of the inverse of the Log-Log transform link function
Parameters
----------
z : array_like
The value of the inverse of the LogLog link function at `p`
Returns
-------
g^(-1)''(z) : ndarray
The second derivative of the inverse of the LogLog link function
"""
return self.inverse_deriv(z) * (np.exp(-z) - 1)
class loglog(LogLog):
"""
The LogLog transform link function.
Notes
-----
g(`p`) = -log(-log(`p`))
loglog is an alias for LogLog
loglog = LogLog()
"""
pass
class NegativeBinomial(Link):
'''
The negative binomial link function
Parameters
----------
alpha : float, optional
Alpha is the ancillary parameter of the Negative Binomial link
function. It is assumed to be nonstochastic. The default value is 1.
Permissible values are usually assumed to be in (.01, 2).
'''
def __init__(self, alpha=1.):
self.alpha = alpha
def _clean(self, x):
return np.clip(x, FLOAT_EPS, np.inf)
def __call__(self, p):
'''
Negative Binomial transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
z : ndarray
The negative binomial transform of `p`
Notes
-----
g(p) = log(p/(p + 1/alpha))
'''
p = self._clean(p)
return np.log(p/(p + 1/self.alpha))
def inverse(self, z):
'''
Inverse of the negative binomial transform
Parameters
----------
z : array_like
The value of the inverse of the negative binomial link at `p`.
Returns
-------
p : ndarray
Mean parameters
Notes
-----
g^(-1)(z) = exp(z)/(alpha*(1-exp(z)))
'''
return -1/(self.alpha * (1 - np.exp(-z)))
def deriv(self, p):
'''
Derivative of the negative binomial transform
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g'(p) : ndarray
The derivative of the negative binomial transform link function
Notes
-----
g'(x) = 1/(x+alpha*x^2)
'''
return 1/(p + self.alpha * p**2)
def deriv2(self, p):
'''
Second derivative of the negative binomial link function.
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g''(p) : ndarray
The second derivative of the negative binomial transform link
function
Notes
-----
g''(x) = -(1+2*alpha*x)/(x+alpha*x^2)^2
'''
numer = -(1 + 2 * self.alpha * p)
denom = (p + self.alpha * p**2)**2
return numer / denom
def inverse_deriv(self, z):
'''
Derivative of the inverse of the negative binomial transform
Parameters
----------
z : array_like
Usually the linear predictor for a GLM or GEE model
Returns
-------
g^(-1)'(z) : ndarray
The value of the derivative of the inverse of the negative
binomial link
'''
t = np.exp(z)
return t / (self.alpha * (1-t)**2)
class nbinom(NegativeBinomial):
"""
The negative binomial link function.
Notes
-----
g(p) = log(p/(p + 1/alpha))
nbinom is an alias of NegativeBinomial.
nbinom = NegativeBinomial(alpha=1.)
"""
pass
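# --- Editor's sanity-check sketch (not part of statsmodels; relies only on the
# classes and the numpy import above) ---
if __name__ == "__main__":
    link = Logit()
    p = np.array([0.2, 0.5, 0.9])
    z = link(p)
    # round trip: g^{-1}(g(p)) == p
    assert np.allclose(link.inverse(z), p)
    # inverse function theorem: (g^{-1})'(z) == 1 / g'(p)
    assert np.allclose(link.inverse_deriv(z), 1.0 / link.deriv(p))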
| 21.986656
| 79
| 0.49082
|
import numpy as np
import scipy.stats
FLOAT_EPS = np.finfo(float).eps
class Link(object):
def __call__(self, p):
return NotImplementedError
def inverse(self, z):
return NotImplementedError
def deriv(self, p):
return NotImplementedError
def deriv2(self, p):
from statsmodels.tools.numdiff import _approx_fprime_cs_scalar
return _approx_fprime_cs_scalar(p, self.deriv)
def inverse_deriv(self, z):
return 1 / self.deriv(self.inverse(z))
def inverse_deriv2(self, z):
iz = self.inverse(z)
return -self.deriv2(iz) / self.deriv(iz)**3
class Logit(Link):
def _clean(self, p):
return np.clip(p, FLOAT_EPS, 1. - FLOAT_EPS)
def __call__(self, p):
p = self._clean(p)
return np.log(p / (1. - p))
def inverse(self, z):
z = np.asarray(z)
t = np.exp(-z)
return 1. / (1. + t)
def deriv(self, p):
p = self._clean(p)
return 1. / (p * (1 - p))
def inverse_deriv(self, z):
t = np.exp(z)
return t/(1 + t)**2
def deriv2(self, p):
v = p * (1 - p)
return (2*p - 1) / v**2
class logit(Logit):
pass
class Power(Link):
def __init__(self, power=1.):
self.power = power
def __call__(self, p):
if self.power == 1:
return p
else:
return np.power(p, self.power)
def inverse(self, z):
if self.power == 1:
return z
else:
return np.power(z, 1. / self.power)
def deriv(self, p):
if self.power == 1:
return np.ones_like(p)
else:
return self.power * np.power(p, self.power - 1)
def deriv2(self, p):
if self.power == 1:
return np.zeros_like(p)
else:
return self.power * (self.power - 1) * np.power(p, self.power - 2)
def inverse_deriv(self, z):
if self.power == 1:
return np.ones_like(z)
else:
return np.power(z, (1 - self.power)/self.power) / self.power
def inverse_deriv2(self, z):
if self.power == 1:
return np.zeros_like(z)
else:
return ((1 - self.power) *
np.power(z, (1 - 2*self.power)/self.power) / self.power**2)
class inverse_power(Power):
def __init__(self):
super(inverse_power, self).__init__(power=-1.)
class sqrt(Power):
def __init__(self):
super(sqrt, self).__init__(power=.5)
class inverse_squared(Power):
def __init__(self):
super(inverse_squared, self).__init__(power=-2.)
class identity(Power):
def __init__(self):
super(identity, self).__init__(power=1.)
class Log(Link):
def _clean(self, x):
return np.clip(x, FLOAT_EPS, np.inf)
def __call__(self, p, **extra):
x = self._clean(p)
return np.log(x)
def inverse(self, z):
return np.exp(z)
def deriv(self, p):
p = self._clean(p)
return 1. / p
def deriv2(self, p):
p = self._clean(p)
return -1. / p**2
def inverse_deriv(self, z):
return np.exp(z)
class log(Log):
pass
class CDFLink(Logit):
def __init__(self, dbn=scipy.stats.norm):
self.dbn = dbn
def __call__(self, p):
p = self._clean(p)
return self.dbn.ppf(p)
def inverse(self, z):
return self.dbn.cdf(z)
def deriv(self, p):
p = self._clean(p)
return 1. / self.dbn.pdf(self.dbn.ppf(p))
def deriv2(self, p):
p = self._clean(p)
linpred = self.dbn.ppf(p)
return - self.inverse_deriv2(linpred) / self.dbn.pdf(linpred)**3
def deriv2_numdiff(self, p):
from statsmodels.tools.numdiff import _approx_fprime_scalar
p = np.atleast_1d(p)
return _approx_fprime_scalar(p, self.deriv, centered=True)
def inverse_deriv(self, z):
return self.dbn.pdf(z)
def inverse_deriv2(self, z):
from statsmodels.tools.numdiff import _approx_fprime_scalar
z = np.atleast_1d(z)
return _approx_fprime_scalar(z, self.inverse_deriv, centered=True)
class probit(CDFLink):
def inverse_deriv2(self, z):
return - z * self.dbn.pdf(z)
def deriv2(self, p):
p = self._clean(p)
linpred = self.dbn.ppf(p)
return linpred / self.dbn.pdf(linpred)**2
class cauchy(CDFLink):
def __init__(self):
super(cauchy, self).__init__(dbn=scipy.stats.cauchy)
def deriv2(self, p):
p = self._clean(p)
a = np.pi * (p - 0.5)
d2 = 2 * np.pi**2 * np.sin(a) / np.cos(a)**3
return d2
def inverse_deriv2(self, z):
return - 2 * z / (np.pi * (z**2 + 1)**2)
class CLogLog(Logit):
def __call__(self, p):
p = self._clean(p)
return np.log(-np.log(1 - p))
def inverse(self, z):
return 1 - np.exp(-np.exp(z))
def deriv(self, p):
p = self._clean(p)
return 1. / ((p - 1) * (np.log(1 - p)))
def deriv2(self, p):
p = self._clean(p)
fl = np.log(1 - p)
d2 = -1 / ((1 - p)**2 * fl)
d2 *= 1 + 1 / fl
return d2
def inverse_deriv(self, z):
return np.exp(z - np.exp(z))
class cloglog(CLogLog):
pass
class LogLog(Logit):
def __call__(self, p):
p = self._clean(p)
return -np.log(-np.log(p))
def inverse(self, z):
return np.exp(-np.exp(-z))
def deriv(self, p):
p = self._clean(p)
return -1. / (p * (np.log(p)))
def deriv2(self, p):
p = self._clean(p)
d2 = (1 + np.log(p)) / (p * (np.log(p)))**2
return d2
def inverse_deriv(self, z):
return np.exp(-np.exp(-z) - z)
def inverse_deriv2(self, z):
return self.inverse_deriv(z) * (np.exp(-z) - 1)
class loglog(LogLog):
pass
class NegativeBinomial(Link):
def __init__(self, alpha=1.):
self.alpha = alpha
def _clean(self, x):
return np.clip(x, FLOAT_EPS, np.inf)
def __call__(self, p):
p = self._clean(p)
return np.log(p/(p + 1/self.alpha))
def inverse(self, z):
return -1/(self.alpha * (1 - np.exp(-z)))
def deriv(self, p):
return 1/(p + self.alpha * p**2)
def deriv2(self, p):
numer = -(1 + 2 * self.alpha * p)
denom = (p + self.alpha * p**2)**2
return numer / denom
def inverse_deriv(self, z):
t = np.exp(z)
return t / (self.alpha * (1-t)**2)
class nbinom(NegativeBinomial):
pass
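A minimal usage sketch, assuming the link classes above are in scope: it checks that inverse undoes the forward map and that deriv matches the analytic Logit derivative. The sample values are illustrative, not from the source.
import numpy as np

# Hypothetical check: round-trip the Logit link and its inverse.
link = Logit()
p = np.array([0.1, 0.5, 0.9])
z = link(p)                                            # log(p / (1 - p))
assert np.allclose(link.inverse(z), p)                 # inverse undoes __call__
assert np.allclose(link.deriv(p), 1. / (p * (1 - p)))  # analytic derivative

# Same round trip for the negative binomial link g(p) = log(p/(p + 1/alpha)).
nb = NegativeBinomial(alpha=1.)
mu = np.array([0.5, 2.0, 10.0])
assert np.allclose(nb.inverse(nb(mu)), mu)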
| true
| true
|
7905bd74cc44f2f5e0994d8ff553011e5f60c970
| 1,145
|
py
|
Python
|
sprog/namecheap_ftp_upload.py
|
PaulKlinger/Sprog-Backend
|
fd238ee092f20caf8a3f0e4529f0d200ef71c6c0
|
[
"MIT",
"BSD-3-Clause"
] | 4
|
2017-08-20T13:23:02.000Z
|
2020-07-30T13:28:38.000Z
|
sprog/namecheap_ftp_upload.py
|
PaulKlinger/Sprog-Backend
|
fd238ee092f20caf8a3f0e4529f0d200ef71c6c0
|
[
"MIT",
"BSD-3-Clause"
] | 3
|
2018-03-09T16:46:33.000Z
|
2019-04-08T16:52:31.000Z
|
sprog/namecheap_ftp_upload.py
|
PaulKlinger/Sprog-Backend
|
fd238ee092f20caf8a3f0e4529f0d200ef71c6c0
|
[
"MIT",
"BSD-3-Clause"
] | 1
|
2020-03-25T22:33:55.000Z
|
2020-03-25T22:33:55.000Z
|
import pysftp
import os.path
def upload_sprog_to_namecheap(tmpdir, passwords):
with pysftp.Connection(passwords["NAMECHEAP_SERVER"], username=passwords["NAMECHEAP_USERNAME"],
password=passwords["NAMECHEAP_PASSWORD"], port=passwords["NAMECHEAP_PORT"]) as sftp:
with sftp.cd('public_html'):
print("uploading sprog.pdf")
sftp.put("sprog.pdf")
print("uploading sprog_small.pdf")
sftp.put("small_sprog.pdf", "sprog_small.pdf")
print("uploading sprog.html")
sftp.put(os.path.join(tmpdir, "sprog.html"), "sprog.html")
print("uploading sprog.json")
sftp.put("poems.json", "poems.json")
print("uploading sprog.json.gz")
sftp.put("poems.json.gz", "poems.json.gz")
print("uploading sprog_60days.json.gz")
sftp.put("poems_60days.json.gz", "poems_60days.json.gz")
print("uploading sprog.rss")
sftp.put("sprog.rss", "sprog.rss")
print("uploading sprog_no_context.rss")
sftp.put("sprog_no_context.rss", "sprog_no_context.rss")
| 45.8
| 111
| 0.611354
|
import pysftp
import os.path
def upload_sprog_to_namecheap(tmpdir, passwords):
with pysftp.Connection(passwords["NAMECHEAP_SERVER"], username=passwords["NAMECHEAP_USERNAME"],
password=passwords["NAMECHEAP_PASSWORD"], port=passwords["NAMECHEAP_PORT"]) as sftp:
with sftp.cd('public_html'):
print("uploading sprog.pdf")
sftp.put("sprog.pdf")
print("uploading sprog_small.pdf")
sftp.put("small_sprog.pdf", "sprog_small.pdf")
print("uploading sprog.html")
sftp.put(os.path.join(tmpdir, "sprog.html"), "sprog.html")
print("uploading sprog.json")
sftp.put("poems.json", "poems.json")
print("uploading sprog.json.gz")
sftp.put("poems.json.gz", "poems.json.gz")
print("uploading sprog_60days.json.gz")
sftp.put("poems_60days.json.gz", "poems_60days.json.gz")
print("uploading sprog.rss")
sftp.put("sprog.rss", "sprog.rss")
print("uploading sprog_no_context.rss")
sftp.put("sprog_no_context.rss", "sprog_no_context.rss")
| true
| true
|
7905bdaaf9f244f30b7bccf94b468705056a3804
| 6,839
|
py
|
Python
|
tests/rbac/common/addresser/user_test.py
|
kthblmfld/sawtooth-next-directory
|
57291f1a7e6ce1dfc11a9c5e2930e8c5ebd31707
|
[
"Apache-2.0"
] | null | null | null |
tests/rbac/common/addresser/user_test.py
|
kthblmfld/sawtooth-next-directory
|
57291f1a7e6ce1dfc11a9c5e2930e8c5ebd31707
|
[
"Apache-2.0"
] | null | null | null |
tests/rbac/common/addresser/user_test.py
|
kthblmfld/sawtooth-next-directory
|
57291f1a7e6ce1dfc11a9c5e2930e8c5ebd31707
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Contributors to Hyperledger Sawtooth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
"""Test User Addresser"""
import logging
import pytest
from rbac.common import addresser
from tests.rbac.common.assertions import TestAssertions
LOGGER = logging.getLogger(__name__)
@pytest.mark.addressing
@pytest.mark.library
class TestUserAddresser(TestAssertions):
"""Test User Addresser"""
def test_address(self):
"""Tests address makes an address that identifies as the correct AddressSpace"""
user_id = addresser.user.unique_id()
user_address = addresser.user.address(user_id)
self.assertIsAddress(user_address)
self.assertEqual(
addresser.get_address_type(user_address), addresser.AddressSpace.USER
)
def test_unique_id(self):
"""Tests that unique_id generates a unique identifier and is unique"""
id1 = addresser.user.unique_id()
id2 = addresser.user.unique_id()
self.assertIsIdentifier(id1)
self.assertIsIdentifier(id2)
self.assertNotEqual(id1, id2)
def test_get_address_type(self):
"""Tests that get_address_type returns AddressSpace.USER if it is a user
address, and None if it is of another address type"""
user_address = addresser.user.address(addresser.user.unique_id())
role_address = addresser.role.address(addresser.role.unique_id())
self.assertEqual(
addresser.get_address_type(user_address), addresser.AddressSpace.USER
)
self.assertEqual(
addresser.user.get_address_type(user_address), addresser.AddressSpace.USER
)
self.assertIsNone(addresser.user.get_address_type(role_address))
self.assertEqual(
addresser.get_address_type(role_address),
addresser.AddressSpace.ROLES_ATTRIBUTES,
)
def test_get_addresser(self):
"""Test that get_addresser returns the addresser class if it is a
user address, and None if it is of another address type"""
user_address = addresser.user.address(addresser.user.unique_id())
other_address = addresser.role.address(addresser.role.unique_id())
self.assertIsInstance(
addresser.get_addresser(user_address), type(addresser.user)
)
self.assertIsInstance(
addresser.user.get_addresser(user_address), type(addresser.user)
)
self.assertIsNone(addresser.user.get_addresser(other_address))
def test_user_parse(self):
"""Test addresser.user.parse returns a parsed address if it is a user address"""
user_id = addresser.user.unique_id()
user_address = addresser.user.address(user_id)
parsed = addresser.user.parse(user_address)
self.assertEqual(parsed.object_type, addresser.ObjectType.USER)
self.assertEqual(parsed.related_type, addresser.ObjectType.NONE)
self.assertEqual(
parsed.relationship_type, addresser.RelationshipType.ATTRIBUTES
)
self.assertEqual(parsed.address_type, addresser.AddressSpace.USER)
self.assertEqual(parsed.object_id, user_id)
self.assertEqual(parsed.related_id, None)
def test_addresser_parse(self):
"""Test addresser.parse returns a parsed address"""
user_id = addresser.user.unique_id()
user_address = addresser.user.address(user_id)
parsed = addresser.parse(user_address)
self.assertEqual(parsed.object_type, addresser.ObjectType.USER)
self.assertEqual(parsed.related_type, addresser.ObjectType.NONE)
self.assertEqual(
parsed.relationship_type, addresser.RelationshipType.ATTRIBUTES
)
self.assertEqual(parsed.address_type, addresser.AddressSpace.USER)
self.assertEqual(parsed.object_id, user_id)
self.assertEqual(parsed.related_id, None)
def test_parse_other(self):
"""Test that parse returns None if it is not a user address"""
other_address = addresser.role.address(addresser.role.unique_id())
self.assertIsNone(addresser.user.parse(other_address))
def test_addresses_are(self):
"""Test that addresses_are returns True if all addresses are a user
addresses, and False if any addresses are if a different address type"""
user_address1 = addresser.user.address(addresser.user.unique_id())
user_address2 = addresser.user.address(addresser.user.unique_id())
other_address = addresser.role.address(addresser.role.unique_id())
self.assertTrue(addresser.user.addresses_are([user_address1]))
self.assertTrue(addresser.user.addresses_are([user_address1, user_address2]))
self.assertFalse(addresser.user.addresses_are([other_address]))
self.assertFalse(addresser.user.addresses_are([user_address1, other_address]))
self.assertFalse(addresser.user.addresses_are([other_address, user_address1]))
self.assertTrue(addresser.user.addresses_are([]))
def test_address_deterministic(self):
"""Tests address makes an address that identifies as the correct AddressSpace"""
user_id1 = addresser.user.unique_id()
user_address1 = addresser.user.address(user_id1)
user_address2 = addresser.user.address(user_id1)
self.assertIsAddress(user_address1)
self.assertIsAddress(user_address2)
self.assertEqual(user_address1, user_address2)
self.assertEqual(
addresser.get_address_type(user_address1), addresser.AddressSpace.USER
)
def test_address_random(self):
"""Tests address makes a unique address given different inputs"""
user_id1 = addresser.user.unique_id()
user_id2 = addresser.user.unique_id()
user_address1 = addresser.user.address(user_id1)
user_address2 = addresser.user.address(user_id2)
self.assertIsAddress(user_address1)
self.assertIsAddress(user_address2)
self.assertNotEqual(user_address1, user_address2)
self.assertEqual(
addresser.get_address_type(user_address1), addresser.AddressSpace.USER
)
self.assertEqual(
addresser.get_address_type(user_address2), addresser.AddressSpace.USER
)
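A brief, hedged note on running this suite: the class carries the custom pytest markers shown above, so a subset can be selected by marker, assuming those markers are registered in the project's pytest configuration.
# Hypothetical: run only the marked addresser tests from Python.
import pytest
pytest.main(["-m", "addressing and library",
             "tests/rbac/common/addresser/user_test.py"])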
| 44.993421
| 88
| 0.702588
|
import logging
import pytest
from rbac.common import addresser
from tests.rbac.common.assertions import TestAssertions
LOGGER = logging.getLogger(__name__)
@pytest.mark.addressing
@pytest.mark.library
class TestUserAddresser(TestAssertions):
def test_address(self):
user_id = addresser.user.unique_id()
user_address = addresser.user.address(user_id)
self.assertIsAddress(user_address)
self.assertEqual(
addresser.get_address_type(user_address), addresser.AddressSpace.USER
)
def test_unique_id(self):
id1 = addresser.user.unique_id()
id2 = addresser.user.unique_id()
self.assertIsIdentifier(id1)
self.assertIsIdentifier(id2)
self.assertNotEqual(id1, id2)
def test_get_address_type(self):
user_address = addresser.user.address(addresser.user.unique_id())
role_address = addresser.role.address(addresser.role.unique_id())
self.assertEqual(
addresser.get_address_type(user_address), addresser.AddressSpace.USER
)
self.assertEqual(
addresser.user.get_address_type(user_address), addresser.AddressSpace.USER
)
self.assertIsNone(addresser.user.get_address_type(role_address))
self.assertEqual(
addresser.get_address_type(role_address),
addresser.AddressSpace.ROLES_ATTRIBUTES,
)
def test_get_addresser(self):
user_address = addresser.user.address(addresser.user.unique_id())
other_address = addresser.role.address(addresser.role.unique_id())
self.assertIsInstance(
addresser.get_addresser(user_address), type(addresser.user)
)
self.assertIsInstance(
addresser.user.get_addresser(user_address), type(addresser.user)
)
self.assertIsNone(addresser.user.get_addresser(other_address))
def test_user_parse(self):
user_id = addresser.user.unique_id()
user_address = addresser.user.address(user_id)
parsed = addresser.user.parse(user_address)
self.assertEqual(parsed.object_type, addresser.ObjectType.USER)
self.assertEqual(parsed.related_type, addresser.ObjectType.NONE)
self.assertEqual(
parsed.relationship_type, addresser.RelationshipType.ATTRIBUTES
)
self.assertEqual(parsed.address_type, addresser.AddressSpace.USER)
self.assertEqual(parsed.object_id, user_id)
self.assertEqual(parsed.related_id, None)
def test_addresser_parse(self):
user_id = addresser.user.unique_id()
user_address = addresser.user.address(user_id)
parsed = addresser.parse(user_address)
self.assertEqual(parsed.object_type, addresser.ObjectType.USER)
self.assertEqual(parsed.related_type, addresser.ObjectType.NONE)
self.assertEqual(
parsed.relationship_type, addresser.RelationshipType.ATTRIBUTES
)
self.assertEqual(parsed.address_type, addresser.AddressSpace.USER)
self.assertEqual(parsed.object_id, user_id)
self.assertEqual(parsed.related_id, None)
def test_parse_other(self):
other_address = addresser.role.address(addresser.role.unique_id())
self.assertIsNone(addresser.user.parse(other_address))
def test_addresses_are(self):
user_address1 = addresser.user.address(addresser.user.unique_id())
user_address2 = addresser.user.address(addresser.user.unique_id())
other_address = addresser.role.address(addresser.role.unique_id())
self.assertTrue(addresser.user.addresses_are([user_address1]))
self.assertTrue(addresser.user.addresses_are([user_address1, user_address2]))
self.assertFalse(addresser.user.addresses_are([other_address]))
self.assertFalse(addresser.user.addresses_are([user_address1, other_address]))
self.assertFalse(addresser.user.addresses_are([other_address, user_address1]))
self.assertTrue(addresser.user.addresses_are([]))
def test_address_deterministic(self):
user_id1 = addresser.user.unique_id()
user_address1 = addresser.user.address(user_id1)
user_address2 = addresser.user.address(user_id1)
self.assertIsAddress(user_address1)
self.assertIsAddress(user_address2)
self.assertEqual(user_address1, user_address2)
self.assertEqual(
addresser.get_address_type(user_address1), addresser.AddressSpace.USER
)
def test_address_random(self):
user_id1 = addresser.user.unique_id()
user_id2 = addresser.user.unique_id()
user_address1 = addresser.user.address(user_id1)
user_address2 = addresser.user.address(user_id2)
self.assertIsAddress(user_address1)
self.assertIsAddress(user_address2)
self.assertNotEqual(user_address1, user_address2)
self.assertEqual(
addresser.get_address_type(user_address1), addresser.AddressSpace.USER
)
self.assertEqual(
addresser.get_address_type(user_address2), addresser.AddressSpace.USER
)
| true
| true
|
7905be5e4117cc2590adadd9807b99f46123e674
| 111
|
py
|
Python
|
elif_bayindir/phase_1/python_basic_1/day_6/q9.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 6
|
2020-05-23T19:53:25.000Z
|
2021-05-08T20:21:30.000Z
|
elif_bayindir/phase_1/python_basic_1/day_6/q9.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 8
|
2020-05-14T18:53:12.000Z
|
2020-07-03T00:06:20.000Z
|
elif_bayindir/phase_1/python_basic_1/day_6/q9.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 39
|
2020-05-10T20:55:02.000Z
|
2020-09-12T17:40:59.000Z
|
# Question 9
# List all files in a directory.
from os import listdir
print(listdir('/home/elif/Desktop'))
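A small hedged extension of the same task: listdir returns bare names, so restricting the listing to regular files requires joining each name back onto the directory. The path is a placeholder.
import os

# Hypothetical variant: list only regular files, skipping subdirectories.
path = '/home/elif/Desktop'  # placeholder directory
files = [name for name in os.listdir(path)
         if os.path.isfile(os.path.join(path, name))]
print(files)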
| 11.1
| 36
| 0.711712
|
from os import listdir
print(listdir('/home/elif/Desktop'))
| true
| true
|
7905be6861631a348a42f4712e9ed70f6221bcd9
| 1,781
|
py
|
Python
|
check/core/migrations/0010_auto_20171126_1351.py
|
gabrielnaoto/man_check_control
|
ab594d2059460820aeacc84062444f1720ed52f9
|
[
"MIT"
] | null | null | null |
check/core/migrations/0010_auto_20171126_1351.py
|
gabrielnaoto/man_check_control
|
ab594d2059460820aeacc84062444f1720ed52f9
|
[
"MIT"
] | 3
|
2020-02-12T00:23:10.000Z
|
2021-06-10T20:03:03.000Z
|
check/core/migrations/0010_auto_20171126_1351.py
|
gabrielnaoto/checkapp
|
ab594d2059460820aeacc84062444f1720ed52f9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-26 15:51
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('core', '0009_auto_20171126_1058'),
]
operations = [
migrations.RemoveField(
model_name='emitido',
name='data_entrada',
),
migrations.RemoveField(
model_name='recebido',
name='data_entrada',
),
migrations.RemoveField(
model_name='recebido',
name='data_lancamento',
),
migrations.AlterField(
model_name='banco',
name='data_cadastro',
field=models.DateField(default=datetime.datetime(2017, 11, 26, 15, 51, 58, 407006, tzinfo=utc)),
),
migrations.AlterField(
model_name='cliente',
name='data_cadastro',
field=models.DateField(default=datetime.datetime(2017, 11, 26, 15, 51, 58, 407006, tzinfo=utc)),
),
migrations.AlterField(
model_name='emitido',
name='data_cadastro',
field=models.DateField(default=datetime.datetime(2017, 11, 26, 15, 51, 58, 424070, tzinfo=utc)),
),
migrations.AlterField(
model_name='fornecedor',
name='data_cadastro',
field=models.DateField(default=datetime.datetime(2017, 11, 26, 15, 51, 58, 407006, tzinfo=utc)),
),
migrations.AlterField(
model_name='recebido',
name='data_cadastro',
field=models.DateField(default=datetime.datetime(2017, 11, 26, 15, 51, 58, 424070, tzinfo=utc)),
),
]
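One pitfall worth a sketch: the datetime.datetime(2017, 11, 26, ...) defaults above are literal timestamps frozen at the moment makemigrations ran, not evaluated per row. A callable default sidesteps that; this is a hedged model-side sketch, not part of the migration itself.
# Hypothetical model field using a callable default; Django invokes
# timezone.now at each insert instead of storing a fixed timestamp.
from django.db import models
from django.utils import timezone

class Banco(models.Model):
    data_cadastro = models.DateField(default=timezone.now)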
| 32.381818
| 108
| 0.585065
|
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('core', '0009_auto_20171126_1058'),
]
operations = [
migrations.RemoveField(
model_name='emitido',
name='data_entrada',
),
migrations.RemoveField(
model_name='recebido',
name='data_entrada',
),
migrations.RemoveField(
model_name='recebido',
name='data_lancamento',
),
migrations.AlterField(
model_name='banco',
name='data_cadastro',
field=models.DateField(default=datetime.datetime(2017, 11, 26, 15, 51, 58, 407006, tzinfo=utc)),
),
migrations.AlterField(
model_name='cliente',
name='data_cadastro',
field=models.DateField(default=datetime.datetime(2017, 11, 26, 15, 51, 58, 407006, tzinfo=utc)),
),
migrations.AlterField(
model_name='emitido',
name='data_cadastro',
field=models.DateField(default=datetime.datetime(2017, 11, 26, 15, 51, 58, 424070, tzinfo=utc)),
),
migrations.AlterField(
model_name='fornecedor',
name='data_cadastro',
field=models.DateField(default=datetime.datetime(2017, 11, 26, 15, 51, 58, 407006, tzinfo=utc)),
),
migrations.AlterField(
model_name='recebido',
name='data_cadastro',
field=models.DateField(default=datetime.datetime(2017, 11, 26, 15, 51, 58, 424070, tzinfo=utc)),
),
]
| true
| true
|
7905bea2d4224c9ea43049d060ab1cb20d4d6352
| 3,032
|
py
|
Python
|
gammagl/utils/coalesce.py
|
BUPT-GAMMA/GammaGL
|
2b9f32e1ac3533cb75a063243e8a2fa654466d18
|
[
"Apache-2.0"
] | null | null | null |
gammagl/utils/coalesce.py
|
BUPT-GAMMA/GammaGL
|
2b9f32e1ac3533cb75a063243e8a2fa654466d18
|
[
"Apache-2.0"
] | null | null | null |
gammagl/utils/coalesce.py
|
BUPT-GAMMA/GammaGL
|
2b9f32e1ac3533cb75a063243e8a2fa654466d18
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import tensorlayerx as tlx
import gammagl.mpops as mpops
from .num_nodes import maybe_num_nodes
from .check import check_is_numpy
def coalesce(edge_index, edge_attr=None, num_nodes=None, reduce="add", is_sorted=False, sort_by_row=True):
"""Row-wise sorts :obj:`edge_index` and removes its duplicated entries.
Duplicate entries in :obj:`edge_attr` are merged by scattering them
together according to the given :obj:`reduce` option.
Args:
edge_index (LongTensor): The edge indices.
edge_attr (Tensor or List[Tensor], optional): Edge weights or multi-
dimensional edge features.
If given as a list, will re-shuffle and remove duplicates for all
its entries. (default: :obj:`None`)
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
        reduce (string, optional): The reduce operation to use for merging edge
            features (:obj:`"add"`, :obj:`"mean"`, :obj:`"min"`, :obj:`"max"`,
            :obj:`"mul"`). Note: this implementation currently merges by
            summation regardless of the value passed. (default: :obj:`"add"`)
is_sorted (bool, optional): If set to :obj:`True`, will expect
:obj:`edge_index` to be already sorted row-wise.
sort_by_row (bool, optional): If set to :obj:`False`, will sort
:obj:`edge_index` column-wise.
:rtype: :class:`LongTensor` if :attr:`edge_attr` is :obj:`None`, else
(:class:`LongTensor`, :obj:`Tensor` or :obj:`List[Tensor]]`)
"""
if tlx.is_tensor(edge_index):
edge_index = tlx.convert_to_numpy(edge_index)
nnz = edge_index.shape[1]
num_nodes = maybe_num_nodes(edge_index, num_nodes)
idx = np.zeros(nnz+1)
idx[0] = -1
idx[1:] = edge_index[1 - int(sort_by_row)]
idx[1:] = (np.add(np.multiply(idx[1:], num_nodes), edge_index[int(sort_by_row)]))
if not is_sorted:
perm = np.argsort(idx[1:])
idx[1:] = np.sort(idx[1:])
edge_index = edge_index[:, perm]
if edge_attr is not None and tlx.ops.is_tensor(edge_attr):
edge_attr = tlx.gather(edge_attr, tlx.convert_to_tensor(perm), axis=0)
elif edge_attr is not None and check_is_numpy(edge_attr):
edge_attr = edge_attr[perm]
elif edge_attr is not None: # edge_attr is List.
edge_attr = [tlx.gather(e, perm, axis=0) for e in edge_attr]
mask = idx[1:] > idx[:-1]
    # Skip the expensive merging and return early if there are no duplicates:
if mask.all():
edge_index = tlx.convert_to_tensor(edge_index, dtype=tlx.int64)
return edge_index if edge_attr is None else (edge_index, edge_attr)
edge_index = edge_index[:, mask]
edge_index = tlx.convert_to_tensor(edge_index, dtype=tlx.int64)
if edge_attr is None:
return edge_index
idx = np.arange(0, nnz)
idx = tlx.convert_to_tensor(idx - (1 - mask).cumsum(axis=0))
if tlx.ops.is_tensor(edge_attr):
edge_attr = mpops.segment_sum(edge_attr, idx)
return edge_index, edge_attr
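A minimal usage sketch, assuming a TensorLayerX backend is installed and the module's imports above are in scope: the duplicated edge (0, 1) collapses, and since edge_attr is a tensor its weights are summed.
# Hypothetical example: edge (0, 1) appears twice and gets merged.
edge_index = np.array([[0, 0, 1],
                       [1, 1, 0]])
edge_attr = tlx.convert_to_tensor([1., 2., 3.])
new_index, new_attr = coalesce(edge_index, edge_attr)
# new_index -> [[0, 1], [1, 0]]; new_attr -> [3., 3.] (duplicates summed)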
| 42.704225
| 106
| 0.649406
|
import numpy as np
import tensorlayerx as tlx
import gammagl.mpops as mpops
from .num_nodes import maybe_num_nodes
from .check import check_is_numpy
def coalesce(edge_index, edge_attr=None, num_nodes=None, reduce="add", is_sorted=False, sort_by_row=True):
if tlx.is_tensor(edge_index):
edge_index = tlx.convert_to_numpy(edge_index)
nnz = edge_index.shape[1]
num_nodes = maybe_num_nodes(edge_index, num_nodes)
idx = np.zeros(nnz+1)
idx[0] = -1
idx[1:] = edge_index[1 - int(sort_by_row)]
idx[1:] = (np.add(np.multiply(idx[1:], num_nodes), edge_index[int(sort_by_row)]))
if not is_sorted:
perm = np.argsort(idx[1:])
idx[1:] = np.sort(idx[1:])
edge_index = edge_index[:, perm]
if edge_attr is not None and tlx.ops.is_tensor(edge_attr):
edge_attr = tlx.gather(edge_attr, tlx.convert_to_tensor(perm), axis=0)
elif edge_attr is not None and check_is_numpy(edge_attr):
edge_attr = edge_attr[perm]
elif edge_attr is not None:
edge_attr = [tlx.gather(e, perm, axis=0) for e in edge_attr]
mask = idx[1:] > idx[:-1]
if mask.all():
edge_index = tlx.convert_to_tensor(edge_index, dtype=tlx.int64)
return edge_index if edge_attr is None else (edge_index, edge_attr)
edge_index = edge_index[:, mask]
edge_index = tlx.convert_to_tensor(edge_index, dtype=tlx.int64)
if edge_attr is None:
return edge_index
idx = np.arange(0, nnz)
idx = tlx.convert_to_tensor(idx - (1 - mask).cumsum(axis=0))
if tlx.ops.is_tensor(edge_attr):
edge_attr = mpops.segment_sum(edge_attr, idx)
return edge_index, edge_attr
| true
| true
|
7905becbdd748bab5a5e7c8f61d79f8832a5890c
| 11,014
|
py
|
Python
|
troposphere/validators.py
|
boostchicken/troposphere
|
5b80ec654f6f2b3bfac10fb70b528741c3d79138
|
[
"BSD-2-Clause"
] | null | null | null |
troposphere/validators.py
|
boostchicken/troposphere
|
5b80ec654f6f2b3bfac10fb70b528741c3d79138
|
[
"BSD-2-Clause"
] | null | null | null |
troposphere/validators.py
|
boostchicken/troposphere
|
5b80ec654f6f2b3bfac10fb70b528741c3d79138
|
[
"BSD-2-Clause"
] | null | null | null |
# Copyright (c) 2012-2013, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
import json
from re import compile
def boolean(x):
if x in [True, 1, '1', 'true', 'True']:
return "true"
if x in [False, 0, '0', 'false', 'False']:
return "false"
raise ValueError
def integer(x):
try:
int(x)
except (ValueError, TypeError):
raise ValueError("%r is not a valid integer" % x)
else:
return x
def positive_integer(x):
p = integer(x)
if int(p) < 0:
raise ValueError("%r is not a positive integer" % x)
return x
def integer_range(minimum_val, maximum_val):
def integer_range_checker(x):
i = int(x)
if i < minimum_val or i > maximum_val:
raise ValueError('Integer must be between %d and %d' % (
minimum_val, maximum_val))
return x
return integer_range_checker
def integer_list_item(allowed_values):
def integer_list_item_checker(x):
i = positive_integer(x)
if i in allowed_values:
return x
raise ValueError('Integer must be one of following: %s' %
', '.join(str(j) for j in allowed_values))
return integer_list_item_checker
def double(x):
try:
float(x)
except (ValueError, TypeError):
raise ValueError("%r is not a valid double" % x)
else:
return x
def ignore(x):
"""Method to indicate bypassing property validation"""
return x
def defer(x):
"""Method to indicate defering property validation"""
return x
def network_port(x):
from . import AWSHelperFn
# Network ports can be Ref items
if isinstance(x, AWSHelperFn):
return x
i = integer(x)
if int(i) < -1 or int(i) > 65535:
raise ValueError("network port %r must been between 0 and 65535" % i)
return x
def tg_healthcheck_port(x):
if isinstance(x, str) and x == "traffic-port":
return x
return network_port(x)
def s3_bucket_name(b):
# consecutive periods not allowed
if '..' in b:
raise ValueError("%s is not a valid s3 bucket name" % b)
# IP addresses not allowed
ip_re = compile(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$')
if ip_re.match(b):
raise ValueError("%s is not a valid s3 bucket name" % b)
s3_bucket_name_re = compile(r'^[a-z\d][a-z\d\.-]{1,61}[a-z\d]$')
if s3_bucket_name_re.match(b):
return b
else:
raise ValueError("%s is not a valid s3 bucket name" % b)
def elb_name(b):
elb_name_re = compile(r'^[a-zA-Z0-9](?:[a-zA-Z0-9\-]{0,30}[a-zA-Z0-9]{1})?$') # noqa
if elb_name_re.match(b):
return b
else:
raise ValueError("%s is not a valid elb name" % b)
def encoding(encoding):
valid_encodings = ['plain', 'base64']
if encoding not in valid_encodings:
raise ValueError('Encoding needs to be one of %r' % valid_encodings)
return encoding
def status(status):
valid_statuses = ['Active', 'Inactive']
if status not in valid_statuses:
raise ValueError('Status needs to be one of %r' % valid_statuses)
return status
def s3_transfer_acceleration_status(value):
valid_status = ['Enabled', 'Suspended']
if value not in valid_status:
raise ValueError(
'AccelerationStatus must be one of: "%s"' % (
', '.join(valid_status)
)
)
return value
def iam_names(b):
iam_name_re = compile(r'^[a-zA-Z0-9_\.\+\=\@\-\,]+$')
if iam_name_re.match(b):
return b
else:
raise ValueError("%s is not a valid iam name" % b)
def iam_user_name(user_name):
if not user_name:
raise ValueError(
"AWS::IAM::User property 'UserName' may not be empty")
if len(user_name) > 64:
raise ValueError(
"AWS::IAM::User property 'UserName' may not exceed 64 characters")
iam_user_name_re = compile(r'^[\w+=,.@-]+$')
if iam_user_name_re.match(user_name):
return user_name
else:
raise ValueError(
"%s is not a valid value for AWS::IAM::User property 'UserName'",
user_name)
def iam_path(path):
if len(path) > 512:
raise ValueError('IAM path %s may not exceed 512 characters', path)
iam_path_re = compile(r'^\/.*\/$|^\/$')
if not iam_path_re.match(path):
raise ValueError("%s is not a valid iam path name" % path)
return path
def iam_role_name(role_name):
if len(role_name) > 64:
raise ValueError('IAM Role Name may not exceed 64 characters')
iam_names(role_name)
return role_name
def iam_group_name(group_name):
if len(group_name) > 128:
        raise ValueError('IAM Group Name may not exceed 128 characters')
iam_names(group_name)
return group_name
def mutually_exclusive(class_name, properties, conditionals):
from . import NoValue
found_list = []
for c in conditionals:
if c in properties and not properties[c] == NoValue:
found_list.append(c)
seen = set(found_list)
specified_count = len(seen)
if specified_count > 1:
raise ValueError(('%s: only one of the following'
' can be specified: %s') % (
class_name, ', '.join(conditionals)))
return specified_count
def exactly_one(class_name, properties, conditionals):
specified_count = mutually_exclusive(class_name, properties, conditionals)
if specified_count != 1:
raise ValueError(('%s: one of the following'
' must be specified: %s') % (
class_name, ', '.join(conditionals)))
return specified_count
def check_required(class_name, properties, conditionals):
for c in conditionals:
if c not in properties:
raise ValueError("Resource %s required in %s" % c, class_name)
def json_checker(name, prop):
from . import AWSHelperFn
    if isinstance(prop, str):
# Verify it is a valid json string
json.loads(prop)
return prop
elif isinstance(prop, dict):
        # Convert the dict to a JSON string
return json.dumps(prop)
elif isinstance(prop, AWSHelperFn):
return prop
else:
raise ValueError("%s must be a str or dict" % name)
def notification_type(notification):
valid_notifications = ['Command', 'Invocation']
if notification not in valid_notifications:
raise ValueError(
'NotificationType must be one of: "%s"' % (
', '.join(valid_notifications)
)
)
return notification
def notification_event(events):
valid_events = ['All', 'InProgress', 'Success', 'TimedOut', 'Cancelled',
'Failed']
for event in events:
if event not in valid_events:
raise ValueError(
'NotificationEvents must be at least one of: "%s"' % (
', '.join(valid_events)
)
)
return events
def task_type(task):
valid_tasks = ['RUN_COMMAND', 'AUTOMATION', 'LAMBDA', 'STEP_FUNCTION']
if task not in valid_tasks:
raise ValueError(
'TaskType must be one of: "%s"' % (
', '.join(valid_tasks)
)
)
return task
def compliance_level(level):
valid_levels = ['CRITICAL', 'HIGH', 'MEDIUM', 'LOW', 'INFORMATIONAL',
'UNSPECIFIED']
if level not in valid_levels:
raise ValueError(
'ApprovedPatchesComplianceLevel must be one of: "%s"' % (
', '.join(valid_levels)
)
)
return level
def operating_system(os):
valid_os = ['WINDOWS', 'AMAZON_LINUX', 'AMAZON_LINUX_2', 'UBUNTU',
'REDHAT_ENTERPRISE_LINUX', 'SUSE', 'CENTOS']
if os not in valid_os:
raise ValueError(
'OperatingSystem must be one of: "%s"' % (
', '.join(valid_os)
)
)
return os
def vpn_pre_shared_key(key):
pre_shared_key_match_re = compile(
r'^(?!0)([A-Za-z0-9]|\_|\.){8,64}$'
)
if not pre_shared_key_match_re.match(key):
raise ValueError(
'%s is not a valid key.'
' Allowed characters are alphanumeric characters and ._. Must'
' be between 8 and 64 characters in length and cannot'
' start with zero (0).' % key
)
return(key)
def vpn_tunnel_inside_cidr(cidr):
reserved_cidrs = [
'169.254.0.0/30',
'169.254.1.0/30',
'169.254.2.0/30',
'169.254.3.0/30',
'169.254.4.0/30',
'169.254.5.0/30',
'169.254.169.252/30'
]
cidr_match_re = compile(
r"^169\.254\.(?:25[0-5]|2[0-4]\d|[01]?\d\d?)"
r"\.(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\/30$"
)
if cidr in reserved_cidrs:
raise ValueError(
'The following CIDR blocks are reserved and cannot be used: "%s"' %
(', '.join(reserved_cidrs))
)
elif not cidr_match_re.match(cidr):
raise ValueError(
'%s is not a valid CIDR.'
' A size /30 CIDR block from the 169.254.0.0/16 must be specified.'
% cidr)
return(cidr)
def vpc_endpoint_type(endpoint_type):
valid_types = ['Interface', 'Gateway']
if endpoint_type not in valid_types:
raise ValueError(
'VpcEndpointType must be one of: "%s"' % (
', '.join(valid_types)
)
)
return(endpoint_type)
def scalable_dimension_type(scalable_dimension):
valid_values = ['autoscaling:autoScalingGroup:DesiredCapacity',
'ecs:service:DesiredCount',
'ec2:spot-fleet-request:TargetCapacity',
'rds:cluster:ReadReplicaCount',
'dynamodb:table:ReadCapacityUnits',
'dynamodb:table:WriteCapacityUnits',
'dynamodb:index:ReadCapacityUnits',
'dynamodb:index:WriteCapacityUnits'
]
if scalable_dimension not in valid_values:
raise ValueError(
'ScalableDimension must be one of: "%s"' % (
', '.join(valid_values)
)
)
return(scalable_dimension)
def service_namespace_type(service_namespace):
valid_values = ['autoscaling', 'ecs', 'ec2', 'rds', 'dynamodb']
if service_namespace not in valid_values:
raise ValueError(
'ServiceNamespace must be one of: "%s"' % (
', '.join(valid_values)
)
)
return(service_namespace)
def statistic_type(statistic):
valid_values = ['Average', 'Minimum', 'Maximum',
'SampleCount', 'Sum'
]
if statistic not in valid_values:
raise ValueError(
'Statistic must be one of: "%s"' % (
', '.join(valid_values)
)
)
return(statistic)
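A hedged demonstration of the shared contract, assuming the validators above are in scope: each function returns its input unchanged when valid and raises ValueError otherwise.
# Hypothetical usage of the validators defined above.
assert boolean('true') == "true"
assert positive_integer(42) == 42
assert s3_bucket_name('my-bucket') == 'my-bucket'
try:
    s3_bucket_name('192.168.0.1')  # IP-shaped names are rejected
except ValueError as err:
    print(err)  # 192.168.0.1 is not a valid s3 bucket name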
| 27.813131
| 89
| 0.580897
|
import json
from re import compile
def boolean(x):
if x in [True, 1, '1', 'true', 'True']:
return "true"
if x in [False, 0, '0', 'false', 'False']:
return "false"
raise ValueError
def integer(x):
try:
int(x)
except (ValueError, TypeError):
raise ValueError("%r is not a valid integer" % x)
else:
return x
def positive_integer(x):
p = integer(x)
if int(p) < 0:
raise ValueError("%r is not a positive integer" % x)
return x
def integer_range(minimum_val, maximum_val):
def integer_range_checker(x):
i = int(x)
if i < minimum_val or i > maximum_val:
raise ValueError('Integer must be between %d and %d' % (
minimum_val, maximum_val))
return x
return integer_range_checker
def integer_list_item(allowed_values):
def integer_list_item_checker(x):
i = positive_integer(x)
if i in allowed_values:
return x
raise ValueError('Integer must be one of following: %s' %
', '.join(str(j) for j in allowed_values))
return integer_list_item_checker
def double(x):
try:
float(x)
except (ValueError, TypeError):
raise ValueError("%r is not a valid double" % x)
else:
return x
def ignore(x):
return x
def defer(x):
return x
def network_port(x):
from . import AWSHelperFn
if isinstance(x, AWSHelperFn):
return x
i = integer(x)
if int(i) < -1 or int(i) > 65535:
raise ValueError("network port %r must been between 0 and 65535" % i)
return x
def tg_healthcheck_port(x):
if isinstance(x, str) and x == "traffic-port":
return x
return network_port(x)
def s3_bucket_name(b):
if '..' in b:
raise ValueError("%s is not a valid s3 bucket name" % b)
ip_re = compile(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$')
if ip_re.match(b):
raise ValueError("%s is not a valid s3 bucket name" % b)
s3_bucket_name_re = compile(r'^[a-z\d][a-z\d\.-]{1,61}[a-z\d]$')
if s3_bucket_name_re.match(b):
return b
else:
raise ValueError("%s is not a valid s3 bucket name" % b)
def elb_name(b):
elb_name_re = compile(r'^[a-zA-Z0-9](?:[a-zA-Z0-9\-]{0,30}[a-zA-Z0-9]{1})?$')
if elb_name_re.match(b):
return b
else:
raise ValueError("%s is not a valid elb name" % b)
def encoding(encoding):
valid_encodings = ['plain', 'base64']
if encoding not in valid_encodings:
raise ValueError('Encoding needs to be one of %r' % valid_encodings)
return encoding
def status(status):
valid_statuses = ['Active', 'Inactive']
if status not in valid_statuses:
raise ValueError('Status needs to be one of %r' % valid_statuses)
return status
def s3_transfer_acceleration_status(value):
valid_status = ['Enabled', 'Suspended']
if value not in valid_status:
raise ValueError(
'AccelerationStatus must be one of: "%s"' % (
', '.join(valid_status)
)
)
return value
def iam_names(b):
iam_name_re = compile(r'^[a-zA-Z0-9_\.\+\=\@\-\,]+$')
if iam_name_re.match(b):
return b
else:
raise ValueError("%s is not a valid iam name" % b)
def iam_user_name(user_name):
if not user_name:
raise ValueError(
"AWS::IAM::User property 'UserName' may not be empty")
if len(user_name) > 64:
raise ValueError(
"AWS::IAM::User property 'UserName' may not exceed 64 characters")
iam_user_name_re = compile(r'^[\w+=,.@-]+$')
if iam_user_name_re.match(user_name):
return user_name
else:
raise ValueError(
"%s is not a valid value for AWS::IAM::User property 'UserName'",
user_name)
def iam_path(path):
if len(path) > 512:
raise ValueError('IAM path %s may not exceed 512 characters', path)
iam_path_re = compile(r'^\/.*\/$|^\/$')
if not iam_path_re.match(path):
raise ValueError("%s is not a valid iam path name" % path)
return path
def iam_role_name(role_name):
if len(role_name) > 64:
raise ValueError('IAM Role Name may not exceed 64 characters')
iam_names(role_name)
return role_name
def iam_group_name(group_name):
if len(group_name) > 128:
        raise ValueError('IAM Group Name may not exceed 128 characters')
iam_names(group_name)
return group_name
def mutually_exclusive(class_name, properties, conditionals):
from . import NoValue
found_list = []
for c in conditionals:
if c in properties and not properties[c] == NoValue:
found_list.append(c)
seen = set(found_list)
specified_count = len(seen)
if specified_count > 1:
raise ValueError(('%s: only one of the following'
' can be specified: %s') % (
class_name, ', '.join(conditionals)))
return specified_count
def exactly_one(class_name, properties, conditionals):
specified_count = mutually_exclusive(class_name, properties, conditionals)
if specified_count != 1:
raise ValueError(('%s: one of the following'
' must be specified: %s') % (
class_name, ', '.join(conditionals)))
return specified_count
def check_required(class_name, properties, conditionals):
for c in conditionals:
if c not in properties:
raise ValueError("Resource %s required in %s" % c, class_name)
def json_checker(name, prop):
from . import AWSHelperFn
    if isinstance(prop, str):
json.loads(prop)
return prop
elif isinstance(prop, dict):
return json.dumps(prop)
elif isinstance(prop, AWSHelperFn):
return prop
else:
raise ValueError("%s must be a str or dict" % name)
def notification_type(notification):
valid_notifications = ['Command', 'Invocation']
if notification not in valid_notifications:
raise ValueError(
'NotificationType must be one of: "%s"' % (
', '.join(valid_notifications)
)
)
return notification
def notification_event(events):
valid_events = ['All', 'InProgress', 'Success', 'TimedOut', 'Cancelled',
'Failed']
for event in events:
if event not in valid_events:
raise ValueError(
'NotificationEvents must be at least one of: "%s"' % (
', '.join(valid_events)
)
)
return events
def task_type(task):
valid_tasks = ['RUN_COMMAND', 'AUTOMATION', 'LAMBDA', 'STEP_FUNCTION']
if task not in valid_tasks:
raise ValueError(
'TaskType must be one of: "%s"' % (
', '.join(valid_tasks)
)
)
return task
def compliance_level(level):
valid_levels = ['CRITICAL', 'HIGH', 'MEDIUM', 'LOW', 'INFORMATIONAL',
'UNSPECIFIED']
if level not in valid_levels:
raise ValueError(
'ApprovedPatchesComplianceLevel must be one of: "%s"' % (
', '.join(valid_levels)
)
)
return level
def operating_system(os):
valid_os = ['WINDOWS', 'AMAZON_LINUX', 'AMAZON_LINUX_2', 'UBUNTU',
'REDHAT_ENTERPRISE_LINUX', 'SUSE', 'CENTOS']
if os not in valid_os:
raise ValueError(
'OperatingSystem must be one of: "%s"' % (
', '.join(valid_os)
)
)
return os
def vpn_pre_shared_key(key):
pre_shared_key_match_re = compile(
r'^(?!0)([A-Za-z0-9]|\_|\.){8,64}$'
)
if not pre_shared_key_match_re.match(key):
raise ValueError(
'%s is not a valid key.'
' Allowed characters are alphanumeric characters and ._. Must'
' be between 8 and 64 characters in length and cannot'
' start with zero (0).' % key
)
return(key)
def vpn_tunnel_inside_cidr(cidr):
reserved_cidrs = [
'169.254.0.0/30',
'169.254.1.0/30',
'169.254.2.0/30',
'169.254.3.0/30',
'169.254.4.0/30',
'169.254.5.0/30',
'169.254.169.252/30'
]
cidr_match_re = compile(
r"^169\.254\.(?:25[0-5]|2[0-4]\d|[01]?\d\d?)"
r"\.(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\/30$"
)
if cidr in reserved_cidrs:
raise ValueError(
'The following CIDR blocks are reserved and cannot be used: "%s"' %
(', '.join(reserved_cidrs))
)
elif not cidr_match_re.match(cidr):
raise ValueError(
'%s is not a valid CIDR.'
' A size /30 CIDR block from the 169.254.0.0/16 must be specified.'
% cidr)
return(cidr)
def vpc_endpoint_type(endpoint_type):
valid_types = ['Interface', 'Gateway']
if endpoint_type not in valid_types:
raise ValueError(
'VpcEndpointType must be one of: "%s"' % (
', '.join(valid_types)
)
)
return(endpoint_type)
def scalable_dimension_type(scalable_dimension):
valid_values = ['autoscaling:autoScalingGroup:DesiredCapacity',
'ecs:service:DesiredCount',
'ec2:spot-fleet-request:TargetCapacity',
'rds:cluster:ReadReplicaCount',
'dynamodb:table:ReadCapacityUnits',
'dynamodb:table:WriteCapacityUnits',
'dynamodb:index:ReadCapacityUnits',
'dynamodb:index:WriteCapacityUnits'
]
if scalable_dimension not in valid_values:
raise ValueError(
'ScalableDimension must be one of: "%s"' % (
', '.join(valid_values)
)
)
return(scalable_dimension)
def service_namespace_type(service_namespace):
valid_values = ['autoscaling', 'ecs', 'ec2', 'rds', 'dynamodb']
if service_namespace not in valid_values:
raise ValueError(
'ServiceNamespace must be one of: "%s"' % (
', '.join(valid_values)
)
)
return(service_namespace)
def statistic_type(statistic):
valid_values = ['Average', 'Minimum', 'Maximum',
'SampleCount', 'Sum'
]
if statistic not in valid_values:
raise ValueError(
'Statistic must be one of: "%s"' % (
', '.join(valid_values)
)
)
return(statistic)
| true
| true
|
7905bfc14ae33a328707eb1744213addf99c65ca
| 405
|
py
|
Python
|
timecat/apps/course/migrations/0005_video_url.py
|
LinXueyuanStdio/memp
|
c6f6609cec7c54ec23881838dacb5f4ffba2e68c
|
[
"Apache-2.0"
] | null | null | null |
timecat/apps/course/migrations/0005_video_url.py
|
LinXueyuanStdio/memp
|
c6f6609cec7c54ec23881838dacb5f4ffba2e68c
|
[
"Apache-2.0"
] | null | null | null |
timecat/apps/course/migrations/0005_video_url.py
|
LinXueyuanStdio/memp
|
c6f6609cec7c54ec23881838dacb5f4ffba2e68c
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.0.2 on 2018-03-25 23:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course', '0004_course_tag'),
]
operations = [
migrations.AddField(
model_name='video',
name='url',
field=models.CharField(default='', max_length=200, verbose_name='访问地址'),
),
]
| 21.315789
| 84
| 0.592593
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course', '0004_course_tag'),
]
operations = [
migrations.AddField(
model_name='video',
name='url',
field=models.CharField(default='', max_length=200, verbose_name='访问地址'),
),
]
| true
| true
|
7905c2a58d5806248e42f39d1e66c108cb0f711c
| 360
|
py
|
Python
|
espionage/constants.py
|
iAbdullahMughal/espionage
|
b223d82700fcdb27f4e4eae632466c53de061717
|
[
"MIT"
] | 7
|
2021-12-20T12:49:30.000Z
|
2022-02-24T13:37:28.000Z
|
espionage/constants.py
|
iAbdullahMughal/espionage
|
b223d82700fcdb27f4e4eae632466c53de061717
|
[
"MIT"
] | 1
|
2021-12-27T15:02:53.000Z
|
2021-12-27T15:02:53.000Z
|
espionage/constants.py
|
iAbdullahMughal/espionage
|
b223d82700fcdb27f4e4eae632466c53de061717
|
[
"MIT"
] | 3
|
2021-12-27T14:58:46.000Z
|
2022-01-03T08:19:41.000Z
|
class Constants:
"""Storing all constants of the project."""
_author = "muhammad abdullah"
_email = "iamabdullahmughal@gmail.com"
_version = "0.0.4"
@property
def author(self):
return self._author
@property
def email(self):
return self._email
@property
def version(self):
return self._version
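A trivial hedged usage note: the properties expose the metadata read-only, so assigning to them raises AttributeError.
# Hypothetical usage of the Constants holder above.
c = Constants()
print(c.author, c.email, c.version)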
| 20
| 47
| 0.622222
|
class Constants:
_author = "muhammad abdullah"
_email = "iamabdullahmughal@gmail.com"
_version = "0.0.4"
@property
def author(self):
return self._author
@property
def email(self):
return self._email
@property
def version(self):
return self._version
| true
| true
|
7905c338b5080fd58898d58f8668c2eb8d8fb653
| 2,812
|
py
|
Python
|
sdk/python/pulumi_aws/ec2/proxy_protocol_policy.py
|
lemonade-hq/pulumi-aws
|
9ee22c65c7bad42d38b16879ccd56526d856a01a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/ec2/proxy_protocol_policy.py
|
lemonade-hq/pulumi-aws
|
9ee22c65c7bad42d38b16879ccd56526d856a01a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/ec2/proxy_protocol_policy.py
|
lemonade-hq/pulumi-aws
|
9ee22c65c7bad42d38b16879ccd56526d856a01a
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-03-08T15:05:29.000Z
|
2021-03-08T15:05:29.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from .. import utilities, tables
class ProxyProtocolPolicy(pulumi.CustomResource):
instance_ports: pulumi.Output[list]
"""
List of instance ports to which the policy
should be applied. This can be specified if the protocol is SSL or TCP.
"""
load_balancer: pulumi.Output[str]
"""
The load balancer to which the policy
should be attached.
"""
def __init__(__self__, resource_name, opts=None, instance_ports=None, load_balancer=None, __name__=None, __opts__=None):
"""
        Provides a proxy protocol policy, which allows an ELB to carry client connection information to a backend.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[list] instance_ports: List of instance ports to which the policy
should be applied. This can be specified if the protocol is SSL or TCP.
:param pulumi.Input[str] load_balancer: The load balancer to which the policy
should be attached.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if not resource_name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(resource_name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
if instance_ports is None:
raise TypeError('Missing required property instance_ports')
__props__['instance_ports'] = instance_ports
if load_balancer is None:
raise TypeError('Missing required property load_balancer')
__props__['load_balancer'] = load_balancer
super(ProxyProtocolPolicy, __self__).__init__(
'aws:ec2/proxyProtocolPolicy:ProxyProtocolPolicy',
resource_name,
__props__,
opts)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
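A hedged usage sketch of the resource defined above; the resource name, ELB name, and ports are placeholders, and a real program would run inside a Pulumi stack and reference an existing load balancer.
# Hypothetical Pulumi program snippet using the resource defined above.
import pulumi_aws as aws

policy = aws.ec2.ProxyProtocolPolicy(
    "smtp-proxy-policy",
    load_balancer="my-load-balancer",  # placeholder ELB name
    instance_ports=["25", "587"],      # back-end ports the policy covers
)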
| 40.753623
| 124
| 0.68101
|
import json
import warnings
import pulumi
import pulumi.runtime
from .. import utilities, tables
class ProxyProtocolPolicy(pulumi.CustomResource):
instance_ports: pulumi.Output[list]
load_balancer: pulumi.Output[str]
def __init__(__self__, resource_name, opts=None, instance_ports=None, load_balancer=None, __name__=None, __opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if not resource_name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(resource_name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
if instance_ports is None:
raise TypeError('Missing required property instance_ports')
__props__['instance_ports'] = instance_ports
if load_balancer is None:
raise TypeError('Missing required property load_balancer')
__props__['load_balancer'] = load_balancer
super(ProxyProtocolPolicy, __self__).__init__(
'aws:ec2/proxyProtocolPolicy:ProxyProtocolPolicy',
resource_name,
__props__,
opts)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true
| true
|
7905c367ba9dd24464c18809c87a146873ee5ba7
| 26,545
|
py
|
Python
|
mac/v3.6.4/lib/python3.6/test/test_fractions.py
|
evanmoran/python3-bazel
|
e6c5238e7c2bf290ae469ee1287de7af1dc4509a
|
[
"MIT"
] | 120
|
2019-11-12T19:22:44.000Z
|
2020-05-17T12:17:25.000Z
|
mac/v3.6.4/lib/python3.6/test/test_fractions.py
|
evanmoran/python3-bazel
|
e6c5238e7c2bf290ae469ee1287de7af1dc4509a
|
[
"MIT"
] | 49
|
2016-02-29T17:59:52.000Z
|
2019-05-05T04:59:26.000Z
|
mac/v3.6.4/lib/python3.6/test/test_fractions.py
|
evanmoran/python3-bazel
|
e6c5238e7c2bf290ae469ee1287de7af1dc4509a
|
[
"MIT"
] | 28
|
2019-06-27T04:11:27.000Z
|
2022-03-11T06:27:44.000Z
|
"""Tests for Lib/fractions.py."""
from decimal import Decimal
from test.support import requires_IEEE_754
import math
import numbers
import operator
import fractions
import sys
import unittest
import warnings
from copy import copy, deepcopy
from pickle import dumps, loads
F = fractions.Fraction
gcd = fractions.gcd
class DummyFloat(object):
"""Dummy float class for testing comparisons with Fractions"""
def __init__(self, value):
if not isinstance(value, float):
raise TypeError("DummyFloat can only be initialized from float")
self.value = value
def _richcmp(self, other, op):
if isinstance(other, numbers.Rational):
return op(F.from_float(self.value), other)
elif isinstance(other, DummyFloat):
return op(self.value, other.value)
else:
return NotImplemented
def __eq__(self, other): return self._richcmp(other, operator.eq)
def __le__(self, other): return self._richcmp(other, operator.le)
def __lt__(self, other): return self._richcmp(other, operator.lt)
def __ge__(self, other): return self._richcmp(other, operator.ge)
def __gt__(self, other): return self._richcmp(other, operator.gt)
# shouldn't be calling __float__ at all when doing comparisons
def __float__(self):
assert False, "__float__ should not be invoked for comparisons"
# same goes for subtraction
def __sub__(self, other):
assert False, "__sub__ should not be invoked for comparisons"
__rsub__ = __sub__
class DummyRational(object):
"""Test comparison of Fraction with a naive rational implementation."""
def __init__(self, num, den):
g = math.gcd(num, den)
self.num = num // g
self.den = den // g
def __eq__(self, other):
if isinstance(other, fractions.Fraction):
return (self.num == other._numerator and
self.den == other._denominator)
else:
return NotImplemented
def __lt__(self, other):
return(self.num * other._denominator < self.den * other._numerator)
def __gt__(self, other):
return(self.num * other._denominator > self.den * other._numerator)
def __le__(self, other):
return(self.num * other._denominator <= self.den * other._numerator)
def __ge__(self, other):
return(self.num * other._denominator >= self.den * other._numerator)
# this class is for testing comparisons; conversion to float
# should never be used for a comparison, since it loses accuracy
def __float__(self):
assert False, "__float__ should not be invoked"
class DummyFraction(fractions.Fraction):
"""Dummy Fraction subclass for copy and deepcopy testing."""
class GcdTest(unittest.TestCase):
def testMisc(self):
# fractions.gcd() is deprecated
with self.assertWarnsRegex(DeprecationWarning, r'fractions\.gcd'):
gcd(1, 1)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'fractions\.gcd',
DeprecationWarning)
self.assertEqual(0, gcd(0, 0))
self.assertEqual(1, gcd(1, 0))
self.assertEqual(-1, gcd(-1, 0))
self.assertEqual(1, gcd(0, 1))
self.assertEqual(-1, gcd(0, -1))
self.assertEqual(1, gcd(7, 1))
self.assertEqual(-1, gcd(7, -1))
self.assertEqual(1, gcd(-23, 15))
self.assertEqual(12, gcd(120, 84))
self.assertEqual(-12, gcd(84, -120))
self.assertEqual(gcd(120.0, 84), 12.0)
self.assertEqual(gcd(120, 84.0), 12.0)
self.assertEqual(gcd(F(120), F(84)), F(12))
self.assertEqual(gcd(F(120, 77), F(84, 55)), F(12, 385))
def _components(r):
return (r.numerator, r.denominator)
class FractionTest(unittest.TestCase):
def assertTypedEquals(self, expected, actual):
"""Asserts that both the types and values are the same."""
self.assertEqual(type(expected), type(actual))
self.assertEqual(expected, actual)
def assertRaisesMessage(self, exc_type, message,
callable, *args, **kwargs):
"""Asserts that callable(*args, **kwargs) raises exc_type(message)."""
try:
callable(*args, **kwargs)
except exc_type as e:
self.assertEqual(message, str(e))
else:
self.fail("%s not raised" % exc_type.__name__)
def testInit(self):
self.assertEqual((0, 1), _components(F()))
self.assertEqual((7, 1), _components(F(7)))
self.assertEqual((7, 3), _components(F(F(7, 3))))
self.assertEqual((-1, 1), _components(F(-1, 1)))
self.assertEqual((-1, 1), _components(F(1, -1)))
self.assertEqual((1, 1), _components(F(-2, -2)))
self.assertEqual((1, 2), _components(F(5, 10)))
self.assertEqual((7, 15), _components(F(7, 15)))
self.assertEqual((10**23, 1), _components(F(10**23)))
self.assertEqual((3, 77), _components(F(F(3, 7), 11)))
self.assertEqual((-9, 5), _components(F(2, F(-10, 9))))
self.assertEqual((2486, 2485), _components(F(F(22, 7), F(355, 113))))
self.assertRaisesMessage(ZeroDivisionError, "Fraction(12, 0)",
F, 12, 0)
self.assertRaises(TypeError, F, 1.5 + 3j)
self.assertRaises(TypeError, F, "3/2", 3)
self.assertRaises(TypeError, F, 3, 0j)
self.assertRaises(TypeError, F, 3, 1j)
self.assertRaises(TypeError, F, 1, 2, 3)
@requires_IEEE_754
def testInitFromFloat(self):
self.assertEqual((5, 2), _components(F(2.5)))
self.assertEqual((0, 1), _components(F(-0.0)))
self.assertEqual((3602879701896397, 36028797018963968),
_components(F(0.1)))
# bug 16469: error types should be consistent with float -> int
self.assertRaises(ValueError, F, float('nan'))
self.assertRaises(OverflowError, F, float('inf'))
self.assertRaises(OverflowError, F, float('-inf'))
def testInitFromDecimal(self):
self.assertEqual((11, 10),
_components(F(Decimal('1.1'))))
self.assertEqual((7, 200),
_components(F(Decimal('3.5e-2'))))
self.assertEqual((0, 1),
_components(F(Decimal('.000e20'))))
# bug 16469: error types should be consistent with decimal -> int
self.assertRaises(ValueError, F, Decimal('nan'))
self.assertRaises(ValueError, F, Decimal('snan'))
self.assertRaises(OverflowError, F, Decimal('inf'))
self.assertRaises(OverflowError, F, Decimal('-inf'))
def testFromString(self):
self.assertEqual((5, 1), _components(F("5")))
self.assertEqual((3, 2), _components(F("3/2")))
self.assertEqual((3, 2), _components(F(" \n +3/2")))
self.assertEqual((-3, 2), _components(F("-3/2 ")))
self.assertEqual((13, 2), _components(F(" 013/02 \n ")))
self.assertEqual((16, 5), _components(F(" 3.2 ")))
self.assertEqual((-16, 5), _components(F(" -3.2 ")))
self.assertEqual((-3, 1), _components(F(" -3. ")))
self.assertEqual((3, 5), _components(F(" .6 ")))
self.assertEqual((1, 3125), _components(F("32.e-5")))
self.assertEqual((1000000, 1), _components(F("1E+06")))
self.assertEqual((-12300, 1), _components(F("-1.23e4")))
self.assertEqual((0, 1), _components(F(" .0e+0\t")))
self.assertEqual((0, 1), _components(F("-0.000e0")))
self.assertRaisesMessage(
ZeroDivisionError, "Fraction(3, 0)",
F, "3/0")
self.assertRaisesMessage(
ValueError, "Invalid literal for Fraction: '3/'",
F, "3/")
self.assertRaisesMessage(
ValueError, "Invalid literal for Fraction: '/2'",
F, "/2")
self.assertRaisesMessage(
ValueError, "Invalid literal for Fraction: '3 /2'",
F, "3 /2")
self.assertRaisesMessage(
# Denominators don't need a sign.
ValueError, "Invalid literal for Fraction: '3/+2'",
F, "3/+2")
self.assertRaisesMessage(
# Imitate float's parsing.
ValueError, "Invalid literal for Fraction: '+ 3/2'",
F, "+ 3/2")
self.assertRaisesMessage(
# Avoid treating '.' as a regex special character.
ValueError, "Invalid literal for Fraction: '3a2'",
F, "3a2")
self.assertRaisesMessage(
# Don't accept combinations of decimals and rationals.
ValueError, "Invalid literal for Fraction: '3/7.2'",
F, "3/7.2")
self.assertRaisesMessage(
# Don't accept combinations of decimals and rationals.
ValueError, "Invalid literal for Fraction: '3.2/7'",
F, "3.2/7")
self.assertRaisesMessage(
# Allow 3. and .3, but not .
ValueError, "Invalid literal for Fraction: '.'",
F, ".")
def testImmutable(self):
r = F(7, 3)
r.__init__(2, 15)
self.assertEqual((7, 3), _components(r))
self.assertRaises(AttributeError, setattr, r, 'numerator', 12)
self.assertRaises(AttributeError, setattr, r, 'denominator', 6)
self.assertEqual((7, 3), _components(r))
# But if you _really_ need to:
r._numerator = 4
r._denominator = 2
self.assertEqual((4, 2), _components(r))
# Which breaks some important operations:
self.assertNotEqual(F(4, 2), r)
def testFromFloat(self):
self.assertRaises(TypeError, F.from_float, 3+4j)
self.assertEqual((10, 1), _components(F.from_float(10)))
bigint = 1234567890123456789
self.assertEqual((bigint, 1), _components(F.from_float(bigint)))
self.assertEqual((0, 1), _components(F.from_float(-0.0)))
self.assertEqual((10, 1), _components(F.from_float(10.0)))
self.assertEqual((-5, 2), _components(F.from_float(-2.5)))
self.assertEqual((99999999999999991611392, 1),
_components(F.from_float(1e23)))
self.assertEqual(float(10**23), float(F.from_float(1e23)))
self.assertEqual((3602879701896397, 1125899906842624),
_components(F.from_float(3.2)))
self.assertEqual(3.2, float(F.from_float(3.2)))
inf = 1e1000
nan = inf - inf
# bug 16469: error types should be consistent with float -> int
self.assertRaisesMessage(
OverflowError, "cannot convert Infinity to integer ratio",
F.from_float, inf)
self.assertRaisesMessage(
OverflowError, "cannot convert Infinity to integer ratio",
F.from_float, -inf)
self.assertRaisesMessage(
ValueError, "cannot convert NaN to integer ratio",
F.from_float, nan)
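    # Editor's sketch (added for illustration): from_float is exact, so it
    # must agree with the ratio the float reports about itself; this is
    # where 3602879701896397/1125899906842624 above comes from.
    def testFromFloatRatioSketch(self):
        for x in (0.1, 3.2, -2.5, 1e23):
            self.assertEqual(F.from_float(x), F(*x.as_integer_ratio()))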
def testFromDecimal(self):
self.assertRaises(TypeError, F.from_decimal, 3+4j)
self.assertEqual(F(10, 1), F.from_decimal(10))
self.assertEqual(F(0), F.from_decimal(Decimal("-0")))
self.assertEqual(F(5, 10), F.from_decimal(Decimal("0.5")))
self.assertEqual(F(5, 1000), F.from_decimal(Decimal("5e-3")))
self.assertEqual(F(5000), F.from_decimal(Decimal("5e3")))
self.assertEqual(1 - F(1, 10**30),
F.from_decimal(Decimal("0." + "9" * 30)))
# bug 16469: error types should be consistent with decimal -> int
self.assertRaisesMessage(
OverflowError, "cannot convert Infinity to integer ratio",
F.from_decimal, Decimal("inf"))
self.assertRaisesMessage(
OverflowError, "cannot convert Infinity to integer ratio",
F.from_decimal, Decimal("-inf"))
self.assertRaisesMessage(
ValueError, "cannot convert NaN to integer ratio",
F.from_decimal, Decimal("nan"))
self.assertRaisesMessage(
ValueError, "cannot convert NaN to integer ratio",
F.from_decimal, Decimal("snan"))
def testLimitDenominator(self):
rpi = F('3.1415926535897932')
self.assertEqual(rpi.limit_denominator(10000), F(355, 113))
self.assertEqual(-rpi.limit_denominator(10000), F(-355, 113))
self.assertEqual(rpi.limit_denominator(113), F(355, 113))
self.assertEqual(rpi.limit_denominator(112), F(333, 106))
self.assertEqual(F(201, 200).limit_denominator(100), F(1))
self.assertEqual(F(201, 200).limit_denominator(101), F(102, 101))
self.assertEqual(F(0).limit_denominator(10000), F(0))
for i in (0, -1):
self.assertRaisesMessage(
ValueError, "max_denominator should be at least 1",
F(1).limit_denominator, i)
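    # Editor's sketch (not from the original suite): limit_denominator is in
    # effect a walk down the continued-fraction convergents of self, which is
    # why 355/113, a convergent of pi, appears above. Uses only the public
    # Fraction API plus the standard convergent recurrence.
    def testLimitDenominatorConvergentSketch(self):
        def convergents(frac):
            n, d = frac.numerator, frac.denominator
            p0, p1, q0, q1 = 0, 1, 1, 0   # seeds p_-2, p_-1, q_-2, q_-1
            while d:
                a, r = divmod(n, d)
                p0, p1 = p1, a * p1 + p0
                q0, q1 = q1, a * q1 + q0
                n, d = d, r
                yield F(p1, q1)
        rpi = F('3.1415926535897932')
        convs = list(convergents(rpi))
        self.assertIn(F(22, 7), convs)
        self.assertIn(F(355, 113), convs)
        # each convergent after the zeroth is the unique closest fraction
        # among all fractions with a denominator no larger than its own
        for c in convs[1:]:
            self.assertEqual(c, rpi.limit_denominator(c.denominator))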
def testConversions(self):
self.assertTypedEquals(-1, math.trunc(F(-11, 10)))
self.assertTypedEquals(1, math.trunc(F(11, 10)))
self.assertTypedEquals(-2, math.floor(F(-11, 10)))
self.assertTypedEquals(-1, math.ceil(F(-11, 10)))
self.assertTypedEquals(-1, math.ceil(F(-10, 10)))
self.assertTypedEquals(-1, int(F(-11, 10)))
self.assertTypedEquals(0, round(F(-1, 10)))
self.assertTypedEquals(0, round(F(-5, 10)))
self.assertTypedEquals(-2, round(F(-15, 10)))
self.assertTypedEquals(-1, round(F(-7, 10)))
self.assertEqual(False, bool(F(0, 1)))
self.assertEqual(True, bool(F(3, 2)))
self.assertTypedEquals(0.1, float(F(1, 10)))
# Check that __float__ isn't implemented by converting the
# numerator and denominator to float before dividing.
self.assertRaises(OverflowError, float, int('2'*400+'7'))
self.assertAlmostEqual(2.0/3,
float(F(int('2'*400+'7'), int('3'*400+'1'))))
self.assertTypedEquals(0.1+0j, complex(F(1,10)))
def testRound(self):
self.assertTypedEquals(F(-200), round(F(-150), -2))
self.assertTypedEquals(F(-200), round(F(-250), -2))
self.assertTypedEquals(F(30), round(F(26), -1))
self.assertTypedEquals(F(-2, 10), round(F(-15, 100), 1))
self.assertTypedEquals(F(-2, 10), round(F(-25, 100), 1))
def testArithmetic(self):
self.assertEqual(F(1, 2), F(1, 10) + F(2, 5))
self.assertEqual(F(-3, 10), F(1, 10) - F(2, 5))
self.assertEqual(F(1, 25), F(1, 10) * F(2, 5))
self.assertEqual(F(1, 4), F(1, 10) / F(2, 5))
self.assertTypedEquals(2, F(9, 10) // F(2, 5))
self.assertTypedEquals(10**23, F(10**23, 1) // F(1))
self.assertEqual(F(2, 3), F(-7, 3) % F(3, 2))
self.assertEqual(F(8, 27), F(2, 3) ** F(3))
self.assertEqual(F(27, 8), F(2, 3) ** F(-3))
self.assertTypedEquals(2.0, F(4) ** F(1, 2))
self.assertEqual(F(1, 1), +F(1, 1))
z = pow(F(-1), F(1, 2))
self.assertAlmostEqual(z.real, 0)
self.assertEqual(z.imag, 1)
# Regression test for #27539.
p = F(-1, 2) ** 0
self.assertEqual(p, F(1, 1))
self.assertEqual(p.numerator, 1)
self.assertEqual(p.denominator, 1)
p = F(-1, 2) ** -1
self.assertEqual(p, F(-2, 1))
self.assertEqual(p.numerator, -2)
self.assertEqual(p.denominator, 1)
p = F(-1, 2) ** -2
self.assertEqual(p, F(4, 1))
self.assertEqual(p.numerator, 4)
self.assertEqual(p.denominator, 1)
def testMixedArithmetic(self):
self.assertTypedEquals(F(11, 10), F(1, 10) + 1)
self.assertTypedEquals(1.1, F(1, 10) + 1.0)
self.assertTypedEquals(1.1 + 0j, F(1, 10) + (1.0 + 0j))
self.assertTypedEquals(F(11, 10), 1 + F(1, 10))
self.assertTypedEquals(1.1, 1.0 + F(1, 10))
self.assertTypedEquals(1.1 + 0j, (1.0 + 0j) + F(1, 10))
self.assertTypedEquals(F(-9, 10), F(1, 10) - 1)
self.assertTypedEquals(-0.9, F(1, 10) - 1.0)
self.assertTypedEquals(-0.9 + 0j, F(1, 10) - (1.0 + 0j))
self.assertTypedEquals(F(9, 10), 1 - F(1, 10))
self.assertTypedEquals(0.9, 1.0 - F(1, 10))
self.assertTypedEquals(0.9 + 0j, (1.0 + 0j) - F(1, 10))
self.assertTypedEquals(F(1, 10), F(1, 10) * 1)
self.assertTypedEquals(0.1, F(1, 10) * 1.0)
self.assertTypedEquals(0.1 + 0j, F(1, 10) * (1.0 + 0j))
self.assertTypedEquals(F(1, 10), 1 * F(1, 10))
self.assertTypedEquals(0.1, 1.0 * F(1, 10))
self.assertTypedEquals(0.1 + 0j, (1.0 + 0j) * F(1, 10))
self.assertTypedEquals(F(1, 10), F(1, 10) / 1)
self.assertTypedEquals(0.1, F(1, 10) / 1.0)
self.assertTypedEquals(0.1 + 0j, F(1, 10) / (1.0 + 0j))
self.assertTypedEquals(F(10, 1), 1 / F(1, 10))
self.assertTypedEquals(10.0, 1.0 / F(1, 10))
self.assertTypedEquals(10.0 + 0j, (1.0 + 0j) / F(1, 10))
self.assertTypedEquals(0, F(1, 10) // 1)
self.assertTypedEquals(0, F(1, 10) // 1.0)
self.assertTypedEquals(10, 1 // F(1, 10))
self.assertTypedEquals(10**23, 10**22 // F(1, 10))
self.assertTypedEquals(10, 1.0 // F(1, 10))
self.assertTypedEquals(F(1, 10), F(1, 10) % 1)
self.assertTypedEquals(0.1, F(1, 10) % 1.0)
self.assertTypedEquals(F(0, 1), 1 % F(1, 10))
self.assertTypedEquals(0.0, 1.0 % F(1, 10))
# No need for divmod since we don't override it.
# ** has more interesting conversion rules.
self.assertTypedEquals(F(100, 1), F(1, 10) ** -2)
self.assertTypedEquals(F(100, 1), F(10, 1) ** 2)
self.assertTypedEquals(0.1, F(1, 10) ** 1.0)
self.assertTypedEquals(0.1 + 0j, F(1, 10) ** (1.0 + 0j))
        self.assertTypedEquals(4, 2 ** F(2, 1))
z = pow(-1, F(1, 2))
self.assertAlmostEqual(0, z.real)
self.assertEqual(1, z.imag)
        self.assertTypedEquals(F(1, 4), 2 ** F(-2, 1))
        self.assertTypedEquals(2.0, 4 ** F(1, 2))
self.assertTypedEquals(0.25, 2.0 ** F(-2, 1))
self.assertTypedEquals(1.0 + 0j, (1.0 + 0j) ** F(1, 10))
self.assertRaises(ZeroDivisionError, operator.pow,
F(0, 1), -2)
def testMixingWithDecimal(self):
# Decimal refuses mixed arithmetic (but not mixed comparisons)
self.assertRaises(TypeError, operator.add,
F(3,11), Decimal('3.1415926'))
self.assertRaises(TypeError, operator.add,
Decimal('3.1415926'), F(3,11))
def testComparisons(self):
self.assertTrue(F(1, 2) < F(2, 3))
self.assertFalse(F(1, 2) < F(1, 2))
self.assertTrue(F(1, 2) <= F(2, 3))
self.assertTrue(F(1, 2) <= F(1, 2))
self.assertFalse(F(2, 3) <= F(1, 2))
self.assertTrue(F(1, 2) == F(1, 2))
self.assertFalse(F(1, 2) == F(1, 3))
self.assertFalse(F(1, 2) != F(1, 2))
self.assertTrue(F(1, 2) != F(1, 3))
def testComparisonsDummyRational(self):
self.assertTrue(F(1, 2) == DummyRational(1, 2))
self.assertTrue(DummyRational(1, 2) == F(1, 2))
self.assertFalse(F(1, 2) == DummyRational(3, 4))
self.assertFalse(DummyRational(3, 4) == F(1, 2))
self.assertTrue(F(1, 2) < DummyRational(3, 4))
self.assertFalse(F(1, 2) < DummyRational(1, 2))
self.assertFalse(F(1, 2) < DummyRational(1, 7))
self.assertFalse(F(1, 2) > DummyRational(3, 4))
self.assertFalse(F(1, 2) > DummyRational(1, 2))
self.assertTrue(F(1, 2) > DummyRational(1, 7))
self.assertTrue(F(1, 2) <= DummyRational(3, 4))
self.assertTrue(F(1, 2) <= DummyRational(1, 2))
self.assertFalse(F(1, 2) <= DummyRational(1, 7))
self.assertFalse(F(1, 2) >= DummyRational(3, 4))
self.assertTrue(F(1, 2) >= DummyRational(1, 2))
self.assertTrue(F(1, 2) >= DummyRational(1, 7))
self.assertTrue(DummyRational(1, 2) < F(3, 4))
self.assertFalse(DummyRational(1, 2) < F(1, 2))
self.assertFalse(DummyRational(1, 2) < F(1, 7))
self.assertFalse(DummyRational(1, 2) > F(3, 4))
self.assertFalse(DummyRational(1, 2) > F(1, 2))
self.assertTrue(DummyRational(1, 2) > F(1, 7))
self.assertTrue(DummyRational(1, 2) <= F(3, 4))
self.assertTrue(DummyRational(1, 2) <= F(1, 2))
self.assertFalse(DummyRational(1, 2) <= F(1, 7))
self.assertFalse(DummyRational(1, 2) >= F(3, 4))
self.assertTrue(DummyRational(1, 2) >= F(1, 2))
self.assertTrue(DummyRational(1, 2) >= F(1, 7))
def testComparisonsDummyFloat(self):
x = DummyFloat(1./3.)
y = F(1, 3)
self.assertTrue(x != y)
self.assertTrue(x < y or x > y)
self.assertFalse(x == y)
self.assertFalse(x <= y and x >= y)
self.assertTrue(y != x)
self.assertTrue(y < x or y > x)
self.assertFalse(y == x)
self.assertFalse(y <= x and y >= x)
def testMixedLess(self):
self.assertTrue(2 < F(5, 2))
self.assertFalse(2 < F(4, 2))
self.assertTrue(F(5, 2) < 3)
self.assertFalse(F(4, 2) < 2)
self.assertTrue(F(1, 2) < 0.6)
self.assertFalse(F(1, 2) < 0.4)
self.assertTrue(0.4 < F(1, 2))
self.assertFalse(0.5 < F(1, 2))
self.assertFalse(float('inf') < F(1, 2))
self.assertTrue(float('-inf') < F(0, 10))
self.assertFalse(float('nan') < F(-3, 7))
self.assertTrue(F(1, 2) < float('inf'))
self.assertFalse(F(17, 12) < float('-inf'))
self.assertFalse(F(144, -89) < float('nan'))
def testMixedLessEqual(self):
self.assertTrue(0.5 <= F(1, 2))
self.assertFalse(0.6 <= F(1, 2))
self.assertTrue(F(1, 2) <= 0.5)
self.assertFalse(F(1, 2) <= 0.4)
self.assertTrue(2 <= F(4, 2))
self.assertFalse(2 <= F(3, 2))
self.assertTrue(F(4, 2) <= 2)
self.assertFalse(F(5, 2) <= 2)
self.assertFalse(float('inf') <= F(1, 2))
self.assertTrue(float('-inf') <= F(0, 10))
self.assertFalse(float('nan') <= F(-3, 7))
self.assertTrue(F(1, 2) <= float('inf'))
self.assertFalse(F(17, 12) <= float('-inf'))
self.assertFalse(F(144, -89) <= float('nan'))
def testBigFloatComparisons(self):
# Because 10**23 can't be represented exactly as a float:
self.assertFalse(F(10**23) == float(10**23))
# The first test demonstrates why these are important.
self.assertFalse(1e23 < float(F(math.trunc(1e23) + 1)))
self.assertTrue(1e23 < F(math.trunc(1e23) + 1))
self.assertFalse(1e23 <= F(math.trunc(1e23) - 1))
self.assertTrue(1e23 > F(math.trunc(1e23) - 1))
self.assertFalse(1e23 >= F(math.trunc(1e23) + 1))
def testBigComplexComparisons(self):
self.assertFalse(F(10**23) == complex(10**23))
self.assertRaises(TypeError, operator.gt, F(10**23), complex(10**23))
self.assertRaises(TypeError, operator.le, F(10**23), complex(10**23))
x = F(3, 8)
z = complex(0.375, 0.0)
w = complex(0.375, 0.2)
self.assertTrue(x == z)
self.assertFalse(x != z)
self.assertFalse(x == w)
self.assertTrue(x != w)
for op in operator.lt, operator.le, operator.gt, operator.ge:
self.assertRaises(TypeError, op, x, z)
self.assertRaises(TypeError, op, z, x)
self.assertRaises(TypeError, op, x, w)
self.assertRaises(TypeError, op, w, x)
def testMixedEqual(self):
self.assertTrue(0.5 == F(1, 2))
self.assertFalse(0.6 == F(1, 2))
self.assertTrue(F(1, 2) == 0.5)
self.assertFalse(F(1, 2) == 0.4)
self.assertTrue(2 == F(4, 2))
self.assertFalse(2 == F(3, 2))
self.assertTrue(F(4, 2) == 2)
self.assertFalse(F(5, 2) == 2)
self.assertFalse(F(5, 2) == float('nan'))
self.assertFalse(float('nan') == F(3, 7))
self.assertFalse(F(5, 2) == float('inf'))
self.assertFalse(float('-inf') == F(2, 5))
def testStringification(self):
self.assertEqual("Fraction(7, 3)", repr(F(7, 3)))
self.assertEqual("Fraction(6283185307, 2000000000)",
repr(F('3.1415926535')))
self.assertEqual("Fraction(-1, 100000000000000000000)",
repr(F(1, -10**20)))
self.assertEqual("7/3", str(F(7, 3)))
self.assertEqual("7", str(F(7, 1)))
def testHash(self):
hmod = sys.hash_info.modulus
hinf = sys.hash_info.inf
self.assertEqual(hash(2.5), hash(F(5, 2)))
self.assertEqual(hash(10**50), hash(F(10**50)))
self.assertNotEqual(hash(float(10**23)), hash(F(10**23)))
self.assertEqual(hinf, hash(F(1, hmod)))
# Check that __hash__ produces the same value as hash(), for
# consistency with int and Decimal. (See issue #10356.)
self.assertEqual(hash(F(-1)), F(-1).__hash__())
def testApproximatePi(self):
# Algorithm borrowed from
# http://docs.python.org/lib/decimal-recipes.html
three = F(3)
lasts, t, s, n, na, d, da = 0, three, 3, 1, 0, 0, 24
while abs(s - lasts) > F(1, 10**9):
lasts = s
n, na = n+na, na+8
d, da = d+da, da+32
t = (t * n) / d
s += t
self.assertAlmostEqual(math.pi, s)
def testApproximateCos1(self):
# Algorithm borrowed from
# http://docs.python.org/lib/decimal-recipes.html
x = F(1)
i, lasts, s, fact, num, sign = 0, 0, F(1), 1, 1, 1
while abs(s - lasts) > F(1, 10**9):
lasts = s
i += 2
fact *= i * (i-1)
num *= x * x
sign *= -1
s += num / fact * sign
self.assertAlmostEqual(math.cos(1), s)
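    # Editor's note on the two approximations above: both sum a rapidly
    # converging series (the recipes cited in the comments) with exact
    # Fraction terms. testApproximateCos1 in particular sums the Taylor
    # series cos(x) = sum_{k>=0} (-1)**k * x**(2*k) / (2*k)!, stopping once
    # consecutive partial sums differ by less than 1/10**9; only the final
    # assertAlmostEqual rounds to float.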
def test_copy_deepcopy_pickle(self):
r = F(13, 7)
dr = DummyFraction(13, 7)
self.assertEqual(r, loads(dumps(r)))
self.assertEqual(id(r), id(copy(r)))
self.assertEqual(id(r), id(deepcopy(r)))
self.assertNotEqual(id(dr), id(copy(dr)))
self.assertNotEqual(id(dr), id(deepcopy(dr)))
self.assertTypedEquals(dr, copy(dr))
self.assertTypedEquals(dr, deepcopy(dr))
def test_slots(self):
# Issue 4998
r = F(13, 7)
self.assertRaises(AttributeError, setattr, r, 'a', 10)
if __name__ == '__main__':
unittest.main()
# --- tests/garage/tf/models/test_gru.py | thanhkaist/garage @ 1d840df357282a675b8fce839bb0e5f72a8abba9 | MIT | 17,963 bytes | 7 stars (2022-02-01 to 2022-02-10) ---
import numpy as np
import pytest
import tensorflow as tf
from garage.tf.models.gru import gru
from tests.fixtures import TfGraphTestCase
from tests.helpers import recurrent_step_gru
class TestGRU(TfGraphTestCase):
def setup_method(self):
super().setup_method()
self.batch_size = 2
self.hidden_dim = 2
self.step_hidden_var = tf.compat.v1.placeholder(
shape=(self.batch_size, self.hidden_dim),
name='initial_hidden',
dtype=tf.float32)
self.gru_cell = tf.keras.layers.GRUCell(
units=self.hidden_dim,
activation=tf.nn.tanh,
kernel_initializer=tf.constant_initializer(1),
recurrent_activation=tf.nn.sigmoid,
recurrent_initializer=tf.constant_initializer(1),
            name='gru_layer')
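    # Editor's note: the tests below compare the TF graph against a
    # hand-rolled NumPy step (tests.helpers.recurrent_step_gru). With every
    # kernel and recurrent weight initialized to 1 and biases to 0, each
    # unit sees the same pre-activation, so a GRU step reduces to (sketch,
    # assuming the standard Keras update/reset/candidate gating):
    #     z = sigmoid(sum(x) + sum(h))        # update gate
    #     r = sigmoid(sum(x) + sum(h))        # reset gate
    #     c = tanh(sum(x) + r * sum(h))       # candidate state
    #     h_next = z * h + (1 - z) * c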
# yapf: disable
@pytest.mark.parametrize('time_step, input_dim, output_dim, '
'hidden_init', [
(1, 1, 1, 0), # noqa: E122
(1, 1, 3, 0),
(1, 3, 1, 0),
(3, 1, 1, 0),
(3, 3, 1, 0),
(3, 3, 3, 0),
(1, 1, 1, 0.5),
(1, 1, 3, 0.5),
(1, 3, 1, 0.5),
(3, 1, 1, 0.5),
(3, 3, 1, 0.5),
(3, 3, 3, 0.5),
])
# yapf: enable
def test_output_shapes(self, time_step, input_dim, output_dim,
hidden_init):
obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
obs_input = np.full((self.batch_size, input_dim), 1.)
input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None, input_dim),
name='input')
step_input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, input_dim),
name='step_input')
output_nonlinearity = tf.keras.layers.Dense(
units=output_dim,
activation=None,
kernel_initializer=tf.constant_initializer(1))
with tf.compat.v1.variable_scope('GRU'):
self.gru = gru(
all_input_var=input_var,
name='gru',
gru_cell=self.gru_cell,
step_input_var=step_input_var,
step_hidden_var=self.step_hidden_var,
hidden_state_init=tf.constant_initializer(hidden_init),
output_nonlinearity_layer=output_nonlinearity)
self.sess.run(tf.compat.v1.global_variables_initializer())
        # Compute the output by stepping the GRU cell time_step times
outputs_t, output_t, h_t, hidden_init = self.gru
hidden = np.full((self.batch_size, self.hidden_dim),
hidden_init.eval())
for _ in range(time_step):
output, hidden = self.sess.run([output_t, h_t],
feed_dict={
step_input_var: obs_input,
self.step_hidden_var: hidden,
}) # noqa: E126
assert output.shape == (self.batch_size, output_dim)
assert hidden.shape == (self.batch_size, self.hidden_dim)
full_output = self.sess.run(outputs_t,
feed_dict={input_var: obs_inputs})
assert full_output.shape == (self.batch_size, time_step, output_dim)
# yapf: disable
@pytest.mark.parametrize('time_step, input_dim, output_dim, '
'hidden_init', [
(1, 1, 1, 0), # noqa: E122
(1, 1, 3, 0),
(1, 3, 1, 0),
(3, 1, 1, 0),
(3, 3, 1, 0),
(3, 3, 3, 0),
(1, 1, 1, 0.5),
(1, 1, 3, 0.5),
(1, 3, 1, 0.5),
(3, 1, 1, 0.5),
(3, 3, 1, 0.5),
(3, 3, 3, 0.5),
])
# yapf: enable
def test_output_value(self, time_step, input_dim, output_dim, hidden_init):
obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
obs_input = np.full((self.batch_size, input_dim), 1.)
input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None, input_dim),
name='input')
step_input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, input_dim),
name='step_input')
output_nonlinearity = tf.keras.layers.Dense(
units=output_dim,
activation=None,
kernel_initializer=tf.constant_initializer(1))
with tf.compat.v1.variable_scope('GRU'):
self.gru = gru(
all_input_var=input_var,
name='gru',
gru_cell=self.gru_cell,
step_input_var=step_input_var,
step_hidden_var=self.step_hidden_var,
hidden_state_init=tf.constant_initializer(hidden_init),
output_nonlinearity_layer=output_nonlinearity)
self.sess.run(tf.compat.v1.global_variables_initializer())
        # Compute the output by stepping the GRU cell time_step times
outputs_t, output_t, h_t, hidden_init = self.gru
hidden1 = hidden2 = np.full((self.batch_size, self.hidden_dim),
hidden_init.eval())
for i in range(time_step):
output1, hidden1 = self.sess.run([output_t, h_t],
feed_dict={
step_input_var: obs_input,
self.step_hidden_var: hidden1
}) # noqa: E126
hidden2 = recurrent_step_gru(input_val=obs_input,
num_units=self.hidden_dim,
step_hidden=hidden2,
w_x_init=1.,
w_h_init=1.,
b_init=0.,
nonlinearity=np.tanh,
gate_nonlinearity=lambda x: 1. /
(1. + np.exp(-x)))
output_nonlinearity = np.full(
(np.prod(hidden2.shape[1:]), output_dim), 1.)
output2 = np.matmul(hidden2, output_nonlinearity)
assert np.allclose(output1, output2)
assert np.allclose(hidden1, hidden2)
full_output1 = self.sess.run(outputs_t,
feed_dict={input_var: obs_inputs})
hidden2 = np.full((self.batch_size, self.hidden_dim),
hidden_init.eval())
stack_hidden = None
for i in range(time_step):
hidden2 = recurrent_step_gru(input_val=obs_inputs[:, i, :],
num_units=self.hidden_dim,
step_hidden=hidden2,
w_x_init=1.,
w_h_init=1.,
b_init=0.,
nonlinearity=np.tanh,
gate_nonlinearity=lambda x: 1. /
(1. + np.exp(-x)))
if stack_hidden is None:
stack_hidden = hidden2[:, np.newaxis, :]
else:
stack_hidden = np.concatenate(
(stack_hidden, hidden2[:, np.newaxis, :]), axis=1)
output_nonlinearity = np.full((np.prod(hidden2.shape[1:]), output_dim),
1.)
full_output2 = np.matmul(stack_hidden, output_nonlinearity)
assert np.allclose(full_output1, full_output2)
# yapf: disable
@pytest.mark.parametrize('time_step, input_dim, output_dim', [
(1, 1, 1),
(1, 1, 3),
(1, 3, 1),
(3, 1, 1),
(3, 3, 1),
(3, 3, 3),
])
# yapf: enable
def test_output_value_trainable_hidden_and_cell(self, time_step, input_dim,
output_dim):
obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
obs_input = np.full((self.batch_size, input_dim), 1.)
input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None, input_dim),
name='input')
step_input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, input_dim),
name='step_input')
output_nonlinearity = tf.keras.layers.Dense(
units=output_dim,
activation=None,
kernel_initializer=tf.constant_initializer(1))
with tf.compat.v1.variable_scope('GRU'):
self.gru = gru(all_input_var=input_var,
name='gru',
gru_cell=self.gru_cell,
step_input_var=step_input_var,
step_hidden_var=self.step_hidden_var,
hidden_state_init_trainable=True,
output_nonlinearity_layer=output_nonlinearity)
self.sess.run(tf.compat.v1.global_variables_initializer())
        # Compute the output by stepping the GRU cell time_step times
outputs_t, output_t, h_t, hidden_init = self.gru
hidden = np.full((self.batch_size, self.hidden_dim),
hidden_init.eval())
_, hidden = self.sess.run([output_t, h_t],
feed_dict={
step_input_var: obs_input,
self.step_hidden_var: hidden,
}) # noqa: E126
with tf.compat.v1.variable_scope('GRU/gru', reuse=True):
hidden_init_var = tf.compat.v1.get_variable(name='initial_hidden')
assert hidden_init_var in tf.compat.v1.trainable_variables()
full_output1 = self.sess.run(outputs_t,
feed_dict={input_var: obs_inputs})
hidden2 = np.full((self.batch_size, self.hidden_dim),
hidden_init.eval())
stack_hidden = None
for i in range(time_step):
hidden2 = recurrent_step_gru(input_val=obs_inputs[:, i, :],
num_units=self.hidden_dim,
step_hidden=hidden2,
w_x_init=1.,
w_h_init=1.,
b_init=0.,
nonlinearity=np.tanh,
gate_nonlinearity=lambda x: 1. /
(1. + np.exp(-x)))
if stack_hidden is None:
stack_hidden = hidden2[:, np.newaxis, :]
else:
stack_hidden = np.concatenate(
(stack_hidden, hidden2[:, np.newaxis, :]), axis=1)
output_nonlinearity = np.full((np.prod(hidden2.shape[1:]), output_dim),
1.)
full_output2 = np.matmul(stack_hidden, output_nonlinearity)
assert np.allclose(full_output1, full_output2)
def test_gradient_paths(self):
time_step = 3
input_dim = 2
output_dim = 4
obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
obs_input = np.full((self.batch_size, input_dim), 1.)
input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None, input_dim),
name='input')
step_input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, input_dim),
name='step_input')
output_nonlinearity = tf.keras.layers.Dense(
units=output_dim,
activation=None,
kernel_initializer=tf.constant_initializer(1))
with tf.compat.v1.variable_scope('GRU'):
self.gru = gru(all_input_var=input_var,
name='gru',
gru_cell=self.gru_cell,
step_input_var=step_input_var,
step_hidden_var=self.step_hidden_var,
output_nonlinearity_layer=output_nonlinearity)
self.sess.run(tf.compat.v1.global_variables_initializer())
        # Compute the output by stepping the GRU cell time_step times
outputs_t, output_t, h_t, hidden_init = self.gru
hidden = np.full((self.batch_size, self.hidden_dim),
hidden_init.eval())
grads_step_o_i = tf.gradients(output_t, step_input_var)
grads_step_o_h = tf.gradients(output_t, self.step_hidden_var)
grads_step_h = tf.gradients(h_t, step_input_var)
self.sess.run([grads_step_o_i, grads_step_o_h, grads_step_h],
feed_dict={
step_input_var: obs_input,
self.step_hidden_var: hidden,
}) # noqa: E126
grads_step_o_i = tf.gradients(outputs_t, step_input_var)
grads_step_o_h = tf.gradients(outputs_t, self.step_hidden_var)
grads_step_h = tf.gradients(h_t, input_var)
# No gradient flow
with pytest.raises(TypeError):
self.sess.run(grads_step_o_i,
feed_dict={
step_input_var: obs_input,
self.step_hidden_var: hidden,
})
with pytest.raises(TypeError):
self.sess.run(grads_step_o_h,
feed_dict={
step_input_var: obs_input,
self.step_hidden_var: hidden,
})
with pytest.raises(TypeError):
self.sess.run(grads_step_h, feed_dict={input_var: obs_inputs})
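    # Editor's note: the TypeError checks above rely on tf.gradients
    # returning [None] when target and source tensors are not connected in
    # the graph (the step hidden state never consumes input_var, and the
    # full sequence output never consumes step_input_var); session.run
    # cannot fetch None, hence the exception.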
# yapf: disable
@pytest.mark.parametrize('time_step, input_dim, output_dim, '
'hidden_init', [
(1, 1, 1, 0), # noqa: E122
(1, 1, 3, 0),
(1, 3, 1, 0),
(3, 1, 1, 0),
(3, 3, 1, 0),
(3, 3, 3, 0),
(1, 1, 1, 0.5),
(1, 1, 3, 0.5),
(1, 3, 1, 0.5),
(3, 1, 1, 0.5),
(3, 3, 1, 0.5),
(3, 3, 3, 0.5),
])
# yapf: enable
def test_output_same_as_rnn(self, time_step, input_dim, output_dim,
hidden_init):
obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
obs_input = np.full((self.batch_size, input_dim), 1.)
input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None, input_dim),
name='input')
step_input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, input_dim),
name='step_input')
output_nonlinearity = tf.keras.layers.Dense(
units=output_dim,
activation=None,
kernel_initializer=tf.constant_initializer(1))
with tf.compat.v1.variable_scope('GRU'):
self.gru = gru(
all_input_var=input_var,
name='gru',
gru_cell=self.gru_cell,
step_input_var=step_input_var,
step_hidden_var=self.step_hidden_var,
hidden_state_init=tf.constant_initializer(hidden_init),
output_nonlinearity_layer=output_nonlinearity)
self.sess.run(tf.compat.v1.global_variables_initializer())
        # Create an RNN layer and compute the outputs for the whole sequence
rnn_layer = tf.keras.layers.RNN(cell=self.gru_cell,
return_sequences=True,
return_state=True)
        # Set the initial state to the same constant used for hidden_state_init
hidden_var = tf.compat.v1.get_variable(
name='initial_hidden',
shape=(self.batch_size, self.hidden_dim),
initializer=tf.constant_initializer(hidden_init),
trainable=False,
dtype=tf.float32)
outputs, hiddens = rnn_layer(input_var, initial_state=[hidden_var])
outputs = output_nonlinearity(outputs)
self.sess.run(tf.compat.v1.global_variables_initializer())
outputs, hiddens = self.sess.run([outputs, hiddens],
feed_dict={input_var: obs_inputs})
        # Compute the output by stepping the GRU cell time_step times
hidden = np.full((self.batch_size, self.hidden_dim), hidden_init)
_, output_t, hidden_t, _ = self.gru
for i in range(time_step):
output, hidden = self.sess.run([output_t, hidden_t],
feed_dict={
step_input_var: obs_input,
self.step_hidden_var: hidden,
}) # noqa: E126
            # The output from the i-th timestep
            assert np.array_equal(output, outputs[:, i, :])
        assert np.array_equal(hidden, hiddens)
        # Also check the full sequence output from the GRU
full_outputs = self.sess.run(self.gru[0],
feed_dict={input_var: obs_inputs})
assert np.array_equal(outputs, full_outputs)
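# Editor's note: a typical way to run just this module (path taken from the
# repository metadata above) would be:
#   pytest tests/garage/tf/models/test_gru.py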
7905c49276db4f81c3c41db3e11535a22c230170
| 4,951
|
py
|
Python
|
examples/projector_order_test.py
|
KrisThielemans/parallelproj
|
b9e1cb27aaec9a1605e1842b7b3be8b6f32765d3
|
[
"MIT"
] | null | null | null |
examples/projector_order_test.py
|
KrisThielemans/parallelproj
|
b9e1cb27aaec9a1605e1842b7b3be8b6f32765d3
|
[
"MIT"
] | null | null | null |
examples/projector_order_test.py
|
KrisThielemans/parallelproj
|
b9e1cb27aaec9a1605e1842b7b3be8b6f32765d3
|
[
"MIT"
] | null | null | null |
# small demo timing TOF forward / back projections for different spatial dimension orders
import pyparallelproj as ppp
from pyparallelproj.wrapper import joseph3d_fwd, joseph3d_fwd_tof, joseph3d_back, joseph3d_back_tof
import numpy as np
import argparse
import ctypes
from time import time
#---------------------------------------------------------------------------------
# parse the command line
parser = argparse.ArgumentParser()
parser.add_argument('--ngpus', help = 'number of GPUs to use', default = 0, type = int)
parser.add_argument('--nsubsets', help = 'number of subsets', default = 28, type = int)
parser.add_argument('--tpb', help = 'threads per block', default = 64, type = int)
parser.add_argument('--nontof', help = 'non-TOF instead of TOF', action = 'store_true')
parser.add_argument('--img_mem_order', help = 'memory layout for image', default = 'C',
choices = ['C','F'])
args = parser.parse_args()
#---------------------------------------------------------------------------------
ngpus = args.ngpus
nsubsets = args.nsubsets
tpb = args.tpb
tof = not args.nontof
img_mem_order = args.img_mem_order
subset = 0
if tof:
ntofbins = 27
else:
ntofbins = 1
np.random.seed(1)
#---------------------------------------------------------------------------------
# set up a demo scanner (28 transaxial x 5 axial crystal modules)
scanner = ppp.RegularPolygonPETScanner(ncrystals_per_module = np.array([16,9]),
nmodules = np.array([28,5]))
# setup a test image
voxsize = np.array([2.,2.,2.])
n0 = 250
n1 = 250
n2 = max(1,int((scanner.xc2.max() - scanner.xc2.min()) / voxsize[2]))
# set up a block test image (ones in the central region)
img = np.zeros((n0,n1,n2), dtype = np.float32, order = img_mem_order)
img[(n0//6):(5*n0//6),(n1//6):(5*n1//6),:] = 1
img_origin = (-(np.array(img.shape) / 2) + 0.5) * voxsize
# generate sinogram parameters and the projector
sd = np.array([[0,1,2],
[0,2,1],
[1,2,0],
[1,0,2],
[2,0,1],
[2,1,0]])
for sdo in sd:
sino_params = ppp.PETSinogramParameters(scanner, ntofbins = ntofbins, tofbin_width = 23.,
spatial_dim_order = sdo)
proj = ppp.SinogramProjector(scanner, sino_params, img.shape, nsubsets = nsubsets,
voxsize = voxsize, img_origin = img_origin, ngpus = ngpus,
tof = tof, sigma_tof = 60./2.35, n_sigmas = 3.,
threadsperblock = tpb)
  # do a forward / back projection of subset 0 - equivalent to img_fwd = proj.fwd_project(img, 0);
  # the single steps are written out here so the Python overhead can be timed separately
#img_fwd = proj.fwd_project(img, 0)
#ones_sino = np.ones(img_fwd.shape, dtype = np.float32)
#back = proj.back_project(ones_sino, 0)
subset_slice = proj.subset_slices[subset]
sigma_tof = np.full(proj.nLORs[subset], proj.sigma_tof, dtype = ctypes.c_float).ravel()
tofcenter_offset = np.zeros(proj.nLORs[subset], dtype = ctypes.c_float).ravel()
xstart = proj.xstart[subset_slice].ravel()
xend = proj.xend[subset_slice].ravel()
img_ravel = img.ravel(order = img_mem_order)
subset_nLORs = proj.nLORs[subset]
img_fwd = np.zeros(subset_nLORs*proj.ntofbins, dtype = ctypes.c_float)
back_img = np.zeros(proj.nvox, dtype = ctypes.c_float)
sino = np.ones(subset_nLORs*proj.ntofbins, dtype = ctypes.c_float)
#--- time fwd projection
t0 = time()
if tof:
ok = joseph3d_fwd_tof(xstart, xend, img_ravel, proj.img_origin, proj.voxsize,
img_fwd, subset_nLORs, proj.img_dim,
proj.tofbin_width, sigma_tof, tofcenter_offset,
proj.nsigmas, proj.ntofbins,
threadsperblock = proj.threadsperblock, ngpus = proj.ngpus, lm = False)
else:
ok = joseph3d_fwd(xstart, xend, img_ravel, proj.img_origin, proj.voxsize,
img_fwd, subset_nLORs, proj.img_dim,
threadsperblock = proj.threadsperblock, ngpus = proj.ngpus, lm = False)
t1 = time()
#--- time back projection
t2 = time()
if tof:
ok = joseph3d_back_tof(xstart, xend, back_img, proj.img_origin, proj.voxsize,
sino, subset_nLORs, proj.img_dim,
proj.tofbin_width, sigma_tof, tofcenter_offset,
proj.nsigmas, proj.ntofbins,
threadsperblock = proj.threadsperblock, ngpus = proj.ngpus, lm = False)
else:
ok = joseph3d_back(xstart, xend, back_img, proj.img_origin, proj.voxsize,
sino, subset_nLORs, proj.img_dim,
threadsperblock = proj.threadsperblock, ngpus = proj.ngpus, lm = False)
t3 = time()
print(f'{sdo} {t1-t0} {t3-t2}')
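# --- hedged sketch (not in the original script): a helper one might use to
# repeat each projection several times and keep the best wall-clock time, so
# that a single cold run does not dominate the printed comparison. `time_call`
# and `n_repeat` are illustrative names.
def time_call(fn, *args, n_repeat=5, **kwargs):
  best = float('inf')
  for _ in range(n_repeat):
    t = time()
    fn(*args, **kwargs)
    best = min(best, time() - t)  # keep the fastest of the n_repeat runs
  return best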
| 38.379845
| 99
| 0.576247
|
| true
| true
|
7905c49e9cb3c3cb9f8e4211e5b09d739b4a9a5c
| 27
|
py
|
Python
|
src/partesanato/__init__.py
|
edgarbs1998/partesanato-server
|
1b01f75f94dca85ff5b963c49c237fb758b27b43
|
[
"MIT"
] | null | null | null |
src/partesanato/__init__.py
|
edgarbs1998/partesanato-server
|
1b01f75f94dca85ff5b963c49c237fb758b27b43
|
[
"MIT"
] | 1
|
2020-06-05T22:09:06.000Z
|
2020-06-05T22:09:06.000Z
|
src/partesanato/__init__.py
|
edgarbs1998/partesanato-server
|
1b01f75f94dca85ff5b963c49c237fb758b27b43
|
[
"MIT"
] | null | null | null |
# partesanato/__init__.py
| 13.5
| 26
| 0.777778
| true
| true
|
|
7905c4a01eb4bf2cf8957f83608436d165621168
| 337
|
py
|
Python
|
noteandtag/__version__.py
|
Nauja/noteandtag
|
0a16228c7c71c056dfcf4e3d7ca30ad5ec5fc2bd
|
[
"MIT"
] | null | null | null |
noteandtag/__version__.py
|
Nauja/noteandtag
|
0a16228c7c71c056dfcf4e3d7ca30ad5ec5fc2bd
|
[
"MIT"
] | null | null | null |
noteandtag/__version__.py
|
Nauja/noteandtag
|
0a16228c7c71c056dfcf4e3d7ca30ad5ec5fc2bd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# noteandtag's package version information
__version_major__ = "0.2"
__version__ = "{}a1".format(__version_major__)
__version_long__ = "{}a1".format(__version_major__)
__status__ = "Alpha"
__author__ = "Jeremy Morosi"
__author_email__ = "jeremymorosi@hotmail.com"
__url__ = "https://github.com/Nauja/nodeandtag"
| 30.636364
| 51
| 0.750742
|
| true
| true
|
7905c50545f5247bba21c5acca55ede55da38f5b
| 2,707
|
py
|
Python
|
Hemodynamic_Parameter_Analysis.py
|
TylerAdamMartinez/Minimally-Invasive-Monocusp-Valve
|
6876864b60a0a95f32abc7a15ef988214866f1be
|
[
"MIT"
] | 1
|
2019-12-07T21:25:28.000Z
|
2019-12-07T21:25:28.000Z
|
Hemodynamic_Parameter_Analysis.py
|
TylerFonzie8507/Minimally-Invasive-Monocusp-Valve
|
6876864b60a0a95f32abc7a15ef988214866f1be
|
[
"MIT"
] | null | null | null |
Hemodynamic_Parameter_Analysis.py
|
TylerFonzie8507/Minimally-Invasive-Monocusp-Valve
|
6876864b60a0a95f32abc7a15ef988214866f1be
|
[
"MIT"
] | null | null | null |
"""
This code was created by Tyler Adam Martinez for the BMEN3310 Final
# These are the variables and what they stand for.
Hemodynamic Parameter Analysis
CS // Cross-Sectional Area of the heart valve
vR // Radius of Valve
DR // Disk Radius
TiA // Area of the Titanium wire
TiV // Volume of the Titanium wire
IRV // Inner Ring Volume
ORV // Outer Ring Volume
DR // Disk Volume
NS // Cost of Nitinol Stent per single unit
PPC // Pure Pyrolytic Carbon per unit volume
Tf // Teflon Fabric per unit volume
Ti // Titanium Wire per unit volume
Hemodynamic Calculations
SVR // Systemic Vascular Resistance or Afterload on the heart
MAP // Mean Arterial Pressure
CVP // Central Venous Pressure
CO // Cardiac Output
SV // Stroke Volume
HR // Heart Rate
SBP // Systolic Blood Pressure
DBP // Diastolic Blood Pressure
"""
import math
pi = 3.14159265359;
## Hemodynamic Parameter Analysis
CS = input("The cross-sectional area of the valve: ");
CS = int(CS);
vR = math.sqrt(CS/pi); #convert cross-sectional area to valve radius
height = 5.0; #mm
thickness = 1.5; #mm
DR = vR - (2*thickness); #subtracting the wall thickness on both sides
Diskheight = 1.5; #mm
#calculating the volumes of each material
TiA = 0.1024 * pi; #.32mm is radius of Titanium wire, and .1024 is r^2
TiV = 2*vR *TiA; #mm^3
IRV = (pi * pow((DR + thickness), 2) - pi * pow(DR, 2)) * height; #mm^3, ring volume = (outer area - inner area) * height
ORV = (pi * pow((DR + (2*thickness)), 2) - pi * pow((DR + thickness), 2)) * height; #mm^3
DV = pi * pow(DR, 2) * Diskheight; #mm^3
#Constant Cost per volume values
NS = 100; # $ per unit
PPC = 0.00052; # $ per 1 mm^3
TF = 0.00014; # $ per 1 mm^3
Ti = 0.00064; # $ per 1 mm^3
#Material Cost = Volume of Material * Cost per Unit Volume
ORcost = ORV * TF + NS;
IRcost = IRV * PPC;
Dcost = (DV*(.9)*PPC) + (DV*(.1)*TF) + TiV*Ti;
TotalCost = ORcost + IRcost + Dcost;
#Outputting result to user
print("The total cost of your heart valve is $",format(TotalCost,'.2f'));
## Hemodynamic Calculations
SV = input("Enter in the Stroke Volume of the patient: ");
SV = int(SV);
HR = input("Enter in the Heart Rate of the patient: ");
HR = int(HR);
CO = SV * HR;
print("The Cardiac Output of the patient is ",CO);
SBP = input("Enter in the Systolic Blood Pressure of the patient: ");
SBP = int(SBP);
DBP = input("Enter in the Diastolic Blood Pressure of the patient: ");
DBP = int(DBP);
MAP = (((SBP) + (2 *(DBP)))/ 3);
print("The Mean Arterial Pressure of the patient is ",format(MAP, '.3f'));
CVP = input("Enter in the Central Venous Pressure of the patient: ");
CVP = int(CVP);
SVR = ((MAP - CVP)/(CO)) * 80;
print("The Systemic Vascular Resistance of the patient is ",format(SVR,'.3f'));
| 32.614458
| 87
| 0.655707
|
| true
| true
|
7905c553202de651223d53c0dd5e6502dc2b3f0a
| 2,348
|
py
|
Python
|
anyway/widgets/suburban_widgets/injured_count_by_accident_year_widget.py
|
shaniwein/anyway
|
dcd13bf7dc4a120f4d697ab0c08b906f43eea52e
|
[
"MIT"
] | 1
|
2022-01-19T18:23:03.000Z
|
2022-01-19T18:23:03.000Z
|
anyway/widgets/suburban_widgets/injured_count_by_accident_year_widget.py
|
shaniwein/anyway
|
dcd13bf7dc4a120f4d697ab0c08b906f43eea52e
|
[
"MIT"
] | 2
|
2021-11-02T13:37:23.000Z
|
2021-11-23T15:51:06.000Z
|
anyway/widgets/suburban_widgets/injured_count_by_accident_year_widget.py
|
shaniwein/anyway
|
dcd13bf7dc4a120f4d697ab0c08b906f43eea52e
|
[
"MIT"
] | null | null | null |
from typing import Dict
from flask_babel import _
from anyway.backend_constants import InjurySeverity
from anyway.infographics_dictionaries import segment_dictionary
from anyway.models import InvolvedMarkerView
from anyway.request_params import RequestParams
from anyway.widgets.suburban_widgets.sub_urban_widget import SubUrbanWidget
from anyway.widgets.widget import register
from anyway.widgets.widget_utils import (
get_accidents_stats,
gen_entity_labels,
get_injured_filters,
format_2_level_items,
sort_and_fill_gaps_for_stacked_bar,
)
@register
class InjuredCountByAccidentYearWidget(SubUrbanWidget):
name: str = "injured_count_by_accident_year"
def __init__(self, request_params: RequestParams):
super().__init__(request_params, type(self).name)
self.rank = 9
self.information = (
"Fatal, severe and light injured count in the specified years, split by injury severity"
)
def generate_items(self) -> None:
res1 = get_accidents_stats(
table_obj=InvolvedMarkerView,
filters=get_injured_filters(self.request_params.location_info),
group_by=("accident_year", "injury_severity"),
count="injury_severity",
start_time=self.request_params.start_time,
end_time=self.request_params.end_time,
)
res2 = sort_and_fill_gaps_for_stacked_bar(
res1,
range(self.request_params.start_time.year, self.request_params.end_time.year + 1),
{
InjurySeverity.KILLED.value: 0,
InjurySeverity.SEVERE_INJURED.value: 0,
InjurySeverity.LIGHT_INJURED.value: 0,
},
)
self.items = format_2_level_items(res2, None, InjurySeverity)
@staticmethod
def localize_items(request_params: RequestParams, items: Dict) -> Dict:
items["data"]["text"] = {
"title": _("Number of injured in accidents, per year, split by severity")
+ f" - {segment_dictionary[request_params.location_info['road_segment_name']]}",
"labels_map": gen_entity_labels(InjurySeverity),
}
return items
_("Fatal, severe and light injured count in the specified years, split by injury severity")
| 37.870968
| 101
| 0.677172
|
| true
| true
|
7905c61340e209311a6142b0fb8d22cf5fd8af3a
| 1,690
|
py
|
Python
|
Model/imagescrape.py
|
danscime/Tip-Of-My-Shoe
|
0e6bdeb0bf161f51fd0ec4728c5f7e6f4d1d6128
|
[
"MIT"
] | 4
|
2019-10-04T21:16:07.000Z
|
2019-10-06T18:08:20.000Z
|
Model/imagescrape.py
|
danscime/Tip-Of-My-Shoe
|
0e6bdeb0bf161f51fd0ec4728c5f7e6f4d1d6128
|
[
"MIT"
] | 2
|
2021-05-11T10:27:27.000Z
|
2022-02-19T00:36:26.000Z
|
Model/imagescrape.py
|
danscime/Tip-Of-My-Shoe
|
0e6bdeb0bf161f51fd0ec4728c5f7e6f4d1d6128
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
import json
import os
import sys
import urllib.request  # Python 3 stdlib client (replaces the Python 2-only urllib2)
searchterm = str(sys.argv[1]) # will also be the name of the folder
url = "https://www.google.co.in/search?q="+searchterm+"&source=lnms&tbm=isch"
browser = webdriver.Chrome()
browser.get(url)
header={'User-Agent':"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"}
counter = 0
succounter = 0
if not os.path.exists(searchterm):
    os.mkdir(searchterm)
for _ in range(500):
    browser.execute_script("window.scrollBy(0,10000)")
for x in browser.find_elements_by_xpath('//div[contains(@class,"rg_meta")]'):
    counter = counter + 1
    meta = json.loads(x.get_attribute('innerHTML'))  # parse the metadata blob once per result
    print("Total Count:", counter)
    print("Successful Count:", succounter)
    print("URL:", meta["ou"])
    if "mrporter" not in meta["ou"] and "images.asos" not in meta["ou"] and "famousfootwear" not in meta["ou"]:
        img = meta["ou"]
        imgtype = meta["ity"]
        try:
            req = urllib.request.Request(img, headers=header)  # `header` is already a complete headers mapping
            raw_img = urllib.request.urlopen(req).read()
            with open(os.path.join(searchterm, searchterm + "_" + str(counter) + "." + imgtype), "wb") as f:
                f.write(raw_img)
            succounter = succounter + 1
        except Exception:
            print("can't get img")
print(succounter, "pictures successfully downloaded")
browser.close()
| 39.302326
| 225
| 0.671006
|
| false
| true
|
7905c70f99c164f6b3dd1cb3a5b80c7d6698f988
| 1,339
|
py
|
Python
|
src/doghouse/models.py
|
JuniorGunner/ConcilBackendTest
|
5ff07de350783e6168f01acd9a2fba0f9e81f66c
|
[
"MIT"
] | null | null | null |
src/doghouse/models.py
|
JuniorGunner/ConcilBackendTest
|
5ff07de350783e6168f01acd9a2fba0f9e81f66c
|
[
"MIT"
] | null | null | null |
src/doghouse/models.py
|
JuniorGunner/ConcilBackendTest
|
5ff07de350783e6168f01acd9a2fba0f9e81f66c
|
[
"MIT"
] | null | null | null |
from django.db import models
import os
def get_image_path(instance, filename):
    # note: instance.id is None until the instance has first been saved
    return os.path.join('pics', str(instance.id), filename)
# Create your models here.
class Pets(models.Model):
pet_foto = models.ImageField(upload_to=get_image_path, blank=True, null=True)
DOG = 'C'
CAT = 'G'
ESPECIE_CHOICES = (
(DOG, 'Cachorro'),
(CAT, 'Gato')
)
especie = models.CharField(max_length=1, choices=ESPECIE_CHOICES, default=DOG)
PEQ = 'Pq'
MED = 'Md'
GDE = 'Gd'
PORTE_CHOICES = (
(PEQ, 'Pequeno'),
(MED, 'Médio'),
(GDE, 'Grande')
)
porte = models.CharField(max_length=2, choices=PORTE_CHOICES, default=GDE)
FILHOTE = 'F'
ADULTO = 'A'
IDADE_CHOICES = (
(FILHOTE, 'Filhote'),
(ADULTO, 'Adulto')
)
nome = models.CharField(max_length=50, null=False)
idade = models.CharField(max_length=1, choices=IDADE_CHOICES, default=ADULTO)
raca = models.CharField(max_length=100, null=False)
obs = models.TextField(max_length=500, null=True, blank=True)
def __str__(self):
return "pet_foto: {}\nEspecie: {}\nPorte: {}\nNome: {}\nIdade: {}\nRaça: {}\nObs.: {}"\
.format(self.pet_foto, self.especie, self.porte, self.nome, self.idade, self.raca, self.obs)
| 30.431818
| 101
| 0.605676
|
| true
| true
|
7905c777b4c3b65f95f666f2c5676e7321beb303
| 2,772
|
py
|
Python
|
xclim/indices/__init__.py
|
gacou54/xclim
|
1fd3ef228ee48f547167bc43afa89615bdafb30b
|
[
"Apache-2.0"
] | null | null | null |
xclim/indices/__init__.py
|
gacou54/xclim
|
1fd3ef228ee48f547167bc43afa89615bdafb30b
|
[
"Apache-2.0"
] | null | null | null |
xclim/indices/__init__.py
|
gacou54/xclim
|
1fd3ef228ee48f547167bc43afa89615bdafb30b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Indices library
===============
This module describes climate indicator functions. Functions are listed in alphabetical order and describe the raw
computation performed over xarray.DataArrays. DataArrays should carry unit information to allow for any needed
unit conversions. The output's attributes (CF-Convention) are not modified. Validation checks and output attributes
are handled by indicator classes described in files named by the physical variable (temperature, precip, streamflow).
Notes for docstring
-------------------
The docstrings adhere to the `NumPy`_ style convention and are meant as a way to store CF-Convention metadata as
well as information relevant to third party libraries (such as a WPS server).
The first line of the docstring (the short summary) will be assigned to the output's `long_name` attribute. The
`long_name` attribute is defined by the NetCDF User Guide to contain a long descriptive name which may, for example,
be used for labeling plots.
The second paragraph will be considered as the "*abstract*", or the CF global "*comment*" (miscellaneous information
about the data or methods used to produce it).
The third and fourth sections are the **Parameters** and **Returns** sections describing the input and output values
respectively.
.. code-block:: python
Parameters
----------
<standard_name> : xarray.DataArray
<Long_name> of variable [acceptable units].
threshold : string
Description of the threshold / units.
e.g. The 10th percentile of historical temperature [K].
freq : str, optional
Resampling frequency.
Returns
-------
xarray.DataArray
Output's <long_name> [units]
The next sections would be **Notes** and **References**:
.. code-block:: python
Notes
-----
This is where the mathematical equation is described.
At the end of the description, convention suggests
to add a reference [example]_:
.. math::
3987^12 + 4365^12 = 4472^12
References
----------
.. [example] Smith, T.J. and Huard, D. (2018). "CF Docstrings:
A manifesto on conventions and the metaphysical nature
of ontological python documentation." Climate Aesthetics,
vol. 1, pp. 121-155.
Indice descriptions
===================
.. _`NumPy`: https://numpydoc.readthedocs.io/en/latest/format.html#docstring-standard
"""
from ._simple import *
from ._threshold import *
from ._multivariate import *
# TODO: Define a unit conversion system for temperature [K, C, F] and precipitation [mm h-1, Kg m-2 s-1] metrics
# TODO: Move utility functions to another file.
# TODO: Should we reference the standard vocabulary we're using ?
# E.g. http://vocab.nerc.ac.uk/collection/P07/current/BHMHISG2/
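# --- hedged illustration (not part of xclim): a minimal indice skeleton that
# follows the docstring convention described above. The name
# `tx_days_above_demo`, the Celsius threshold and the hard-coded "time"
# dimension are simplifying assumptions, not xclim API.
def tx_days_above_demo(tasmax, thresh=25.0, freq="YS"):
    """Number of days with maximum temperature above a threshold.
    Resample the daily maximum temperature and count, per period, the days on
    which it exceeds the threshold.
    Parameters
    ----------
    tasmax : xarray.DataArray
      Maximum daily temperature [C].
    thresh : float
      Threshold temperature [C].
    freq : str, optional
      Resampling frequency.
    Returns
    -------
    xarray.DataArray
      Number of days with maximum temperature above the threshold [days].
    """
    return (tasmax > thresh).resample(time=freq).sum(dim="time")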
| 35.088608
| 117
| 0.712843
|
| true
| true
|
7905cae74c5d3b189f133d7c429d3cf6564cedf5
| 2,215
|
py
|
Python
|
ecom/paquetes/view_paquete.py
|
Gustolidel/Ikergust
|
7f20d917895dce4a3b65a8283be3f10141e53b11
|
[
"MIT"
] | null | null | null |
ecom/paquetes/view_paquete.py
|
Gustolidel/Ikergust
|
7f20d917895dce4a3b65a8283be3f10141e53b11
|
[
"MIT"
] | null | null | null |
ecom/paquetes/view_paquete.py
|
Gustolidel/Ikergust
|
7f20d917895dce4a3b65a8283be3f10141e53b11
|
[
"MIT"
] | null | null | null |
from django.views.generic import CreateView, UpdateView, View
from django.shortcuts import render, redirect
from ecom import forms, models
from django.utils.decorators import method_decorator
def admin_required(function):
def wrap(request, *args, **kwargs):
if not request.user.groups.filter(name='Administrador').exists():
return redirect('')
return function(request, *args, **kwargs)
return wrap
class Agregar_paquete_view(CreateView):
# specify the model for create view
model = models.Paquete
form_class = forms.PaqueteForm
    template_name = 'ecom/paquetes/Agregar_paquete.html'  # template for the create view
success_url = "/Ver-paquete"
@method_decorator(admin_required)
def dispatch(self, request, *args, **kwargs):
return super(Agregar_paquete_view, self).dispatch(request, *args, **kwargs)
class paquete_view(View):
@method_decorator(admin_required)
def dispatch(self, request, *args, **kwargs):
return super(paquete_view, self).dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
paquete = models.Paquete.objects.all()
return render(request, 'ecom/paquetes/Ver_paquete.html',{"paquete": paquete})
class Actualizar_paquete(UpdateView):
    model = models.Paquete  # model to update
    fields = "__all__"  # use "__all__" to expose every model field
    template_name = 'ecom/paquetes/Actualizar_paquete.html'  # template for the update view
success_url = "/Ver-paquete"
@method_decorator(admin_required)
def dispatch(self, request, *args, **kwargs):
return super(Actualizar_paquete, self).dispatch(request, *args, **kwargs)
def paquetes(request):
if 'product_ids' in request.COOKIES:
product_ids = request.COOKIES['product_ids']
counter = product_ids.split('|')
product_count_in_cart = len(set(counter))
else:
product_count_in_cart = 0
paquetes = models.Paquete.objects.all()
return render(request, 'ecom/paquetes/paquete.html',{"paquetes":paquetes,"product_count_in_cart":product_count_in_cart })
| 36.916667
| 128
| 0.715576
|
| true
| true
|
7905cb3aa60e8f9fa2a288064c25b63345eb6706
| 140,061
|
py
|
Python
|
tests/lax_numpy_test.py
|
vballoli/jax
|
bbf7a432e86053024419ec8adb90aae3d06afb18
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
tests/lax_numpy_test.py
|
vballoli/jax
|
bbf7a432e86053024419ec8adb90aae3d06afb18
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
tests/lax_numpy_test.py
|
vballoli/jax
|
bbf7a432e86053024419ec8adb90aae3d06afb18
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import functools
from functools import partial
import itertools
import operator
from typing import cast, Optional
import unittest
from unittest import SkipTest
import warnings
from absl.testing import absltest
from absl.testing import parameterized
import numpy as onp
import jax
import jax.ops
from jax import api
from jax import lax
from jax import linear_util
from jax import numpy as jnp
from jax import test_util as jtu
from jax import dtypes
from jax import tree_util
from jax.interpreters import partial_eval, xla
from jax.test_util import check_grads
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
nonempty_nonscalar_array_shapes = [(4,), (3, 4), (3, 1), (1, 4), (2, 1, 4), (2, 3, 4)]
nonempty_array_shapes = [()] + nonempty_nonscalar_array_shapes
one_dim_array_shapes = [(1,), (6,), (12,)]
empty_array_shapes = [(0,), (0, 4), (3, 0),]
scalar_shapes = [jtu.NUMPY_SCALAR_SHAPE, jtu.PYTHON_SCALAR_SHAPE]
array_shapes = nonempty_array_shapes + empty_array_shapes
nonzerodim_shapes = nonempty_nonscalar_array_shapes + empty_array_shapes
nonempty_shapes = scalar_shapes + nonempty_array_shapes
all_shapes = scalar_shapes + array_shapes
def supported_dtypes(dtypes):
return [t for t in dtypes if t in jtu.supported_dtypes()]
float_dtypes = supported_dtypes([jnp.bfloat16, onp.float16, onp.float32,
onp.float64])
complex_dtypes = [onp.complex64, onp.complex128]
int_dtypes = [onp.int32, onp.int64]
uint_dtypes = [onp.uint32, onp.uint64]
unsigned_dtypes = [onp.uint32, onp.uint64]
bool_dtypes = [onp.bool_]
default_dtypes = float_dtypes + int_dtypes
inexact_dtypes = float_dtypes + complex_dtypes
number_dtypes = float_dtypes + complex_dtypes + int_dtypes
all_dtypes = number_dtypes + bool_dtypes
python_scalar_dtypes = [jnp.bool_, jnp.int_, jnp.float_, jnp.complex_]
def _valid_dtypes_for_shape(shape, dtypes):
# Not all (shape, dtype) pairs are valid. In particular, Python scalars only
# have one type in each category (float, bool, etc.)
if shape is jtu.PYTHON_SCALAR_SHAPE:
return [t for t in dtypes if t in python_scalar_dtypes]
return dtypes
def _shape_and_dtypes(shapes, dtypes):
for shape in shapes:
for dtype in _valid_dtypes_for_shape(shape, dtypes):
yield (shape, dtype)
OpRecord = collections.namedtuple(
"OpRecord",
["name", "nargs", "dtypes", "shapes", "rng_factory", "diff_modes",
"test_name", "check_dtypes", "tolerance", "inexact"])
def op_record(name, nargs, dtypes, shapes, rng_factory, diff_modes,
test_name=None, check_dtypes=True, tolerance=None, inexact=False):
test_name = test_name or name
return OpRecord(name, nargs, dtypes, shapes, rng_factory, diff_modes,
test_name, check_dtypes, tolerance, inexact)
JAX_ONE_TO_ONE_OP_RECORDS = [
op_record("abs", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("add", 2, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("ceil", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("conj", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
op_record("exp", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("fabs", 1, float_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("float_power", 2, inexact_dtypes, all_shapes,
partial(jtu.rand_default, scale=1), ["rev"],
tolerance={jnp.bfloat16: 1e-2, onp.float32: 1e-3,
onp.float64: 1e-12, onp.complex64: 2e-4,
onp.complex128: 1e-12}, check_dtypes=False),
op_record("floor", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("greater", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
op_record("greater_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
op_record("less", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
op_record("less_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
op_record("log", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("logical_and", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
op_record("logical_not", 1, all_dtypes, all_shapes, jtu.rand_bool, []),
op_record("logical_or", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
op_record("logical_xor", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
op_record("maximum", 2, all_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("minimum", 2, all_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("multiply", 2, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("negative", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("nextafter", 2, [f for f in float_dtypes if f != jnp.bfloat16],
all_shapes, jtu.rand_default, ["rev"], inexact=True, tolerance=0),
op_record("not_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, ["rev"]),
op_record("array_equal", 2, number_dtypes, all_shapes, jtu.rand_some_equal, ["rev"]),
op_record("reciprocal", 1, inexact_dtypes, all_shapes, jtu.rand_default, []),
op_record("subtract", 2, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("signbit", 1, default_dtypes + bool_dtypes, all_shapes,
jtu.rand_some_inf_and_nan, ["rev"]),
op_record("trunc", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("sin", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("cos", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("tan", 1, number_dtypes, all_shapes,
partial(jtu.rand_uniform, -1.5, 1.5), ["rev"], inexact=True),
op_record("sinh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("cosh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
# TODO(b/142975473): on CPU, tanh for complex128 is only accurate to
# ~float32 precision.
# TODO(b/143135720): on GPU, tanh has only ~float32 precision.
op_record("tanh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
tolerance={onp.float64: 1e-7, onp.complex128: 1e-7},
inexact=True),
op_record("arcsin", 1, float_dtypes, all_shapes, jtu.rand_small, ["rev"],
inexact=True),
op_record("arccos", 1, float_dtypes, all_shapes, jtu.rand_small, ["rev"],
inexact=True),
op_record("arctan", 1, float_dtypes, all_shapes, jtu.rand_small, ["rev"],
inexact=True),
op_record("arctan2", 2, float_dtypes, all_shapes, jtu.rand_small, ["rev"],
inexact=True),
op_record("arcsinh", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("arccosh", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("arctanh", 1, number_dtypes, all_shapes, jtu.rand_small, ["rev"],
inexact=True),
]
JAX_COMPOUND_OP_RECORDS = [
# angle has inconsistent 32/64-bit return types across numpy versions.
op_record("angle", 1, number_dtypes, all_shapes, jtu.rand_default, [],
check_dtypes=False, inexact=True),
op_record("atleast_1d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("atleast_2d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("atleast_3d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("cbrt", 1, default_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("conjugate", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("deg2rad", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("divide", 2, number_dtypes, all_shapes, jtu.rand_nonzero, ["rev"],
inexact=True),
op_record("divmod", 2, int_dtypes + float_dtypes, all_shapes,
jtu.rand_nonzero, []),
op_record("exp2", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
tolerance={jnp.bfloat16: 2e-2, onp.float16: 1e-2}, inexact=True),
# TODO(b/142975473): on CPU, expm1 for float64 is only accurate to ~float32
# precision.
op_record("expm1", 1, number_dtypes, all_shapes, jtu.rand_positive, [],
test_name="expm1_large", tolerance={onp.float64: 1e-8}, inexact=True),
op_record("expm1", 1, number_dtypes, all_shapes, jtu.rand_small_positive,
[], tolerance={onp.float64: 1e-8}, inexact=True),
op_record("fix", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("floor_divide", 2, number_dtypes, all_shapes,
jtu.rand_nonzero, ["rev"]),
op_record("floor_divide", 2, uint_dtypes, all_shapes,
jtu.rand_nonzero, ["rev"]),
op_record("heaviside", 2, default_dtypes, all_shapes, jtu.rand_default, [],
inexact=True),
op_record("hypot", 2, default_dtypes, all_shapes, jtu.rand_default, [],
inexact=True),
op_record("kron", 2, number_dtypes, nonempty_shapes, jtu.rand_default, []),
op_record("outer", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("imag", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("iscomplex", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("isfinite", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("isinf", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("isnan", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("isneginf", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("isposinf", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("isreal", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("isrealobj", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("log2", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("log10", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("log1p", 1, number_dtypes, all_shapes, jtu.rand_positive, [],
test_name="log1p_large", tolerance={onp.float64: 1e-12},
inexact=True),
op_record("log1p", 1, number_dtypes, all_shapes, jtu.rand_small_positive, [],
tolerance={onp.float64: 1e-12}, inexact=True),
op_record("logaddexp", 2, float_dtypes, all_shapes,
jtu.rand_some_inf_and_nan, ["rev"],
tolerance={onp.float64: 1e-12}, inexact=True),
op_record("logaddexp2", 2, float_dtypes, all_shapes,
jtu.rand_some_inf_and_nan, ["rev"],
tolerance={onp.float16: 1e-2}, inexact=True),
op_record("polyval", 2, number_dtypes, nonempty_nonscalar_array_shapes,
jtu.rand_default, [], check_dtypes=False,
tolerance={onp.float16: 1e-2, onp.float64: 1e-12}),
op_record("positive", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("power", 2, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
tolerance={onp.complex128: 1e-14}),
op_record("rad2deg", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("ravel", 1, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("real", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("remainder", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],
tolerance={onp.float16: 1e-2}),
op_record("mod", 2, default_dtypes, all_shapes, jtu.rand_nonzero, []),
op_record("sign", 1, number_dtypes + uint_dtypes, all_shapes,
jtu.rand_some_inf_and_nan, []),
op_record('copysign', 2, default_dtypes, all_shapes, jtu.rand_some_inf_and_nan, [],
check_dtypes=False),
op_record("sinc", 1, [t for t in number_dtypes if t != jnp.bfloat16],
all_shapes, jtu.rand_default, ["rev"],
tolerance={onp.complex64: 1e-5}, inexact=True,
check_dtypes=False),
op_record("square", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("sqrt", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("transpose", 1, all_dtypes, all_shapes, jtu.rand_default, ["rev"],
check_dtypes=False),
op_record("true_divide", 2, all_dtypes, all_shapes, jtu.rand_nonzero,
["rev"], inexact=True),
op_record("diff", 1, number_dtypes, nonzerodim_shapes, jtu.rand_default, ["rev"]),
]
JAX_BITWISE_OP_RECORDS = [
op_record("bitwise_and", 2, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_bool, []),
op_record("bitwise_not", 1, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_bool, []),
op_record("bitwise_or", 2, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_bool, []),
op_record("bitwise_xor", 2, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_bool, []),
]
JAX_REDUCER_RECORDS = [
op_record("mean", 1, number_dtypes, nonempty_shapes, jtu.rand_default, [],
inexact=True),
op_record("prod", 1, all_dtypes, all_shapes, jtu.rand_small_positive, []),
op_record("sum", 1, all_dtypes, all_shapes, jtu.rand_default, []),
op_record("nanmean", 1, inexact_dtypes, nonempty_shapes, jtu.rand_some_nan,
[], inexact=True),
op_record("nanprod", 1, inexact_dtypes, all_shapes, jtu.rand_some_nan, []),
op_record("nansum", 1, number_dtypes, all_shapes, jtu.rand_some_nan, []),
]
JAX_REDUCER_NO_DTYPE_RECORDS = [
op_record("all", 1, all_dtypes, all_shapes, jtu.rand_some_zero, []),
op_record("any", 1, all_dtypes, all_shapes, jtu.rand_some_zero, []),
op_record("max", 1, all_dtypes, nonempty_shapes, jtu.rand_default, []),
op_record("min", 1, all_dtypes, nonempty_shapes, jtu.rand_default, []),
op_record("var", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
inexact=True),
op_record("std", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
inexact=True),
]
JAX_ARGMINMAX_RECORDS = [
op_record("argmin", 1, all_dtypes, nonempty_shapes, jtu.rand_some_equal, []),
op_record("argmax", 1, all_dtypes, nonempty_shapes, jtu.rand_some_equal, []),
]
JAX_OPERATOR_OVERLOADS = [
op_record("__add__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__sub__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__mul__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__eq__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__ne__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__lt__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("__le__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("__gt__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("__ge__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("__pos__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__neg__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__pow__", 2, inexact_dtypes, all_shapes, jtu.rand_positive, [],
tolerance={onp.float32: 2e-4, onp.complex64: 2e-4, onp.complex128: 1e-14}),
op_record("__mod__", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],
tolerance={onp.float16: 1e-1}),
op_record("__floordiv__", 2, default_dtypes, all_shapes,
jtu.rand_nonzero, []),
op_record("__truediv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, [],
inexact=True),
op_record("__abs__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
# TODO(mattjj): __invert__ fails on bool dtypes because ~True == -2
op_record("__invert__", 1, int_dtypes, all_shapes, jtu.rand_default, []),
# TODO(mattjj): investigate these failures
# op_record("__or__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
# op_record("__and__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
# op_record("__xor__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
# op_record("__divmod__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, []),
# TODO(mattjj): lshift, rshift
]
JAX_RIGHT_OPERATOR_OVERLOADS = [
op_record("__radd__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__rsub__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__rmul__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__rpow__", 2, inexact_dtypes, all_shapes, jtu.rand_positive, [],
tolerance={onp.float32: 2e-4, onp.complex64: 1e-3}),
op_record("__rmod__", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],
tolerance={onp.float16: 1e-1}),
op_record("__rfloordiv__", 2, default_dtypes, all_shapes,
jtu.rand_nonzero, []),
op_record("__rtruediv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, [],
inexact=True),
# op_record("__ror__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
# op_record("__rand__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
# op_record("__rxor__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
# op_record("__rdivmod__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, []),
]
class _OverrideEverything(object):
pass
for rec in JAX_OPERATOR_OVERLOADS + JAX_RIGHT_OPERATOR_OVERLOADS:
if rec.nargs == 2:
setattr(_OverrideEverything, rec.name, lambda self, other: self)
class _OverrideNothing(object):
pass
for rec in JAX_OPERATOR_OVERLOADS + JAX_RIGHT_OPERATOR_OVERLOADS:
if rec.nargs == 2:
setattr(_OverrideNothing, rec.name, lambda self, other: NotImplemented)
numpy_version = tuple(map(int, onp.version.version.split('.')))
if numpy_version >= (1, 15):
JAX_COMPOUND_OP_RECORDS += [
op_record("isclose", 2, [t for t in all_dtypes if t != jnp.bfloat16],
all_shapes, jtu.rand_small_positive, []),
op_record("gcd", 2, int_dtypes, all_shapes, jtu.rand_default, []),
op_record("lcm", 2, int_dtypes, all_shapes, jtu.rand_default, []),
]
JAX_REDUCER_NO_DTYPE_RECORDS += [
op_record("ptp", 1, number_dtypes, nonempty_shapes, jtu.rand_default, []),
]
CombosWithReplacement = itertools.combinations_with_replacement
def _dtypes_are_compatible_for_bitwise_ops(args):
if len(args) <= 1:
return True
is_signed = lambda dtype: jnp.issubdtype(dtype, onp.signedinteger)
width = lambda dtype: jnp.iinfo(dtype).bits
x, y = args
if width(x) > width(y):
x, y = y, x
  # The following condition is a little ad hoc, but it captures what
  # numpy actually implements.
return (
is_signed(x) == is_signed(y)
or (width(x) == 32 and width(y) == 32)
or (width(x) == 32 and width(y) == 64 and is_signed(y)))
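# Illustration (not part of the original test file) of what the ad hoc
# condition above allows and rejects; it uses only dtypes defined earlier.
assert _dtypes_are_compatible_for_bitwise_ops((onp.uint32, onp.int32))
assert _dtypes_are_compatible_for_bitwise_ops((onp.uint32, onp.int64))
assert not _dtypes_are_compatible_for_bitwise_ops((onp.uint64, onp.int64))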
def _shapes_are_broadcast_compatible(shapes):
accumulator = onp.zeros([])
for shape in shapes:
try:
accumulator = accumulator + onp.zeros(shape)
except ValueError:
return False
return True
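# Illustration (not part of the original test file): trailing dimensions must
# match or be 1 for shapes to broadcast together.
assert _shapes_are_broadcast_compatible([(3, 1), (1, 4)])
assert _shapes_are_broadcast_compatible([(2, 3, 4), (3, 4)])
assert not _shapes_are_broadcast_compatible([(3,), (4,)])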
def _shapes_are_equal_length(shapes):
return all(len(shape) == len(shapes[0]) for shape in shapes[1:])
def _promote_like_jnp(fun, inexact=False):
"""Decorator that promotes the arguments of `fun` to `jnp.result_type(*args)`.
  jnp and onp have different type promotion semantics; this decorator allows
  tests to make an onp reference implementation act more like a jnp
  implementation.
"""
def wrapper(*args, **kw):
flat_args = tree_util.tree_leaves(args)
if inexact and not any(jnp.issubdtype(jnp.result_type(x), jnp.inexact)
for x in flat_args):
dtype = jnp.result_type(jnp.float_, *flat_args)
else:
dtype = jnp.result_type(*flat_args)
args = tree_util.tree_map(lambda a: onp.asarray(a, dtype), args)
return fun(*args, **kw)
return wrapper
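# Hedged illustration (not part of the original suite) of the divergence the
# wrapper smooths over: NumPy promotes int32 + float16 to float64, while jnp's
# lattice keeps float16, and the wrapped reference follows jnp. `_ref_add` is
# an illustrative name.
_ref_add = _promote_like_jnp(onp.add)
assert onp.add(onp.int32(1), onp.float16(1)).dtype == onp.float64
assert _ref_add(onp.int32(1), onp.float16(1)).dtype == onp.float16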
class LaxBackedNumpyTests(jtu.JaxTestCase):
"""Tests for LAX-backed Numpy implementation."""
def _GetArgsMaker(self, rng, shapes, dtypes, onp_arrays=True):
def f():
out = [rng(shape, dtype or jnp.float_)
for shape, dtype in zip(shapes, dtypes)]
if onp_arrays:
return out
return [jnp.asarray(a) if isinstance(a, (onp.ndarray, onp.generic)) else a
for a in out]
return f
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
dtypes),
"rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes,
"onp_op": getattr(onp, rec.name), "jnp_op": getattr(jnp, rec.name),
"check_dtypes": rec.check_dtypes, "tolerance": rec.tolerance,
"inexact": rec.inexact}
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(rec.shapes, rec.nargs))
for dtypes in itertools.product(
*(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
for rec in itertools.chain(JAX_ONE_TO_ONE_OP_RECORDS,
JAX_COMPOUND_OP_RECORDS)))
def testOp(self, onp_op, jnp_op, rng_factory, shapes, dtypes, check_dtypes,
tolerance, inexact):
if onp_op is onp.float_power:
onp_op = jtu.ignore_warning(category=RuntimeWarning,
message="invalid value.*")(onp_op)
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, shapes, dtypes, onp_arrays=False)
tol = max(jtu.tolerance(dtype, tolerance) for dtype in dtypes)
tol = functools.reduce(jtu.join_tolerance,
[tolerance, tol, jtu.default_tolerance()])
self._CheckAgainstNumpy(_promote_like_jnp(onp_op, inexact), jnp_op,
args_maker, check_dtypes=check_dtypes, tol=tol)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=check_dtypes,
atol=tol, rtol=tol)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
dtypes),
"rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes, "name": rec.name,
"tol": rec.tolerance}
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(rec.shapes, rec.nargs))
for dtypes in itertools.product(
*(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
for rec in JAX_OPERATOR_OVERLOADS))
def testOperatorOverload(self, name, rng_factory, shapes, dtypes, tol):
rng = rng_factory()
# onp and jnp arrays have different type promotion rules; force the use of
# jnp arrays.
args_maker = self._GetArgsMaker(rng, shapes, dtypes, onp_arrays=False)
fun = lambda *xs: getattr(operator, name.strip('_'))(*xs)
scalar_arg = (jtu.PYTHON_SCALAR_SHAPE in shapes or
jtu.NUMPY_SCALAR_SHAPE in shapes or
() in shapes)
empty_shape = any(isinstance(s, tuple) and 0 in s for s in shapes)
self._CompileAndCheck(
fun, args_maker, check_dtypes=True, #not scalar_arg and not empty_shape,
atol=tol, rtol=tol)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
dtypes),
"rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes, "name": rec.name,
"op_tolerance": rec.tolerance}
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(rec.shapes, rec.nargs))
for dtypes in itertools.product(
*(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
for rec in JAX_RIGHT_OPERATOR_OVERLOADS))
def testRightOperatorOverload(self, name, rng_factory, shapes, dtypes,
op_tolerance):
if shapes[1] is jtu.PYTHON_SCALAR_SHAPE:
raise SkipTest("scalars not implemented") # TODO(mattjj): clean up
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, shapes, dtypes, onp_arrays=False)
fun = lambda fst, snd: getattr(snd, name)(fst)
tol = max(jtu.tolerance(dtype, op_tolerance) for dtype in dtypes)
scalar_arg = (jtu.PYTHON_SCALAR_SHAPE in shapes or
jtu.NUMPY_SCALAR_SHAPE in shapes or
() in shapes)
empty_shape = any(isinstance(s, tuple) and 0 in s for s in shapes)
self._CompileAndCheck(
fun, args_maker, check_dtypes=True, # not scalar_arg and not empty_shape,
atol=tol, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": rec.test_name + "_{}".format(dtype),
"rng_factory": rec.rng_factory,
"op_name": rec.name, "dtype": dtype}
for rec in JAX_OPERATOR_OVERLOADS if rec.nargs == 2
for dtype in rec.dtypes))
def testBinaryOperatorDefers(self, op_name, rng_factory, dtype):
rng = rng_factory()
arg = jax.device_put(rng((), dtype))
op = getattr(operator, op_name)
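    # _OverrideEverything/_OverrideNothing (defined earlier in this file) have
    # reflected dunders that always, or never, handle the op; jnp arrays must
    # defer to the former and win against the latter (where __eq__/__ne__
    # fall back to identity semantics and the other ops raise TypeError).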
other = _OverrideEverything()
assert op(other, arg) is other
assert op(arg, other) is other
other = _OverrideNothing()
if op_name == "__eq__":
assert op(other, arg) is False
assert op(arg, other) is False
elif op_name == "__ne__":
assert op(other, arg) is True
assert op(arg, other) is True
else:
with self.assertRaises(TypeError):
op(other, arg)
with self.assertRaises(TypeError):
op(arg, other)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(
rec.test_name, shapes, dtypes),
"rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes,
"onp_op": getattr(onp, rec.name), "jnp_op": getattr(jnp, rec.name)}
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(rec.shapes, rec.nargs))
for dtypes in filter(
_dtypes_are_compatible_for_bitwise_ops,
CombosWithReplacement(rec.dtypes, rec.nargs)))
for rec in JAX_BITWISE_OP_RECORDS))
def testBitwiseOp(self, onp_op, jnp_op, rng_factory, shapes, dtypes):
rng = rng_factory()
if not FLAGS.jax_enable_x64 and any(
jnp.iinfo(dtype).bits == 64 for dtype in dtypes):
self.skipTest("x64 types are disabled by jax_enable_x64")
args_maker = self._GetArgsMaker(rng, shapes, dtypes)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker,
check_dtypes=jtu.PYTHON_SCALAR_SHAPE not in shapes)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": "{}_inshape={}_axis={}_dtype={}_keepdims={}".format(
rec.test_name.capitalize(),
jtu.format_shape_dtype_string(shape, dtype), axis,
"None" if out_dtype is None else onp.dtype(out_dtype).name, keepdims),
"rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
"onp_op": getattr(onp, rec.name), "jnp_op": getattr(jnp, rec.name),
"axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
for shape in rec.shapes for dtype in rec.dtypes
for out_dtype in [None] + rec.dtypes
for axis in list(range(-len(shape), len(shape))) + [None]
for keepdims in [False, True])
for rec in JAX_REDUCER_RECORDS))
def testReducer(self, onp_op, jnp_op, rng_factory, shape, dtype, out_dtype,
axis, keepdims, inexact):
rng = rng_factory()
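    # Plain numpy has no native bfloat16, so the reference implementation
    # casts bfloat16 inputs up to float32 (a pattern repeated in the tests
    # below).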
@jtu.ignore_warning(category=onp.ComplexWarning)
@jtu.ignore_warning(category=RuntimeWarning,
message="mean of empty slice.*")
def onp_fun(x):
x_cast = x if dtype != jnp.bfloat16 else x.astype(onp.float32)
t = out_dtype if out_dtype != jnp.bfloat16 else onp.float32
return onp_op(x_cast, axis, dtype=t, keepdims=keepdims)
onp_fun = _promote_like_jnp(onp_fun, inexact)
jnp_fun = lambda x: jnp_op(x, axis, dtype=out_dtype, keepdims=keepdims)
jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
args_maker = lambda: [rng(shape, dtype)]
tol_spec = {onp.float16: 1e-2, onp.float32: 1e-3, onp.complex64: 1e-3,
onp.float64: 1e-5, onp.complex128: 1e-5}
tol = jtu.tolerance(dtype, tol_spec)
tol = max(tol, jtu.tolerance(out_dtype, tol_spec)) if out_dtype else tol
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker,
check_dtypes=jnp.bfloat16 not in (dtype, out_dtype),
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True, atol=tol,
rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "{}_inshape={}_axis={}_keepdims={}".format(
rec.test_name.capitalize(),
jtu.format_shape_dtype_string(shape, dtype), axis, keepdims),
"rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
"onp_op": getattr(onp, rec.name), "jnp_op": getattr(jnp, rec.name),
"axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
for rec in JAX_REDUCER_NO_DTYPE_RECORDS
for shape in rec.shapes for dtype in rec.dtypes
for axis in list(range(-len(shape), len(shape))) + [None]
for keepdims in [False, True]))
def testReducerNoDtype(self, onp_op, jnp_op, rng_factory, shape, dtype, axis,
keepdims, inexact):
rng = rng_factory()
onp_fun = lambda x: onp_op(x, axis, keepdims=keepdims)
onp_fun = _promote_like_jnp(onp_fun, inexact)
onp_fun = jtu.ignore_warning(category=onp.ComplexWarning)(onp_fun)
jnp_fun = lambda x: jnp_op(x, axis, keepdims=keepdims)
jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"shape": shape, "dtype": dtype, "axis": axis}
for shape in all_shapes for dtype in all_dtypes
for axis in list(range(-len(shape), len(shape))) + [None]))
def testCountNonzero(self, shape, dtype, axis):
rng = jtu.rand_some_zero()
onp_fun = lambda x: onp.count_nonzero(x, axis)
jnp_fun = lambda x: jnp.count_nonzero(x, axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes for dtype in all_dtypes))
def testNonzero(self, shape, dtype):
rng = jtu.rand_some_zero()
onp_fun = lambda x: onp.nonzero(x)
onp_fun = jtu.ignore_warning(
category=DeprecationWarning,
message="Calling nonzero on 0d arrays.*")(onp_fun)
jnp_fun = lambda x: jnp.nonzero(x)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=False)
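  # Note: no _CompileAndCheck for nonzero; its output shape is data-dependent,
  # so it is not expected to work under jit at this point.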
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "{}_inshape={}_axis={}".format(
rec.test_name.capitalize(),
jtu.format_shape_dtype_string(shape, dtype), axis),
"rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
"onp_op": getattr(onp, rec.name), "jnp_op": getattr(jnp, rec.name),
"axis": axis}
for rec in JAX_ARGMINMAX_RECORDS
for shape, dtype in _shape_and_dtypes(rec.shapes, rec.dtypes)
for axis in range(-len(shape), len(shape))))
def testArgMinMax(self, onp_op, jnp_op, rng_factory, shape, dtype, axis):
rng = rng_factory()
if dtype == onp.complex128 and jtu.device_under_test() == "gpu":
raise unittest.SkipTest("complex128 reductions not supported on GPU")
    def onp_fun(array_to_reduce):
      # numpy returns a platform-dependent integer dtype; cast to jnp's
      # default int so the dtype check against jnp_op is meaningful.
      return onp_op(array_to_reduce, axis).astype(jnp.int_)
def jnp_fun(array_to_reduce):
return jnp_op(array_to_reduce, axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),
axes),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"axes": axes, "rng_factory": rng_factory}
for rng_factory in [jtu.rand_default]
for lhs_shape, rhs_shape, axes in [
[(2,), (2,), (-1, -1, -1, None)], # scalar output
[(2, 4), (2, 4), (-1, -1, -1, 0)], # 2D vectors
[(3, 4), (3, 4), (-1, -1, -1, 0)], # 3D vectors
[(3, 4), (3, 6, 5, 4), (-1, -1, -1, 0)], # broadcasting
[(4, 3), (3, 6, 5, 4), (1, 0, -1, None)], # different axes
[(6, 1, 3), (5, 3), (-1, -1, -1, None)], # more broadcasting
[(6, 1, 2), (5, 3), (-1, -1, -1, None)], # mixed 2D and 3D vectors
[(10, 5, 2, 8), (1, 5, 1, 3), (-2, -1, -3, None)], # axes/broadcasting
[(4, 5, 2), (4, 5, 2), (-1, -1, 0, None)], # axisc should do nothing
[(4, 5, 2), (4, 5, 2), (-1, -1, -1, None)] # same as before
]
for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))
def testCross(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
axisa, axisb, axisc, axis = axes
jnp_fun = lambda a, b: jnp.cross(a, b, axisa, axisb, axisc, axis)
def onp_fun(a, b):
a = a.astype(onp.float32) if lhs_dtype == jnp.bfloat16 else a
b = b.astype(onp.float32) if rhs_dtype == jnp.bfloat16 else b
out = onp.cross(a, b, axisa, axisb, axisc, axis)
return out.astype(jnp.promote_types(lhs_dtype, rhs_dtype))
tol_spec = {dtypes.bfloat16: 3e-1, onp.float16: 0.15}
tol = max(jtu.tolerance(lhs_dtype, tol_spec),
jtu.tolerance(rhs_dtype, tol_spec))
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True, atol=tol,
rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
name,
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"rng_factory": rng_factory}
for rng_factory in [jtu.rand_default]
for name, lhs_shape, rhs_shape in [
("matrix-scalar", (3, 3), ()),
("scalar-matrix", (), (3, 3)),
("matrix-vector", (4, 5), (5,)),
("vector-matrix", (6,), (6, 4)),
("matrix-matrix", (3, 4), (4, 5)),
("tensor-vector", (4, 3, 2), (2,)),
("vector-tensor", (2,), (3, 2, 4)),
("tensor-matrix", (4, 3, 2), (2, 5)),
("matrix-tensor", (5, 2), (3, 2, 4)),
("tensor-tensor", (2, 3, 4), (5, 4, 1))]
for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))
def testDot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
tol = {onp.float16: 1e-2, onp.float32: 1e-5, onp.float64: 1e-14,
onp.complex128: 1e-14}
if jtu.device_under_test() == "tpu":
tol[onp.float32] = tol[onp.complex64] = 2e-1
def onp_dot(x, y):
x = x.astype(onp.float32) if lhs_dtype == jnp.bfloat16 else x
y = y.astype(onp.float32) if rhs_dtype == jnp.bfloat16 else y
return onp.dot(x, y).astype(jnp.promote_types(lhs_dtype, rhs_dtype))
self._CheckAgainstNumpy(onp_dot, jnp.dot, args_maker, check_dtypes=True,
tol=tol)
self._CompileAndCheck(jnp.dot, args_maker, check_dtypes=True, atol=tol,
rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
name,
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"rng_factory": rng_factory}
for rng_factory in [jtu.rand_default]
for name, lhs_shape, rhs_shape in [
("vector-vector", (3,), (3,)),
("matrix-vector", (3, 3), (3,)),
("vector-matrix", (3,), (3, 3)),
("matrix-matrix", (3, 3), (3, 3)),
("vector-tensor", (3,), (5, 3, 2)),
("tensor-vector", (5, 3, 2), (2,)),
("matrix-tensor", (5, 2), (3, 2, 4)),
("tensor-matrix", (5, 2, 3), (3, 2)),
("tensor-tensor", (5, 3, 4), (5, 4, 1)),
("tensor-tensor-broadcast", (3, 1, 3, 4), (5, 4, 1))]
for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))
def testMatmul(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng_factory):
rng = rng_factory()
def onp_fun(x, y):
dtype = jnp.promote_types(lhs_dtype, rhs_dtype)
return onp.matmul(x, y).astype(dtype)
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
tol = {onp.float16: 1e-2, onp.float32: 2e-2, onp.float64: 1e-12,
onp.complex128: 1e-12}
if jtu.device_under_test() == "tpu":
tol[onp.float32] = tol[onp.complex64] = 4e-2
self._CheckAgainstNumpy(onp_fun, jnp.matmul, args_maker,
check_dtypes=True, tol=tol)
self._CompileAndCheck(jnp.matmul, args_maker, check_dtypes=True, atol=tol,
rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),
axes),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"axes": axes, "rng_factory": rng_factory}
for rng_factory in [jtu.rand_default]
for lhs_shape, rhs_shape, axes in [
[(3,), (), 0],
[(2, 3, 4), (5, 6, 7), 0], # from issue #740
[(2, 3, 4), (3, 4, 5, 6), 2],
[(2, 3, 4), (5, 4, 3, 6), [1, 2]],
[(2, 3, 4), (5, 4, 3, 6), [[1, 2], [2, 1]]],
[(1, 2, 3, 4), (4, 5, 3, 6), [[2, 3], [2, 0]]],
]
for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))
def testTensordot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
jnp_fun = lambda a, b: jnp.tensordot(a, b, axes)
def onp_fun(a, b):
a = a if lhs_dtype != jnp.bfloat16 else a.astype(onp.float32)
b = b if rhs_dtype != jnp.bfloat16 else b.astype(onp.float32)
dtype = jnp.promote_types(lhs_dtype, rhs_dtype)
return onp.tensordot(a, b, axes).astype(dtype)
tol = {onp.float16: 1e-1, onp.float32: 1e-3, onp.float64: 1e-12,
onp.complex64: 1e-3, onp.complex128: 1e-12}
if jtu.device_under_test() == "tpu":
tol[onp.float32] = tol[onp.complex64] = 2e-1
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
def testTensordotErrors(self):
a = onp.random.random((3, 2, 2))
b = onp.random.random((2,))
self.assertRaisesRegex(
TypeError, "Number of tensordot axes.*exceeds input ranks.*",
lambda: jnp.tensordot(a, b, axes=2))
self.assertRaisesRegex(
TypeError, "tensordot requires axes lists to have equal length.*",
lambda: jnp.tensordot(a, b, axes=([0], [0, 1])))
self.assertRaisesRegex(
TypeError, "tensordot requires both axes lists to be either ints, tuples or lists.*",
lambda: jnp.tensordot(a, b, axes=('bad', 'axes')))
self.assertRaisesRegex(
TypeError, "tensordot axes argument must be an int, a pair of ints, or a pair of lists.*",
lambda: jnp.tensordot(a, b, axes='badaxes'))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"rng_factory": jtu.rand_default}
# TODO(phawkins): support integer dtypes too.
for lhs_shape, lhs_dtype in _shape_and_dtypes(all_shapes, inexact_dtypes)
for rhs_shape, rhs_dtype in _shape_and_dtypes(all_shapes, inexact_dtypes)
if len(jtu._dims_of_shape(lhs_shape)) == 0
or len(jtu._dims_of_shape(rhs_shape)) == 0
or lhs_shape[-1] == rhs_shape[-1]))
def testInner(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
def onp_fun(lhs, rhs):
lhs = lhs if lhs_dtype != jnp.bfloat16 else lhs.astype(onp.float32)
rhs = rhs if rhs_dtype != jnp.bfloat16 else rhs.astype(onp.float32)
dtype = jnp.promote_types(lhs_dtype, rhs_dtype)
return onp.inner(lhs, rhs).astype(dtype)
jnp_fun = lambda lhs, rhs: jnp.inner(lhs, rhs)
tol_spec = {onp.float16: 1e-2, onp.float32: 1e-5, onp.float64: 1e-13}
if jtu.device_under_test() == "tpu":
tol_spec[onp.float32] = tol_spec[onp.complex64] = 2e-1
tol = max(jtu.tolerance(lhs_dtype, tol_spec),
jtu.tolerance(rhs_dtype, tol_spec))
# TODO(phawkins): there are float32/float64 disagreements for some inputs.
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=False,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=False, atol=tol,
rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_amin={}_amax={}".format(
jtu.format_shape_dtype_string(shape, dtype), a_min, a_max),
"shape": shape, "dtype": dtype, "a_min": a_min, "a_max": a_max,
"rng_factory": jtu.rand_default}
for shape in all_shapes for dtype in number_dtypes
for a_min, a_max in [(-1, None), (None, 1), (-1, 1),
(-onp.ones(1), None),
(None, onp.ones(1)),
(-onp.ones(1), onp.ones(1))]))
def testClipStaticBounds(self, shape, dtype, a_min, a_max, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.clip(x, a_min=a_min, a_max=a_max)
jnp_fun = lambda x: jnp.clip(x, a_min=a_min, a_max=a_max)
args_maker = lambda: [rng(shape, dtype)]
# TODO(phawkins): the promotion behavior changed in Numpy 1.17.
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
def testClipError(self):
with self.assertRaisesRegex(ValueError, "At most one of a_min and a_max.*"):
jnp.clip(jnp.zeros((3,)))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_decimals={}".format(
jtu.format_shape_dtype_string(shape, dtype), decimals),
"shape": shape, "dtype": dtype, "decimals": decimals,
"rng_factory": jtu.rand_default}
for shape, dtype in _shape_and_dtypes(all_shapes, number_dtypes)
for decimals in [0, 1, -2]))
def testRoundStaticDecimals(self, shape, dtype, decimals, rng_factory):
rng = rng_factory()
if jnp.issubdtype(dtype, onp.integer) and decimals < 0:
self.skipTest("Integer rounding with decimals < 0 not implemented")
onp_fun = lambda x: onp.round(x, decimals=decimals)
jnp_fun = lambda x: jnp.round(x, decimals=decimals)
args_maker = lambda: [rng(shape, dtype)]
tol = {jnp.bfloat16: 5e-2, onp.float16: 1e-2}
check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker,
check_dtypes=check_dtypes, tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=check_dtypes,
atol=tol, rtol=tol)
def testOperatorRound(self):
self.assertAllClose(round(onp.float32(7.532), 1),
round(jnp.float32(7.5), 1), check_dtypes=True)
self.assertAllClose(round(onp.float32(1.234), 2),
round(jnp.float32(1.234), 2), check_dtypes=True)
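    # With no ndigits, round() yields a plain Python int for numpy scalars
    # but an array value for jnp, hence check_dtypes=False below.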
self.assertAllClose(round(onp.float32(1.234)),
round(jnp.float32(1.234)), check_dtypes=False)
self.assertAllClose(round(onp.float32(7.532), 1),
round(jnp.array(7.5, jnp.float32), 1), check_dtypes=True)
self.assertAllClose(round(onp.float32(1.234), 2),
round(jnp.array(1.234, jnp.float32), 2), check_dtypes=True)
self.assertAllClose(round(onp.float32(1.234)),
round(jnp.array(1.234, jnp.float32)),
check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_mode={}_rpadwidth={}_rconstantvalues={}".format(
jtu.format_shape_dtype_string(shape, dtype), mode, pad_width_rank,
constant_values_rank),
"shape": shape, "dtype": dtype, "mode": mode,
"pad_width_rank": pad_width_rank,
"constant_values_rank": constant_values_rank,
"rng_factory": jtu.rand_default,
"irng_factory": partial(jtu.rand_int, 3)}
for mode, constant_values_rank, shapes in [
('constant', 0, all_shapes),
('constant', 1, all_shapes),
('constant', 2, all_shapes),
('symmetric', None, nonempty_shapes),
('reflect', None, nonempty_shapes),
('wrap', None, nonempty_shapes),
('edge', None, nonempty_shapes),
]
for shape, dtype in _shape_and_dtypes(shapes, all_dtypes)
for pad_width_rank in range(3)))
def testPad(self, shape, dtype, mode, pad_width_rank, constant_values_rank,
rng_factory, irng_factory):
rng = rng_factory()
irng = irng_factory()
    # pad_width rank 0 -> scalar, rank 1 -> shape (2,), rank 2 ->
    # shape (len(shape), 2); onp.pad broadcasts lower-rank pad widths
    # across all axes.
    pad_width = irng([len(shape), 2][2 - pad_width_rank:], onp.int32)
def onp_fun(x, kwargs):
if pad_width.size == 0:
return x
return onp.pad(x, pad_width, mode=mode, **kwargs)
def jnp_fun(x, kwargs):
return jnp.pad(x, pad_width, mode=mode, **kwargs)
def args_maker():
kwargs = {}
if constant_values_rank:
kwargs["constant_values"] = rng(
[len(shape), 2][2 - constant_values_rank:], dtype)
return rng(shape, dtype), kwargs
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker,
check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape=[{}]_reps={}".format(
jtu.format_shape_dtype_string(shape, dtype), reps),
"shape": shape, "dtype": dtype, "reps": reps,
"rng_factory": jtu.rand_default}
for reps in [(), (2,), (3, 4), (2, 3, 4)]
for shape, dtype in _shape_and_dtypes(all_shapes, default_dtypes)
))
def testTile(self, shape, dtype, reps, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: onp.tile(arg, reps)
jnp_fun = lambda arg: jnp.tile(arg, reps)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker,
check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_axis={}_baseshape=[{}]_dtypes=[{}]".format(
axis, ",".join(str(d) for d in base_shape),
",".join(onp.dtype(dtype).name for dtype in arg_dtypes)),
"axis": axis, "base_shape": base_shape, "arg_dtypes": arg_dtypes,
"rng_factory": jtu.rand_default}
for num_arrs in [3]
for arg_dtypes in CombosWithReplacement(default_dtypes, num_arrs)
for base_shape in [(4,), (3, 4), (2, 3, 4)]
for axis in range(-len(base_shape)+1, len(base_shape))))
def testConcatenate(self, axis, base_shape, arg_dtypes, rng_factory):
rng = rng_factory()
wrapped_axis = axis % len(base_shape)
shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:]
for size, _ in zip(itertools.cycle([3, 1, 4]), arg_dtypes)]
def onp_fun(*args):
args = [x if x.dtype != jnp.bfloat16 else x.astype(onp.float32)
for x in args]
dtype = functools.reduce(jnp.promote_types, arg_dtypes)
return onp.concatenate(args, axis=axis).astype(dtype)
jnp_fun = lambda *args: jnp.concatenate(args, axis=axis)
def args_maker():
return [rng(shape, dtype) for shape, dtype in zip(shapes, arg_dtypes)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_axis={}_baseshape=[{}]_dtypes=[{}]".format(
axis, ",".join(str(d) for d in base_shape),
",".join(onp.dtype(dtype).name for dtype in arg_dtypes)),
"axis": axis, "base_shape": base_shape, "arg_dtypes": arg_dtypes,
"rng_factory": jtu.rand_default}
for arg_dtypes in CombosWithReplacement(default_dtypes, 2)
for base_shape in [(4,), (3, 4), (2, 3, 4)]
for axis in range(-len(base_shape)+1, len(base_shape))))
def testAppend(self, axis, base_shape, arg_dtypes, rng_factory):
rng = rng_factory()
wrapped_axis = axis % len(base_shape)
shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:]
for size, _ in zip(itertools.cycle([3, 1, 4]), arg_dtypes)]
def onp_fun(arr, values):
arr = arr.astype(onp.float32) if arr.dtype == jnp.bfloat16 else arr
values = (values.astype(onp.float32) if values.dtype == jnp.bfloat16
else values)
out = onp.append(arr, values, axis=axis)
return out.astype(jnp.promote_types(*arg_dtypes))
jnp_fun = lambda arr, values: jnp.append(arr, values, axis=axis)
def args_maker():
return [rng(shape, dtype) for shape, dtype in zip(shapes, arg_dtypes)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape=[{}]_axis={}_repeats={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis, repeats),
"axis": axis, "shape": shape, "dtype": dtype, "repeats": repeats,
"rng_factory": jtu.rand_default}
for repeats in [0, 1, 2]
for shape, dtype in _shape_and_dtypes(all_shapes, default_dtypes)
for axis in [None] + list(range(-len(shape), len(shape)))))
def testRepeat(self, axis, shape, dtype, repeats, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: onp.repeat(arg, repeats=repeats, axis=axis)
onp_fun = _promote_like_jnp(onp_fun)
jnp_fun = lambda arg: jnp.repeat(arg, repeats=repeats, axis=axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
def testIssue1233(self):
'''
    Follows the numpy test suite `test_repeat` at https://github.com/numpy/numpy/blob/master/numpy/core/tests/test_multiarray.py
'''
tol = 1e-5
def test_single(m, args_maker, repeats, axis):
lax_ans = jnp.repeat(m, repeats, axis)
numpy_ans = onp.repeat(m, repeats, axis)
self.assertAllClose(lax_ans, numpy_ans, check_dtypes=True, rtol=tol, atol=tol)
    jnp_fun = lambda arg: jnp.repeat(arg, repeats=repeats, axis=axis)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
    m = jnp.array([1, 2, 3, 4, 5, 6])
    args_maker = lambda: [m]
    for repeats in [2, [1, 3, 2, 1, 1, 2], [1, 3, 0, 1, 1, 2], [2],
                    jnp.array([1, 3, 2, 1, 1, 2]), jnp.array([2])]:
      test_single(m, args_maker, repeats, None)
    m_rect = m.reshape((2, 3))
    args_maker = lambda: [m_rect]
    for repeats in [2, [2, 1], [2], jnp.array([2, 1]), jnp.array([2])]:
      test_single(m_rect, args_maker, repeats, axis=0)
    for repeats in [2, [1, 3, 2], [2], jnp.array([1, 3, 2]), jnp.array([2])]:
      test_single(m_rect, args_maker, repeats, axis=1)
def testIssue2330(self):
'''
    Make sure the return value of jnp.concatenate is a jax.ndarray and is side-effect safe
'''
def attempt_sideeffect(x):
x = [x]
x = jnp.concatenate(x)
x -= 1.
return x
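    # If jnp.concatenate returned its numpy argument itself, `x -= 1.` would
    # mutate the caller's input in place; the checks below verify the inputs
    # survive unchanged.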
onp_input = onp.ones((1))
jnp_input = jnp.ones((1))
expected_onp_input_after_call = onp.ones((1))
expected_jnp_input_after_call = jnp.ones((1))
self.assertIs(type(jnp.concatenate([onp_input])), jnp.DeviceArray)
attempt_sideeffect(onp_input)
attempt_sideeffect(jnp_input)
self.assertAllClose(onp_input, expected_onp_input_after_call, check_dtypes=True)
self.assertAllClose(jnp_input, expected_jnp_input_after_call, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "op={}_xshape=[{}]_yshape=[{}]_mode={}".format(
op,
jtu.format_shape_dtype_string(xshape, dtype),
jtu.format_shape_dtype_string(yshape, dtype),
mode),
"xshape": xshape, "yshape": yshape, "dtype": dtype, "mode": mode,
"rng_factory": jtu.rand_default,
"jnp_op": getattr(jnp, op),
"onp_op": getattr(onp, op)}
for mode in ['full', 'same', 'valid']
for op in ['convolve', 'correlate']
for dtype in default_dtypes
for xshape in one_dim_array_shapes
for yshape in one_dim_array_shapes))
def testConvolutions(self, xshape, yshape, dtype, mode, rng_factory, jnp_op, onp_op):
rng = rng_factory()
args_maker = lambda: [rng(xshape, dtype), rng(yshape, dtype)]
onp_fun = partial(onp_op, mode=mode)
jnp_fun = partial(jnp_op, mode=mode)
tol = 1e-2 if jtu.device_under_test() != "tpu" else 0.5
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=False, tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "op={}_shape=[{}]_axis={}_out_dtype={}".format(
op, jtu.format_shape_dtype_string(shape, dtype), axis,
out_dtype.__name__),
"axis": axis, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
"rng_factory": jtu.rand_default, "jnp_op": getattr(jnp, op),
"onp_op": getattr(onp, op)}
for op in ["cumsum", "cumprod"]
for dtype in all_dtypes
for out_dtype in default_dtypes
for shape in all_shapes
for axis in [None] + list(range(-len(shape), len(shape)))))
def testCumSumProd(self, axis, shape, dtype, out_dtype, onp_op, jnp_op, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: onp_op(arg, axis=axis, dtype=out_dtype)
onp_fun = jtu.ignore_warning(category=onp.ComplexWarning)(onp_fun)
jnp_fun = lambda arg: jnp_op(arg, axis=axis, dtype=out_dtype)
jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
args_maker = lambda: [rng(shape, dtype)]
tol = max(jtu.tolerance(dtype), jtu.tolerance(out_dtype))
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dtype={}_m={}_n={}_k={}".format(
onp.dtype(dtype).name, m, n, k),
"m": m, "n": n, "k": k, "dtype": dtype, "rng_factory": jtu.rand_default}
for dtype in default_dtypes
for n in [0, 4]
for m in [None, 0, 1, 3, 4]
for k in list(range(-4, 4))))
def testTri(self, m, n, k, dtype, rng_factory):
rng = rng_factory()
onp_fun = lambda: onp.tri(n, M=m, k=k, dtype=dtype)
jnp_fun = lambda: jnp.tri(n, M=m, k=k, dtype=dtype)
args_maker = lambda: []
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_shape={}_k={}".format(
op, jtu.format_shape_dtype_string(shape, dtype), k),
"dtype": dtype, "shape": shape, "op": op, "k": k,
"rng_factory": jtu.rand_default}
for dtype in default_dtypes
for shape in [shape for shape in all_shapes if len(shape) >= 2]
for op in ["tril", "triu"]
for k in list(range(-3, 3))))
def testTriLU(self, dtype, shape, op, k, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: getattr(onp, op)(arg, k=k)
jnp_fun = lambda arg: getattr(jnp, op)(arg, k=k)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_ndim={}_n={}".format(ndim, n),
"ndim": ndim, "n": n}
for ndim in [0, 1, 4]
for n in [0, 1, 7]))
def testDiagIndices(self, ndim, n):
onp.testing.assert_equal(onp.diag_indices(n, ndim),
jnp.diag_indices(n, ndim))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_k={}".format(
jtu.format_shape_dtype_string(shape, dtype), k),
"dtype": dtype, "shape": shape, "k": k, "rng_factory": jtu.rand_default}
for dtype in default_dtypes
for shape in [shape for shape in all_shapes if len(shape) in (1, 2)]
for k in list(range(-4, 4))))
def testDiag(self, shape, dtype, k, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: onp.diag(arg, k)
jnp_fun = lambda arg: jnp.diag(arg, k)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_offset={}_axis1={}_axis2={}".format(
jtu.format_shape_dtype_string(shape, dtype), offset, axis1, axis2),
"dtype": dtype, "shape": shape, "offset": offset, "axis1": axis1,
"axis2": axis2, "rng_factory": jtu.rand_default}
for dtype in default_dtypes
for shape in [shape for shape in all_shapes if len(shape) >= 2]
for axis1 in range(-len(shape), len(shape))
for axis2 in [a for a in range(-len(shape), len(shape))
if a % len(shape) != axis1 % len(shape)]
for offset in list(range(-4, 4))))
def testDiagonal(self, shape, dtype, offset, axis1, axis2, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: onp.diagonal(arg, offset, axis1, axis2)
jnp_fun = lambda arg: jnp.diagonal(arg, offset, axis1, axis2)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_n={}".format(onp.dtype(dtype).name, n),
"dtype": dtype, "n": n}
for dtype in default_dtypes
for n in list(range(4))))
def testIdentity(self, n, dtype):
onp_fun = lambda: onp.identity(n, dtype)
jnp_fun = lambda: jnp.identity(n, dtype)
args_maker = lambda: []
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_x1={}_x2={}_x1_rng={}".format(
jtu.format_shape_dtype_string(x1_shape, x1_dtype),
jtu.format_shape_dtype_string(x2_shape, onp.int32),
x1_rng_factory_id),
"x1_shape": x1_shape, "x1_dtype": x1_dtype,
"x2_shape": x2_shape, "x1_rng_factory": x1_rng_factory,
"x2_rng_factory": x2_rng_factory}
for x1_rng_factory_id, x1_rng_factory in
enumerate([jtu.rand_some_inf_and_nan, jtu.rand_some_zero])
for x2_rng_factory in [partial(jtu.rand_int, -1075, 1024)]
for x1_shape, x2_shape in filter(_shapes_are_broadcast_compatible,
CombosWithReplacement(array_shapes, 2))
for x1_dtype in default_dtypes))
@jtu.skip_on_devices("tpu") # TODO(b/153053081)
def testLdexp(self, x1_shape, x1_dtype, x2_shape, x1_rng_factory, x2_rng_factory):
# integer types are converted to float64 in numpy's implementation
if (x1_dtype not in [jnp.bfloat16, onp.float16, onp.float32]
and not FLAGS.jax_enable_x64):
self.skipTest("Only run float64 testcase when float64 is enabled.")
x1_rng = x1_rng_factory()
x2_rng = x2_rng_factory()
onp_fun = lambda x1, x2: onp.ldexp(x1, x2)
onp_fun = jtu.ignore_warning(category=RuntimeWarning,
message="overflow.*")(onp_fun)
jnp_fun = lambda x1, x2: jnp.ldexp(x1, x2)
args_maker = lambda: [x1_rng(x1_shape, x1_dtype),
x2_rng(x2_shape, onp.int32)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_x={}_rng_factory={}".format(
jtu.format_shape_dtype_string(shape, dtype), rng_factory_id),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory}
for rng_factory_id, rng_factory in enumerate([
jtu.rand_some_inf_and_nan,
jtu.rand_some_zero,
partial(jtu.rand_not_small, offset=1e8),
])
for shape in all_shapes
for dtype in default_dtypes))
@jtu.skip_on_devices("tpu")
def testFrexp(self, shape, dtype, rng_factory):
# integer types are converted to float64 in numpy's implementation
if (dtype not in [jnp.bfloat16, onp.float16, onp.float32]
and not FLAGS.jax_enable_x64):
self.skipTest("Only run float64 testcase when float64 is enabled.")
rng = rng_factory()
onp_fun = lambda x: onp.frexp(x)
jnp_fun = lambda x: jnp.frexp(x)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_dtype_{}_offset={}_axis1={}_axis2={}".format(
jtu.format_shape_dtype_string(shape, dtype),
out_dtype, offset, axis1, axis2),
"dtype": dtype, "out_dtype": out_dtype, "shape": shape, "offset": offset,
"axis1": axis1, "axis2": axis2, "rng_factory": jtu.rand_default}
for dtype in default_dtypes
for out_dtype in [None] + number_dtypes
for shape in [shape for shape in all_shapes if len(shape) >= 2]
for axis1 in range(-len(shape), len(shape))
for axis2 in range(-len(shape), len(shape))
if (axis1 % len(shape)) != (axis2 % len(shape))
for offset in list(range(-4, 4))))
def testTrace(self, shape, dtype, out_dtype, offset, axis1, axis2, rng_factory):
rng = rng_factory()
def onp_fun(arg):
if out_dtype == jnp.bfloat16:
return onp.trace(arg, offset, axis1, axis2, onp.float32).astype(jnp.bfloat16)
else:
return onp.trace(arg, offset, axis1, axis2, out_dtype)
jnp_fun = lambda arg: jnp.trace(arg, offset, axis1, axis2, out_dtype)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}".format(
jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), axis),
"shape": shape, "axis": axis, "dtypes": dtypes, "rng_factory": rng_factory}
for dtypes in [
[onp.float32],
[onp.float32, onp.float32],
[onp.float32, onp.int32, onp.float32],
[onp.float32, onp.int64, onp.float32],
[onp.float32, onp.int32, onp.float64],
]
for shape in [(), (2,), (3, 4), (1, 100)]
for axis in range(-len(shape), len(shape) + 1)
for rng_factory in [jtu.rand_default]))
def testStack(self, shape, axis, dtypes, rng_factory):
rng = rng_factory()
args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
onp_fun = _promote_like_jnp(partial(onp.stack, axis=axis))
jnp_fun = partial(jnp.stack, axis=axis)
    self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_{}".format(
op, jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes)),
"shape": shape, "op": op, "dtypes": dtypes, "rng_factory": rng_factory}
for op in ["hstack", "vstack", "dstack"]
for dtypes in [
[onp.float32],
[onp.float32, onp.float32],
[onp.float32, onp.int32, onp.float32],
[onp.float32, onp.int64, onp.float32],
[onp.float32, onp.int32, onp.float64],
]
for shape in [(), (2,), (3, 4), (1, 100), (2, 3, 4)]
for rng_factory in [jtu.rand_default]))
def testHVDStack(self, shape, op, dtypes, rng_factory):
rng = rng_factory()
args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
onp_fun = _promote_like_jnp(getattr(onp, op))
jnp_fun = getattr(jnp, op)
    self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outdtype={}".format(
jtu.format_shape_dtype_string(shape, fill_value_dtype),
onp.dtype(out_dtype).name if out_dtype else "None"),
"shape": shape, "fill_value_dtype": fill_value_dtype,
"out_dtype": out_dtype, "rng_factory": jtu.rand_default}
for shape in array_shapes + [3, onp.array(7, dtype=onp.int32)]
for fill_value_dtype in default_dtypes
for out_dtype in [None] + default_dtypes))
def testFull(self, shape, fill_value_dtype, out_dtype, rng_factory):
rng = rng_factory()
onp_fun = lambda fill_value: onp.full(shape, fill_value, dtype=out_dtype)
jnp_fun = lambda fill_value: jnp.full(shape, fill_value, dtype=out_dtype)
args_maker = lambda: [rng((), fill_value_dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": ("_op={}_shape={}_dtype={}").format(op, shape, dtype),
"onp_op": getattr(onp, op), "jnp_op": getattr(jnp, op),
"shape": shape, "dtype": dtype}
for op in ["zeros", "ones"]
for shape in [2, (), (2,), (3, 0), onp.array((4, 5, 6), dtype=onp.int32),
onp.array(4, dtype=onp.int32)]
for dtype in all_dtypes))
def testZerosOnes(self, onp_op, jnp_op, shape, dtype):
rng = jtu.rand_default()
    args_maker = lambda: []
onp_op = partial(onp_op, shape, dtype)
jnp_op = partial(jnp_op, shape, dtype)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
def testOnesWithInvalidShape(self):
with self.assertRaises(TypeError):
jnp.ones((-1, 1))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_filldtype={}_outdtype={}".format(
jtu.format_shape_dtype_string(shape, in_dtype),
onp.dtype(fill_value_dtype).name,
onp.dtype(out_dtype).name),
"shape": shape, "in_dtype": in_dtype,
"fill_value_dtype": fill_value_dtype, "out_dtype": out_dtype,
"rng_factory": jtu.rand_default}
for shape in array_shapes
for in_dtype in default_dtypes
for fill_value_dtype in default_dtypes
for out_dtype in default_dtypes))
def testFullLike(self, shape, in_dtype, fill_value_dtype, out_dtype, rng_factory):
rng = rng_factory()
onp_fun = lambda x, fill_value: onp.full_like(x, fill_value, dtype=out_dtype)
jnp_fun = lambda x, fill_value: jnp.full_like(x, fill_value, dtype=out_dtype)
args_maker = lambda: [rng(shape, in_dtype), rng((), fill_value_dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_{}sections".format(
jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),
"shape": shape, "num_sections": num_sections, "axis": axis,
"dtype": dtype, "rng_factory": jtu.rand_default}
for shape, axis, num_sections in [
((3,), 0, 3), ((12,), 0, 3), ((12, 4), 0, 4), ((12, 4), 1, 2),
((2, 3, 4), -1, 2), ((2, 3, 4), -2, 3)]
for dtype in default_dtypes))
def testSplitStaticInt(self, shape, num_sections, axis, dtype, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.split(x, num_sections, axis=axis)
jnp_fun = lambda x: jnp.split(x, num_sections, axis=axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_{}sections".format(
jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),
"shape": shape, "num_sections": num_sections, "axis": axis,
"dtype": dtype, "rng_factory": jtu.rand_default}
for shape, axis, num_sections in [
((12, 4), 0, 4), ((12, 4), 1, 2),
((2, 3, 4), 2, 2), ((4, 3, 4), 0, 2)]
for dtype in default_dtypes))
def testHVDSplit(self, shape, num_sections, axis, dtype, rng_factory):
rng = rng_factory()
def fn(module, axis):
if axis == 0:
return module.vsplit
elif axis == 1:
return module.hsplit
else:
assert axis == 2
return module.dsplit
onp_fun = lambda x: fn(onp, axis)(x, num_sections)
jnp_fun = lambda x: fn(jnp, axis)(x, num_sections)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}_order={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
jtu.format_shape_dtype_string(out_shape, dtype),
order),
"arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype,
"order": order, "rng_factory": jtu.rand_default}
for dtype in default_dtypes
for order in ["C", "F"]
for arg_shape, out_shape in [
(jtu.NUMPY_SCALAR_SHAPE, (1, 1, 1)),
((), (1, 1, 1)),
((7, 0), (0, 42, 101)),
((3, 4), 12),
((3, 4), (12,)),
((3, 4), -1),
((2, 1, 4), (-1,)),
((2, 2, 4), (2, 8))
]))
def testReshape(self, arg_shape, out_shape, dtype, order, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.reshape(x, out_shape, order=order)
jnp_fun = lambda x: jnp.reshape(x, out_shape, order=order)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
jtu.format_shape_dtype_string(out_shape, dtype)),
"arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype,
"rng_factory": jtu.rand_default}
for dtype in default_dtypes
for arg_shape, out_shape in [
((7, 0), (0, 42, 101)),
((2, 1, 4), (-1,)),
((2, 2, 4), (2, 8))
]))
def testReshapeMethod(self, arg_shape, out_shape, dtype, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.reshape(x, out_shape)
jnp_fun = lambda x: x.reshape(*out_shape)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_expanddim={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype), dim),
"arg_shape": arg_shape, "dtype": dtype, "dim": dim,
"rng_factory": jtu.rand_default}
for arg_shape in [(), (3,), (3, 4)]
for dtype in default_dtypes
for dim in range(-len(arg_shape)+1, len(arg_shape))))
def testExpandDimsStaticDim(self, arg_shape, dtype, dim, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.expand_dims(x, dim)
jnp_fun = lambda x: jnp.expand_dims(x, dim)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_axes=({},{})".format(
jtu.format_shape_dtype_string(arg_shape, dtype), ax1, ax2),
"arg_shape": arg_shape, "dtype": dtype, "ax1": ax1, "ax2": ax2,
"rng_factory": jtu.rand_default}
for arg_shape, ax1, ax2 in [
((3, 4), 0, 1), ((3, 4), 1, 0), ((3, 4, 5), 1, 2),
((3, 4, 5), -1, -2), ((3, 4, 5), 0, 1)]
for dtype in default_dtypes))
def testSwapAxesStaticAxes(self, arg_shape, dtype, ax1, ax2, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.swapaxes(x, ax1, ax2)
jnp_fun = lambda x: jnp.swapaxes(x, ax1, ax2)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_axis={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype), ax),
"arg_shape": arg_shape, "dtype": dtype, "ax": ax,
"rng_factory": jtu.rand_default}
for arg_shape, ax in [
((3, 1), None),
((3, 1), 1),
((1, 3, 1), (0, 2)),
((1, 4, 1), (0,))]
for dtype in default_dtypes))
def testSqueeze(self, arg_shape, dtype, ax, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.squeeze(x, ax)
jnp_fun = lambda x: jnp.squeeze(x, ax)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_axis={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype), ax),
"arg_shape": arg_shape, "dtype": dtype, "ax": ax,
"rng_factory": jtu.rand_default}
for arg_shape, ax in [
((3,), 0),
((1, 3), 1),
((1, 3, 1), (0, 1))]
for dtype in default_dtypes))
def testSqueezeFailsOnNonsingletonAxis(self, arg_shape, dtype, ax,
rng_factory):
rng = rng_factory()
x = jnp.zeros(arg_shape, dtype=dtype)
fun = lambda: jnp.squeeze(x, ax)
self.assertRaisesRegex(ValueError, "cannot select an axis to squeeze", fun)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_axis={}_weights={}_returned={}".format(
jtu.format_shape_dtype_string(shape, dtype),
axis,
(None if weights_shape is None else jtu.format_shape_dtype_string(weights_shape, dtype)),
returned),
"rng_factory": jtu.rand_default, "shape": shape, "dtype": dtype, "axis": axis,
"weights_shape": weights_shape, "returned": returned}
for shape, dtype in _shape_and_dtypes(nonempty_shapes, number_dtypes)
for axis in list(range(-len(shape), len(shape))) + [None]
# `weights_shape` is either `None`, same as the averaged axis, or same as
# that of the input
for weights_shape in ([None, shape] if axis is None or len(shape) == 1
else [None, (shape[axis],), shape])
for returned in [False, True]))
def testAverage(self, shape, dtype, axis, weights_shape, returned, rng_factory):
rng = rng_factory()
if weights_shape is None:
onp_fun = lambda x: onp.average(x, axis, returned=returned)
jnp_fun = lambda x: jnp.average(x, axis, returned=returned)
args_maker = lambda: [rng(shape, dtype)]
else:
onp_fun = lambda x, weights: onp.average(x, axis, weights, returned)
jnp_fun = lambda x, weights: jnp.average(x, axis, weights, returned)
args_maker = lambda: [rng(shape, dtype), rng(weights_shape, dtype)]
onp_fun = _promote_like_jnp(onp_fun, inexact=True)
tol = {onp.float16: 1e-2, onp.float32: 1e-6, onp.float64: 1e-12,}
check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
try:
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker,
check_dtypes=check_dtypes, tol=tol)
except ZeroDivisionError:
self.skipTest("don't support checking for ZeroDivisionError")
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=check_dtypes,
rtol=tol, atol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_arg{}_ndmin={}".format(i, ndmin),
"arg": arg, "ndmin": ndmin, "dtype": dtype}
for i, (arg, dtype) in enumerate([
([True, False, True], jnp.bool_),
(3., jnp.float_),
([1, 2, 3], jnp.int_),
([1., 2., 3.], jnp.float_),
([[1, 2], [3, 4], [5, 6]], jnp.int_),
([[1, 2.], [3, 4], [5, 6]], jnp.float_),
([[1., 2j], [3., 4.], [5., 6.]], jnp.complex_),
([[3, onp.array(2, dtype=jnp.float_), 1],
onp.arange(3., dtype=jnp.float_)], jnp.float_),
])
for ndmin in [None, onp.ndim(arg), onp.ndim(arg) + 1, onp.ndim(arg) + 2]))
def testArray(self, arg, ndmin, dtype):
args_maker = lambda: [arg]
dtype = dtypes.canonicalize_dtype(dtype)
if ndmin is not None:
onp_fun = partial(onp.array, ndmin=ndmin, dtype=dtype)
jnp_fun = partial(jnp.array, ndmin=ndmin)
else:
onp_fun = partial(onp.array, dtype=dtype)
jnp_fun = jnp.array
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
def testIssue121(self):
assert not onp.isscalar(jnp.array(3))
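  # jnp.array should accept any object implementing the __array__ protocol,
  # as the minimal arraylike class below exercises.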
def testArrayMethod(self):
class arraylike(object):
dtype = onp.float32
def __array__(self, dtype=None):
return 3.
a = arraylike()
ans = jnp.array(a)
assert ans == 3.
@jtu.skip_on_devices("tpu") # TODO(b/32368900): TPUs don't support uint8 yet.
def testMemoryView(self):
ans = jnp.array(bytearray(b'\x2a'))
self.assertAllClose(
ans,
onp.array([0x2a], dtype=onp.uint8),
check_dtypes=True)
def testIsClose(self):
c_isclose = api.jit(jnp.isclose)
c_isclose_nan = api.jit(partial(jnp.isclose, equal_nan=True))
n = 2
rng = onp.random.RandomState(0)
x = rng.randn(n, 1)
y = rng.randn(n, 1)
inf = onp.asarray(n * [onp.inf]).reshape([n, 1])
nan = onp.asarray(n * [onp.nan]).reshape([n, 1])
args = [x, y, inf, -inf, nan]
for arg0 in args:
for arg1 in args:
result_np = onp.isclose(arg0, arg1)
result_jax = jnp.isclose(arg0, arg1)
result_jit = c_isclose(arg0, arg1)
self.assertTrue(jnp.all(jnp.equal(result_np, result_jax)))
self.assertTrue(jnp.all(jnp.equal(result_np, result_jit)))
result_np = onp.isclose(arg0, arg1, equal_nan=True)
result_jax = jnp.isclose(arg0, arg1, equal_nan=True)
result_jit = c_isclose_nan(arg0, arg1)
self.assertTrue(jnp.all(jnp.equal(result_np, result_jax)))
self.assertTrue(jnp.all(jnp.equal(result_np, result_jit)))
def testAllClose(self):
rng = onp.random.RandomState(0)
x = rng.randn(2, 2)
y = rng.randn(2)
def same(list1, list2):
allclose = functools.partial(jnp.allclose, atol=1e-3, rtol=1e-3)
elements_close = list(map(allclose, list1, list2))
return jnp.all(jnp.array(elements_close))
csame = api.jit(same)
a1 = same((x, y), (x, y))
a2 = csame((x, y), (x, y))
a3 = csame((x, y), (x, 2 * y))
self.assertTrue(a1)
self.assertTrue(a2)
self.assertFalse(a3)
@jtu.skip_on_devices("tpu") # TODO(mattjj): investigate this failure
def testOnesBroadcastingConstantHandler(self):
# TODO(mattjj): update this test for jax3
self.skipTest("test needs jax3 update")
def fun(x):
ones = jnp.ones((3, 4))
assert isinstance(ones, onp.ndarray) and ones.strides == (0, 0)
# To check that the constant handler generates a Broadcast for stride-zero
# arrays, we monkey-patch the client instance.
# TODO(mattjj): once we have better HLO dumping and inspecting facilities,
# we can check the HLO more directly.
c = x._node.c
Broadcast = c.Broadcast # pylint: disable=invalid-name
was_called = []
c.Broadcast = lambda *args: was_called.append(True) or Broadcast(*args)
out = x + ones # the ndarray constant handler should call Broadcast here
assert was_called, "Broadcast was not called."
return out
fun = api.jit(fun)
out_val = fun(jnp.ones(4))
self.assertAllClose(out_val, onp.full((3, 4), 2.), check_dtypes=False)
def testZeroStridesConstantHandler(self):
raw_const = onp.random.RandomState(0).randn(1, 2, 1, 1, 5, 1)
const = onp.broadcast_to(raw_const, (3, 2, 3, 4, 5, 6))
def fun(x):
return x * const
fun = api.jit(fun)
out_val = fun(3.)
self.assertAllClose(out_val, 3. * const, check_dtypes=False)
def testIsInstanceNdarrayDuringTracing(self):
arr = onp.ones(3)
@api.jit
def f(x):
self.assertIsInstance(x, jnp.ndarray)
return jnp.sum(x)
f(arr)
def testNonArrayErrorMessage(self):
x = [1., 2.]
y = onp.array([3., 4.])
def g(x, y):
return jnp.add(x, y)
def f(x, y):
return jnp.dot(x, y)
self.assertRaises(TypeError, lambda: g(x, y))
self.assertRaises(TypeError, lambda: f(x, y))
self.assertRaises(TypeError, lambda: api.jit(g)(x, y))
self.assertRaises(TypeError, lambda: api.jit(f)(x, y))
def testAbstractionErrorMessage(self):
@api.jit
def f(x, n):
for _ in range(n):
x = x * x
return x
self.assertRaises(TypeError, lambda: f(3., 3))
@api.jit
def g(x):
if x > 0.:
return x * 2
else:
return x + 2
self.assertRaises(TypeError, lambda: g(3.))
def testTracingPrimitiveWithNoTranslationErrorMessage(self):
# TODO(mattjj): update this for jax3
self.skipTest("test needs jax3 update")
foo = jnp._not_implemented(lambda x: x)
# No error if there's no tracing.
foo(onp.arange(3))
cfoo = api.jit(foo)
self.assertRaises(NotImplementedError, lambda: cfoo(onp.arange(3)))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype, "axis": axis}
for shape in [(3,), (2, 3)]
for dtype in default_dtypes
for axis in list(range(-len(shape), len(shape))) + [None] # Test negative axes
for rng_factory in [jtu.rand_default]))
def testFlip(self, shape, dtype, axis, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
jnp_op = lambda x: jnp.flip(x, axis)
onp_op = lambda x: onp.flip(x, axis)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype}
for shape in [(3,), (2, 3), (3, 2, 4)]
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testFlipud(self, shape, dtype, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
jnp_op = lambda x: jnp.flipud(x)
onp_op = lambda x: onp.flipud(x)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype}
for shape in [(3, 2), (2, 3), (3, 2, 4)]
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testFliplr(self, shape, dtype, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
jnp_op = lambda x: jnp.fliplr(x)
onp_op = lambda x: onp.fliplr(x)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_k={}_axes={}".format(
jtu.format_shape_dtype_string(shape, dtype), k, axes),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype, "k": k, "axes": axes}
for shape, axes in [
[(2, 3), (0, 1)],
[(2, 3), (1, 0)],
[(4, 3, 2), (0, 2)],
[(4, 3, 2), (2, 1)],
]
for k in range(-3, 4)
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testRot90(self, shape, dtype, k, axes, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
jnp_op = lambda x: jnp.rot90(x, k, axes)
onp_op = lambda x: onp.rot90(x, k, axes)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
# TODO(mattjj): test infix operator overrides
def testRavel(self):
rng = onp.random.RandomState(0)
args_maker = lambda: [rng.randn(3, 4).astype("float32")]
self._CompileAndCheck(lambda x: x.ravel(), args_maker, check_dtypes=True)
def testAstype(self):
rng = onp.random.RandomState(0)
args_maker = lambda: [rng.randn(3, 4).astype("float32")]
op = lambda x: x.astype(jnp.int32)
self._CheckAgainstNumpy(op, op, args_maker, check_dtypes=True)
self._CompileAndCheck(op, args_maker, check_dtypes=True)
# TODO(mattjj): test other ndarray-like method overrides
def testOnpMean(self):
# from https://github.com/google/jax/issues/125
x = lax.add(jnp.eye(3, dtype=jnp.float_), 0.)
ans = onp.mean(x)
self.assertAllClose(ans, onp.array(1./3), check_dtypes=False)
def testArangeOnFloats(self):
# from https://github.com/google/jax/issues/145
expected = onp.arange(0.0, 1.0, 0.1, dtype=jnp.float_)
ans = jnp.arange(0.0, 1.0, 0.1)
self.assertAllClose(expected, ans, check_dtypes=True)
def testSortManually(self):
# manual tests for sort are nice because we don't have to worry about ties.
# lax.sort is tested combinatorially.
ans = jnp.sort(onp.array([16, 15, 23, 42, 8, 4]))
expected = onp.array([4, 8, 15, 16, 23, 42])
self.assertAllClose(expected, ans, check_dtypes=True)
a = onp.array([[1, 4], [3, 1]])
ans = jnp.sort(a, axis=None)
expected = onp.array([1, 1, 3, 4])
self.assertAllClose(expected, ans, check_dtypes=True)
a = onp.array([[1, 4], [3, 1]])
ans = jnp.sort(a) # last axis
expected = onp.array([[1, 4], [1, 3]])
self.assertAllClose(expected, ans, check_dtypes=True)
a = onp.array([[1, 4], [3, 1]])
ans = jnp.sort(a, axis=0)
expected = onp.array([[1, 1], [3, 4]])
self.assertAllClose(expected, ans, check_dtypes=True)
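# Sketch (added for illustration): axis=None sorts the flattened array, which
# is why the (2, 2) input above collapses to a length-4 result.
def _sketch_sort_axis_none():
    a = onp.array([[1, 4], [3, 1]])
    assert onp.array_equal(onp.sort(a, axis=None), onp.sort(a.ravel()))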
def testArgsortManually(self):
x = onp.array([16, 15, 23, 42, 8, 4])
ans = jnp.argsort(x)
expected = onp.argsort(x)
self.assertAllClose(expected, ans, check_dtypes=False)
x = onp.array([[16, 15, 23], [42, 8, 4]])
ans = jnp.argsort(x, axis=0)
expected = onp.argsort(x, axis=0)
self.assertAllClose(expected, ans, check_dtypes=False)
x = onp.array([[16, 15, 23], [42, 8, 4]])
ans = jnp.argsort(x, axis=1)
expected = onp.argsort(x, axis=1)
self.assertAllClose(expected, ans, check_dtypes=False)
x = onp.array([[16, 15, 23], [42, 8, 4]])
ans = jnp.argsort(x, axis=None)
expected = onp.argsort(x, axis=None)
self.assertAllClose(expected, ans, check_dtypes=False)
x = onp.array([[16, 15, 23], [42, 8, 4]])
ans = jnp.argsort(x)
expected = onp.argsort(x)
self.assertAllClose(expected, ans, check_dtypes=False)
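# Sketch (added): argsort returns the permutation that sorts its input, so
# gathering with it reproduces sort.
def _sketch_argsort_gather():
    x = onp.array([16, 15, 23, 42, 8, 4])
    assert onp.array_equal(x[onp.argsort(x)], onp.sort(x))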
def testMsortManually(self):
args_maker = lambda: [onp.random.randint(50, size=(5, 5))]
jnp_op = lambda x: jnp.msort(x)
onp_op = lambda x: onp.msort(x)
self._CheckAgainstNumpy(jnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_shifts={}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype),
shifts, axis),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype, "shifts": shifts,
"axis": axis}
for dtype in all_dtypes
for shape in [(3, 4), (3, 4, 5), (7, 4, 0)]
for shifts, axis in [
(3, None),
(1, 1),
((3,), (0,)),
((-2,), (-2,)),
((1, 2), (0, -1)),
((4, 2, 5, 5, 2, 4), None),
(100, None),
]
for rng_factory in [jtu.rand_default]))
def testRoll(self, shape, dtype, shifts, axis, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype), onp.array(shifts)]
jnp_op = partial(jnp.roll, axis=axis)
onp_op = partial(onp.roll, axis=axis)
self._CheckAgainstNumpy(jnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
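# Sketch (added): roll shifts elements cyclically, and shifts are taken
# modulo the axis length, which is why the oversized shifts above are valid.
def _sketch_roll_wraps():
    x = onp.arange(4)
    assert onp.array_equal(onp.roll(x, 1), onp.array([3, 0, 1, 2]))
    assert onp.array_equal(onp.roll(x, 5), onp.roll(x, 1))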
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_start={}".format(
jtu.format_shape_dtype_string(shape, dtype),
axis, start),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype, "axis": axis,
"start": start}
for dtype in all_dtypes
for shape in [(1, 2, 3, 4)]
for axis in [-3, 0, 2, 3]
for start in [-4, -1, 2, 4]
for rng_factory in [jtu.rand_default]))
def testRollaxis(self, shape, dtype, start, axis, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype)]
jnp_op = partial(jnp.rollaxis, axis=axis, start=start)
onp_op = partial(onp.rollaxis, axis=axis, start=start)
self._CheckAgainstNumpy(jnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_bitorder={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis, bitorder),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype, "axis": axis,
"bitorder": bitorder}
for dtype in [onp.uint8, onp.bool_]
for bitorder in ['big', 'little']
for shape in [(1, 2, 3, 4)]
for axis in [None, 0, 1, -2, -1]
for rng_factory in [jtu.rand_some_zero]))
def testPackbits(self, shape, dtype, axis, bitorder, rng_factory):
if numpy_version < (1, 17, 0):
raise SkipTest("bitorder arg added in numpy 1.17.0")
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype)]
jnp_op = partial(jnp.packbits, axis=axis, bitorder=bitorder)
onp_op = partial(onp.packbits, axis=axis, bitorder=bitorder)
self._CheckAgainstNumpy(jnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_bitorder={}_count={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis, bitorder, count),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype, "axis": axis,
"bitorder": bitorder, "count": count}
for dtype in [onp.uint8]
for bitorder in ['big', 'little']
for shape in [(1, 2, 3, 4)]
for axis in [None, 0, 1, -2, -1]
for count in [None, 20]
for rng_factory in [jtu.rand_int]))
def testUnpackbits(self, shape, dtype, axis, bitorder, count, rng_factory):
if numpy_version < (1, 17, 0):
raise SkipTest("bitorder arg added in numpy 1.17.0")
rng = rng_factory(0, 256)
args_maker = lambda: [rng(shape, dtype)]
jnp_op = partial(jnp.unpackbits, axis=axis, bitorder=bitorder)
onp_op = partial(onp.unpackbits, axis=axis, bitorder=bitorder)
self._CheckAgainstNumpy(jnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
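# Sketch (added): packbits maps eight bits to one uint8; `bitorder` decides
# whether the first bit is the most or least significant, and unpackbits
# inverts the default ('big') packing. Needs numpy >= 1.17 for `bitorder`,
# matching the SkipTest guards above.
def _sketch_packbits_roundtrip():
    bits = onp.array([1, 0, 1, 1, 0, 0, 1, 0], dtype=onp.uint8)
    assert onp.packbits(bits, bitorder='big')[0] == 0b10110010     # 178
    assert onp.packbits(bits, bitorder='little')[0] == 0b01001101  # 77
    assert onp.array_equal(onp.unpackbits(onp.packbits(bits)), bits)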
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_index={}_axis={}_mode={}".format(
jtu.format_shape_dtype_string(shape, dtype),
jtu.format_shape_dtype_string(index_shape, index_dtype),
axis, mode),
"rng_factory": rng_factory, "rng_indices_factory": rng_indices_factory,
"shape": shape, "index_shape": index_shape, "dtype": dtype,
"index_dtype": index_dtype, "axis": axis, "mode": mode}
for shape in [(3,), (3, 4), (3, 4, 5)]
for index_shape in scalar_shapes + [(3,), (2, 1, 3)]
for axis in itertools.chain(range(-len(shape), len(shape)),
[cast(Optional[int], None)])
for dtype in all_dtypes
for index_dtype in int_dtypes
for mode in ['wrap', 'clip']
for rng_factory in [jtu.rand_default]
for rng_indices_factory in [partial(jtu.rand_int, -5, 5)]))
def testTake(self, shape, dtype, index_shape, index_dtype, axis, mode,
rng_factory, rng_indices_factory):
def args_maker():
x = rng(shape, dtype)
i = rng_indices(index_shape, index_dtype)
return x, i
rng = rng_factory()
rng_indices = rng_indices_factory()
jnp_op = lambda x, i: jnp.take(x, i, axis=axis, mode=mode)
onp_op = lambda x, i: onp.take(x, i, axis=axis, mode=mode)
self._CheckAgainstNumpy(jnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
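# Sketch (added): the two out-of-bounds modes exercised above. 'wrap' reduces
# indices modulo the axis length; 'clip' clamps them to the valid range.
def _sketch_take_modes():
    x = onp.array([10, 20, 30])
    assert onp.array_equal(onp.take(x, [3, -1], mode='wrap'), [10, 30])
    assert onp.array_equal(onp.take(x, [3, -1], mode='clip'), [30, 10])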
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_ishape={}_axis={}".format(
jtu.format_shape_dtype_string(x_shape, dtype), i_shape, axis),
"rng_factory": rng_factory, "x_shape": x_shape, "i_shape": i_shape, "dtype": dtype,
"axis": axis}
for x_shape, i_shape in filter(
_shapes_are_equal_length,
filter(_shapes_are_broadcast_compatible,
CombosWithReplacement(nonempty_nonscalar_array_shapes, 2)))
for axis in itertools.chain(range(len(x_shape)), [-1],
[cast(Optional[int], None)])
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testTakeAlongAxis(self, x_shape, i_shape, dtype, axis, rng_factory):
rng = rng_factory()
i_shape = onp.array(i_shape)
if axis is None:
i_shape = [onp.prod(i_shape, dtype=onp.int64)]
else:
# Test the case where the size of the axis doesn't necessarily broadcast.
i_shape[axis] *= 3
i_shape = list(i_shape)
def args_maker():
x = rng(x_shape, dtype)
n = onp.prod(x_shape, dtype=onp.int32) if axis is None else x_shape[axis]
i = rng(i_shape, onp.int32) % (2 * n - 1) - (n - 1)
return x, i
jnp_op = lambda x, i: jnp.take_along_axis(x, i, axis=axis)
if hasattr(onp, "take_along_axis"):
onp_op = lambda x, i: onp.take_along_axis(x, i, axis=axis)
self._CheckAgainstNumpy(jnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
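# Sketch (added): take_along_axis pairs each output position with an index
# from an equally-ranked index array; combined with argsort it reproduces
# sort, the typical use of the API tested above.
def _sketch_take_along_axis():
    a = onp.array([[3, 1, 2], [6, 5, 4]])
    idx = onp.argsort(a, axis=1)
    assert onp.array_equal(onp.take_along_axis(a, idx, axis=1),
                           onp.sort(a, axis=1))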
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_n={}_increasing={}".format(
jtu.format_shape_dtype_string([shape], dtype),
n, increasing),
"dtype": dtype, "shape": shape, "n": n, "increasing": increasing,
"rng_factory": jtu.rand_default}
for dtype in inexact_dtypes
for shape in [0, 5]
for n in [2, 4]
for increasing in [False, True]))
def testVander(self, shape, dtype, n, increasing, rng_factory):
rng = rng_factory()
def onp_fun(arg):
arg = arg.astype(onp.float32) if dtype == jnp.bfloat16 else arg
return onp.vander(arg, N=n, increasing=increasing)
jnp_fun = lambda arg: jnp.vander(arg, N=n, increasing=increasing)
args_maker = lambda: [rng([shape], dtype)]
# np.vander seems to return float64 for all floating types. We could obey
# those semantics, but they seem like a bug.
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=False,
tol={onp.float32: 1e-3})
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=False)
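# Sketch (added): a Vandermonde matrix has columns x**(N-1) ... x**0 by
# default; increasing=True reverses the column order.
def _sketch_vander():
    expected = onp.array([[4, 2, 1], [9, 3, 1]])
    assert onp.array_equal(onp.vander(onp.array([2, 3]), N=3), expected)
    assert onp.array_equal(
        onp.vander(onp.array([2, 3]), N=3, increasing=True), expected[:, ::-1])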
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix("nan_to_num", [shape],
[dtype]),
"rng_factory": jtu.rand_some_inf_and_nan, "shape": shape,
"dtype": dtype}
for shape in all_shapes
for dtype in inexact_dtypes))
def testNanToNum(self, rng_factory, shape, dtype):
rng = rng_factory()
dtype = onp.dtype(dtypes.canonicalize_dtype(dtype)).type
def onp_fun(x):
if dtype == jnp.bfloat16:
x = onp.where(onp.isnan(x), dtype(0), x)
x = onp.where(onp.isposinf(x), jnp.finfo(dtype).max, x)
x = onp.where(onp.isneginf(x), jnp.finfo(dtype).min, x)
return x
else:
return onp.nan_to_num(x).astype(dtype)
args_maker = lambda: [rng(shape, dtype)]
check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
self._CheckAgainstNumpy(onp_fun, jnp.nan_to_num, args_maker,
check_dtypes=check_dtypes)
self._CompileAndCheck(jnp.nan_to_num, args_maker,
check_dtypes=check_dtypes)
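# Sketch (added): nan_to_num's default behavior, which the bfloat16 branch
# above reimplements by hand: nan -> 0 and +/-inf -> the dtype's extremes.
def _sketch_nan_to_num():
    x = onp.array([onp.nan, onp.inf, -onp.inf, 1.0])
    y = onp.nan_to_num(x)
    assert y[0] == 0.0 and y[3] == 1.0
    assert y[1] == onp.finfo(x.dtype).max and y[2] == onp.finfo(x.dtype).min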
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix("ix_", shapes, dtypes),
"rng_factory": jtu.rand_default, "shapes": shapes, "dtypes": dtypes}
for shapes, dtypes in (
((), ()),
(((7,),), (onp.int32,)),
(((3,), (4,)), (onp.int32, onp.int32)),
(((3,), (1,), (4,)), (onp.int32, onp.int32, onp.int32)),
)))
def testIx_(self, rng_factory, shapes, dtypes):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype)
for shape, dtype in zip(shapes, dtypes)]
self._CheckAgainstNumpy(onp.ix_, jnp.ix_, args_maker,
check_dtypes=True)
self._CompileAndCheck(jnp.ix_, args_maker, check_dtypes=True)
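# Sketch (added): ix_ builds an open mesh, so indexing with it selects a
# submatrix (the cross product of the index sets) rather than paired elements.
def _sketch_ix():
    a = onp.arange(16).reshape(4, 4)
    sub = a[onp.ix_([0, 2], [1, 3])]
    assert onp.array_equal(sub, onp.array([[1, 3], [9, 11]]))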
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_op={}_a_shape={}_q_shape={}_axis={}_keepdims={}_interpolation={}".format(
op,
jtu.format_shape_dtype_string(a_shape, a_dtype),
jtu.format_shape_dtype_string(q_shape, q_dtype),
axis, keepdims, interpolation),
"a_rng": jtu.rand_default(), "q_rng": q_rng, "op": op,
"a_shape": a_shape, "a_dtype": a_dtype,
"q_shape": q_shape, "q_dtype": q_dtype, "axis": axis,
"keepdims": keepdims,
"interpolation": interpolation}
for (op, q_rng) in (
("percentile", jtu.rand_uniform(low=0., high=100.)),
("quantile", jtu.rand_uniform(low=0., high=1.)),
)
for a_dtype in float_dtypes
for a_shape, axis in (
((7,), None),
((47, 7), 0),
((4, 101), 1),
)
for q_dtype in [onp.float32]
for q_shape in scalar_shapes + [(4,)]
for keepdims in [False, True]
for interpolation in ['linear', 'lower', 'higher', 'nearest', 'midpoint']))
def testQuantile(self, op, a_rng, q_rng, a_shape, a_dtype, q_shape, q_dtype,
axis, keepdims, interpolation):
if op == "quantile" and numpy_version < (1, 15):
raise SkipTest("Numpy < 1.15 does not have np.quantile")
args_maker = lambda: [a_rng(a_shape, a_dtype), q_rng(q_shape, q_dtype)]
def onp_fun(*args):
args = [x if jnp.result_type(x) != jnp.bfloat16 else
onp.asarray(x, onp.float32) for x in args]
return getattr(onp, op)(*args, axis=axis, keepdims=keepdims,
interpolation=interpolation)
jnp_fun = partial(getattr(jnp, op), axis=axis, keepdims=keepdims,
interpolation=interpolation)
# TODO(phawkins): we currently set dtype=False because we aren't as
# aggressive about promoting to float64. It's not clear we want to mimic
# Numpy here.
tol_spec = {onp.float32: 2e-4, onp.float64: 5e-6}
tol = max(jtu.tolerance(a_dtype, tol_spec),
jtu.tolerance(q_dtype, tol_spec))
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=False,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True, rtol=tol)
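# Sketch (added): what the `interpolation` parameter chooses when a quantile
# falls between two data points (kwarg name as of the numpy versions this
# file targets).
def _sketch_percentile_interpolation():
    x = onp.array([1.0, 2.0, 3.0, 4.0])
    assert onp.percentile(x, 50) == 2.5                           # 'linear'
    assert onp.percentile(x, 50, interpolation='lower') == 2.0
    assert onp.percentile(x, 50, interpolation='higher') == 3.0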
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_a_shape={}_axis={}_keepdims={}".format(
jtu.format_shape_dtype_string(a_shape, a_dtype),
axis, keepdims),
"a_rng": jtu.rand_default(),
"a_shape": a_shape, "a_dtype": a_dtype,
"axis": axis,
"keepdims": keepdims}
for a_dtype in float_dtypes
for a_shape, axis in (
((7,), None),
((47, 7), 0),
((4, 101), 1),
)
for keepdims in [False, True]))
def testMedian(self, a_rng, a_shape, a_dtype, axis, keepdims):
args_maker = lambda: [a_rng(a_shape, a_dtype)]
def onp_fun(*args):
args = [x if jnp.result_type(x) != jnp.bfloat16 else
onp.asarray(x, onp.float32) for x in args]
return onp.median(*args, axis=axis, keepdims=keepdims)
jnp_fun = partial(jnp.median, axis=axis, keepdims=keepdims)
# TODO(phawkins): we currently set dtype=False because we aren't as
# aggressive about promoting to float64. It's not clear we want to mimic
# Numpy here.
tol_spec = {onp.float32: 2e-4, onp.float64: 5e-6}
tol = jtu.tolerance(a_dtype, tol_spec)
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=False,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes for dtype in all_dtypes))
def testWhereOneArgument(self, shape, dtype):
rng = jtu.rand_some_zero()
onp_fun = lambda x: onp.where(x)
onp_fun = jtu.ignore_warning(
category=DeprecationWarning,
message="Calling nonzero on 0d arrays.*")(onp_fun)
jnp_fun = lambda x: jnp.where(x)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format("_".join(
jtu.format_shape_dtype_string(shape, dtype)
for shape, dtype in zip(shapes, dtypes))),
"rng_factory": jtu.rand_default, "shapes": shapes, "dtypes": dtypes}
for shapes in filter(_shapes_are_broadcast_compatible,
CombosWithReplacement(all_shapes, 3))
for dtypes in CombosWithReplacement(all_dtypes, 3)))
def testWhereThreeArgument(self, rng_factory, shapes, dtypes):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, shapes, dtypes)
def onp_fun(cond, x, y):
return _promote_like_jnp(partial(onp.where, cond))(x, y)
self._CheckAgainstNumpy(onp_fun, jnp.where, args_maker,
check_dtypes=True)
self._CompileAndCheck(jnp.where, args_maker, check_dtypes=True)
def testWhereScalarPromotion(self):
x = jnp.where(jnp.array([True, False]), 3,
jnp.ones((2,), dtype=jnp.float32))
self.assertEqual(x.dtype, onp.dtype(onp.float32))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix("", shapes,
(onp.bool_,) * n + dtypes),
"rng_factory": jtu.rand_default, "shapes": shapes, "dtypes": dtypes}
for n in range(0, 3)
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(all_shapes, 2 * n + 1))
for dtypes in CombosWithReplacement(all_dtypes, n + 1)))
def testSelect(self, rng_factory, shapes, dtypes):
rng = rng_factory()
n = len(dtypes) - 1
def args_maker():
condlist = [rng(shape, onp.bool_) for shape in shapes[:n]]
choicelist = [rng(shape, dtype)
for shape, dtype in zip(shapes[n:-1], dtypes[:n])]
default = rng(shapes[-1], dtypes[-1])
return condlist, choicelist, default
# TODO(phawkins): float32/float64 type mismatches
def onp_fun(condlist, choicelist, default):
choicelist = [x if jnp.result_type(x) != jnp.bfloat16
else x.astype(onp.float32) for x in choicelist]
dtype = jnp.result_type(default, *choicelist)
return onp.select(condlist,
[onp.asarray(x, dtype=dtype) for x in choicelist],
onp.asarray(default, dtype=dtype))
self._CheckAgainstNumpy(onp_fun, jnp.select, args_maker,
check_dtypes=False)
self._CompileAndCheck(jnp.select, args_maker, check_dtypes=True,
rtol={onp.float64: 1e-7, onp.complex128: 1e-7})
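# Sketch (added): select evaluates conditions elementwise, the first match
# wins, and `default` fills positions no condition covers.
def _sketch_select():
    x = onp.arange(6)
    out = onp.select([x < 2, x > 3], [x, x ** 2], default=-1)
    assert onp.array_equal(out, onp.array([0, 1, -1, -1, 16, 25]))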
def testIssue330(self):
x = jnp.full((1, 1), jnp.array([1])[0]) # doesn't crash
self.assertEqual(x[0, 0], 1)
def testScalarDtypePromotion(self):
orig_numpy_result = (1 + onp.eye(1, dtype=onp.float32)).dtype
jax_numpy_result = (1 + jnp.eye(1, dtype=jnp.float32)).dtype
self.assertEqual(orig_numpy_result, jax_numpy_result)
def testSymmetrizeDtypePromotion(self):
x = onp.eye(3, dtype=onp.float32)
orig_numpy_result = ((x + x.T) / 2).dtype
x = jnp.eye(3, dtype=jnp.float32)
jax_numpy_result = ((x + x.T) / 2).dtype
self.assertEqual(orig_numpy_result, jax_numpy_result)
# NOTE(mattjj): I disabled this test when removing lax._safe_mul because
# introducing the convention 0 * inf = 0 leads to silently wrong results in
# some cases. See this comment for details:
# https://github.com/google/jax/issues/1052#issuecomment-514083352
# def testIssue347(self):
# # https://github.com/google/jax/issues/347
# def test_fail(x):
# x = jnp.sqrt(jnp.sum(x ** 2, axis=1))
# ones = jnp.ones_like(x)
# x = jnp.where(x > 0.5, x, ones)
# return jnp.sum(x)
# x = jnp.array([[1, 2], [3, 4], [0, 0]], dtype=jnp.float64)
# result = api.grad(test_fail)(x)
# assert not onp.any(onp.isnan(result))
def testIssue453(self):
# https://github.com/google/jax/issues/453
a = onp.arange(6) + 1
ans = jnp.reshape(a, (3, 2), order='F')
expected = onp.reshape(a, (3, 2), order='F')
self.assertAllClose(ans, expected, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_dtype={}".format(op, pytype.__name__),
"pytype": pytype, "dtype": dtype, "op": op}
for pytype, dtype in [(int, jnp.int_), (float, jnp.float_),
(bool, jnp.bool_), (complex, jnp.complex_)]
for op in ["atleast_1d", "atleast_2d", "atleast_3d"]))
def testAtLeastNdLiterals(self, pytype, dtype, op):
# Fixes: https://github.com/google/jax/issues/634
onp_fun = lambda arg: getattr(onp, op)(arg).astype(dtype)
jnp_fun = lambda arg: getattr(jnp, op)(arg)
args_maker = lambda: [pytype(2)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(*jtu.cases_from_list(
{"testcase_name": "_case={}".format(i),
"input": input}
for i, input in enumerate([
3,
[3],
[onp.array(3)],
[onp.array([3])],
[[onp.array(3)]],
[[onp.array([3])]],
[3, 4, 5],
[
[onp.eye(2, dtype=onp.int32) * 2, onp.zeros((2, 3), dtype=onp.int32)],
[onp.ones((3, 2), dtype=onp.int32), onp.eye(3, dtype=onp.int32) * 3],
],
[onp.array([1, 2, 3]), onp.array([2, 3, 4]), 10],
[onp.ones((2, 2), dtype=onp.int32), onp.zeros((2, 2), dtype=onp.int32)],
[[onp.array([1, 2, 3])], [onp.array([2, 3, 4])]],
])))
def testBlock(self, input):
args_maker = lambda: [input]
self._CheckAgainstNumpy(onp.block, jnp.block, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp.block, args_maker, check_dtypes=True)
def testLongLong(self):
self.assertAllClose(onp.int64(7), api.jit(lambda x: x)(onp.longlong(7)),
check_dtypes=True)
def testArange(self):
# test cases inspired by dask tests at
# https://github.com/dask/dask/blob/master/dask/array/tests/test_creation.py#L92
self.assertAllClose(jnp.arange(77),
onp.arange(77, dtype=jnp.int_), check_dtypes=True)
self.assertAllClose(jnp.arange(2, 13),
onp.arange(2, 13, dtype=jnp.int_), check_dtypes=True)
self.assertAllClose(jnp.arange(4, 21, 9),
onp.arange(4, 21, 9, dtype=jnp.int_), check_dtypes=True)
self.assertAllClose(jnp.arange(53, 5, -3),
onp.arange(53, 5, -3, dtype=jnp.int_),
check_dtypes=True)
self.assertAllClose(jnp.arange(77, dtype=float),
onp.arange(77, dtype=float), check_dtypes=True)
self.assertAllClose(jnp.arange(2, 13, dtype=int),
onp.arange(2, 13, dtype=int), check_dtypes=True)
self.assertAllClose(jnp.arange(0, 1, -0.5),
onp.arange(0, 1, -0.5, dtype=jnp.float_),
check_dtypes=True)
self.assertRaises(TypeError, lambda: jnp.arange())
# test that jnp.arange(N) doesn't instantiate an ndarray
self.assertNotEqual(type(jnp.arange(77)), type(onp.arange(77)))
self.assertEqual(type(jnp.arange(77)), type(lax.iota(onp.int32, 77)))
# test that jnp.arange(N, dtype=int32) doesn't instantiate an ndarray
self.assertNotEqual(type(jnp.arange(77, dtype=jnp.int32)),
type(onp.arange(77, dtype=onp.int32)))
self.assertEqual(type(jnp.arange(77, dtype=jnp.int32)),
type(lax.iota(onp.int32, 77)))
# test laziness for int dtypes
self.assertTrue(xla.is_device_constant(jnp.arange(77)))
self.assertTrue(xla.is_device_constant(jnp.arange(77, dtype=jnp.int32)))
def testIssue830(self):
a = jnp.arange(4, dtype=jnp.complex64)
self.assertEqual(a.dtype, jnp.complex64)
def testIssue728(self):
assert jnp.allclose(jnp.eye(5000), onp.eye(5000))
self.assertEqual(0, onp.sum(jnp.eye(1050) - onp.eye(1050)))
def testIssue746(self):
jnp.arange(12).reshape(3, 4) # doesn't crash
def testIssue764(self):
x = jnp.linspace(190, 200, 4)
f = api.grad(lambda x: jnp.sum(jnp.tanh(x)))
# Expected values computed with autograd in float64 precision.
expected = onp.array([3.71669453e-165, 4.72999108e-168, 6.01954653e-171,
7.66067839e-174], onp.float64)
self.assertAllClose(f(x), expected, check_dtypes=False)
def testIssue776(self):
"""Tests that the scatter-add transpose rule instantiates symbolic zeros."""
def f(u):
y = jax.ops.index_add(onp.ones(10,), [2, 4, 5], u)
# The transpose rule for lax.tie_in returns a symbolic zero for its first
# argument.
return lax.tie_in(y, 7.)
self.assertAllClose(onp.zeros(3,), api.grad(f)(onp.ones(3,)),
check_dtypes=True)
# NOTE(mattjj): I disabled this test when removing lax._safe_mul because this
# is a numerical stability issue that should be solved with a custom jvp rule
# of the sigmoid function being differentiated here, not by safe_mul.
# def testIssue777(self):
# x = jnp.linspace(-200, 0, 4, dtype=onp.float32)
# f = api.grad(lambda x: jnp.sum(1 / (1 + jnp.exp(-x))))
# self.assertAllClose(f(x), onp.array([0., 0., 0., 0.25], dtype=onp.float32),
# check_dtypes=True)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(op, [()], [dtype]),
"dtype": dtype, "op": op}
for dtype in float_dtypes
for op in ("sqrt", "arccos", "arcsin", "arctan", "sin", "cos", "tan",
"sinh", "cosh", "tanh", "arccosh", "arcsinh", "arctanh", "exp",
"log", "expm1", "log1p")))
def testMathSpecialFloatValues(self, op, dtype):
onp_op = getattr(onp, op)
onp_op = jtu.ignore_warning(category=RuntimeWarning,
message="invalid value.*")(onp_op)
onp_op = jtu.ignore_warning(category=RuntimeWarning,
message="divide by zero.*")(onp_op)
onp_op = jtu.ignore_warning(category=RuntimeWarning,
message="overflow.*")(onp_op)
jnp_op = getattr(jnp, op)
dtype = onp.dtype(dtypes.canonicalize_dtype(dtype)).type
for x in (onp.nan, -onp.inf, -100., -2., -1., 0., 1., 2., 100., onp.inf,
jnp.finfo(dtype).max, onp.sqrt(jnp.finfo(dtype).max),
onp.sqrt(jnp.finfo(dtype).max) * 2.):
if onp.isnan(x) and op in ("sinh", "cosh", "expm1", "exp"):
# TODO(b/133842876, b/133842870): these return wrong outputs on CPU for
# NaN inputs.
continue
if (op in ("sin", "cos", "tan", "arctan") and
jtu.device_under_test() == "tpu"):
continue # TODO(b/132196789, b/134175194): fix and reenable.
x = dtype(x)
expected = onp_op(x)
actual = jnp_op(x)
tol = jtu.tolerance(dtype, {onp.float32: 1e-3, onp.float64: 1e-7})
self.assertAllClose(expected, actual, check_dtypes=True, atol=tol,
rtol=tol)
def testIssue883(self):
# from https://github.com/google/jax/issues/883
@partial(api.jit, static_argnums=(1,))
def f(x, v):
return x
x = jnp.ones((10, 10))
v = jnp.array([1, 2, 3])
first_call = f(x, v)
second_call = f(x, v) # doesn't crash
def testReductionOfOutOfBoundsAxis(self): # Issue 888
x = jnp.ones((3, 4))
self.assertRaises(ValueError, lambda: jnp.sum(x, axis=2))
def testIssue956(self):
self.assertRaises(TypeError, lambda: jnp.ndarray((1, 1)))
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name":
"_shape={}_dtype={}_out_dtype={}_axis={}_ddof={}_keepdims={}"
.format(shape, dtype, out_dtype, axis, ddof, keepdims),
"shape": shape, "dtype": dtype, "out_dtype": out_dtype, "axis": axis,
"ddof": ddof, "keepdims": keepdims, "rng_factory": rng_factory}
for shape in [(5,), (10, 5)]
for dtype in all_dtypes
for out_dtype in inexact_dtypes
for axis in [None, 0, -1]
for ddof in [0, 1, 2]
for keepdims in [False, True]
for rng_factory in [jtu.rand_default]))
def testVar(self, shape, dtype, out_dtype, axis, ddof, keepdims, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
def onp_fun(x):
out = onp.var(x.astype(jnp.promote_types(onp.float32, dtype)),
axis=axis, ddof=ddof, keepdims=keepdims)
return out.astype(out_dtype)
jnp_fun = partial(jnp.var, dtype=out_dtype, axis=axis, ddof=ddof, keepdims=keepdims)
tol = jtu.tolerance(out_dtype, {onp.float16: 1e-1, onp.float32: 1e-3,
onp.float64: 1e-3, onp.complex128: 1e-6})
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True, rtol=tol,
atol=tol)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": "_shape={}_dtype={}_rowvar={}_ddof={}_bias={}".format(
shape, dtype, rowvar, ddof, bias),
"shape": shape, "dtype": dtype, "rowvar": rowvar, "ddof": ddof,
"bias": bias, "rng_factory": rng_factory}
for shape in [(5,), (10, 5), (5, 10)]
for dtype in all_dtypes
for rowvar in [True, False]
for bias in [True, False]
for ddof in [None, 2, 3]
for rng_factory in [jtu.rand_default]))
@jtu.skip_on_devices("gpu") # TODO(b/138003641): test fails on GPU.
def testCov(self, shape, dtype, rowvar, ddof, bias, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
onp_fun = partial(onp.cov, rowvar=rowvar, ddof=ddof, bias=bias)
jnp_fun = partial(jnp.cov, rowvar=rowvar, ddof=ddof, bias=bias)
tol = {onp.float32: 1e-5, onp.float64: 1e-13, onp.complex128: 1e-13}
tol = 7e-2 if jtu.device_under_test() == "tpu" else tol
tol = jtu.join_tolerance(tol, jtu.tolerance(dtype))
self._CheckAgainstNumpy(
onp_fun, jnp_fun, args_maker, check_dtypes=False, tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True, atol=tol,
rtol=tol)
def testIssue967(self):
self.assertRaises(TypeError, lambda: jnp.zeros(1.5))
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": "_shape={}_dtype={}_rowvar={}".format(
shape, dtype, rowvar),
"shape": shape, "dtype": dtype, "rowvar": rowvar,
"rng_factory": rng_factory}
for shape in [(5,), (10, 5), (3, 10)]
for dtype in number_dtypes
for rowvar in [True, False]
for rng_factory in [jtu.rand_default]))
def testCorrCoef(self, shape, dtype, rowvar, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
mat = onp.asarray([rng(shape, dtype)])
onp_fun = partial(onp.corrcoef, rowvar=rowvar)
jnp_fun = partial(jnp.corrcoef, rowvar=rowvar)
if not onp.any(onp.isclose(onp.std(mat), 0.0)):
self._CheckAgainstNumpy(
onp_fun, jnp_fun, args_maker, check_dtypes=False,
tol=1e-2 if jtu.device_under_test() == "tpu" else None)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": "_shapes={}_dtype={}_indexing={}_sparse={}".format(
shapes, dtype, indexing, sparse),
"shapes": shapes, "dtype": dtype, "indexing": indexing,
"sparse": sparse, "rng_factory": rng_factory}
for shapes in [(), (5,), (5, 3)]
for dtype in number_dtypes
for indexing in ['xy', 'ij']
for sparse in [True, False]
for rng_factory in [jtu.rand_default]))
def testMeshGrid(self, shapes, dtype, indexing, sparse, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [(x,) for x in shapes],
[dtype] * len(shapes))
onp_fun = partial(onp.meshgrid, indexing=indexing, sparse=sparse)
jnp_fun = partial(jnp.meshgrid, indexing=indexing, sparse=sparse)
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}"
"_retstep={}_dtype={}").format(
start_shape, stop_shape, num, endpoint, retstep, dtype),
"start_shape": start_shape, "stop_shape": stop_shape,
"num": num, "endpoint": endpoint, "retstep": retstep,
"dtype": dtype, "rng_factory": rng_factory}
for start_shape in [(), (2,), (2, 2)]
for stop_shape in [(), (2,), (2, 2)]
for num in [0, 1, 2, 5, 20]
for endpoint in [True, False]
for retstep in [True, False]
for dtype in number_dtypes + [None,]
for rng_factory in [jtu.rand_default]))
def testLinspace(self, start_shape, stop_shape, num, endpoint,
retstep, dtype, rng_factory):
if num == 1 and not endpoint and numpy_version < (1, 17, 5):
raise SkipTest("Numpy < 1.17.5 has a linspace bug.")
rng = rng_factory()
# relax default tolerances slightly
tol = jtu.tolerance(dtype if dtype else onp.float32) * 10
args_maker = self._GetArgsMaker(rng,
[start_shape, stop_shape],
[dtype, dtype])
start, stop = args_maker()
ndim = len(onp.shape(start + stop))
for axis in range(-ndim, ndim):
jnp_op = lambda start, stop: jnp.linspace(
start, stop, num,
endpoint=endpoint, retstep=retstep, dtype=dtype, axis=axis)
onp_op = lambda start, stop: onp.linspace(
start, stop, num,
endpoint=endpoint, retstep=retstep, dtype=dtype, axis=axis)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker,
check_dtypes=False, tol=tol)
# Skip CompileAndCheck for integer dtypes: differences in floating-point
# computation between jit and non-jit, plus rounding, cause unavoidable
# variation in integer truncation for some inputs.
if dtype in (inexact_dtypes + [None,]):
self._CompileAndCheck(jnp_op, args_maker,
check_dtypes=False, atol=tol, rtol=tol)
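# Sketch (added): the endpoint/retstep combinations swept above. With
# endpoint=False the interval is split into `num` steps and `stop` is dropped.
def _sketch_linspace():
    vals, step = onp.linspace(0.0, 1.0, num=5, retstep=True)
    assert onp.allclose(vals, [0.0, 0.25, 0.5, 0.75, 1.0]) and step == 0.25
    assert onp.allclose(onp.linspace(0.0, 1.0, num=5, endpoint=False),
                        [0.0, 0.2, 0.4, 0.6, 0.8])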
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}"
"_base={}_dtype={}").format(
start_shape, stop_shape, num, endpoint, base,
dtype.__name__ if dtype else "None"),
"start_shape": start_shape,
"stop_shape": stop_shape,
"num": num, "endpoint": endpoint, "base": base,
"dtype": dtype, "rng_factory": rng_factory}
for start_shape in [(), (2,), (2, 2)]
for stop_shape in [(), (2,), (2, 2)]
for num in [0, 1, 2, 5, 20]
for endpoint in [True, False]
for base in [10.0, 2, onp.e]
for dtype in inexact_dtypes + [None,]
for rng_factory in [jtu.rand_default]))
def testLogspace(self, start_shape, stop_shape, num,
endpoint, base, dtype, rng_factory):
if (dtype in int_dtypes and
jtu.device_under_test() in ("gpu", "tpu") and
not FLAGS.jax_enable_x64):
raise unittest.SkipTest("GPUx32 truncated exponentiation"
" doesn't exactly match other platforms.")
rng = rng_factory()
# relax default tolerances slightly
tol = {onp.float16: 2e-2, onp.float32: 1e-2, onp.float64: 1e-6,
onp.complex64: 1e-3, onp.complex128: 1e-6}
args_maker = self._GetArgsMaker(rng,
[start_shape, stop_shape],
[dtype, dtype])
start, stop = args_maker()
ndim = len(onp.shape(start + stop))
for axis in range(-ndim, ndim):
jnp_op = lambda start, stop: jnp.logspace(
start, stop, num, endpoint=endpoint, base=base, dtype=dtype, axis=axis)
onp_op = lambda start, stop: onp.logspace(
start, stop, num, endpoint=endpoint, base=base, dtype=dtype, axis=axis)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker,
check_dtypes=False, tol=tol)
if dtype in (inexact_dtypes + [None,]):
# Why do compiled and op-by-op float16 np.power numbers differ
# slightly more than expected?
atol = {onp.float16: 1e-2}
self._CompileAndCheck(jnp_op, args_maker,
check_dtypes=False, atol=atol, rtol=tol)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}"
"_dtype={}").format(
start_shape, stop_shape, num, endpoint, dtype),
"start_shape": start_shape,
"stop_shape": stop_shape,
"num": num, "endpoint": endpoint,
"dtype": dtype, "rng_factory": rng_factory}
for start_shape in [(), (2,), (2, 2)]
for stop_shape in [(), (2,), (2, 2)]
for num in [0, 1, 2, 5, 20]
for endpoint in [True, False]
# NB: numpy's geomspace gives nonsense results on integer types
for dtype in inexact_dtypes + [None,]
for rng_factory in [jtu.rand_default]))
def testGeomspace(self, start_shape, stop_shape, num,
endpoint, dtype, rng_factory):
rng = rng_factory()
# relax default tolerances slightly
tol = {onp.float16: 4e-3, onp.float32: 2e-3, onp.complex128: 1e-14}
def args_maker():
"""Test the set of inputs onp.geomspace is well-defined on."""
start, stop = self._GetArgsMaker(rng,
[start_shape, stop_shape],
[dtype, dtype])()
# onp.geomspace can't handle differently-ranked tensors with negative numbers.
start, stop = jnp.broadcast_arrays(start, stop)
if dtype in complex_dtypes:
return start, stop
# to avoid NaNs, non-complex start and stop cannot
# differ in sign, elementwise
start = start * jnp.sign(start) * jnp.sign(stop)
return start, stop
start, stop = args_maker()
ndim = len(onp.shape(start + stop))
for axis in range(-ndim, ndim):
def jnp_op(start, stop):
return jnp.geomspace(start, stop, num, endpoint=endpoint, dtype=dtype,
axis=axis)
def onp_op(start, stop):
start = start.astype(onp.float32) if dtype == jnp.bfloat16 else start
stop = stop.astype(onp.float32) if dtype == jnp.bfloat16 else stop
return onp.geomspace(
start, stop, num, endpoint=endpoint,
dtype=dtype if dtype != jnp.bfloat16 else onp.float32,
axis=axis).astype(dtype)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker,
check_dtypes=False, tol=tol)
if dtype in (inexact_dtypes + [None,]):
self._CompileAndCheck(jnp_op, args_maker,
check_dtypes=False, atol=tol, rtol=tol)
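# Sketch (added): geomspace produces a geometric progression, i.e. a constant
# ratio between consecutive samples (here 2), hence the sign constraint above.
def _sketch_geomspace():
    assert onp.allclose(onp.geomspace(1.0, 8.0, num=4), [1.0, 2.0, 4.0, 8.0])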
def testDisableNumpyRankPromotionBroadcasting(self):
try:
prev_flag = FLAGS.jax_numpy_rank_promotion
FLAGS.jax_numpy_rank_promotion = "allow"
jnp.ones(2) + jnp.ones((1, 2)) # works just fine
finally:
FLAGS.jax_numpy_rank_promotion = prev_flag
try:
prev_flag = FLAGS.jax_numpy_rank_promotion
FLAGS.jax_numpy_rank_promotion = "raise"
self.assertRaises(ValueError, lambda: jnp.ones(2) + jnp.ones((1, 2)))
finally:
FLAGS.jax_numpy_rank_promotion = prev_flag
try:
prev_flag = FLAGS.jax_numpy_rank_promotion
FLAGS.jax_numpy_rank_promotion = "warn"
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
jnp.ones(2) + jnp.ones((1, 2))
assert len(w) > 0
msg = str(w[-1].message)
expected_msg = ("Following NumPy automatic rank promotion for add on "
"shapes (2,) (1, 2).")
self.assertEqual(msg[:len(expected_msg)], expected_msg)
prev_len = len(w)
jnp.ones(2) + 3
self.assertEqual(len(w), prev_len) # don't want to warn for scalars
finally:
FLAGS.jax_numpy_rank_promotion = prev_flag
def testStackArrayArgument(self):
# tests https://github.com/google/jax/issues/1271
@api.jit
def foo(x):
return jnp.stack(x)
foo(onp.zeros(2)) # doesn't crash
@api.jit
def foo(x):
return jnp.concatenate(x)
foo(onp.zeros((2, 2))) # doesn't crash
def testReluGradientConstants(self):
# This is a regression test that verifies that constants associated with the
# gradient of np.maximum (from lax._balanced_eq) aren't hoisted into the
# outermost jaxpr. This was producing some large materialized constants for
# every relu activation in a model.
def body(i, xy):
x, y = xy
y = y + jax.grad(lambda z: jnp.sum(jnp.maximum(z, 0.)))(x)
return x, y
f = lambda y: lax.fori_loop(0, 5, body, (y, y))
wrapped = linear_util.wrap_init(f)
pv = partial_eval.PartialVal.unknown(jax.ShapedArray((3, 4), onp.float32))
_, _, consts = partial_eval.trace_to_jaxpr(wrapped, [pv])
self.assertFalse(
any(onp.array_equal(x, onp.full((3, 4), 2., dtype=onp.float32))
for x in consts))
@parameterized.named_parameters(
{"testcase_name": "_from={}_to={}".format(from_shape, to_shape),
"rng_factory": rng_factory, "from_shape": from_shape, "to_shape": to_shape}
for from_shape, to_shape in [
[(1, 3), (4, 3)],
[(3,), (2, 1, 3)],
[(3,), (3, 3)],
[(1,), (3,)],
]
for rng_factory in [jtu.rand_default])
def testBroadcastTo(self, from_shape, to_shape, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [from_shape], [onp.float32])
onp_op = lambda x: onp.broadcast_to(x, to_shape)
jnp_op = lambda x: jnp.broadcast_to(x, to_shape)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
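# Sketch (added): broadcast_to stretches size-1 (or missing) leading
# dimensions without copying data, which is what the shape pairs above test.
def _sketch_broadcast_to():
    row = onp.array([1, 2, 3])
    assert onp.array_equal(onp.broadcast_to(row, (2, 3)),
                           onp.array([[1, 2, 3], [1, 2, 3]]))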
def testBroadcastToIssue1522(self):
self.assertRaisesRegex(
ValueError, "Incompatible shapes for broadcasting: .*",
lambda: jnp.broadcast_to(onp.ones((2, 3)), (1, 3)))
def testBroadcastToIntIssue1548(self):
self.assertAllClose(jnp.broadcast_to(1, (3, 2)), onp.ones((3, 2)),
check_dtypes=False)
def testBroadcastToOnScalar(self):
self.assertIsInstance(jnp.broadcast_to(10.0, ()), jnp.ndarray)
self.assertIsInstance(onp.broadcast_to(10.0, ()), onp.ndarray)
def testPrecision(self):
ones_1d = onp.ones((2,))
ones_2d = onp.ones((2, 2))
ones_3d = onp.ones((2, 2, 2))
HIGHEST = lax.Precision.HIGHEST
jtu.assert_dot_precision(None, jnp.dot, ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.dot, precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.dot, precision=HIGHEST),
ones_3d, ones_3d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.matmul, precision=HIGHEST),
ones_2d, ones_2d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.vdot, precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.tensordot, axes=2, precision=HIGHEST),
ones_2d, ones_2d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.tensordot, axes=(0, 0), precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.tensordot, axes=((0,), (0,)), precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.einsum, 'i,i', precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.einsum, 'ij,ij', precision=HIGHEST),
ones_2d, ones_2d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.inner, precision=HIGHEST),
ones_1d, ones_1d)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": ("_shape={}_axis={}_dtype={}").format(shape, axis, dtype),
"shape": shape,
"axis": axis,
"dtype": dtype, "rng_factory": rng_factory}
for shape in [(10,), (10, 15), (10, 15, 20)]
for _num_axes in range(len(shape))
for axis in itertools.combinations(range(len(shape)), _num_axes)
for dtype in inexact_dtypes
for rng_factory in [jtu.rand_default]))
def testGradient(self, shape, axis, dtype, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
jnp_fun = lambda y: jnp.gradient(y, axis=axis)
onp_fun = lambda y: onp.gradient(y, axis=axis)
self._CheckAgainstNumpy(
onp_fun, jnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
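# Sketch (added): gradient uses one-sided differences at the boundaries and
# central differences in the interior.
def _sketch_gradient():
    f = onp.array([1.0, 2.0, 4.0, 7.0])
    assert onp.allclose(onp.gradient(f), [1.0, 1.5, 2.5, 3.0])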
def testZerosShapeErrors(self):
# see https://github.com/google/jax/issues/1822
self.assertRaisesRegex(
TypeError,
"Shapes must be 1D sequences of concrete values of integer type.*",
lambda: jnp.zeros(1.))
self.assertRaisesRegex(
TypeError,
"Shapes must be 1D sequences of concrete values of integer type.*\n"
"If using `jit`, try using `static_argnums` or applying `jit` to smaller subfunctions.",
lambda: api.jit(jnp.zeros)(2))
def testTraceMethod(self):
x = onp.random.randn(3, 4).astype(jnp.float_)
self.assertAllClose(x.trace(), jnp.array(x).trace(), check_dtypes=True)
self.assertAllClose(x.trace(), api.jit(lambda y: y.trace())(x),
check_dtypes=True)
# Most grad tests are at the lax level (see lax_test.py), but we add some here
# as needed for e.g. particular compound ops of interest.
GradTestSpec = collections.namedtuple(
"GradTestSpec",
["op", "nargs", "order", "rng_factory", "dtypes", "name", "tol"])
def grad_test_spec(op, nargs, order, rng_factory, dtypes, name=None, tol=None):
return GradTestSpec(
op, nargs, order, rng_factory, dtypes, name or op.__name__, tol)
GRAD_TEST_RECORDS = [
grad_test_spec(jnp.arcsinh, nargs=1, order=2,
rng_factory=jtu.rand_positive,
dtypes=[onp.float64, onp.complex64], tol=1e-4),
grad_test_spec(jnp.arccosh, nargs=1, order=2,
rng_factory=jtu.rand_positive,
dtypes=[onp.float64, onp.complex64], tol=1e-4),
grad_test_spec(jnp.arctanh, nargs=1, order=2,
rng_factory=partial(jtu.rand_uniform, -0.9, 0.9),
dtypes=[onp.float64, onp.complex64], tol=1e-4),
grad_test_spec(jnp.logaddexp, nargs=2, order=1,
rng_factory=partial(jtu.rand_uniform, -0.9, 0.9),
dtypes=[onp.float64], tol=1e-4),
grad_test_spec(jnp.logaddexp2, nargs=2, order=2,
rng_factory=partial(jtu.rand_uniform, -0.9, 0.9),
dtypes=[onp.float64], tol=1e-4),
]
GradSpecialValuesTestSpec = collections.namedtuple(
"GradSpecialValuesTestSpec", ["op", "values", "order"])
GRAD_SPECIAL_VALUE_TEST_RECORDS = [
GradSpecialValuesTestSpec(jnp.arcsinh, [0., 1000.], 2),
GradSpecialValuesTestSpec(jnp.arccosh, [1000.], 2),
GradSpecialValuesTestSpec(jnp.arctanh, [0.], 2),
GradSpecialValuesTestSpec(jnp.sinc, [0.], 1),
]
def num_float_bits(dtype):
return jnp.finfo(dtypes.canonicalize_dtype(dtype)).bits
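# Sketch (added): how the records above are consumed. check_grads compares
# forward- and reverse-mode derivatives of an op against numerical
# differences, up to the requested order, mirroring the calls in the tests
# below.
def _sketch_check_grads():
    check_grads(jnp.tanh, (0.5,), 2, ["fwd", "rev"])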
class NumpyGradTests(jtu.JaxTestCase):
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(
rec.name, shapes, itertools.repeat(dtype)),
"op": rec.op, "rng_factory": rec.rng_factory, "shapes": shapes, "dtype": dtype,
"order": rec.order, "tol": rec.tol}
for shapes in CombosWithReplacement(nonempty_shapes, rec.nargs)
for dtype in rec.dtypes)
for rec in GRAD_TEST_RECORDS))
def testOpGrad(self, op, rng_factory, shapes, dtype, order, tol):
rng = rng_factory()
tol = {onp.float32: 1e-1, onp.complex64: 1e-1}
args = tuple(rng(shape, dtype) for shape in shapes)
check_grads(op, args, order, ["fwd", "rev"], tol, tol)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": "_{}_{}".format(rec.op.__name__, special_value),
"op": rec.op, "special_value": special_value, "order": rec.order}
for special_value in rec.values)
for rec in GRAD_SPECIAL_VALUE_TEST_RECORDS))
def testOpGradSpecialValue(self, op, special_value, order):
check_grads(op, (special_value,), order, ["fwd", "rev"],
atol={onp.float32: 3e-3})
def testTakeAlongAxisIssue1521(self):
# https://github.com/google/jax/issues/1521
idx = jnp.repeat(jnp.arange(3), 10).reshape((30, 1))
def f(x):
y = x * jnp.arange(3.).reshape((1, 3))
return jnp.take_along_axis(y, idx, -1).sum()
check_grads(f, (1.,), order=1)
if __name__ == "__main__":
absltest.main()
import collections
import functools
from functools import partial
import itertools
import operator
from typing import cast, Optional
import unittest
from unittest import SkipTest
import warnings
from absl.testing import absltest
from absl.testing import parameterized
import numpy as onp
import jax
import jax.ops
from jax import api
from jax import lax
from jax import linear_util
from jax import numpy as jnp
from jax import test_util as jtu
from jax import dtypes
from jax import tree_util
from jax.interpreters import partial_eval, xla
from jax.test_util import check_grads
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
nonempty_nonscalar_array_shapes = [(4,), (3, 4), (3, 1), (1, 4), (2, 1, 4), (2, 3, 4)]
nonempty_array_shapes = [()] + nonempty_nonscalar_array_shapes
one_dim_array_shapes = [(1,), (6,), (12,)]
empty_array_shapes = [(0,), (0, 4), (3, 0),]
scalar_shapes = [jtu.NUMPY_SCALAR_SHAPE, jtu.PYTHON_SCALAR_SHAPE]
array_shapes = nonempty_array_shapes + empty_array_shapes
nonzerodim_shapes = nonempty_nonscalar_array_shapes + empty_array_shapes
nonempty_shapes = scalar_shapes + nonempty_array_shapes
all_shapes = scalar_shapes + array_shapes
def supported_dtypes(dtypes):
return [t for t in dtypes if t in jtu.supported_dtypes()]
float_dtypes = supported_dtypes([jnp.bfloat16, onp.float16, onp.float32,
onp.float64])
complex_dtypes = [onp.complex64, onp.complex128]
int_dtypes = [onp.int32, onp.int64]
uint_dtypes = [onp.uint32, onp.uint64]
unsigned_dtypes = [onp.uint32, onp.uint64]
bool_dtypes = [onp.bool_]
default_dtypes = float_dtypes + int_dtypes
inexact_dtypes = float_dtypes + complex_dtypes
number_dtypes = float_dtypes + complex_dtypes + int_dtypes
all_dtypes = number_dtypes + bool_dtypes
python_scalar_dtypes = [jnp.bool_, jnp.int_, jnp.float_, jnp.complex_]
def _valid_dtypes_for_shape(shape, dtypes):
if shape is jtu.PYTHON_SCALAR_SHAPE:
return [t for t in dtypes if t in python_scalar_dtypes]
return dtypes
def _shape_and_dtypes(shapes, dtypes):
for shape in shapes:
for dtype in _valid_dtypes_for_shape(shape, dtypes):
yield (shape, dtype)
OpRecord = collections.namedtuple(
"OpRecord",
["name", "nargs", "dtypes", "shapes", "rng_factory", "diff_modes",
"test_name", "check_dtypes", "tolerance", "inexact"])
def op_record(name, nargs, dtypes, shapes, rng_factory, diff_modes,
test_name=None, check_dtypes=True, tolerance=None, inexact=False):
test_name = test_name or name
return OpRecord(name, nargs, dtypes, shapes, rng_factory, diff_modes,
test_name, check_dtypes, tolerance, inexact)
JAX_ONE_TO_ONE_OP_RECORDS = [
op_record("abs", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("add", 2, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("ceil", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("conj", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
op_record("exp", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("fabs", 1, float_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("float_power", 2, inexact_dtypes, all_shapes,
partial(jtu.rand_default, scale=1), ["rev"],
tolerance={jnp.bfloat16: 1e-2, onp.float32: 1e-3,
onp.float64: 1e-12, onp.complex64: 2e-4,
onp.complex128: 1e-12}, check_dtypes=False),
op_record("floor", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("greater", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
op_record("greater_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
op_record("less", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
op_record("less_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
op_record("log", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("logical_and", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
op_record("logical_not", 1, all_dtypes, all_shapes, jtu.rand_bool, []),
op_record("logical_or", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
op_record("logical_xor", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
op_record("maximum", 2, all_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("minimum", 2, all_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("multiply", 2, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("negative", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("nextafter", 2, [f for f in float_dtypes if f != jnp.bfloat16],
all_shapes, jtu.rand_default, ["rev"], inexact=True, tolerance=0),
op_record("not_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, ["rev"]),
op_record("array_equal", 2, number_dtypes, all_shapes, jtu.rand_some_equal, ["rev"]),
op_record("reciprocal", 1, inexact_dtypes, all_shapes, jtu.rand_default, []),
op_record("subtract", 2, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("signbit", 1, default_dtypes + bool_dtypes, all_shapes,
jtu.rand_some_inf_and_nan, ["rev"]),
op_record("trunc", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("sin", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("cos", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("tan", 1, number_dtypes, all_shapes,
partial(jtu.rand_uniform, -1.5, 1.5), ["rev"], inexact=True),
op_record("sinh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("cosh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("tanh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
tolerance={onp.float64: 1e-7, onp.complex128: 1e-7},
inexact=True),
op_record("arcsin", 1, float_dtypes, all_shapes, jtu.rand_small, ["rev"],
inexact=True),
op_record("arccos", 1, float_dtypes, all_shapes, jtu.rand_small, ["rev"],
inexact=True),
op_record("arctan", 1, float_dtypes, all_shapes, jtu.rand_small, ["rev"],
inexact=True),
op_record("arctan2", 2, float_dtypes, all_shapes, jtu.rand_small, ["rev"],
inexact=True),
op_record("arcsinh", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("arccosh", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("arctanh", 1, number_dtypes, all_shapes, jtu.rand_small, ["rev"],
inexact=True),
]
JAX_COMPOUND_OP_RECORDS = [
op_record("angle", 1, number_dtypes, all_shapes, jtu.rand_default, [],
check_dtypes=False, inexact=True),
op_record("atleast_1d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("atleast_2d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("atleast_3d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("cbrt", 1, default_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("conjugate", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("deg2rad", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("divide", 2, number_dtypes, all_shapes, jtu.rand_nonzero, ["rev"],
inexact=True),
op_record("divmod", 2, int_dtypes + float_dtypes, all_shapes,
jtu.rand_nonzero, []),
op_record("exp2", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
tolerance={jnp.bfloat16: 2e-2, onp.float16: 1e-2}, inexact=True),
op_record("expm1", 1, number_dtypes, all_shapes, jtu.rand_positive, [],
test_name="expm1_large", tolerance={onp.float64: 1e-8}, inexact=True),
op_record("expm1", 1, number_dtypes, all_shapes, jtu.rand_small_positive,
[], tolerance={onp.float64: 1e-8}, inexact=True),
op_record("fix", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("floor_divide", 2, number_dtypes, all_shapes,
jtu.rand_nonzero, ["rev"]),
op_record("floor_divide", 2, uint_dtypes, all_shapes,
jtu.rand_nonzero, ["rev"]),
op_record("heaviside", 2, default_dtypes, all_shapes, jtu.rand_default, [],
inexact=True),
op_record("hypot", 2, default_dtypes, all_shapes, jtu.rand_default, [],
inexact=True),
op_record("kron", 2, number_dtypes, nonempty_shapes, jtu.rand_default, []),
op_record("outer", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("imag", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("iscomplex", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("isfinite", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("isinf", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("isnan", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("isneginf", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("isposinf", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("isreal", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("isrealobj", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("log2", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("log10", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("log1p", 1, number_dtypes, all_shapes, jtu.rand_positive, [],
test_name="log1p_large", tolerance={onp.float64: 1e-12},
inexact=True),
op_record("log1p", 1, number_dtypes, all_shapes, jtu.rand_small_positive, [],
tolerance={onp.float64: 1e-12}, inexact=True),
op_record("logaddexp", 2, float_dtypes, all_shapes,
jtu.rand_some_inf_and_nan, ["rev"],
tolerance={onp.float64: 1e-12}, inexact=True),
op_record("logaddexp2", 2, float_dtypes, all_shapes,
jtu.rand_some_inf_and_nan, ["rev"],
tolerance={onp.float16: 1e-2}, inexact=True),
op_record("polyval", 2, number_dtypes, nonempty_nonscalar_array_shapes,
jtu.rand_default, [], check_dtypes=False,
tolerance={onp.float16: 1e-2, onp.float64: 1e-12}),
op_record("positive", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("power", 2, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
tolerance={onp.complex128: 1e-14}),
op_record("rad2deg", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("ravel", 1, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("real", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("remainder", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],
tolerance={onp.float16: 1e-2}),
op_record("mod", 2, default_dtypes, all_shapes, jtu.rand_nonzero, []),
op_record("sign", 1, number_dtypes + uint_dtypes, all_shapes,
jtu.rand_some_inf_and_nan, []),
op_record('copysign', 2, default_dtypes, all_shapes, jtu.rand_some_inf_and_nan, [],
check_dtypes=False),
op_record("sinc", 1, [t for t in number_dtypes if t != jnp.bfloat16],
all_shapes, jtu.rand_default, ["rev"],
tolerance={onp.complex64: 1e-5}, inexact=True,
check_dtypes=False),
op_record("square", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("sqrt", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("transpose", 1, all_dtypes, all_shapes, jtu.rand_default, ["rev"],
check_dtypes=False),
op_record("true_divide", 2, all_dtypes, all_shapes, jtu.rand_nonzero,
["rev"], inexact=True),
op_record("diff", 1, number_dtypes, nonzerodim_shapes, jtu.rand_default, ["rev"]),
]
JAX_BITWISE_OP_RECORDS = [
op_record("bitwise_and", 2, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_bool, []),
op_record("bitwise_not", 1, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_bool, []),
op_record("bitwise_or", 2, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_bool, []),
op_record("bitwise_xor", 2, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_bool, []),
]
JAX_REDUCER_RECORDS = [
op_record("mean", 1, number_dtypes, nonempty_shapes, jtu.rand_default, [],
inexact=True),
op_record("prod", 1, all_dtypes, all_shapes, jtu.rand_small_positive, []),
op_record("sum", 1, all_dtypes, all_shapes, jtu.rand_default, []),
op_record("nanmean", 1, inexact_dtypes, nonempty_shapes, jtu.rand_some_nan,
[], inexact=True),
op_record("nanprod", 1, inexact_dtypes, all_shapes, jtu.rand_some_nan, []),
op_record("nansum", 1, number_dtypes, all_shapes, jtu.rand_some_nan, []),
]
JAX_REDUCER_NO_DTYPE_RECORDS = [
op_record("all", 1, all_dtypes, all_shapes, jtu.rand_some_zero, []),
op_record("any", 1, all_dtypes, all_shapes, jtu.rand_some_zero, []),
op_record("max", 1, all_dtypes, nonempty_shapes, jtu.rand_default, []),
op_record("min", 1, all_dtypes, nonempty_shapes, jtu.rand_default, []),
op_record("var", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
inexact=True),
op_record("std", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
inexact=True),
]
JAX_ARGMINMAX_RECORDS = [
op_record("argmin", 1, all_dtypes, nonempty_shapes, jtu.rand_some_equal, []),
op_record("argmax", 1, all_dtypes, nonempty_shapes, jtu.rand_some_equal, []),
]
JAX_OPERATOR_OVERLOADS = [
op_record("__add__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__sub__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__mul__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__eq__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__ne__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__lt__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("__le__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("__gt__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("__ge__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("__pos__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__neg__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__pow__", 2, inexact_dtypes, all_shapes, jtu.rand_positive, [],
tolerance={onp.float32: 2e-4, onp.complex64: 2e-4, onp.complex128: 1e-14}),
op_record("__mod__", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],
tolerance={onp.float16: 1e-1}),
op_record("__floordiv__", 2, default_dtypes, all_shapes,
jtu.rand_nonzero, []),
op_record("__truediv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, [],
inexact=True),
op_record("__abs__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__invert__", 1, int_dtypes, all_shapes, jtu.rand_default, []),
]
JAX_RIGHT_OPERATOR_OVERLOADS = [
op_record("__radd__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__rsub__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__rmul__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__rpow__", 2, inexact_dtypes, all_shapes, jtu.rand_positive, [],
tolerance={onp.float32: 2e-4, onp.complex64: 1e-3}),
op_record("__rmod__", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],
tolerance={onp.float16: 1e-1}),
op_record("__rfloordiv__", 2, default_dtypes, all_shapes,
jtu.rand_nonzero, []),
op_record("__rtruediv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, [],
inexact=True),
]
class _OverrideEverything(object):
pass
for rec in JAX_OPERATOR_OVERLOADS + JAX_RIGHT_OPERATOR_OVERLOADS:
if rec.nargs == 2:
setattr(_OverrideEverything, rec.name, lambda self, other: self)
class _OverrideNothing(object):
pass
for rec in JAX_OPERATOR_OVERLOADS + JAX_RIGHT_OPERATOR_OVERLOADS:
if rec.nargs == 2:
setattr(_OverrideNothing, rec.name, lambda self, other: NotImplemented)
numpy_version = tuple(map(int, onp.version.version.split('.')))
if numpy_version >= (1, 15):
JAX_COMPOUND_OP_RECORDS += [
op_record("isclose", 2, [t for t in all_dtypes if t != jnp.bfloat16],
all_shapes, jtu.rand_small_positive, []),
op_record("gcd", 2, int_dtypes, all_shapes, jtu.rand_default, []),
op_record("lcm", 2, int_dtypes, all_shapes, jtu.rand_default, []),
]
JAX_REDUCER_NO_DTYPE_RECORDS += [
op_record("ptp", 1, number_dtypes, nonempty_shapes, jtu.rand_default, []),
]
CombosWithReplacement = itertools.combinations_with_replacement
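# Keep only dtype pairs whose bitwise-op result type is unambiguous between
# NumPy and JAX: same signedness, two 32-bit types, or a 32-bit type widened
# into a signed 64-bit one.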
def _dtypes_are_compatible_for_bitwise_ops(args):
if len(args) <= 1:
return True
is_signed = lambda dtype: jnp.issubdtype(dtype, onp.signedinteger)
width = lambda dtype: jnp.iinfo(dtype).bits
x, y = args
if width(x) > width(y):
x, y = y, x
return (
is_signed(x) == is_signed(y)
or (width(x) == 32 and width(y) == 32)
or (width(x) == 32 and width(y) == 64 and is_signed(y)))
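# Decide broadcast compatibility by delegating to NumPy itself: adding
# zero-filled arrays raises ValueError exactly when the shapes do not
# broadcast. E.g. [(3, 4), (1, 4)] -> True, [(3, 4), (2, 4)] -> False.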
def _shapes_are_broadcast_compatible(shapes):
accumulator = onp.zeros([])
for shape in shapes:
try:
accumulator = accumulator + onp.zeros(shape)
except ValueError:
return False
return True
def _shapes_are_equal_length(shapes):
return all(len(shape) == len(shapes[0]) for shape in shapes[1:])
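# Wrap a NumPy function so that its arguments are first cast to the dtype
# jnp's promotion rules would choose, making the NumPy reference output
# comparable to the jnp output. With inexact=True, all-integer inputs are
# promoted to the default float dtype, mirroring functions like jnp.mean.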
def _promote_like_jnp(fun, inexact=False):
def wrapper(*args, **kw):
flat_args = tree_util.tree_leaves(args)
if inexact and not any(jnp.issubdtype(jnp.result_type(x), jnp.inexact)
for x in flat_args):
dtype = jnp.result_type(jnp.float_, *flat_args)
else:
dtype = jnp.result_type(*flat_args)
args = tree_util.tree_map(lambda a: onp.asarray(a, dtype), args)
return fun(*args, **kw)
return wrapper
class LaxBackedNumpyTests(jtu.JaxTestCase):
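  # _GetArgsMaker returns a thunk producing fresh random operands; with
  # onp_arrays=False the operands are converted to jnp arrays so that jnp's
  # type-promotion rules, rather than NumPy's, govern operator dispatch.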
def _GetArgsMaker(self, rng, shapes, dtypes, onp_arrays=True):
def f():
out = [rng(shape, dtype or jnp.float_)
for shape, dtype in zip(shapes, dtypes)]
if onp_arrays:
return out
return [jnp.asarray(a) if isinstance(a, (onp.ndarray, onp.generic)) else a
for a in out]
return f
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
dtypes),
"rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes,
"onp_op": getattr(onp, rec.name), "jnp_op": getattr(jnp, rec.name),
"check_dtypes": rec.check_dtypes, "tolerance": rec.tolerance,
"inexact": rec.inexact}
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(rec.shapes, rec.nargs))
for dtypes in itertools.product(
*(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
for rec in itertools.chain(JAX_ONE_TO_ONE_OP_RECORDS,
JAX_COMPOUND_OP_RECORDS)))
def testOp(self, onp_op, jnp_op, rng_factory, shapes, dtypes, check_dtypes,
tolerance, inexact):
if onp_op is onp.float_power:
onp_op = jtu.ignore_warning(category=RuntimeWarning,
message="invalid value.*")(onp_op)
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, shapes, dtypes, onp_arrays=False)
tol = max(jtu.tolerance(dtype, tolerance) for dtype in dtypes)
tol = functools.reduce(jtu.join_tolerance,
[tolerance, tol, jtu.default_tolerance()])
self._CheckAgainstNumpy(_promote_like_jnp(onp_op, inexact), jnp_op,
args_maker, check_dtypes=check_dtypes, tol=tol)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=check_dtypes,
atol=tol, rtol=tol)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
dtypes),
"rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes, "name": rec.name,
"tol": rec.tolerance}
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(rec.shapes, rec.nargs))
for dtypes in itertools.product(
*(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
for rec in JAX_OPERATOR_OVERLOADS))
def testOperatorOverload(self, name, rng_factory, shapes, dtypes, tol):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, shapes, dtypes, onp_arrays=False)
fun = lambda *xs: getattr(operator, name.strip('_'))(*xs)
scalar_arg = (jtu.PYTHON_SCALAR_SHAPE in shapes or
jtu.NUMPY_SCALAR_SHAPE in shapes or
() in shapes)
empty_shape = any(isinstance(s, tuple) and 0 in s for s in shapes)
self._CompileAndCheck(
fun, args_maker, check_dtypes=True,
atol=tol, rtol=tol)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
dtypes),
"rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes, "name": rec.name,
"op_tolerance": rec.tolerance}
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(rec.shapes, rec.nargs))
for dtypes in itertools.product(
*(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
for rec in JAX_RIGHT_OPERATOR_OVERLOADS))
def testRightOperatorOverload(self, name, rng_factory, shapes, dtypes,
op_tolerance):
if shapes[1] is jtu.PYTHON_SCALAR_SHAPE:
raise SkipTest("scalars not implemented")
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, shapes, dtypes, onp_arrays=False)
fun = lambda fst, snd: getattr(snd, name)(fst)
tol = max(jtu.tolerance(dtype, op_tolerance) for dtype in dtypes)
scalar_arg = (jtu.PYTHON_SCALAR_SHAPE in shapes or
jtu.NUMPY_SCALAR_SHAPE in shapes or
() in shapes)
empty_shape = any(isinstance(s, tuple) and 0 in s for s in shapes)
self._CompileAndCheck(
fun, args_maker, check_dtypes=True,
atol=tol, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": rec.test_name + "_{}".format(dtype),
"rng_factory": rec.rng_factory,
"op_name": rec.name, "dtype": dtype}
for rec in JAX_OPERATOR_OVERLOADS if rec.nargs == 2
for dtype in rec.dtypes))
def testBinaryOperatorDefers(self, op_name, rng_factory, dtype):
rng = rng_factory()
arg = jax.device_put(rng((), dtype))
op = getattr(operator, op_name)
other = _OverrideEverything()
assert op(other, arg) is other
assert op(arg, other) is other
other = _OverrideNothing()
if op_name == "__eq__":
assert op(other, arg) is False
assert op(arg, other) is False
elif op_name == "__ne__":
assert op(other, arg) is True
assert op(arg, other) is True
else:
with self.assertRaises(TypeError):
op(other, arg)
with self.assertRaises(TypeError):
op(arg, other)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(
rec.test_name, shapes, dtypes),
"rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes,
"onp_op": getattr(onp, rec.name), "jnp_op": getattr(jnp, rec.name)}
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(rec.shapes, rec.nargs))
for dtypes in filter(
_dtypes_are_compatible_for_bitwise_ops,
CombosWithReplacement(rec.dtypes, rec.nargs)))
for rec in JAX_BITWISE_OP_RECORDS))
def testBitwiseOp(self, onp_op, jnp_op, rng_factory, shapes, dtypes):
rng = rng_factory()
if not FLAGS.jax_enable_x64 and any(
jnp.iinfo(dtype).bits == 64 for dtype in dtypes):
self.skipTest("x64 types are disabled by jax_enable_x64")
args_maker = self._GetArgsMaker(rng, shapes, dtypes)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker,
check_dtypes=jtu.PYTHON_SCALAR_SHAPE not in shapes)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": "{}_inshape={}_axis={}_dtype={}_keepdims={}".format(
rec.test_name.capitalize(),
jtu.format_shape_dtype_string(shape, dtype), axis,
"None" if out_dtype is None else onp.dtype(out_dtype).name, keepdims),
"rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
"onp_op": getattr(onp, rec.name), "jnp_op": getattr(jnp, rec.name),
"axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
for shape in rec.shapes for dtype in rec.dtypes
for out_dtype in [None] + rec.dtypes
for axis in list(range(-len(shape), len(shape))) + [None]
for keepdims in [False, True])
for rec in JAX_REDUCER_RECORDS))
def testReducer(self, onp_op, jnp_op, rng_factory, shape, dtype, out_dtype,
axis, keepdims, inexact):
rng = rng_factory()
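    # NumPy has no bfloat16, so the reference computation substitutes
    # float32 for bfloat16 inputs and accumulation dtypes.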
@jtu.ignore_warning(category=onp.ComplexWarning)
@jtu.ignore_warning(category=RuntimeWarning,
message="mean of empty slice.*")
def onp_fun(x):
x_cast = x if dtype != jnp.bfloat16 else x.astype(onp.float32)
t = out_dtype if out_dtype != jnp.bfloat16 else onp.float32
return onp_op(x_cast, axis, dtype=t, keepdims=keepdims)
onp_fun = _promote_like_jnp(onp_fun, inexact)
jnp_fun = lambda x: jnp_op(x, axis, dtype=out_dtype, keepdims=keepdims)
jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
args_maker = lambda: [rng(shape, dtype)]
tol_spec = {onp.float16: 1e-2, onp.float32: 1e-3, onp.complex64: 1e-3,
onp.float64: 1e-5, onp.complex128: 1e-5}
tol = jtu.tolerance(dtype, tol_spec)
tol = max(tol, jtu.tolerance(out_dtype, tol_spec)) if out_dtype else tol
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker,
check_dtypes=jnp.bfloat16 not in (dtype, out_dtype),
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True, atol=tol,
rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "{}_inshape={}_axis={}_keepdims={}".format(
rec.test_name.capitalize(),
jtu.format_shape_dtype_string(shape, dtype), axis, keepdims),
"rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
"onp_op": getattr(onp, rec.name), "jnp_op": getattr(jnp, rec.name),
"axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
for rec in JAX_REDUCER_NO_DTYPE_RECORDS
for shape in rec.shapes for dtype in rec.dtypes
for axis in list(range(-len(shape), len(shape))) + [None]
for keepdims in [False, True]))
def testReducerNoDtype(self, onp_op, jnp_op, rng_factory, shape, dtype, axis,
keepdims, inexact):
rng = rng_factory()
onp_fun = lambda x: onp_op(x, axis, keepdims=keepdims)
onp_fun = _promote_like_jnp(onp_fun, inexact)
onp_fun = jtu.ignore_warning(category=onp.ComplexWarning)(onp_fun)
jnp_fun = lambda x: jnp_op(x, axis, keepdims=keepdims)
jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"shape": shape, "dtype": dtype, "axis": axis}
for shape in all_shapes for dtype in all_dtypes
for axis in list(range(-len(shape), len(shape))) + [None]))
def testCountNonzero(self, shape, dtype, axis):
rng = jtu.rand_some_zero()
onp_fun = lambda x: onp.count_nonzero(x, axis)
jnp_fun = lambda x: jnp.count_nonzero(x, axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes for dtype in all_dtypes))
def testNonzero(self, shape, dtype):
rng = jtu.rand_some_zero()
onp_fun = lambda x: onp.nonzero(x)
onp_fun = jtu.ignore_warning(
category=DeprecationWarning,
message="Calling nonzero on 0d arrays.*")(onp_fun)
jnp_fun = lambda x: jnp.nonzero(x)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "{}_inshape={}_axis={}".format(
rec.test_name.capitalize(),
jtu.format_shape_dtype_string(shape, dtype), axis),
"rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
"onp_op": getattr(onp, rec.name), "jnp_op": getattr(jnp, rec.name),
"axis": axis}
for rec in JAX_ARGMINMAX_RECORDS
for shape, dtype in _shape_and_dtypes(rec.shapes, rec.dtypes)
for axis in range(-len(shape), len(shape))))
def testArgMinMax(self, onp_op, jnp_op, rng_factory, shape, dtype, axis):
rng = rng_factory()
if dtype == onp.complex128 and jtu.device_under_test() == "gpu":
raise unittest.SkipTest("complex128 reductions not supported on GPU")
def onp_fun(array_to_reduce):
return onp_op(array_to_reduce, axis).astype(jnp.int_)
def jnp_fun(array_to_reduce):
return jnp_op(array_to_reduce, axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),
axes),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"axes": axes, "rng_factory": rng_factory}
for rng_factory in [jtu.rand_default]
for lhs_shape, rhs_shape, axes in [
[(2,), (2,), (-1, -1, -1, None)],
[(2, 4), (2, 4), (-1, -1, -1, 0)],
[(3, 4), (3, 4), (-1, -1, -1, 0)],
[(3, 4), (3, 6, 5, 4), (-1, -1, -1, 0)],
[(4, 3), (3, 6, 5, 4), (1, 0, -1, None)],
[(6, 1, 3), (5, 3), (-1, -1, -1, None)],
[(6, 1, 2), (5, 3), (-1, -1, -1, None)],
[(10, 5, 2, 8), (1, 5, 1, 3), (-2, -1, -3, None)],
[(4, 5, 2), (4, 5, 2), (-1, -1, 0, None)],
[(4, 5, 2), (4, 5, 2), (-1, -1, -1, None)]
]
for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))
def testCross(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
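    # axes packs (axisa, axisb, axisc, axis) following onp.cross's
    # signature; a non-None axis overrides the other three.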
axisa, axisb, axisc, axis = axes
jnp_fun = lambda a, b: jnp.cross(a, b, axisa, axisb, axisc, axis)
def onp_fun(a, b):
a = a.astype(onp.float32) if lhs_dtype == jnp.bfloat16 else a
b = b.astype(onp.float32) if rhs_dtype == jnp.bfloat16 else b
out = onp.cross(a, b, axisa, axisb, axisc, axis)
return out.astype(jnp.promote_types(lhs_dtype, rhs_dtype))
tol_spec = {dtypes.bfloat16: 3e-1, onp.float16: 0.15}
tol = max(jtu.tolerance(lhs_dtype, tol_spec),
jtu.tolerance(rhs_dtype, tol_spec))
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True, atol=tol,
rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
name,
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"rng_factory": rng_factory}
for rng_factory in [jtu.rand_default]
for name, lhs_shape, rhs_shape in [
("matrix-scalar", (3, 3), ()),
("scalar-matrix", (), (3, 3)),
("matrix-vector", (4, 5), (5,)),
("vector-matrix", (6,), (6, 4)),
("matrix-matrix", (3, 4), (4, 5)),
("tensor-vector", (4, 3, 2), (2,)),
("vector-tensor", (2,), (3, 2, 4)),
("tensor-matrix", (4, 3, 2), (2, 5)),
("matrix-tensor", (5, 2), (3, 2, 4)),
("tensor-tensor", (2, 3, 4), (5, 4, 1))]
for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))
def testDot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
tol = {onp.float16: 1e-2, onp.float32: 1e-5, onp.float64: 1e-14,
onp.complex128: 1e-14}
if jtu.device_under_test() == "tpu":
tol[onp.float32] = tol[onp.complex64] = 2e-1
def onp_dot(x, y):
x = x.astype(onp.float32) if lhs_dtype == jnp.bfloat16 else x
y = y.astype(onp.float32) if rhs_dtype == jnp.bfloat16 else y
return onp.dot(x, y).astype(jnp.promote_types(lhs_dtype, rhs_dtype))
self._CheckAgainstNumpy(onp_dot, jnp.dot, args_maker, check_dtypes=True,
tol=tol)
self._CompileAndCheck(jnp.dot, args_maker, check_dtypes=True, atol=tol,
rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
name,
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"rng_factory": rng_factory}
for rng_factory in [jtu.rand_default]
for name, lhs_shape, rhs_shape in [
("vector-vector", (3,), (3,)),
("matrix-vector", (3, 3), (3,)),
("vector-matrix", (3,), (3, 3)),
("matrix-matrix", (3, 3), (3, 3)),
("vector-tensor", (3,), (5, 3, 2)),
("tensor-vector", (5, 3, 2), (2,)),
("matrix-tensor", (5, 2), (3, 2, 4)),
("tensor-matrix", (5, 2, 3), (3, 2)),
("tensor-tensor", (5, 3, 4), (5, 4, 1)),
("tensor-tensor-broadcast", (3, 1, 3, 4), (5, 4, 1))]
for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))
def testMatmul(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng_factory):
rng = rng_factory()
def onp_fun(x, y):
dtype = jnp.promote_types(lhs_dtype, rhs_dtype)
return onp.matmul(x, y).astype(dtype)
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
tol = {onp.float16: 1e-2, onp.float32: 2e-2, onp.float64: 1e-12,
onp.complex128: 1e-12}
if jtu.device_under_test() == "tpu":
tol[onp.float32] = tol[onp.complex64] = 4e-2
self._CheckAgainstNumpy(onp_fun, jnp.matmul, args_maker,
check_dtypes=True, tol=tol)
self._CompileAndCheck(jnp.matmul, args_maker, check_dtypes=True, atol=tol,
rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),
axes),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"axes": axes, "rng_factory": rng_factory}
for rng_factory in [jtu.rand_default]
for lhs_shape, rhs_shape, axes in [
[(3,), (), 0],
[(2, 3, 4), (5, 6, 7), 0], [(2, 3, 4), (3, 4, 5, 6), 2],
[(2, 3, 4), (5, 4, 3, 6), [1, 2]],
[(2, 3, 4), (5, 4, 3, 6), [[1, 2], [2, 1]]],
[(1, 2, 3, 4), (4, 5, 3, 6), [[2, 3], [2, 0]]],
]
for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))
def testTensordot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
jnp_fun = lambda a, b: jnp.tensordot(a, b, axes)
def onp_fun(a, b):
a = a if lhs_dtype != jnp.bfloat16 else a.astype(onp.float32)
b = b if rhs_dtype != jnp.bfloat16 else b.astype(onp.float32)
dtype = jnp.promote_types(lhs_dtype, rhs_dtype)
return onp.tensordot(a, b, axes).astype(dtype)
tol = {onp.float16: 1e-1, onp.float32: 1e-3, onp.float64: 1e-12,
onp.complex64: 1e-3, onp.complex128: 1e-12}
if jtu.device_under_test() == "tpu":
tol[onp.float32] = tol[onp.complex64] = 2e-1
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
def testTensordotErrors(self):
a = onp.random.random((3, 2, 2))
b = onp.random.random((2,))
self.assertRaisesRegex(
TypeError, "Number of tensordot axes.*exceeds input ranks.*",
lambda: jnp.tensordot(a, b, axes=2))
self.assertRaisesRegex(
TypeError, "tensordot requires axes lists to have equal length.*",
lambda: jnp.tensordot(a, b, axes=([0], [0, 1])))
self.assertRaisesRegex(
TypeError, "tensordot requires both axes lists to be either ints, tuples or lists.*",
lambda: jnp.tensordot(a, b, axes=('bad', 'axes')))
self.assertRaisesRegex(
TypeError, "tensordot axes argument must be an int, a pair of ints, or a pair of lists.*",
lambda: jnp.tensordot(a, b, axes='badaxes'))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"rng_factory": jtu.rand_default}
for lhs_shape, lhs_dtype in _shape_and_dtypes(all_shapes, inexact_dtypes)
for rhs_shape, rhs_dtype in _shape_and_dtypes(all_shapes, inexact_dtypes)
if len(jtu._dims_of_shape(lhs_shape)) == 0
or len(jtu._dims_of_shape(rhs_shape)) == 0
or lhs_shape[-1] == rhs_shape[-1]))
def testInner(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
def onp_fun(lhs, rhs):
lhs = lhs if lhs_dtype != jnp.bfloat16 else lhs.astype(onp.float32)
rhs = rhs if rhs_dtype != jnp.bfloat16 else rhs.astype(onp.float32)
dtype = jnp.promote_types(lhs_dtype, rhs_dtype)
return onp.inner(lhs, rhs).astype(dtype)
jnp_fun = lambda lhs, rhs: jnp.inner(lhs, rhs)
tol_spec = {onp.float16: 1e-2, onp.float32: 1e-5, onp.float64: 1e-13}
if jtu.device_under_test() == "tpu":
tol_spec[onp.float32] = tol_spec[onp.complex64] = 2e-1
tol = max(jtu.tolerance(lhs_dtype, tol_spec),
jtu.tolerance(rhs_dtype, tol_spec))
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=False,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=False, atol=tol,
rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_amin={}_amax={}".format(
jtu.format_shape_dtype_string(shape, dtype), a_min, a_max),
"shape": shape, "dtype": dtype, "a_min": a_min, "a_max": a_max,
"rng_factory": jtu.rand_default}
for shape in all_shapes for dtype in number_dtypes
for a_min, a_max in [(-1, None), (None, 1), (-1, 1),
(-onp.ones(1), None),
(None, onp.ones(1)),
(-onp.ones(1), onp.ones(1))]))
def testClipStaticBounds(self, shape, dtype, a_min, a_max, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.clip(x, a_min=a_min, a_max=a_max)
jnp_fun = lambda x: jnp.clip(x, a_min=a_min, a_max=a_max)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
def testClipError(self):
with self.assertRaisesRegex(ValueError, "At most one of a_min and a_max.*"):
jnp.clip(jnp.zeros((3,)))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_decimals={}".format(
jtu.format_shape_dtype_string(shape, dtype), decimals),
"shape": shape, "dtype": dtype, "decimals": decimals,
"rng_factory": jtu.rand_default}
for shape, dtype in _shape_and_dtypes(all_shapes, number_dtypes)
for decimals in [0, 1, -2]))
def testRoundStaticDecimals(self, shape, dtype, decimals, rng_factory):
rng = rng_factory()
if jnp.issubdtype(dtype, onp.integer) and decimals < 0:
self.skipTest("Integer rounding with decimals < 0 not implemented")
onp_fun = lambda x: onp.round(x, decimals=decimals)
jnp_fun = lambda x: jnp.round(x, decimals=decimals)
args_maker = lambda: [rng(shape, dtype)]
tol = {jnp.bfloat16: 5e-2, onp.float16: 1e-2}
check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker,
check_dtypes=check_dtypes, tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=check_dtypes,
atol=tol, rtol=tol)
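  # round() dispatches to __round__; note round(7.532, 1) == round(7.5, 1)
  # == 7.5, so the differing literals below still compare equal.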
def testOperatorRound(self):
self.assertAllClose(round(onp.float32(7.532), 1),
round(jnp.float32(7.5), 1), check_dtypes=True)
self.assertAllClose(round(onp.float32(1.234), 2),
round(jnp.float32(1.234), 2), check_dtypes=True)
self.assertAllClose(round(onp.float32(1.234)),
round(jnp.float32(1.234)), check_dtypes=False)
self.assertAllClose(round(onp.float32(7.532), 1),
round(jnp.array(7.5, jnp.float32), 1), check_dtypes=True)
self.assertAllClose(round(onp.float32(1.234), 2),
round(jnp.array(1.234, jnp.float32), 2), check_dtypes=True)
self.assertAllClose(round(onp.float32(1.234)),
round(jnp.array(1.234, jnp.float32)),
check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_mode={}_rpadwidth={}_rconstantvalues={}".format(
jtu.format_shape_dtype_string(shape, dtype), mode, pad_width_rank,
constant_values_rank),
"shape": shape, "dtype": dtype, "mode": mode,
"pad_width_rank": pad_width_rank,
"constant_values_rank": constant_values_rank,
"rng_factory": jtu.rand_default,
"irng_factory": partial(jtu.rand_int, 3)}
for mode, constant_values_rank, shapes in [
('constant', 0, all_shapes),
('constant', 1, all_shapes),
('constant', 2, all_shapes),
('symmetric', None, nonempty_shapes),
('reflect', None, nonempty_shapes),
('wrap', None, nonempty_shapes),
('edge', None, nonempty_shapes),
]
for shape, dtype in _shape_and_dtypes(shapes, all_dtypes)
for pad_width_rank in range(3)))
def testPad(self, shape, dtype, mode, pad_width_rank, constant_values_rank,
rng_factory, irng_factory):
rng = rng_factory()
irng = irng_factory()
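    # [len(shape), 2][2 - pad_width_rank:] picks the pad_width shape by
    # rank: () for a single scalar, (2,) for one (before, after) pair shared
    # across axes, and (len(shape), 2) for per-axis pairs.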
pad_width = irng([len(shape), 2][2 - pad_width_rank:], onp.int32)
def onp_fun(x, kwargs):
if pad_width.size == 0:
return x
return onp.pad(x, pad_width, mode=mode, **kwargs)
def jnp_fun(x, kwargs):
return jnp.pad(x, pad_width, mode=mode, **kwargs)
def args_maker():
kwargs = {}
if constant_values_rank:
kwargs["constant_values"] = rng(
[len(shape), 2][2 - constant_values_rank:], dtype)
return rng(shape, dtype), kwargs
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker,
check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape=[{}]_reps={}".format(
jtu.format_shape_dtype_string(shape, dtype), reps),
"shape": shape, "dtype": dtype, "reps": reps,
"rng_factory": jtu.rand_default}
for reps in [(), (2,), (3, 4), (2, 3, 4)]
for shape, dtype in _shape_and_dtypes(all_shapes, default_dtypes)
))
def testTile(self, shape, dtype, reps, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: onp.tile(arg, reps)
jnp_fun = lambda arg: jnp.tile(arg, reps)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker,
check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_axis={}_baseshape=[{}]_dtypes=[{}]".format(
axis, ",".join(str(d) for d in base_shape),
",".join(onp.dtype(dtype).name for dtype in arg_dtypes)),
"axis": axis, "base_shape": base_shape, "arg_dtypes": arg_dtypes,
"rng_factory": jtu.rand_default}
for num_arrs in [3]
for arg_dtypes in CombosWithReplacement(default_dtypes, num_arrs)
for base_shape in [(4,), (3, 4), (2, 3, 4)]
for axis in range(-len(base_shape)+1, len(base_shape))))
def testConcatenate(self, axis, base_shape, arg_dtypes, rng_factory):
rng = rng_factory()
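    # Build one operand per dtype, cycling the concatenation-axis size
    # through 3, 1, 4 while keeping the remaining dimensions fixed.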
wrapped_axis = axis % len(base_shape)
shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:]
for size, _ in zip(itertools.cycle([3, 1, 4]), arg_dtypes)]
def onp_fun(*args):
args = [x if x.dtype != jnp.bfloat16 else x.astype(onp.float32)
for x in args]
dtype = functools.reduce(jnp.promote_types, arg_dtypes)
return onp.concatenate(args, axis=axis).astype(dtype)
jnp_fun = lambda *args: jnp.concatenate(args, axis=axis)
def args_maker():
return [rng(shape, dtype) for shape, dtype in zip(shapes, arg_dtypes)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_axis={}_baseshape=[{}]_dtypes=[{}]".format(
axis, ",".join(str(d) for d in base_shape),
",".join(onp.dtype(dtype).name for dtype in arg_dtypes)),
"axis": axis, "base_shape": base_shape, "arg_dtypes": arg_dtypes,
"rng_factory": jtu.rand_default}
for arg_dtypes in CombosWithReplacement(default_dtypes, 2)
for base_shape in [(4,), (3, 4), (2, 3, 4)]
for axis in range(-len(base_shape)+1, len(base_shape))))
def testAppend(self, axis, base_shape, arg_dtypes, rng_factory):
rng = rng_factory()
wrapped_axis = axis % len(base_shape)
shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:]
for size, _ in zip(itertools.cycle([3, 1, 4]), arg_dtypes)]
def onp_fun(arr, values):
arr = arr.astype(onp.float32) if arr.dtype == jnp.bfloat16 else arr
values = (values.astype(onp.float32) if values.dtype == jnp.bfloat16
else values)
out = onp.append(arr, values, axis=axis)
return out.astype(jnp.promote_types(*arg_dtypes))
jnp_fun = lambda arr, values: jnp.append(arr, values, axis=axis)
def args_maker():
return [rng(shape, dtype) for shape, dtype in zip(shapes, arg_dtypes)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape=[{}]_axis={}_repeats={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis, repeats),
"axis": axis, "shape": shape, "dtype": dtype, "repeats": repeats,
"rng_factory": jtu.rand_default}
for repeats in [0, 1, 2]
for shape, dtype in _shape_and_dtypes(all_shapes, default_dtypes)
for axis in [None] + list(range(-len(shape), len(shape)))))
def testRepeat(self, axis, shape, dtype, repeats, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: onp.repeat(arg, repeats=repeats, axis=axis)
onp_fun = _promote_like_jnp(onp_fun)
jnp_fun = lambda arg: jnp.repeat(arg, repeats=repeats, axis=axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
def testIssue1233(self):
tol = 1e-5
def test_single(m, args_maker, repeats, axis):
lax_ans = jnp.repeat(m, repeats, axis)
numpy_ans = onp.repeat(m, repeats, axis)
self.assertAllClose(lax_ans, numpy_ans, check_dtypes=True, rtol=tol, atol=tol)
      jnp_fun = lambda arg: jnp.repeat(arg, repeats=repeats, axis=axis)
      self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
    m = jnp.array([1, 2, 3, 4, 5, 6])
    args_maker = lambda: [m]
    for repeats in [2, [1, 3, 2, 1, 1, 2], [1, 3, 0, 1, 1, 2], [2],
                    jnp.array([1, 3, 2, 1, 1, 2]), jnp.array([2])]:
      test_single(m, args_maker, repeats, None)
    m_rect = m.reshape((2, 3))
    args_maker = lambda: [m_rect]
    for repeats in [2, [2, 1], [2], jnp.array([2, 1]), jnp.array([2])]:
      test_single(m_rect, args_maker, repeats, axis=0)
    for repeats in [2, [1, 3, 2], [2], jnp.array([1, 3, 2]), jnp.array([2])]:
      test_single(m_rect, args_maker, repeats, axis=1)
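  # Regression test for google/jax#2330: jnp.concatenate must not alias or
  # mutate its inputs; the in-place ``-=`` below should rebind x to a fresh
  # array, leaving the caller's buffers untouched.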
def testIssue2330(self):
def attempt_sideeffect(x):
x = [x]
x = jnp.concatenate(x)
x -= 1.
return x
onp_input = onp.ones((1))
jnp_input = jnp.ones((1))
expected_onp_input_after_call = onp.ones((1))
expected_jnp_input_after_call = jnp.ones((1))
self.assertIs(type(jnp.concatenate([onp_input])), jnp.DeviceArray)
attempt_sideeffect(onp_input)
attempt_sideeffect(jnp_input)
self.assertAllClose(onp_input, expected_onp_input_after_call, check_dtypes=True)
self.assertAllClose(jnp_input, expected_jnp_input_after_call, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "op={}_xshape=[{}]_yshape=[{}]_mode={}".format(
op,
jtu.format_shape_dtype_string(xshape, dtype),
jtu.format_shape_dtype_string(yshape, dtype),
mode),
"xshape": xshape, "yshape": yshape, "dtype": dtype, "mode": mode,
"rng_factory": jtu.rand_default,
"jnp_op": getattr(jnp, op),
"onp_op": getattr(onp, op)}
for mode in ['full', 'same', 'valid']
for op in ['convolve', 'correlate']
for dtype in default_dtypes
for xshape in one_dim_array_shapes
for yshape in one_dim_array_shapes))
def testConvolutions(self, xshape, yshape, dtype, mode, rng_factory, jnp_op, onp_op):
rng = rng_factory()
args_maker = lambda: [rng(xshape, dtype), rng(yshape, dtype)]
onp_fun = partial(onp_op, mode=mode)
jnp_fun = partial(jnp_op, mode=mode)
tol = 1e-2 if jtu.device_under_test() != "tpu" else 0.5
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=False, tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "op={}_shape=[{}]_axis={}_out_dtype={}".format(
op, jtu.format_shape_dtype_string(shape, dtype), axis,
out_dtype.__name__),
"axis": axis, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
"rng_factory": jtu.rand_default, "jnp_op": getattr(jnp, op),
"onp_op": getattr(onp, op)}
for op in ["cumsum", "cumprod"]
for dtype in all_dtypes
for out_dtype in default_dtypes
for shape in all_shapes
for axis in [None] + list(range(-len(shape), len(shape)))))
def testCumSumProd(self, axis, shape, dtype, out_dtype, onp_op, jnp_op, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: onp_op(arg, axis=axis, dtype=out_dtype)
onp_fun = jtu.ignore_warning(category=onp.ComplexWarning)(onp_fun)
jnp_fun = lambda arg: jnp_op(arg, axis=axis, dtype=out_dtype)
jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
args_maker = lambda: [rng(shape, dtype)]
tol = max(jtu.tolerance(dtype), jtu.tolerance(out_dtype))
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dtype={}_m={}_n={}_k={}".format(
onp.dtype(dtype).name, m, n, k),
"m": m, "n": n, "k": k, "dtype": dtype, "rng_factory": jtu.rand_default}
for dtype in default_dtypes
for n in [0, 4]
for m in [None, 0, 1, 3, 4]
for k in list(range(-4, 4))))
def testTri(self, m, n, k, dtype, rng_factory):
rng = rng_factory()
onp_fun = lambda: onp.tri(n, M=m, k=k, dtype=dtype)
jnp_fun = lambda: jnp.tri(n, M=m, k=k, dtype=dtype)
args_maker = lambda: []
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_shape={}_k={}".format(
op, jtu.format_shape_dtype_string(shape, dtype), k),
"dtype": dtype, "shape": shape, "op": op, "k": k,
"rng_factory": jtu.rand_default}
for dtype in default_dtypes
for shape in [shape for shape in all_shapes if len(shape) >= 2]
for op in ["tril", "triu"]
for k in list(range(-3, 3))))
def testTriLU(self, dtype, shape, op, k, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: getattr(onp, op)(arg, k=k)
jnp_fun = lambda arg: getattr(jnp, op)(arg, k=k)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_ndim={}_n={}".format(ndim, n),
"ndim": ndim, "n": n}
for ndim in [0, 1, 4]
for n in [0, 1, 7]))
def testDiagIndices(self, ndim, n):
onp.testing.assert_equal(onp.diag_indices(n, ndim),
jnp.diag_indices(n, ndim))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_k={}".format(
jtu.format_shape_dtype_string(shape, dtype), k),
"dtype": dtype, "shape": shape, "k": k, "rng_factory": jtu.rand_default}
for dtype in default_dtypes
for shape in [shape for shape in all_shapes if len(shape) in (1, 2)]
for k in list(range(-4, 4))))
def testDiag(self, shape, dtype, k, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: onp.diag(arg, k)
jnp_fun = lambda arg: jnp.diag(arg, k)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_offset={}_axis1={}_axis2={}".format(
jtu.format_shape_dtype_string(shape, dtype), offset, axis1, axis2),
"dtype": dtype, "shape": shape, "offset": offset, "axis1": axis1,
"axis2": axis2, "rng_factory": jtu.rand_default}
for dtype in default_dtypes
for shape in [shape for shape in all_shapes if len(shape) >= 2]
for axis1 in range(-len(shape), len(shape))
for axis2 in [a for a in range(-len(shape), len(shape))
if a % len(shape) != axis1 % len(shape)]
for offset in list(range(-4, 4))))
def testDiagonal(self, shape, dtype, offset, axis1, axis2, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: onp.diagonal(arg, offset, axis1, axis2)
jnp_fun = lambda arg: jnp.diagonal(arg, offset, axis1, axis2)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_n={}".format(onp.dtype(dtype).name, n),
"dtype": dtype, "n": n}
for dtype in default_dtypes
for n in list(range(4))))
def testIdentity(self, n, dtype):
onp_fun = lambda: onp.identity(n, dtype)
jnp_fun = lambda: jnp.identity(n, dtype)
args_maker = lambda: []
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_x1={}_x2={}_x1_rng={}".format(
jtu.format_shape_dtype_string(x1_shape, x1_dtype),
jtu.format_shape_dtype_string(x2_shape, onp.int32),
x1_rng_factory_id),
"x1_shape": x1_shape, "x1_dtype": x1_dtype,
"x2_shape": x2_shape, "x1_rng_factory": x1_rng_factory,
"x2_rng_factory": x2_rng_factory}
for x1_rng_factory_id, x1_rng_factory in
enumerate([jtu.rand_some_inf_and_nan, jtu.rand_some_zero])
for x2_rng_factory in [partial(jtu.rand_int, -1075, 1024)]
for x1_shape, x2_shape in filter(_shapes_are_broadcast_compatible,
CombosWithReplacement(array_shapes, 2))
for x1_dtype in default_dtypes))
@jtu.skip_on_devices("tpu")
def testLdexp(self, x1_shape, x1_dtype, x2_shape, x1_rng_factory, x2_rng_factory):
if (x1_dtype not in [jnp.bfloat16, onp.float16, onp.float32]
and not FLAGS.jax_enable_x64):
self.skipTest("Only run float64 testcase when float64 is enabled.")
x1_rng = x1_rng_factory()
x2_rng = x2_rng_factory()
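    # The exponent range [-1075, 1024) appears chosen to straddle float64's
    # limits (2**-1074 is the smallest subnormal), exercising underflow to
    # zero and overflow; the overflow RuntimeWarning is suppressed below.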
onp_fun = lambda x1, x2: onp.ldexp(x1, x2)
onp_fun = jtu.ignore_warning(category=RuntimeWarning,
message="overflow.*")(onp_fun)
jnp_fun = lambda x1, x2: jnp.ldexp(x1, x2)
args_maker = lambda: [x1_rng(x1_shape, x1_dtype),
x2_rng(x2_shape, onp.int32)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_x={}_rng_factory={}".format(
jtu.format_shape_dtype_string(shape, dtype), rng_factory_id),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory}
for rng_factory_id, rng_factory in enumerate([
jtu.rand_some_inf_and_nan,
jtu.rand_some_zero,
partial(jtu.rand_not_small, offset=1e8),
])
for shape in all_shapes
for dtype in default_dtypes))
@jtu.skip_on_devices("tpu")
def testFrexp(self, shape, dtype, rng_factory):
# integer types are converted to float64 in numpy's implementation
if (dtype not in [jnp.bfloat16, onp.float16, onp.float32]
and not FLAGS.jax_enable_x64):
self.skipTest("Only run float64 testcase when float64 is enabled.")
rng = rng_factory()
onp_fun = lambda x: onp.frexp(x)
jnp_fun = lambda x: jnp.frexp(x)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_dtype_{}_offset={}_axis1={}_axis2={}".format(
jtu.format_shape_dtype_string(shape, dtype),
out_dtype, offset, axis1, axis2),
"dtype": dtype, "out_dtype": out_dtype, "shape": shape, "offset": offset,
"axis1": axis1, "axis2": axis2, "rng_factory": jtu.rand_default}
for dtype in default_dtypes
for out_dtype in [None] + number_dtypes
for shape in [shape for shape in all_shapes if len(shape) >= 2]
for axis1 in range(-len(shape), len(shape))
for axis2 in range(-len(shape), len(shape))
if (axis1 % len(shape)) != (axis2 % len(shape))
for offset in list(range(-4, 4))))
def testTrace(self, shape, dtype, out_dtype, offset, axis1, axis2, rng_factory):
rng = rng_factory()
def onp_fun(arg):
if out_dtype == jnp.bfloat16:
return onp.trace(arg, offset, axis1, axis2, onp.float32).astype(jnp.bfloat16)
else:
return onp.trace(arg, offset, axis1, axis2, out_dtype)
jnp_fun = lambda arg: jnp.trace(arg, offset, axis1, axis2, out_dtype)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}".format(
jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), axis),
"shape": shape, "axis": axis, "dtypes": dtypes, "rng_factory": rng_factory}
for dtypes in [
[onp.float32],
[onp.float32, onp.float32],
[onp.float32, onp.int32, onp.float32],
[onp.float32, onp.int64, onp.float32],
[onp.float32, onp.int32, onp.float64],
]
for shape in [(), (2,), (3, 4), (1, 100)]
for axis in range(-len(shape), len(shape) + 1)
for rng_factory in [jtu.rand_default]))
def testStack(self, shape, axis, dtypes, rng_factory):
rng = rng_factory()
args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
onp_fun = _promote_like_jnp(partial(onp.stack, axis=axis))
jnp_fun = partial(jnp.stack, axis=axis)
    self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_{}".format(
op, jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes)),
"shape": shape, "op": op, "dtypes": dtypes, "rng_factory": rng_factory}
for op in ["hstack", "vstack", "dstack"]
for dtypes in [
[onp.float32],
[onp.float32, onp.float32],
[onp.float32, onp.int32, onp.float32],
[onp.float32, onp.int64, onp.float32],
[onp.float32, onp.int32, onp.float64],
]
for shape in [(), (2,), (3, 4), (1, 100), (2, 3, 4)]
for rng_factory in [jtu.rand_default]))
def testHVDStack(self, shape, op, dtypes, rng_factory):
rng = rng_factory()
args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
onp_fun = _promote_like_jnp(getattr(onp, op))
jnp_fun = getattr(jnp, op)
    self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outdtype={}".format(
jtu.format_shape_dtype_string(shape, fill_value_dtype),
onp.dtype(out_dtype).name if out_dtype else "None"),
"shape": shape, "fill_value_dtype": fill_value_dtype,
"out_dtype": out_dtype, "rng_factory": jtu.rand_default}
for shape in array_shapes + [3, onp.array(7, dtype=onp.int32)]
for fill_value_dtype in default_dtypes
for out_dtype in [None] + default_dtypes))
def testFull(self, shape, fill_value_dtype, out_dtype, rng_factory):
rng = rng_factory()
onp_fun = lambda fill_value: onp.full(shape, fill_value, dtype=out_dtype)
jnp_fun = lambda fill_value: jnp.full(shape, fill_value, dtype=out_dtype)
args_maker = lambda: [rng((), fill_value_dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": ("_op={}_shape={}_dtype={}").format(op, shape, dtype),
"onp_op": getattr(onp, op), "jnp_op": getattr(jnp, op),
"shape": shape, "dtype": dtype}
for op in ["zeros", "ones"]
for shape in [2, (), (2,), (3, 0), onp.array((4, 5, 6), dtype=onp.int32),
onp.array(4, dtype=onp.int32)]
for dtype in all_dtypes))
def testZerosOnes(self, onp_op, jnp_op, shape, dtype):
rng = jtu.rand_default()
def args_maker(): return []
onp_op = partial(onp_op, shape, dtype)
jnp_op = partial(jnp_op, shape, dtype)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
def testOnesWithInvalidShape(self):
with self.assertRaises(TypeError):
jnp.ones((-1, 1))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_filldtype={}_outdtype={}".format(
jtu.format_shape_dtype_string(shape, in_dtype),
onp.dtype(fill_value_dtype).name,
onp.dtype(out_dtype).name),
"shape": shape, "in_dtype": in_dtype,
"fill_value_dtype": fill_value_dtype, "out_dtype": out_dtype,
"rng_factory": jtu.rand_default}
for shape in array_shapes
for in_dtype in default_dtypes
for fill_value_dtype in default_dtypes
for out_dtype in default_dtypes))
def testFullLike(self, shape, in_dtype, fill_value_dtype, out_dtype, rng_factory):
rng = rng_factory()
onp_fun = lambda x, fill_value: onp.full_like(x, fill_value, dtype=out_dtype)
jnp_fun = lambda x, fill_value: jnp.full_like(x, fill_value, dtype=out_dtype)
args_maker = lambda: [rng(shape, in_dtype), rng((), fill_value_dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_{}sections".format(
jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),
"shape": shape, "num_sections": num_sections, "axis": axis,
"dtype": dtype, "rng_factory": jtu.rand_default}
for shape, axis, num_sections in [
((3,), 0, 3), ((12,), 0, 3), ((12, 4), 0, 4), ((12, 4), 1, 2),
((2, 3, 4), -1, 2), ((2, 3, 4), -2, 3)]
for dtype in default_dtypes))
def testSplitStaticInt(self, shape, num_sections, axis, dtype, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.split(x, num_sections, axis=axis)
jnp_fun = lambda x: jnp.split(x, num_sections, axis=axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_{}sections".format(
jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),
"shape": shape, "num_sections": num_sections, "axis": axis,
"dtype": dtype, "rng_factory": jtu.rand_default}
for shape, axis, num_sections in [
((12, 4), 0, 4), ((12, 4), 1, 2),
((2, 3, 4), 2, 2), ((4, 3, 4), 0, 2)]
for dtype in default_dtypes))
def testHVDSplit(self, shape, num_sections, axis, dtype, rng_factory):
rng = rng_factory()
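    # vsplit/hsplit/dsplit split along axes 0/1/2 respectively; pick the
    # function matching the parameterized axis from either module.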
def fn(module, axis):
if axis == 0:
return module.vsplit
elif axis == 1:
return module.hsplit
else:
assert axis == 2
return module.dsplit
onp_fun = lambda x: fn(onp, axis)(x, num_sections)
jnp_fun = lambda x: fn(jnp, axis)(x, num_sections)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}_order={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
jtu.format_shape_dtype_string(out_shape, dtype),
order),
"arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype,
"order": order, "rng_factory": jtu.rand_default}
for dtype in default_dtypes
for order in ["C", "F"]
for arg_shape, out_shape in [
(jtu.NUMPY_SCALAR_SHAPE, (1, 1, 1)),
((), (1, 1, 1)),
((7, 0), (0, 42, 101)),
((3, 4), 12),
((3, 4), (12,)),
((3, 4), -1),
((2, 1, 4), (-1,)),
((2, 2, 4), (2, 8))
]))
def testReshape(self, arg_shape, out_shape, dtype, order, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.reshape(x, out_shape, order=order)
jnp_fun = lambda x: jnp.reshape(x, out_shape, order=order)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
jtu.format_shape_dtype_string(out_shape, dtype)),
"arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype,
"rng_factory": jtu.rand_default}
for dtype in default_dtypes
for arg_shape, out_shape in [
((7, 0), (0, 42, 101)),
((2, 1, 4), (-1,)),
((2, 2, 4), (2, 8))
]))
def testReshapeMethod(self, arg_shape, out_shape, dtype, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.reshape(x, out_shape)
jnp_fun = lambda x: x.reshape(*out_shape)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_expanddim={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype), dim),
"arg_shape": arg_shape, "dtype": dtype, "dim": dim,
"rng_factory": jtu.rand_default}
for arg_shape in [(), (3,), (3, 4)]
for dtype in default_dtypes
for dim in range(-len(arg_shape)+1, len(arg_shape))))
def testExpandDimsStaticDim(self, arg_shape, dtype, dim, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.expand_dims(x, dim)
jnp_fun = lambda x: jnp.expand_dims(x, dim)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_axes=({},{})".format(
jtu.format_shape_dtype_string(arg_shape, dtype), ax1, ax2),
"arg_shape": arg_shape, "dtype": dtype, "ax1": ax1, "ax2": ax2,
"rng_factory": jtu.rand_default}
for arg_shape, ax1, ax2 in [
((3, 4), 0, 1), ((3, 4), 1, 0), ((3, 4, 5), 1, 2),
((3, 4, 5), -1, -2), ((3, 4, 5), 0, 1)]
for dtype in default_dtypes))
def testSwapAxesStaticAxes(self, arg_shape, dtype, ax1, ax2, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.swapaxes(x, ax1, ax2)
jnp_fun = lambda x: jnp.swapaxes(x, ax1, ax2)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_axis={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype), ax),
"arg_shape": arg_shape, "dtype": dtype, "ax": ax,
"rng_factory": jtu.rand_default}
for arg_shape, ax in [
((3, 1), None),
((3, 1), 1),
((1, 3, 1), (0, 2)),
((1, 4, 1), (0,))]
for dtype in default_dtypes))
def testSqueeze(self, arg_shape, dtype, ax, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.squeeze(x, ax)
jnp_fun = lambda x: jnp.squeeze(x, ax)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_axis={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype), ax),
"arg_shape": arg_shape, "dtype": dtype, "ax": ax,
"rng_factory": jtu.rand_default}
for arg_shape, ax in [
((3,), 0),
((1, 3), 1),
((1, 3, 1), (0, 1))]
for dtype in default_dtypes))
def testSqueezeFailsOnNonsingletonAxis(self, arg_shape, dtype, ax,
rng_factory):
rng = rng_factory()
x = jnp.zeros(arg_shape, dtype=dtype)
fun = lambda: jnp.squeeze(x, ax)
self.assertRaisesRegex(ValueError, "cannot select an axis to squeeze", fun)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_axis={}_weights={}_returned={}".format(
jtu.format_shape_dtype_string(shape, dtype),
axis,
(None if weights_shape is None else jtu.format_shape_dtype_string(weights_shape, dtype)),
returned),
"rng_factory": jtu.rand_default, "shape": shape, "dtype": dtype, "axis": axis,
"weights_shape": weights_shape, "returned": returned}
for shape, dtype in _shape_and_dtypes(nonempty_shapes, number_dtypes)
for axis in list(range(-len(shape), len(shape))) + [None]
for weights_shape in ([None, shape] if axis is None or len(shape) == 1
else [None, (shape[axis],), shape])
for returned in [False, True]))
def testAverage(self, shape, dtype, axis, weights_shape, returned, rng_factory):
rng = rng_factory()
if weights_shape is None:
onp_fun = lambda x: onp.average(x, axis, returned=returned)
jnp_fun = lambda x: jnp.average(x, axis, returned=returned)
args_maker = lambda: [rng(shape, dtype)]
else:
onp_fun = lambda x, weights: onp.average(x, axis, weights, returned)
jnp_fun = lambda x, weights: jnp.average(x, axis, weights, returned)
args_maker = lambda: [rng(shape, dtype), rng(weights_shape, dtype)]
onp_fun = _promote_like_jnp(onp_fun, inexact=True)
    tol = {onp.float16: 1e-2, onp.float32: 1e-6, onp.float64: 1e-12}
check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
try:
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker,
check_dtypes=check_dtypes, tol=tol)
except ZeroDivisionError:
self.skipTest("don't support checking for ZeroDivisionError")
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=check_dtypes,
rtol=tol, atol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_arg{}_ndmin={}".format(i, ndmin),
"arg": arg, "ndmin": ndmin, "dtype": dtype}
for i, (arg, dtype) in enumerate([
([True, False, True], jnp.bool_),
(3., jnp.float_),
([1, 2, 3], jnp.int_),
([1., 2., 3.], jnp.float_),
([[1, 2], [3, 4], [5, 6]], jnp.int_),
([[1, 2.], [3, 4], [5, 6]], jnp.float_),
([[1., 2j], [3., 4.], [5., 6.]], jnp.complex_),
([[3, onp.array(2, dtype=jnp.float_), 1],
onp.arange(3., dtype=jnp.float_)], jnp.float_),
])
for ndmin in [None, onp.ndim(arg), onp.ndim(arg) + 1, onp.ndim(arg) + 2]))
def testArray(self, arg, ndmin, dtype):
args_maker = lambda: [arg]
dtype = dtypes.canonicalize_dtype(dtype)
if ndmin is not None:
onp_fun = partial(onp.array, ndmin=ndmin, dtype=dtype)
jnp_fun = partial(jnp.array, ndmin=ndmin)
else:
onp_fun = partial(onp.array, dtype=dtype)
jnp_fun = jnp.array
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
def testIssue121(self):
assert not onp.isscalar(jnp.array(3))
def testArrayMethod(self):
class arraylike(object):
dtype = onp.float32
def __array__(self, dtype=None):
return 3.
a = arraylike()
ans = jnp.array(a)
assert ans == 3.
@jtu.skip_on_devices("tpu") # TODO(b/32368900): TPUs don't support uint8 yet.
def testMemoryView(self):
ans = jnp.array(bytearray(b'\x2a'))
self.assertAllClose(
ans,
onp.array([0x2a], dtype=onp.uint8),
check_dtypes=True)
def testIsClose(self):
c_isclose = api.jit(jnp.isclose)
c_isclose_nan = api.jit(partial(jnp.isclose, equal_nan=True))
n = 2
rng = onp.random.RandomState(0)
x = rng.randn(n, 1)
y = rng.randn(n, 1)
inf = onp.asarray(n * [onp.inf]).reshape([n, 1])
nan = onp.asarray(n * [onp.nan]).reshape([n, 1])
args = [x, y, inf, -inf, nan]
for arg0 in args:
for arg1 in args:
result_np = onp.isclose(arg0, arg1)
result_jax = jnp.isclose(arg0, arg1)
result_jit = c_isclose(arg0, arg1)
self.assertTrue(jnp.all(jnp.equal(result_np, result_jax)))
self.assertTrue(jnp.all(jnp.equal(result_np, result_jit)))
result_np = onp.isclose(arg0, arg1, equal_nan=True)
result_jax = jnp.isclose(arg0, arg1, equal_nan=True)
result_jit = c_isclose_nan(arg0, arg1)
self.assertTrue(jnp.all(jnp.equal(result_np, result_jax)))
self.assertTrue(jnp.all(jnp.equal(result_np, result_jit)))
def testAllClose(self):
rng = onp.random.RandomState(0)
x = rng.randn(2, 2)
y = rng.randn(2)
def same(list1, list2):
allclose = functools.partial(jnp.allclose, atol=1e-3, rtol=1e-3)
elements_close = list(map(allclose, list1, list2))
return jnp.all(jnp.array(elements_close))
csame = api.jit(same)
a1 = same((x, y), (x, y))
a2 = csame((x, y), (x, y))
a3 = csame((x, y), (x, 2 * y))
self.assertTrue(a1)
self.assertTrue(a2)
self.assertFalse(a3)
@jtu.skip_on_devices("tpu")
def testOnesBroadcastingConstantHandler(self):
self.skipTest("test needs jax3 update")
def fun(x):
ones = jnp.ones((3, 4))
assert isinstance(ones, onp.ndarray) and ones.strides == (0, 0)
c = x._node.c
Broadcast = c.Broadcast
was_called = []
c.Broadcast = lambda *args: was_called.append(True) or Broadcast(*args)
out = x + ones
assert was_called, "Broadcast was not called."
return out
fun = api.jit(fun)
out_val = fun(jnp.ones(4))
self.assertAllClose(out_val, onp.full((3, 4), 2.), check_dtypes=False)
def testZeroStridesConstantHandler(self):
raw_const = onp.random.RandomState(0).randn(1, 2, 1, 1, 5, 1)
const = onp.broadcast_to(raw_const, (3, 2, 3, 4, 5, 6))
def fun(x):
return x * const
fun = api.jit(fun)
out_val = fun(3.)
self.assertAllClose(out_val, 3. * const, check_dtypes=False)
def testIsInstanceNdarrayDuringTracing(self):
arr = onp.ones(3)
@api.jit
def f(x):
self.assertIsInstance(x, jnp.ndarray)
return jnp.sum(x)
f(arr)
def testNonArrayErrorMessage(self):
x = [1., 2.]
y = onp.array([3., 4.])
def g(x, y):
return jnp.add(x, y)
def f(x, y):
return jnp.dot(x, y)
self.assertRaises(TypeError, lambda: g(x, y))
self.assertRaises(TypeError, lambda: f(x, y))
self.assertRaises(TypeError, lambda: api.jit(g)(x, y))
self.assertRaises(TypeError, lambda: api.jit(f)(x, y))
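  # Python control flow on traced values (range(n), if x > 0.) requires
  # concrete values under jit, so both functions below fail with a
  # concretization TypeError.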
def testAbstractionErrorMessage(self):
@api.jit
def f(x, n):
for _ in range(n):
x = x * x
return x
self.assertRaises(TypeError, lambda: f(3., 3))
@api.jit
def g(x):
if x > 0.:
return x * 2
else:
return x + 2
self.assertRaises(TypeError, lambda: g(3.))
def testTracingPrimitiveWithNoTranslationErrorMessage(self):
self.skipTest("test needs jax3 update")
foo = jnp._not_implemented(lambda x: x)
foo(onp.arange(3))
cfoo = api.jit(foo)
self.assertRaises(NotImplementedError, lambda: cfoo(onp.arange(3)))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype, "axis": axis}
for shape in [(3,), (2, 3)]
for dtype in default_dtypes
for axis in list(range(-len(shape), len(shape))) + [None] # Test negative axes
for rng_factory in [jtu.rand_default]))
def testFlip(self, shape, dtype, axis, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
jnp_op = lambda x: jnp.flip(x, axis)
onp_op = lambda x: onp.flip(x, axis)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype}
for shape in [(3,), (2, 3), (3, 2, 4)]
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testFlipud(self, shape, dtype, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
jnp_op = lambda x: jnp.flipud(x)
onp_op = lambda x: onp.flipud(x)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype}
for shape in [(3, 2), (2, 3), (3, 2, 4)]
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testFliplr(self, shape, dtype, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
jnp_op = lambda x: jnp.fliplr(x)
onp_op = lambda x: onp.fliplr(x)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_k={}_axes={}".format(
jtu.format_shape_dtype_string(shape, dtype), k, axes),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype, "k": k, "axes": axes}
for shape, axes in [
[(2, 3), (0, 1)],
[(2, 3), (1, 0)],
[(4, 3, 2), (0, 2)],
[(4, 3, 2), (2, 1)],
]
for k in range(-3, 4)
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testRot90(self, shape, dtype, k, axes, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
jnp_op = lambda x: jnp.rot90(x, k, axes)
onp_op = lambda x: onp.rot90(x, k, axes)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
# TODO(mattjj): test infix operator overrides
def testRavel(self):
rng = onp.random.RandomState(0)
args_maker = lambda: [rng.randn(3, 4).astype("float32")]
self._CompileAndCheck(lambda x: x.ravel(), args_maker, check_dtypes=True)
def testAstype(self):
rng = onp.random.RandomState(0)
args_maker = lambda: [rng.randn(3, 4).astype("float32")]
op = lambda x: x.astype(jnp.int32)
self._CheckAgainstNumpy(op, op, args_maker, check_dtypes=True)
self._CompileAndCheck(op, args_maker, check_dtypes=True)
# TODO(mattjj): test other ndarray-like method overrides
def testOnpMean(self):
# from https://github.com/google/jax/issues/125
x = lax.add(jnp.eye(3, dtype=jnp.float_), 0.)
ans = onp.mean(x)
self.assertAllClose(ans, onp.array(1./3), check_dtypes=False)
def testArangeOnFloats(self):
# from https://github.com/google/jax/issues/145
expected = onp.arange(0.0, 1.0, 0.1, dtype=jnp.float_)
ans = jnp.arange(0.0, 1.0, 0.1)
self.assertAllClose(expected, ans, check_dtypes=True)
def testSortManually(self):
# manual tests for sort are nice because we don't have to worry about ties.
ans = jnp.sort(onp.array([16, 15, 23, 42, 8, 4]))
expected = onp.array([4, 8, 15, 16, 23, 42])
self.assertAllClose(expected, ans, check_dtypes=True)
a = onp.array([[1, 4], [3, 1]])
ans = jnp.sort(a, axis=None)
expected = onp.array([1, 1, 3, 4])
self.assertAllClose(expected, ans, check_dtypes=True)
a = onp.array([[1, 4], [3, 1]])
ans = jnp.sort(a)
expected = onp.array([[1, 4], [1, 3]])
self.assertAllClose(expected, ans, check_dtypes=True)
a = onp.array([[1, 4], [3, 1]])
ans = jnp.sort(a, axis=0)
expected = onp.array([[1, 1], [3, 4]])
self.assertAllClose(expected, ans, check_dtypes=True)
def testArgsortManually(self):
x = onp.array([16, 15, 23, 42, 8, 4])
ans = jnp.argsort(x)
expected = onp.argsort(x)
self.assertAllClose(expected, ans, check_dtypes=False)
x = onp.array([[16, 15, 23], [42, 8, 4]])
ans = jnp.argsort(x, axis=0)
expected = onp.argsort(x, axis=0)
self.assertAllClose(expected, ans, check_dtypes=False)
x = onp.array([[16, 15, 23], [42, 8, 4]])
ans = jnp.argsort(x, axis=1)
expected = onp.argsort(x, axis=1)
self.assertAllClose(expected, ans, check_dtypes=False)
x = onp.array([[16, 15, 23], [42, 8, 4]])
ans = jnp.argsort(x, axis=None)
expected = onp.argsort(x, axis=None)
self.assertAllClose(expected, ans, check_dtypes=False)
x = onp.array([[16, 15, 23], [42, 8, 4]])
ans = jnp.argsort(x)
expected = onp.argsort(x)
self.assertAllClose(expected, ans, check_dtypes=False)
def testMsortManually(self):
args_maker = lambda: [onp.random.randint(50, size=(5, 5))]
jnp_op = lambda x: jnp.msort(x)
onp_op = lambda x: onp.msort(x)
self._CheckAgainstNumpy(jnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_shifts={}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype),
shifts, axis),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype, "shifts": shifts,
"axis": axis}
for dtype in all_dtypes
for shape in [(3, 4), (3, 4, 5), (7, 4, 0)]
for shifts, axis in [
(3, None),
(1, 1),
((3,), (0,)),
((-2,), (-2,)),
((1, 2), (0, -1)),
((4, 2, 5, 5, 2, 4), None),
(100, None),
]
for rng_factory in [jtu.rand_default]))
def testRoll(self, shape, dtype, shifts, axis, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype), onp.array(shifts)]
jnp_op = partial(jnp.roll, axis=axis)
onp_op = partial(onp.roll, axis=axis)
self._CheckAgainstNumpy(jnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_start={}".format(
jtu.format_shape_dtype_string(shape, dtype),
axis, start),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype, "axis": axis,
"start": start}
for dtype in all_dtypes
for shape in [(1, 2, 3, 4)]
for axis in [-3, 0, 2, 3]
for start in [-4, -1, 2, 4]
for rng_factory in [jtu.rand_default]))
def testRollaxis(self, shape, dtype, start, axis, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype)]
jnp_op = partial(jnp.rollaxis, axis=axis, start=start)
onp_op = partial(onp.rollaxis, axis=axis, start=start)
self._CheckAgainstNumpy(jnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_bitorder={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis, bitorder),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype, "axis": axis,
"bitorder": bitorder}
for dtype in [onp.uint8, onp.bool_]
for bitorder in ['big', 'little']
for shape in [(1, 2, 3, 4)]
for axis in [None, 0, 1, -2, -1]
for rng_factory in [jtu.rand_some_zero]))
def testPackbits(self, shape, dtype, axis, bitorder, rng_factory):
if numpy_version < (1, 17, 0):
raise SkipTest("bitorder arg added in numpy 1.17.0")
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype)]
jnp_op = partial(jnp.packbits, axis=axis, bitorder=bitorder)
onp_op = partial(onp.packbits, axis=axis, bitorder=bitorder)
self._CheckAgainstNumpy(jnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_bitorder={}_count={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis, bitorder, count),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype, "axis": axis,
"bitorder": bitorder, "count": count}
for dtype in [onp.uint8]
for bitorder in ['big', 'little']
for shape in [(1, 2, 3, 4)]
for axis in [None, 0, 1, -2, -1]
for count in [None, 20]
for rng_factory in [jtu.rand_int]))
def testUnpackbits(self, shape, dtype, axis, bitorder, count, rng_factory):
if numpy_version < (1, 17, 0):
raise SkipTest("bitorder arg added in numpy 1.17.0")
rng = rng_factory(0, 256)
args_maker = lambda: [rng(shape, dtype)]
jnp_op = partial(jnp.unpackbits, axis=axis, bitorder=bitorder)
onp_op = partial(onp.unpackbits, axis=axis, bitorder=bitorder)
self._CheckAgainstNumpy(jnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_index={}_axis={}_mode={}".format(
jtu.format_shape_dtype_string(shape, dtype),
jtu.format_shape_dtype_string(index_shape, index_dtype),
axis, mode),
"rng_factory": rng_factory, "rng_indices_factory": rng_indices_factory,
"shape": shape, "index_shape": index_shape, "dtype": dtype,
"index_dtype": index_dtype, "axis": axis, "mode": mode}
for shape in [(3,), (3, 4), (3, 4, 5)]
for index_shape in scalar_shapes + [(3,), (2, 1, 3)]
for axis in itertools.chain(range(-len(shape), len(shape)),
[cast(Optional[int], None)])
for dtype in all_dtypes
for index_dtype in int_dtypes
for mode in ['wrap', 'clip']
for rng_factory in [jtu.rand_default]
for rng_indices_factory in [partial(jtu.rand_int, -5, 5)]))
def testTake(self, shape, dtype, index_shape, index_dtype, axis, mode,
rng_factory, rng_indices_factory):
def args_maker():
x = rng(shape, dtype)
i = rng_indices(index_shape, index_dtype)
return x, i
rng = rng_factory()
rng_indices = rng_indices_factory()
jnp_op = lambda x, i: jnp.take(x, i, axis=axis, mode=mode)
onp_op = lambda x, i: onp.take(x, i, axis=axis, mode=mode)
self._CheckAgainstNumpy(jnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_ishape={}_axis={}".format(
jtu.format_shape_dtype_string(x_shape, dtype), i_shape, axis),
"rng_factory": rng_factory, "x_shape": x_shape, "i_shape": i_shape, "dtype": dtype,
"axis": axis}
for x_shape, i_shape in filter(
_shapes_are_equal_length,
filter(_shapes_are_broadcast_compatible,
CombosWithReplacement(nonempty_nonscalar_array_shapes, 2)))
for axis in itertools.chain(range(len(x_shape)), [-1],
[cast(Optional[int], None)])
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testTakeAlongAxis(self, x_shape, i_shape, dtype, axis, rng_factory):
rng = rng_factory()
i_shape = onp.array(i_shape)
if axis is None:
i_shape = [onp.prod(i_shape, dtype=onp.int64)]
else:
i_shape[axis] *= 3
i_shape = list(i_shape)
def args_maker():
x = rng(x_shape, dtype)
n = onp.prod(x_shape, dtype=onp.int32) if axis is None else x_shape[axis]
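# draw indices in [-(n-1), n-1] so every entry is a valid (possibly negative) index along an axis of size n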
i = rng(i_shape, onp.int32) % (2 * n - 1) - (n - 1)
return x, i
jnp_op = lambda x, i: jnp.take_along_axis(x, i, axis=axis)
if hasattr(onp, "take_along_axis"):
onp_op = lambda x, i: onp.take_along_axis(x, i, axis=axis)
self._CheckAgainstNumpy(jnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_n={}_increasing={}".format(
jtu.format_shape_dtype_string([shape], dtype),
n, increasing),
"dtype": dtype, "shape": shape, "n": n, "increasing": increasing,
"rng_factory": jtu.rand_default}
for dtype in inexact_dtypes
for shape in [0, 5]
for n in [2, 4]
for increasing in [False, True]))
def testVander(self, shape, dtype, n, increasing, rng_factory):
rng = rng_factory()
def onp_fun(arg):
arg = arg.astype(onp.float32) if dtype == jnp.bfloat16 else arg
return onp.vander(arg, N=n, increasing=increasing)
jnp_fun = lambda arg: jnp.vander(arg, N=n, increasing=increasing)
args_maker = lambda: [rng([shape], dtype)]
# np.vander seems to return float64 for all floating types. We could obey
# those semantics, but they seem like a bug.
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=False,
tol={onp.float32: 1e-3})
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix("nan_to_num", [shape],
[dtype]),
"rng_factory": jtu.rand_some_inf_and_nan, "shape": shape,
"dtype": dtype}
for shape in all_shapes
for dtype in inexact_dtypes))
def testNanToNum(self, rng_factory, shape, dtype):
rng = rng_factory()
dtype = onp.dtype(dtypes.canonicalize_dtype(dtype)).type
def onp_fun(x):
if dtype == jnp.bfloat16:
x = onp.where(onp.isnan(x), dtype(0), x)
x = onp.where(onp.isposinf(x), jnp.finfo(dtype).max, x)
x = onp.where(onp.isneginf(x), jnp.finfo(dtype).min, x)
return x
else:
return onp.nan_to_num(x).astype(dtype)
args_maker = lambda: [rng(shape, dtype)]
check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
self._CheckAgainstNumpy(onp_fun, jnp.nan_to_num, args_maker,
check_dtypes=check_dtypes)
self._CompileAndCheck(jnp.nan_to_num, args_maker,
check_dtypes=check_dtypes)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix("ix_", shapes, dtypes),
"rng_factory": jtu.rand_default, "shapes": shapes, "dtypes": dtypes}
for shapes, dtypes in (
((), ()),
(((7,),), (onp.int32,)),
(((3,), (4,)), (onp.int32, onp.int32)),
(((3,), (1,), (4,)), (onp.int32, onp.int32, onp.int32)),
)))
def testIx_(self, rng_factory, shapes, dtypes):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype)
for shape, dtype in zip(shapes, dtypes)]
self._CheckAgainstNumpy(onp.ix_, jnp.ix_, args_maker,
check_dtypes=True)
self._CompileAndCheck(jnp.ix_, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_op={}_a_shape={}_q_shape={}_axis={}_keepdims={}_interpolation={}".format(
op,
jtu.format_shape_dtype_string(a_shape, a_dtype),
jtu.format_shape_dtype_string(q_shape, q_dtype),
axis, keepdims, interpolation),
"a_rng": jtu.rand_default(), "q_rng": q_rng, "op": op,
"a_shape": a_shape, "a_dtype": a_dtype,
"q_shape": q_shape, "q_dtype": q_dtype, "axis": axis,
"keepdims": keepdims,
"interpolation": interpolation}
for (op, q_rng) in (
("percentile", jtu.rand_uniform(low=0., high=100.)),
("quantile", jtu.rand_uniform(low=0., high=1.)),
)
for a_dtype in float_dtypes
for a_shape, axis in (
((7,), None),
((47, 7), 0),
((4, 101), 1),
)
for q_dtype in [onp.float32]
for q_shape in scalar_shapes + [(4,)]
for keepdims in [False, True]
for interpolation in ['linear', 'lower', 'higher', 'nearest', 'midpoint']))
def testQuantile(self, op, a_rng, q_rng, a_shape, a_dtype, q_shape, q_dtype,
axis, keepdims, interpolation):
if op == "quantile" and numpy_version < (1, 15):
raise SkipTest("Numpy < 1.15 does not have np.quantile")
args_maker = lambda: [a_rng(a_shape, a_dtype), q_rng(q_shape, q_dtype)]
def onp_fun(*args):
args = [x if jnp.result_type(x) != jnp.bfloat16 else
onp.asarray(x, onp.float32) for x in args]
return getattr(onp, op)(*args, axis=axis, keepdims=keepdims,
interpolation=interpolation)
jnp_fun = partial(getattr(jnp, op), axis=axis, keepdims=keepdims,
interpolation=interpolation)
# TODO(phawkins): we currently set check_dtypes=False because we don't
# promote result dtypes exactly the way NumPy does here.
tol_spec = {onp.float32: 2e-4, onp.float64: 5e-6}
tol = max(jtu.tolerance(a_dtype, tol_spec),
jtu.tolerance(q_dtype, tol_spec))
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=False,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_a_shape={}_axis={}_keepdims={}".format(
jtu.format_shape_dtype_string(a_shape, a_dtype),
axis, keepdims),
"a_rng": jtu.rand_default(),
"a_shape": a_shape, "a_dtype": a_dtype,
"axis": axis,
"keepdims": keepdims}
for a_dtype in float_dtypes
for a_shape, axis in (
((7,), None),
((47, 7), 0),
((4, 101), 1),
)
for keepdims in [False, True]))
def testMedian(self, a_rng, a_shape, a_dtype, axis, keepdims):
args_maker = lambda: [a_rng(a_shape, a_dtype)]
def onp_fun(*args):
args = [x if jnp.result_type(x) != jnp.bfloat16 else
onp.asarray(x, onp.float32) for x in args]
return onp.median(*args, axis=axis, keepdims=keepdims)
jnp_fun = partial(jnp.median, axis=axis, keepdims=keepdims)
# TODO(phawkins): we currently set check_dtypes=False because we don't
# promote result dtypes exactly the way NumPy does here.
tol_spec = {onp.float32: 2e-4, onp.float64: 5e-6}
tol = jtu.tolerance(a_dtype, tol_spec)
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=False,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes for dtype in all_dtypes))
def testWhereOneArgument(self, shape, dtype):
rng = jtu.rand_some_zero()
onp_fun = lambda x: onp.where(x)
onp_fun = jtu.ignore_warning(
category=DeprecationWarning,
message="Calling nonzero on 0d arrays.*")(onp_fun)
jnp_fun = lambda x: jnp.where(x)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format("_".join(
jtu.format_shape_dtype_string(shape, dtype)
for shape, dtype in zip(shapes, dtypes))),
"rng_factory": jtu.rand_default, "shapes": shapes, "dtypes": dtypes}
for shapes in filter(_shapes_are_broadcast_compatible,
CombosWithReplacement(all_shapes, 3))
for dtypes in CombosWithReplacement(all_dtypes, 3)))
def testWhereThreeArgument(self, rng_factory, shapes, dtypes):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, shapes, dtypes)
def onp_fun(cond, x, y):
return _promote_like_jnp(partial(onp.where, cond))(x, y)
self._CheckAgainstNumpy(onp_fun, jnp.where, args_maker,
check_dtypes=True)
self._CompileAndCheck(jnp.where, args_maker, check_dtypes=True)
def testWhereScalarPromotion(self):
x = jnp.where(jnp.array([True, False]), 3,
jnp.ones((2,), dtype=jnp.float32))
self.assertEqual(x.dtype, onp.dtype(onp.float32))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix("", shapes,
(onp.bool_,) * n + dtypes),
"rng_factory": jtu.rand_default, "shapes": shapes, "dtypes": dtypes}
for n in range(0, 3)
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(all_shapes, 2 * n + 1))
for dtypes in CombosWithReplacement(all_dtypes, n + 1)))
def testSelect(self, rng_factory, shapes, dtypes):
rng = rng_factory()
n = len(dtypes) - 1
def args_maker():
condlist = [rng(shape, onp.bool_) for shape in shapes[:n]]
choicelist = [rng(shape, dtype)
for shape, dtype in zip(shapes[n:-1], dtypes[:n])]
default = rng(shapes[-1], dtypes[-1])
return condlist, choicelist, default
# TODO(phawkins): float32/float64 type mismatches
def onp_fun(condlist, choicelist, default):
choicelist = [x if jnp.result_type(x) != jnp.bfloat16
else x.astype(onp.float32) for x in choicelist]
dtype = jnp.result_type(default, *choicelist)
return onp.select(condlist,
[onp.asarray(x, dtype=dtype) for x in choicelist],
onp.asarray(default, dtype=dtype))
self._CheckAgainstNumpy(onp_fun, jnp.select, args_maker,
check_dtypes=False)
self._CompileAndCheck(jnp.select, args_maker, check_dtypes=True,
rtol={onp.float64: 1e-7, onp.complex128: 1e-7})
def testIssue330(self):
x = jnp.full((1, 1), jnp.array([1])[0]) # doesn't crash
self.assertEqual(x[0, 0], 1)
def testScalarDtypePromotion(self):
orig_numpy_result = (1 + onp.eye(1, dtype=onp.float32)).dtype
jax_numpy_result = (1 + jnp.eye(1, dtype=jnp.float32)).dtype
self.assertEqual(orig_numpy_result, jax_numpy_result)
def testSymmetrizeDtypePromotion(self):
x = onp.eye(3, dtype=onp.float32)
orig_numpy_result = ((x + x.T) / 2).dtype
x = jnp.eye(3, dtype=jnp.float32)
jax_numpy_result = ((x + x.T) / 2).dtype
self.assertEqual(orig_numpy_result, jax_numpy_result)
def testIssue453(self):
# https://github.com/google/jax/issues/453: reshape with Fortran ordering
a = onp.arange(6) + 1
ans = jnp.reshape(a, (3, 2), order='F')
expected = onp.reshape(a, (3, 2), order='F')
self.assertAllClose(ans, expected, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_dtype={}".format(op, pytype.__name__),
"pytype": pytype, "dtype": dtype, "op": op}
for pytype, dtype in [(int, jnp.int_), (float, jnp.float_),
(bool, jnp.bool_), (complex, jnp.complex_)]
for op in ["atleast_1d", "atleast_2d", "atleast_3d"]))
def testAtLeastNdLiterals(self, pytype, dtype, op):
onp_fun = lambda arg: getattr(onp, op)(arg).astype(dtype)
jnp_fun = lambda arg: getattr(jnp, op)(arg)
args_maker = lambda: [pytype(2)]
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(*jtu.cases_from_list(
{"testcase_name": "_case={}".format(i),
"input": input}
for i, input in enumerate([
3,
[3],
[onp.array(3)],
[onp.array([3])],
[[onp.array(3)]],
[[onp.array([3])]],
[3, 4, 5],
[
[onp.eye(2, dtype=onp.int32) * 2, onp.zeros((2, 3), dtype=onp.int32)],
[onp.ones((3, 2), dtype=onp.int32), onp.eye(3, dtype=onp.int32) * 3],
],
[onp.array([1, 2, 3]), onp.array([2, 3, 4]), 10],
[onp.ones((2, 2), dtype=onp.int32), onp.zeros((2, 2), dtype=onp.int32)],
[[onp.array([1, 2, 3])], [onp.array([2, 3, 4])]],
])))
def testBlock(self, input):
args_maker = lambda: [input]
self._CheckAgainstNumpy(onp.block, jnp.block, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp.block, args_maker, check_dtypes=True)
def testLongLong(self):
self.assertAllClose(onp.int64(7), api.jit(lambda x: x)(onp.longlong(7)),
check_dtypes=True)
def testArange(self):
self.assertAllClose(jnp.arange(77),
onp.arange(77, dtype=jnp.int_), check_dtypes=True)
self.assertAllClose(jnp.arange(2, 13),
onp.arange(2, 13, dtype=jnp.int_), check_dtypes=True)
self.assertAllClose(jnp.arange(4, 21, 9),
onp.arange(4, 21, 9, dtype=jnp.int_), check_dtypes=True)
self.assertAllClose(jnp.arange(53, 5, -3),
onp.arange(53, 5, -3, dtype=jnp.int_),
check_dtypes=True)
self.assertAllClose(jnp.arange(77, dtype=float),
onp.arange(77, dtype=float), check_dtypes=True)
self.assertAllClose(jnp.arange(2, 13, dtype=int),
onp.arange(2, 13, dtype=int), check_dtypes=True)
self.assertAllClose(jnp.arange(0, 1, -0.5),
onp.arange(0, 1, -0.5, dtype=jnp.float_),
check_dtypes=True)
self.assertRaises(TypeError, lambda: jnp.arange())
self.assertNotEqual(type(jnp.arange(77)), type(onp.arange(77)))
self.assertEqual(type(jnp.arange(77)), type(lax.iota(onp.int32, 77)))
# test that jnp.arange(N, dtype=int32) doesn't instantiate an ndarray
self.assertNotEqual(type(jnp.arange(77, dtype=jnp.int32)),
type(onp.arange(77, dtype=onp.int32)))
self.assertEqual(type(jnp.arange(77, dtype=jnp.int32)),
type(lax.iota(onp.int32, 77)))
self.assertTrue(xla.is_device_constant(jnp.arange(77)))
self.assertTrue(xla.is_device_constant(jnp.arange(77, dtype=jnp.int32)))
def testIssue830(self):
a = jnp.arange(4, dtype=jnp.complex64)
self.assertEqual(a.dtype, jnp.complex64)
def testIssue728(self):
assert jnp.allclose(jnp.eye(5000), onp.eye(5000))
self.assertEqual(0, onp.sum(jnp.eye(1050) - onp.eye(1050)))
def testIssue746(self):
jnp.arange(12).reshape(3, 4)
def testIssue764(self):
x = jnp.linspace(190, 200, 4)
f = api.grad(lambda x: jnp.sum(jnp.tanh(x)))
# Expected values computed with autograd in float64 precision.
expected = onp.array([3.71669453e-165, 4.72999108e-168, 6.01954653e-171,
7.66067839e-174], onp.float64)
self.assertAllClose(f(x), expected, check_dtypes=False)
def testIssue776(self):
def f(u):
y = jax.ops.index_add(onp.ones(10,), [2, 4, 5], u)
# The transpose rule for lax.tie_in returns a symbolic zero for its first
# argument.
return lax.tie_in(y, 7.)
self.assertAllClose(onp.zeros(3,), api.grad(f)(onp.ones(3,)),
check_dtypes=True)
# NOTE(mattjj): I disabled this test when removing lax._safe_mul because this
# is a numerical stability issue that should be solved with a custom jvp rule
# of the sigmoid function being differentiated here, not by safe_mul.
# def testIssue777(self):
# x = jnp.linspace(-200, 0, 4, dtype=onp.float32)
# f = api.grad(lambda x: jnp.sum(1 / (1 + jnp.exp(-x))))
# self.assertAllClose(f(x), onp.array([0., 0., 0., 0.25], dtype=onp.float32),
# check_dtypes=True)
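# A minimal sketch (not part of this test file) of the custom-jvp approach the
# note above alludes to; ``jax.custom_jvp`` is assumed to be available in the
# jax version at hand. Defining the tangent as y * (1 - y) * x_dot avoids the
# 0 * inf products that motivated the old _safe_mul workaround:
#
#   @jax.custom_jvp
#   def sigmoid(x):
#     return 1. / (1. + jnp.exp(-x))
#
#   @sigmoid.defjvp
#   def sigmoid_jvp(primals, tangents):
#     x, = primals
#     x_dot, = tangents
#     y = sigmoid(x)
#     return y, y * (1. - y) * x_dot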
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(op, [()], [dtype]),
"dtype": dtype, "op": op}
for dtype in float_dtypes
for op in ("sqrt", "arccos", "arcsin", "arctan", "sin", "cos", "tan",
"sinh", "cosh", "tanh", "arccosh", "arcsinh", "arctanh", "exp",
"log", "expm1", "log1p")))
def testMathSpecialFloatValues(self, op, dtype):
onp_op = getattr(onp, op)
onp_op = jtu.ignore_warning(category=RuntimeWarning,
message="invalid value.*")(onp_op)
onp_op = jtu.ignore_warning(category=RuntimeWarning,
message="divide by zero.*")(onp_op)
onp_op = jtu.ignore_warning(category=RuntimeWarning,
message="overflow.*")(onp_op)
jnp_op = getattr(jnp, op)
dtype = onp.dtype(dtypes.canonicalize_dtype(dtype)).type
for x in (onp.nan, -onp.inf, -100., -2., -1., 0., 1., 2., 100., onp.inf,
jnp.finfo(dtype).max, onp.sqrt(jnp.finfo(dtype).max),
onp.sqrt(jnp.finfo(dtype).max) * 2.):
if onp.isnan(x) and op in ("sinh", "cosh", "expm1", "exp"):
# TODO(b/133842876, b/133842870): these return wrong outputs on CPU for
# NaN inputs.
continue
if (op in ("sin", "cos", "tan", "arctan") and
jtu.device_under_test() == "tpu"):
continue # TODO(b/132196789, b/134175194): fix and reenable.
x = dtype(x)
expected = onp_op(x)
actual = jnp_op(x)
tol = jtu.tolerance(dtype, {onp.float32: 1e-3, onp.float64: 1e-7})
self.assertAllClose(expected, actual, check_dtypes=True, atol=tol,
rtol=tol)
def testIssue883(self):
# from https://github.com/google/jax/issues/883
@partial(api.jit, static_argnums=(1,))
def f(x, v):
return x
x = jnp.ones((10, 10))
v = jnp.array([1, 2, 3])
first_call = f(x, v)
second_call = f(x, v) # doesn't crash
def testReductionOfOutOfBoundsAxis(self):
x = jnp.ones((3, 4))
self.assertRaises(ValueError, lambda: jnp.sum(x, axis=2))
def testIssue956(self):
self.assertRaises(TypeError, lambda: jnp.ndarray((1, 1)))
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name":
"_shape={}_dtype={}_out_dtype={}_axis={}_ddof={}_keepdims={}"
.format(shape, dtype, out_dtype, axis, ddof, keepdims),
"shape": shape, "dtype": dtype, "out_dtype": out_dtype, "axis": axis,
"ddof": ddof, "keepdims": keepdims, "rng_factory": rng_factory}
for shape in [(5,), (10, 5)]
for dtype in all_dtypes
for out_dtype in inexact_dtypes
for axis in [None, 0, -1]
for ddof in [0, 1, 2]
for keepdims in [False, True]
for rng_factory in [jtu.rand_default]))
def testVar(self, shape, dtype, out_dtype, axis, ddof, keepdims, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
def onp_fun(x):
out = onp.var(x.astype(jnp.promote_types(onp.float32, dtype)),
axis=axis, ddof=ddof, keepdims=keepdims)
return out.astype(out_dtype)
jnp_fun = partial(jnp.var, dtype=out_dtype, axis=axis, ddof=ddof, keepdims=keepdims)
tol = jtu.tolerance(out_dtype, {onp.float16: 1e-1, onp.float32: 1e-3,
onp.float64: 1e-3, onp.complex128: 1e-6})
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True, rtol=tol,
atol=tol)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": "_shape={}_dtype={}_rowvar={}_ddof={}_bias={}".format(
shape, dtype, rowvar, ddof, bias),
"shape": shape, "dtype": dtype, "rowvar": rowvar, "ddof": ddof,
"bias": bias, "rng_factory": rng_factory}
for shape in [(5,), (10, 5), (5, 10)]
for dtype in all_dtypes
for rowvar in [True, False]
for bias in [True, False]
for ddof in [None, 2, 3]
for rng_factory in [jtu.rand_default]))
@jtu.skip_on_devices("gpu")
def testCov(self, shape, dtype, rowvar, ddof, bias, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
onp_fun = partial(onp.cov, rowvar=rowvar, ddof=ddof, bias=bias)
jnp_fun = partial(jnp.cov, rowvar=rowvar, ddof=ddof, bias=bias)
tol = {onp.float32: 1e-5, onp.float64: 1e-13, onp.complex128: 1e-13}
tol = 7e-2 if jtu.device_under_test() == "tpu" else tol
tol = jtu.join_tolerance(tol, jtu.tolerance(dtype))
self._CheckAgainstNumpy(
onp_fun, jnp_fun, args_maker, check_dtypes=False, tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True, atol=tol,
rtol=tol)
def testIssue967(self):
self.assertRaises(TypeError, lambda: jnp.zeros(1.5))
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": "_shape={}_dtype={}_rowvar={}".format(
shape, dtype, rowvar),
"shape": shape, "dtype": dtype, "rowvar": rowvar,
"rng_factory": rng_factory}
for shape in [(5,), (10, 5), (3, 10)]
for dtype in number_dtypes
for rowvar in [True, False]
for rng_factory in [jtu.rand_default]))
def testCorrCoef(self, shape, dtype, rowvar, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
mat = onp.asarray([rng(shape, dtype)])
onp_fun = partial(onp.corrcoef, rowvar=rowvar)
jnp_fun = partial(jnp.corrcoef, rowvar=rowvar)
if not onp.any(onp.isclose(onp.std(mat), 0.0)):
self._CheckAgainstNumpy(
onp_fun, jnp_fun, args_maker, check_dtypes=False,
tol=1e-2 if jtu.device_under_test() == "tpu" else None)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": "_shapes={}_dtype={}_indexing={}_sparse={}".format(
shapes, dtype, indexing, sparse),
"shapes": shapes, "dtype": dtype, "indexing": indexing,
"sparse": sparse, "rng_factory": rng_factory}
for shapes in [(), (5,), (5, 3)]
for dtype in number_dtypes
for indexing in ['xy', 'ij']
for sparse in [True, False]
for rng_factory in [jtu.rand_default]))
def testMeshGrid(self, shapes, dtype, indexing, sparse, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [(x,) for x in shapes],
[dtype] * len(shapes))
onp_fun = partial(onp.meshgrid, indexing=indexing, sparse=sparse)
jnp_fun = partial(jnp.meshgrid, indexing=indexing, sparse=sparse)
self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}"
"_retstep={}_dtype={}").format(
start_shape, stop_shape, num, endpoint, retstep, dtype),
"start_shape": start_shape, "stop_shape": stop_shape,
"num": num, "endpoint": endpoint, "retstep": retstep,
"dtype": dtype, "rng_factory": rng_factory}
for start_shape in [(), (2,), (2, 2)]
for stop_shape in [(), (2,), (2, 2)]
for num in [0, 1, 2, 5, 20]
for endpoint in [True, False]
for retstep in [True, False]
for dtype in number_dtypes + [None,]
for rng_factory in [jtu.rand_default]))
def testLinspace(self, start_shape, stop_shape, num, endpoint,
retstep, dtype, rng_factory):
if num == 1 and not endpoint and numpy_version < (1, 17, 5):
raise SkipTest("Numpy < 1.17.5 has a linspace bug.")
rng = rng_factory()
tol = jtu.tolerance(dtype if dtype else onp.float32) * 10
args_maker = self._GetArgsMaker(rng,
[start_shape, stop_shape],
[dtype, dtype])
start, stop = args_maker()
ndim = len(onp.shape(start + stop))
for axis in range(-ndim, ndim):
jnp_op = lambda start, stop: jnp.linspace(
start, stop, num,
endpoint=endpoint, retstep=retstep, dtype=dtype, axis=axis)
onp_op = lambda start, stop: onp.linspace(
start, stop, num,
endpoint=endpoint, retstep=retstep, dtype=dtype, axis=axis)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker,
check_dtypes=False, tol=tol)
if dtype in (inexact_dtypes + [None,]):
self._CompileAndCheck(jnp_op, args_maker,
check_dtypes=False, atol=tol, rtol=tol)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}"
"_base={}_dtype={}").format(
start_shape, stop_shape, num, endpoint, base,
dtype.__name__ if dtype else "None"),
"start_shape": start_shape,
"stop_shape": stop_shape,
"num": num, "endpoint": endpoint, "base": base,
"dtype": dtype, "rng_factory": rng_factory}
for start_shape in [(), (2,), (2, 2)]
for stop_shape in [(), (2,), (2, 2)]
for num in [0, 1, 2, 5, 20]
for endpoint in [True, False]
for base in [10.0, 2, onp.e]
for dtype in inexact_dtypes + [None,]
for rng_factory in [jtu.rand_default]))
def testLogspace(self, start_shape, stop_shape, num,
endpoint, base, dtype, rng_factory):
if (dtype in int_dtypes and
jtu.device_under_test() in ("gpu", "tpu") and
not FLAGS.jax_enable_x64):
raise unittest.SkipTest("GPUx32 truncated exponentiation"
" doesn't exactly match other platforms.")
rng = rng_factory()
# relax default tolerances slightly
tol = {onp.float16: 2e-2, onp.float32: 1e-2, onp.float64: 1e-6,
onp.complex64: 1e-3, onp.complex128: 1e-6}
args_maker = self._GetArgsMaker(rng,
[start_shape, stop_shape],
[dtype, dtype])
start, stop = args_maker()
ndim = len(onp.shape(start + stop))
for axis in range(-ndim, ndim):
jnp_op = lambda start, stop: jnp.logspace(
start, stop, num, endpoint=endpoint, base=base, dtype=dtype, axis=axis)
onp_op = lambda start, stop: onp.logspace(
start, stop, num, endpoint=endpoint, base=base, dtype=dtype, axis=axis)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker,
check_dtypes=False, tol=tol)
if dtype in (inexact_dtypes + [None,]):
# Why do compiled and op-by-op float16 np.power numbers differ
# slightly more than expected?
atol = {onp.float16: 1e-2}
self._CompileAndCheck(jnp_op, args_maker,
check_dtypes=False, atol=atol, rtol=tol)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}"
"_dtype={}").format(
start_shape, stop_shape, num, endpoint, dtype),
"start_shape": start_shape,
"stop_shape": stop_shape,
"num": num, "endpoint": endpoint,
"dtype": dtype, "rng_factory": rng_factory}
for start_shape in [(), (2,), (2, 2)]
for stop_shape in [(), (2,), (2, 2)]
for num in [0, 1, 2, 5, 20]
for endpoint in [True, False]
# NB: numpy's geomspace gives nonsense results on integer types
for dtype in inexact_dtypes + [None,]
for rng_factory in [jtu.rand_default]))
def testGeomspace(self, start_shape, stop_shape, num,
endpoint, dtype, rng_factory):
rng = rng_factory()
tol = {onp.float16: 4e-3, onp.float32: 2e-3, onp.complex128: 1e-14}
def args_maker():
start, stop = self._GetArgsMaker(rng,
[start_shape, stop_shape],
[dtype, dtype])()
# onp.geomspace can't handle differently ranked tensors with negative numbers!
start, stop = jnp.broadcast_arrays(start, stop)
if dtype in complex_dtypes:
return start, stop
# to avoid NaNs, non-complex start and stop cannot
# differ in sign, elementwise
start = start * jnp.sign(start) * jnp.sign(stop)
return start, stop
start, stop = args_maker()
ndim = len(onp.shape(start + stop))
for axis in range(-ndim, ndim):
def jnp_op(start, stop):
return jnp.geomspace(start, stop, num, endpoint=endpoint, dtype=dtype,
axis=axis)
def onp_op(start, stop):
start = start.astype(onp.float32) if dtype == jnp.bfloat16 else start
stop = stop.astype(onp.float32) if dtype == jnp.bfloat16 else stop
return onp.geomspace(
start, stop, num, endpoint=endpoint,
dtype=dtype if dtype != jnp.bfloat16 else onp.float32,
axis=axis).astype(dtype)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker,
check_dtypes=False, tol=tol)
if dtype in (inexact_dtypes + [None,]):
self._CompileAndCheck(jnp_op, args_maker,
check_dtypes=False, atol=tol, rtol=tol)
def testDisableNumpyRankPromotionBroadcasting(self):
try:
prev_flag = FLAGS.jax_numpy_rank_promotion
FLAGS.jax_numpy_rank_promotion = "allow"
jnp.ones(2) + jnp.ones((1, 2)) # works just fine
finally:
FLAGS.jax_numpy_rank_promotion = prev_flag
try:
prev_flag = FLAGS.jax_numpy_rank_promotion
FLAGS.jax_numpy_rank_promotion = "raise"
self.assertRaises(ValueError, lambda: jnp.ones(2) + jnp.ones((1, 2)))
finally:
FLAGS.jax_numpy_rank_promotion = prev_flag
try:
prev_flag = FLAGS.jax_numpy_rank_promotion
FLAGS.jax_numpy_rank_promotion = "warn"
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
jnp.ones(2) + jnp.ones((1, 2))
assert len(w) > 0
msg = str(w[-1].message)
expected_msg = ("Following NumPy automatic rank promotion for add on "
"shapes (2,) (1, 2).")
self.assertEqual(msg[:len(expected_msg)], expected_msg)
prev_len = len(w)
jnp.ones(2) + 3
self.assertEqual(len(w), prev_len) # don't want to warn for scalars
finally:
FLAGS.jax_numpy_rank_promotion = prev_flag
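# A small helper sketch, not in the original file, that factors out the flag
# save/restore pattern repeated three times above; FLAGS is the same absl
# flags object this test module already uses:
#
#   from contextlib import contextmanager
#
#   @contextmanager
#   def rank_promotion(mode):
#     prev = FLAGS.jax_numpy_rank_promotion
#     FLAGS.jax_numpy_rank_promotion = mode
#     try:
#       yield
#     finally:
#       FLAGS.jax_numpy_rank_promotion = prev
#
# Usage: `with rank_promotion("raise"): jnp.ones(2) + jnp.ones((1, 2))`.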
def testStackArrayArgument(self):
@api.jit
def foo(x):
return jnp.stack(x)
foo(onp.zeros(2))
@api.jit
def foo(x):
return jnp.concatenate(x)
foo(onp.zeros((2, 2))) # doesn't crash
def testReluGradientConstants(self):
# This is a regression test that verifies that constants associated with the
# gradient of jnp.maximum (from lax._balanced_eq) aren't hoisted into the
# outermost jaxpr. This was producing some large materialized constants for
# every relu activation in a model.
def body(i, xy):
x, y = xy
y = y + jax.grad(lambda z: jnp.sum(jnp.maximum(z, 0.)))(x)
return x, y
f = lambda y: lax.fori_loop(0, 5, body, (y, y))
wrapped = linear_util.wrap_init(f)
pv = partial_eval.PartialVal.unknown(jax.ShapedArray((3, 4), onp.float32))
_, _, consts = partial_eval.trace_to_jaxpr(wrapped, [pv])
self.assertFalse(
any(onp.array_equal(x, onp.full((3, 4), 2., dtype=onp.float32))
for x in consts))
@parameterized.named_parameters(
{"testcase_name": "_from={}_to={}".format(from_shape, to_shape),
"rng_factory": rng_factory, "from_shape": from_shape, "to_shape": to_shape}
for from_shape, to_shape in [
[(1, 3), (4, 3)],
[(3,), (2, 1, 3)],
[(3,), (3, 3)],
[(1,), (3,)],
]
for rng_factory in [jtu.rand_default])
def testBroadcastTo(self, from_shape, to_shape, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [from_shape], [onp.float32])
onp_op = lambda x: onp.broadcast_to(x, to_shape)
jnp_op = lambda x: jnp.broadcast_to(x, to_shape)
self._CheckAgainstNumpy(onp_op, jnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_op, args_maker, check_dtypes=True)
def testBroadcastToIssue1522(self):
self.assertRaisesRegex(
ValueError, "Incompatible shapes for broadcasting: .*",
lambda: jnp.broadcast_to(onp.ones((2, 3)), (1, 3)))
def testBroadcastToIntIssue1548(self):
self.assertAllClose(jnp.broadcast_to(1, (3, 2)), onp.ones((3, 2)),
check_dtypes=False)
def testBroadcastToOnScalar(self):
self.assertIsInstance(jnp.broadcast_to(10.0, ()), jnp.ndarray)
self.assertIsInstance(onp.broadcast_to(10.0, ()), onp.ndarray)
def testPrecision(self):
ones_1d = onp.ones((2,))
ones_2d = onp.ones((2, 2))
ones_3d = onp.ones((2, 2, 2))
HIGHEST = lax.Precision.HIGHEST
jtu.assert_dot_precision(None, jnp.dot, ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.dot, precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.dot, precision=HIGHEST),
ones_3d, ones_3d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.matmul, precision=HIGHEST),
ones_2d, ones_2d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.vdot, precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.tensordot, axes=2, precision=HIGHEST),
ones_2d, ones_2d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.tensordot, axes=(0, 0), precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.tensordot, axes=((0,), (0,)), precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.einsum, 'i,i', precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.einsum, 'ij,ij', precision=HIGHEST),
ones_2d, ones_2d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.inner, precision=HIGHEST),
ones_1d, ones_1d)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": ("_shape={}_axis={}_dtype={}").format(shape, axis, dtype),
"shape": shape,
"axis": axis,
"dtype": dtype, "rng_factory": rng_factory}
for shape in [(10,), (10, 15), (10, 15, 20)]
for _num_axes in range(len(shape))
for axis in itertools.combinations(range(len(shape)), _num_axes)
for dtype in inexact_dtypes
for rng_factory in [jtu.rand_default]))
def testGradient(self, shape, axis, dtype, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
jnp_fun = lambda y: jnp.gradient(y, axis=axis)
onp_fun = lambda y: onp.gradient(y, axis=axis)
self._CheckAgainstNumpy(
onp_fun, jnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
def testZerosShapeErrors(self):
# see https://github.com/google/jax/issues/1822
self.assertRaisesRegex(
TypeError,
"Shapes must be 1D sequences of concrete values of integer type.*",
lambda: jnp.zeros(1.))
self.assertRaisesRegex(
TypeError,
"Shapes must be 1D sequences of concrete values of integer type.*\n"
"If using `jit`, try using `static_argnums` or applying `jit` to smaller subfunctions.",
lambda: api.jit(jnp.zeros)(2))
def testTraceMethod(self):
x = onp.random.randn(3, 4).astype(jnp.float_)
self.assertAllClose(x.trace(), jnp.array(x).trace(), check_dtypes=True)
self.assertAllClose(x.trace(), api.jit(lambda y: y.trace())(x),
check_dtypes=True)
# Most grad tests are at the lax level (see lax_test.py), but we add some here
# as needed for e.g. particular compound ops of interest.
GradTestSpec = collections.namedtuple(
"GradTestSpec",
["op", "nargs", "order", "rng_factory", "dtypes", "name", "tol"])
def grad_test_spec(op, nargs, order, rng_factory, dtypes, name=None, tol=None):
return GradTestSpec(
op, nargs, order, rng_factory, dtypes, name or op.__name__, tol)
GRAD_TEST_RECORDS = [
grad_test_spec(jnp.arcsinh, nargs=1, order=2,
rng_factory=jtu.rand_positive,
dtypes=[onp.float64, onp.complex64], tol=1e-4),
grad_test_spec(jnp.arccosh, nargs=1, order=2,
rng_factory=jtu.rand_positive,
dtypes=[onp.float64, onp.complex64], tol=1e-4),
grad_test_spec(jnp.arctanh, nargs=1, order=2,
rng_factory=partial(jtu.rand_uniform, -0.9, 0.9),
dtypes=[onp.float64, onp.complex64], tol=1e-4),
grad_test_spec(jnp.logaddexp, nargs=2, order=1,
rng_factory=partial(jtu.rand_uniform, -0.9, 0.9),
dtypes=[onp.float64], tol=1e-4),
grad_test_spec(jnp.logaddexp2, nargs=2, order=2,
rng_factory=partial(jtu.rand_uniform, -0.9, 0.9),
dtypes=[onp.float64], tol=1e-4),
]
GradSpecialValuesTestSpec = collections.namedtuple(
"GradSpecialValuesTestSpec", ["op", "values", "order"])
GRAD_SPECIAL_VALUE_TEST_RECORDS = [
GradSpecialValuesTestSpec(jnp.arcsinh, [0., 1000.], 2),
GradSpecialValuesTestSpec(jnp.arccosh, [1000.], 2),
GradSpecialValuesTestSpec(jnp.arctanh, [0.], 2),
GradSpecialValuesTestSpec(jnp.sinc, [0.], 1),
]
def num_float_bits(dtype):
return jnp.finfo(dtypes.canonicalize_dtype(dtype)).bits
class NumpyGradTests(jtu.JaxTestCase):
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(
rec.name, shapes, itertools.repeat(dtype)),
"op": rec.op, "rng_factory": rec.rng_factory, "shapes": shapes, "dtype": dtype,
"order": rec.order, "tol": rec.tol}
for shapes in CombosWithReplacement(nonempty_shapes, rec.nargs)
for dtype in rec.dtypes)
for rec in GRAD_TEST_RECORDS))
def testOpGrad(self, op, rng_factory, shapes, dtype, order, tol):
rng = rng_factory()
tol = {onp.float32: 1e-1, onp.complex64: 1e-1}
args = tuple(rng(shape, dtype) for shape in shapes)
check_grads(op, args, order, ["fwd", "rev"], tol, tol)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": "_{}_{}".format(rec.op.__name__, special_value),
"op": rec.op, "special_value": special_value, "order": rec.order}
for special_value in rec.values)
for rec in GRAD_SPECIAL_VALUE_TEST_RECORDS))
def testOpGradSpecialValue(self, op, special_value, order):
check_grads(op, (special_value,), order, ["fwd", "rev"],
atol={onp.float32: 3e-3})
def testTakeAlongAxisIssue1521(self):
# https://github.com/google/jax/issues/1521
idx = jnp.repeat(jnp.arange(3), 10).reshape((30, 1))
def f(x):
y = x * jnp.arange(3.).reshape((1, 3))
return jnp.take_along_axis(y, idx, -1).sum()
check_grads(f, (1.,), order=1)
if __name__ == "__main__":
absltest.main()
| true
| true
|
7905cc126bfce14e1c236fb532e87a55bc65f722
| 906
|
py
|
Python
|
pycounter/test/test_db_common.py
|
loadbrain/pycounter
|
e5533d0ebe3685df0c29f8e491ea6dd1c14c6b66
|
[
"MIT"
] | null | null | null |
pycounter/test/test_db_common.py
|
loadbrain/pycounter
|
e5533d0ebe3685df0c29f8e491ea6dd1c14c6b66
|
[
"MIT"
] | 2
|
2020-08-27T14:12:44.000Z
|
2020-08-27T14:16:18.000Z
|
pycounter/test/test_db_common.py
|
loadbrain/pycounter
|
e5533d0ebe3685df0c29f8e491ea6dd1c14c6b66
|
[
"MIT"
] | null | null | null |
"""Common DB report tests."""
import datetime
from pycounter.constants import METRICS
def test_version(db_report):
assert db_report.report_version == 4
def test_year(db_report):
assert db_report.year == 2012
def test_publisher(db_report):
for publication in db_report:
assert publication.publisher == u"Megadodo Publications"
def test_platform(db_report):
for publication in db_report:
assert publication.platform == u"HHGTTG Online"
def test_customer(db_report):
assert db_report.customer == u"University of Maximegalon"
def test_date_run(db_report):
assert db_report.date_run == datetime.date(2012, 7, 9)
def test_period(db_report):
assert db_report.period == (datetime.date(2012, 1, 1), datetime.date(2012, 6, 30))
def test_report_metric(db_report):
for metric in db_report.metric:
assert metric in METRICS[db_report.report_type]
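# The tests above all consume a ``db_report`` pytest fixture defined elsewhere
# (normally in conftest.py). A plausible sketch is below; the fixture body and
# the sample-file path are assumptions for illustration, not the project's
# actual conftest.
import pytest
from pycounter import report

@pytest.fixture
def db_report():
    return report.parse("tests/data/simpleDB1.tsv")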
| 22.65
| 86
| 0.737307
|
import datetime
from pycounter.constants import METRICS
def test_version(db_report):
assert db_report.report_version == 4
def test_year(db_report):
assert db_report.year == 2012
def test_publisher(db_report):
for publication in db_report:
assert publication.publisher == u"Megadodo Publications"
def test_platform(db_report):
for publication in db_report:
assert publication.platform == u"HHGTTG Online"
def test_customer(db_report):
assert db_report.customer == u"University of Maximegalon"
def test_date_run(db_report):
assert db_report.date_run == datetime.date(2012, 7, 9)
def test_period(db_report):
assert db_report.period == (datetime.date(2012, 1, 1), datetime.date(2012, 6, 30))
def test_report_metric(db_report):
for metric in db_report.metric:
assert metric in METRICS[db_report.report_type]
| true
| true
|
7905cc3fc390402329e2b7318d745e309b05f5f7
| 7,144
|
py
|
Python
|
controlpyweb/reader_writer.py
|
washad/ControlPyWeb
|
9f83cada68cf0df05bc8b63238bee311eab6e37d
|
[
"MIT"
] | null | null | null |
controlpyweb/reader_writer.py
|
washad/ControlPyWeb
|
9f83cada68cf0df05bc8b63238bee311eab6e37d
|
[
"MIT"
] | 3
|
2020-03-31T04:56:27.000Z
|
2021-06-01T23:56:16.000Z
|
controlpyweb/reader_writer.py
|
washad/ControlPyWeb
|
9f83cada68cf0df05bc8b63238bee311eab6e37d
|
[
"MIT"
] | 1
|
2021-02-23T02:05:15.000Z
|
2021-02-23T02:05:15.000Z
|
"""
Module Reader Writer
This module provides the ReaderWriter class as a concrete implementation of the AbstractReaderWriter. It handles
the implementation details of interfacing with the hardware.
"""
from controlpyweb.abstract_reader_writer import AbstractReaderWriter
import requests
import json
from typing import Union, Optional, List
import time
import threading
from controlpyweb.errors import ControlPyWebAddressNotFoundError, WebIOConnectionError
lock = threading.Lock()
class ReaderWriter(AbstractReaderWriter):
def __init__(self, url: str, demand_address_exists: bool = True, timeout: float = 10.0,
keep_alive: bool = True, **kwargs):
"""
:param url: The address of the IO Base module from/to which IO is written
"""
url = 'http://{}'.format(url) if 'http' not in url else url
url = '{}/customState.json'.format(url)
self._url = url # type: str
self._io = dict()
self._previous_read_io = dict()
self._changes = dict()
self._first_read = False
self._last_hardware_read_time = None # type: Optional[float]
self._req = requests if not keep_alive else requests.Session()
self.update_reads_on_write = bool(kwargs.get('update_reads_on_write', False))
self.demand_address_exists = demand_address_exists
self.timeout = timeout
@property
def last_hardware_read_time(self):
return self._last_hardware_read_time
def _check_for_address(self, addr: str):
if not self.demand_address_exists:
return
if not self._first_read:
return
if self._io is None:
return
if addr not in self._io:
raise ControlPyWebAddressNotFoundError(addr)
def _get(self, timeout: float = None) -> dict:
""" Does an http get and returns the results as key/value pairs"""
timeout = self.timeout if timeout is None else timeout
self._first_read = True
r = self._req.get(self._url, timeout=timeout)
r = None if r is None else r.json()
return r
@staticmethod
def _value_to_str(value):
if isinstance(value, bool):
value = '1' if value else '0'
return str(value)
@property
def changes(self):
"""Returns a dictionary of all changes made since the last read or write"""
return self._changes
def dumps(self, changes_only: bool = False):
"""Returns the current IO key/values as json string"""
with lock:
if changes_only:
if len(self._changes) == 0:
return ''
return json.dumps(self._changes)
return json.dumps(self._io)
def flush_changes(self):
""" Erases the collection of changes stored in memory"""
with lock:
self._changes = dict()
def loads(self, json_str: str):
"""Replaces the current IO key/values with that from the json string"""
with lock:
self._first_read = True
self._io = json.loads(json_str)
def read(self, addr: str) -> Optional[Union[bool, int, float, str]]:
"""
Returns the value of a single IO from the memory store
"""
with lock:
if not self._first_read:
return None
self._check_for_address(addr)
val = self._io.get(addr)
return val
def read_immediate(self, addr: str, timeout: float = None) -> object:
"""
Makes a hardware call to the base module to retrieve the value of the IO. This is inefficient and should
be used sparingly.
"""
try:
self._check_for_address(addr)
timeout = self.timeout if timeout is None else timeout
vals = self._get(timeout=timeout)
if vals is None:
return None
return vals.get(addr)
except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout) as ex:
raise WebIOConnectionError(ex)
def to_hardware(self, timeout: float = None):
""" Same as send_changes_to_hardware"""
return self.send_changes_to_hardware(timeout)
def send_changes_to_hardware(self, timeout: float = None):
""" Takes the collection of changes made using the write command and
sends them all to the hardware collectively. """
try:
with lock:
if self._changes is None or len(self._changes) == 0:
return
timeout = self.timeout if timeout is None else timeout
self._req.get(self._url, params=self._changes, timeout=timeout)
self.flush_changes()
except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout) as ex:
raise WebIOConnectionError(ex)
def from_hardware(self, timeout: float = None):
""" Same as update_from_hardware"""
self.update_from_hardware(timeout)
def update_from_hardware(self, timeout: float = None):
"""Makes a hardware call to the base module to retrieve the value of all IOs, storing their
results in memory."""
try:
timeout = self.timeout if timeout is None else timeout
with lock:
vals = self._get(timeout)
self._last_hardware_read_time = time.time()
if vals is not None:
self._io = vals
self.flush_changes()
except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout) as ex:
raise WebIOConnectionError(ex)
def write(self, addr: str, value: object) -> None:
"""
Stores the write value in memory to be written as part of a group write when changes are sent to
hardware."""
with lock:
to_str = self._value_to_str(value)
if self.update_reads_on_write:
self._io[addr] = value
self._changes[addr] = to_str
def write_immediate(self, addr: Union[str, List[str]],
value: Union[object, List[object]], timeout: float = None):
"""
Instead of waiting for a group write, writes the given value immediately. Note, this is not very efficient
and should be used sparingly. """
if isinstance(addr, list):
if isinstance(value, list):
items = {a: self._value_to_str(v) for a, v in zip(addr, value)}
else:
value = self._value_to_str(value)
items = {a: value for a in addr}
else:
items = {addr: self._value_to_str(value)}
try:
timeout = self.timeout if timeout is None else timeout
with lock:
self._req.get(self._url, params=items, timeout=timeout)
for addr, value in items.items():
self._io[addr] = value
except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout) as ex:
raise WebIOConnectionError(ex)
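# Brief usage sketch (not part of the module). Writes are buffered in memory
# and flushed in a single HTTP request, which is the efficient path the
# docstrings above recommend; the module address and IO names are illustrative.
if __name__ == '__main__':
    rw = ReaderWriter('192.168.1.2')
    rw.update_from_hardware()       # one GET refreshes the whole IO snapshot
    print(rw.read('relay1'))        # served from the in-memory snapshot
    rw.write('relay1', True)        # buffered in memory only
    rw.write('relay2', False)       # buffered in memory only
    rw.send_changes_to_hardware()   # a single GET carrying both changes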
| 38
| 114
| 0.613522
|
from controlpyweb.abstract_reader_writer import AbstractReaderWriter
import requests
import json
from typing import Union, Optional, List
import time
import threading
from controlpyweb.errors import ControlPyWebAddressNotFoundError, WebIOConnectionError
lock = threading.Lock()
class ReaderWriter(AbstractReaderWriter):
def __init__(self, url: str, demand_address_exists: bool = True, timeout: float = 10.0,
keep_alive: bool = True, **kwargs):
url = 'http://{}'.format(url) if 'http' not in url else url
url = '{}/customState.json'.format(url)
self._url = url
self._io = dict()
self._previous_read_io = dict()
self._changes = dict()
self._first_read = False
self._last_hardware_read_time = None
self._req = requests if not keep_alive else requests.Session()
self.update_reads_on_write = bool(kwargs.get('update_reads_on_write', False))
self.demand_address_exists = demand_address_exists
self.timeout = timeout
@property
def last_hardware_read_time(self):
return self._last_hardware_read_time
def _check_for_address(self, addr: str):
if not self.demand_address_exists:
return
if not self._first_read:
return
if self._io is None:
return
if addr not in self._io:
raise ControlPyWebAddressNotFoundError(addr)
def _get(self, timeout: float = None) -> dict:
timeout = self.timeout if timeout is None else timeout
self._first_read = True
r = self._req.get(self._url, timeout=timeout)
r = None if r is None else r.json()
return r
@staticmethod
def _value_to_str(value):
if isinstance(value, bool):
value = '1' if value else '0'
return str(value)
@property
def changes(self):
return self._changes
def dumps(self, changes_only: bool = False):
with lock:
if changes_only:
if len(self._changes) == 0:
return ''
return json.dumps(self._changes)
return json.dumps(self._io)
def flush_changes(self):
with lock:
self._changes = dict()
def loads(self, json_str: str):
with lock:
self._first_read = True
self._io = json.loads(json_str)
def read(self, addr: str) -> Optional[Union[bool, int, float, str]]:
with lock:
if not self._first_read:
return None
self._check_for_address(addr)
val = self._io.get(addr)
return val
def read_immediate(self, addr: str, timeout: float = None) -> object:
try:
self._check_for_address(addr)
timeout = self.timeout if timeout is None else timeout
vals = self._get(timeout=timeout)
if vals is None:
return None
return vals.get(addr)
except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout) as ex:
raise WebIOConnectionError(ex)
def to_hardware(self, timeout: float = None):
return self.send_changes_to_hardware(timeout)
def send_changes_to_hardware(self, timeout: float = None):
try:
with lock:
if self._changes is None or len(self._changes) == 0:
return
timeout = self.timeout if timeout is None else timeout
self._req.get(self._url, params=self._changes, timeout=timeout)
self.flush_changes()
except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout) as ex:
raise WebIOConnectionError(ex)
def from_hardware(self, timeout: float = None):
self.update_from_hardware(timeout)
def update_from_hardware(self, timeout: float = None):
try:
timeout = self.timeout if timeout is None else timeout
with lock:
vals = self._get(timeout)
self._last_hardware_read_time = time.time()
if vals is not None:
self._io = vals
self.flush_changes()
except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout) as ex:
raise WebIOConnectionError(ex)
def write(self, addr: str, value: object) -> None:
with lock:
to_str = self._value_to_str(value)
if self.update_reads_on_write:
self._io[addr] = value
self._changes[addr] = to_str
def write_immediate(self, addr: Union[str, List[str]],
value: Union[object, List[object]], timeout: float = None):
if isinstance(addr, list):
if isinstance(value, list):
items = {a: self._value_to_str(v) for a, v in zip(addr, value)}
else:
value = self._value_to_str(value)
items = {a: value for a in addr}
else:
items = {addr: self._value_to_str(value)}
try:
timeout = self.timeout if timeout is None else timeout
with lock:
self._req.get(self._url, params=items, timeout=timeout)
for addr, value in items.items():
self._io[addr] = value
except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout) as ex:
raise WebIOConnectionError(ex)
| true
| true
|
7905ceba182a6a5b754db003e43fdf42d6548217
| 3,876
|
py
|
Python
|
tests/unit/bokeh/application/handlers/test_notebook__handlers.py
|
teresafds/bokeh
|
95b2a74ff463cfabdf9e3390951fa380166e6691
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit/bokeh/application/handlers/test_notebook__handlers.py
|
teresafds/bokeh
|
95b2a74ff463cfabdf9e3390951fa380166e6691
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit/bokeh/application/handlers/test_notebook__handlers.py
|
teresafds/bokeh
|
95b2a74ff463cfabdf9e3390951fa380166e6691
|
[
"BSD-3-Clause"
] | null | null | null |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# External imports
import nbconvert
import nbformat
from packaging import version
# Bokeh imports
from bokeh._testing.util.filesystem import with_temporary_file
from bokeh.document import Document
# Module under test
import bokeh.application.handlers.notebook as bahn # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
def with_script_contents(contents, func):
def with_file_object(f):
nbsource = nbformat.writes(contents)
f.write(nbsource.encode("UTF-8"))
f.flush()
func(f.name)
with_temporary_file(with_file_object)
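# A minimal sketch (illustrative, not part of the original file) of driving
# the helper above with a trivial callback; the real usage is in the tests
# below:
#
#     nb = nbformat.v4.new_notebook()
#     with_script_contents(nb, lambda path: print(path))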
class Test_NotebookHandler:
# Public methods ----------------------------------------------------------
def test_runner_strips_line_magics(self, ipython) -> None:
doc = Document()
source = nbformat.v4.new_notebook()
source.cells.append(nbformat.v4.new_code_cell('%time'))
def load(filename):
handler = bahn.NotebookHandler(filename=filename)
handler.modify_document(doc)
assert handler._runner.failed is False
with_script_contents(source, load)
def test_runner_strips_cell_magics(self) -> None:
doc = Document()
source = nbformat.v4.new_notebook()
code = '%%timeit\n1+1'
source.cells.append(nbformat.v4.new_code_cell(code))
def load(filename):
handler = bahn.NotebookHandler(filename=filename)
handler.modify_document(doc)
assert handler._runner.failed is False
with_script_contents(source, load)
def test_runner_uses_source_from_filename(self) -> None:
doc = Document()
source = nbformat.v4.new_notebook()
result = {}
def load(filename):
handler = bahn.NotebookHandler(filename=filename)
handler.modify_document(doc)
result['handler'] = handler
result['filename'] = filename
with_script_contents(source, load)
assert result['handler']._runner.path == result['filename']
if version.parse(nbconvert.__version__) < version.parse("5.4"):
expected_source = "\n# coding: utf-8\n"
else:
expected_source = "#!/usr/bin/env python\n# coding: utf-8\n"
assert result['handler']._runner.source == expected_source
assert not doc.roots
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
avg_line_length: 36.914286 | max_line_length: 79 | alphanum_fraction: 0.440402
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 7905cf5018ca6a1aa4b55ea08316f727e216c71c | size: 39,816 | ext: py | lang: Python
repo_path: test/functional/p2p_compactblocks.py
max_stars: thinkgandhi/bitcoin @ a5623ba89f050182ce9b1f570f3736b272b544b2, ["MIT"], 114 stars (2020-07-15 .. 2022-02-06)
max_issues: XSWLO/bitcoin @ b931f61b9ab098ea4ea8fbe4cbf0b03c566c3f63, ["MIT"], 13 issues (2019-11-12 .. 2021-09-27)
max_forks: XSWLO/bitcoin @ b931f61b9ab098ea4ea8fbe4cbf0b03c566c3f63, ["MIT"], 43 forks (2019-08-31 .. 2022-02-04)
#!/usr/bin/env python3
# Copyright (c) 2016-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test compact blocks (BIP 152).
Version 1 compact blocks are pre-segwit (txids)
Version 2 compact blocks are post-segwit (wtxids)
"""
import random
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment
from test_framework.messages import BlockTransactions, BlockTransactionsRequest, calculate_shortid, CBlock, CBlockHeader, CInv, COutPoint, CTransaction, CTxIn, CTxInWitness, CTxOut, FromHex, HeaderAndShortIDs, msg_no_witness_block, msg_no_witness_blocktxn, msg_cmpctblock, msg_getblocktxn, msg_getdata, msg_getheaders, msg_headers, msg_inv, msg_sendcmpct, msg_sendheaders, msg_tx, msg_block, msg_blocktxn, MSG_WITNESS_FLAG, NODE_NETWORK, P2PHeaderAndShortIDs, PrefilledTransaction, ser_uint256, ToHex
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.script import CScript, OP_TRUE, OP_DROP
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, wait_until, softfork_active
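# Illustrative sketch (not part of the original test): how a BIP 152 short
# transaction ID is derived from a compact block's siphash keys, using the
# helpers imported above. Per the docstring, version 1 hashes the txid and
# version 2 the wtxid; the same pattern appears in the verification loop of
# check_compactblock_construction_from_block below.
def _example_shortid(header_and_shortids, tx, version):
    [k0, k1] = header_and_shortids.get_siphash_keys()
    tx.calc_sha256()  # populate tx.sha256 (the txid as an integer)
    tx_hash = tx.calc_sha256(True) if version == 2 else tx.sha256
    return calculate_shortid(k0, k1, tx_hash)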
# TestP2PConn: A peer we use to send messages to bitcoind, and store responses.
class TestP2PConn(P2PInterface):
def __init__(self, cmpct_version):
super().__init__()
self.last_sendcmpct = []
self.block_announced = False
# Store the hashes of blocks we've seen announced.
# This is for synchronizing the p2p message traffic,
# so we can eg wait until a particular block is announced.
self.announced_blockhashes = set()
self.cmpct_version = cmpct_version
def on_sendcmpct(self, message):
self.last_sendcmpct.append(message)
def on_cmpctblock(self, message):
self.block_announced = True
self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
self.announced_blockhashes.add(self.last_message["cmpctblock"].header_and_shortids.header.sha256)
def on_headers(self, message):
self.block_announced = True
for x in self.last_message["headers"].headers:
x.calc_sha256()
self.announced_blockhashes.add(x.sha256)
def on_inv(self, message):
for x in self.last_message["inv"].inv:
if x.type == 2:
self.block_announced = True
self.announced_blockhashes.add(x.hash)
# Requires caller to hold mininode_lock
def received_block_announcement(self):
return self.block_announced
def clear_block_announcement(self):
with mininode_lock:
self.block_announced = False
self.last_message.pop("inv", None)
self.last_message.pop("headers", None)
self.last_message.pop("cmpctblock", None)
def get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
self.send_message(msg)
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [CBlockHeader(b) for b in new_blocks]
self.send_message(headers_message)
def request_headers_and_sync(self, locator, hashstop=0):
self.clear_block_announcement()
self.get_headers(locator, hashstop)
wait_until(self.received_block_announcement, timeout=30, lock=mininode_lock)
self.clear_block_announcement()
# Block until a block announcement for a particular block hash is
# received.
def wait_for_block_announcement(self, block_hash, timeout=30):
def received_hash():
return (block_hash in self.announced_blockhashes)
wait_until(received_hash, timeout=timeout, lock=mininode_lock)
def send_await_disconnect(self, message, timeout=30):
"""Sends a message to the node and wait for disconnect.
This is used when we want to send a message into the node that we expect
will get us disconnected, eg an invalid block."""
self.send_message(message)
wait_until(lambda: not self.is_connected, timeout=timeout, lock=mininode_lock)
class CompactBlocksTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [[
"-acceptnonstdtxn=1",
]]
self.utxos = []
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def build_block_on_tip(self, node, segwit=False):
height = node.getblockcount()
tip = node.getbestblockhash()
mtp = node.getblockheader(tip)['mediantime']
block = create_block(int(tip, 16), create_coinbase(height + 1), mtp + 1)
block.nVersion = 4
if segwit:
add_witness_commitment(block)
block.solve()
return block
    # Create 10 more anyone-can-spend utxos for testing.
def make_utxos(self):
block = self.build_block_on_tip(self.nodes[0])
self.segwit_node.send_and_ping(msg_no_witness_block(block))
assert int(self.nodes[0].getbestblockhash(), 16) == block.sha256
self.nodes[0].generatetoaddress(100, self.nodes[0].getnewaddress(address_type="bech32"))
total_value = block.vtx[0].vout[0].nValue
out_value = total_value // 10
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b''))
for i in range(10):
tx.vout.append(CTxOut(out_value, CScript([OP_TRUE])))
tx.rehash()
block2 = self.build_block_on_tip(self.nodes[0])
block2.vtx.append(tx)
block2.hashMerkleRoot = block2.calc_merkle_root()
block2.solve()
self.segwit_node.send_and_ping(msg_no_witness_block(block2))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256)
self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)])
# Test "sendcmpct" (between peers preferring the same version):
# - No compact block announcements unless sendcmpct is sent.
# - If sendcmpct is sent with version > preferred_version, the message is ignored.
# - If sendcmpct is sent with boolean 0, then block announcements are not
# made with compact blocks.
# - If sendcmpct is then sent with boolean 1, then new block announcements
# are made with compact blocks.
# If old_node is passed in, request compact blocks with version=preferred-1
# and verify that it receives block announcements via compact block.
def test_sendcmpct(self, test_node, old_node=None):
preferred_version = test_node.cmpct_version
node = self.nodes[0]
# Make sure we get a SENDCMPCT message from our peer
def received_sendcmpct():
return (len(test_node.last_sendcmpct) > 0)
wait_until(received_sendcmpct, timeout=30, lock=mininode_lock)
with mininode_lock:
# Check that the first version received is the preferred one
assert_equal(test_node.last_sendcmpct[0].version, preferred_version)
# And that we receive versions down to 1.
assert_equal(test_node.last_sendcmpct[-1].version, 1)
test_node.last_sendcmpct = []
tip = int(node.getbestblockhash(), 16)
def check_announcement_of_new_block(node, peer, predicate):
peer.clear_block_announcement()
block_hash = int(node.generate(1)[0], 16)
peer.wait_for_block_announcement(block_hash, timeout=30)
assert peer.block_announced
with mininode_lock:
assert predicate(peer), (
"block_hash={!r}, cmpctblock={!r}, inv={!r}".format(
block_hash, peer.last_message.get("cmpctblock", None), peer.last_message.get("inv", None)))
# We shouldn't get any block announcements via cmpctblock yet.
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Try one more time, this time after requesting headers.
test_node.request_headers_and_sync(locator=[tip])
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "inv" in p.last_message)
# Test a few ways of using sendcmpct that should NOT
# result in compact block announcements.
# Before each test, sync the headers chain.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with too-high version
sendcmpct = msg_sendcmpct()
sendcmpct.version = preferred_version + 1
sendcmpct.announce = True
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with valid version, but announce=False
sendcmpct.version = preferred_version
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Finally, try a SENDCMPCT message with announce=True
sendcmpct.version = preferred_version
sendcmpct.announce = True
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time (no headers sync should be needed!)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time, after turning on sendheaders
test_node.send_and_ping(msg_sendheaders())
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time, after sending a version-1, announce=false message.
sendcmpct.version = preferred_version - 1
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Now turn off announcements
sendcmpct.version = preferred_version
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "headers" in p.last_message)
if old_node is not None:
# Verify that a peer using an older protocol version can receive
# announcements from this node.
sendcmpct.version = preferred_version - 1
sendcmpct.announce = True
old_node.send_and_ping(sendcmpct)
# Header sync
old_node.request_headers_and_sync(locator=[tip])
check_announcement_of_new_block(node, old_node, lambda p: "cmpctblock" in p.last_message)
# This test actually causes bitcoind to (reasonably!) disconnect us, so do this last.
def test_invalid_cmpctblock_message(self):
self.nodes[0].generate(101)
block = self.build_block_on_tip(self.nodes[0])
cmpct_block = P2PHeaderAndShortIDs()
cmpct_block.header = CBlockHeader(block)
cmpct_block.prefilled_txn_length = 1
# This index will be too high
prefilled_txn = PrefilledTransaction(1, block.vtx[0])
cmpct_block.prefilled_txn = [prefilled_txn]
self.segwit_node.send_await_disconnect(msg_cmpctblock(cmpct_block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock)
# Compare the generated shortids to what we expect based on BIP 152, given
# bitcoind's choice of nonce.
def test_compactblock_construction(self, test_node, use_witness_address=True):
version = test_node.cmpct_version
node = self.nodes[0]
# Generate a bunch of transactions.
node.generate(101)
num_transactions = 25
address = node.getnewaddress()
segwit_tx_generated = False
for i in range(num_transactions):
txid = node.sendtoaddress(address, 0.1)
hex_tx = node.gettransaction(txid)["hex"]
tx = FromHex(CTransaction(), hex_tx)
if not tx.wit.is_null():
segwit_tx_generated = True
if use_witness_address:
assert segwit_tx_generated # check that our test is not broken
# Wait until we've seen the block announcement for the resulting tip
tip = int(node.getbestblockhash(), 16)
test_node.wait_for_block_announcement(tip)
# Make sure we will receive a fast-announce compact block
self.request_cb_announcements(test_node)
# Now mine a block, and look at the resulting compact block.
test_node.clear_block_announcement()
block_hash = int(node.generate(1)[0], 16)
# Store the raw block in our internal format.
block = FromHex(CBlock(), node.getblock("%064x" % block_hash, False))
for tx in block.vtx:
tx.calc_sha256()
block.rehash()
# Wait until the block was announced (via compact blocks)
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
# Now fetch and check the compact block
header_and_shortids = None
with mininode_lock:
assert "cmpctblock" in test_node.last_message
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
# Now fetch the compact block using a normal non-announce getdata
with mininode_lock:
test_node.clear_block_announcement()
inv = CInv(4, block_hash) # 4 == "CompactBlock"
test_node.send_message(msg_getdata([inv]))
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
# Now fetch and check the compact block
header_and_shortids = None
with mininode_lock:
assert "cmpctblock" in test_node.last_message
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
def check_compactblock_construction_from_block(self, version, header_and_shortids, block_hash, block):
# Check that we got the right block!
header_and_shortids.header.calc_sha256()
assert_equal(header_and_shortids.header.sha256, block_hash)
# Make sure the prefilled_txn appears to have included the coinbase
assert len(header_and_shortids.prefilled_txn) >= 1
assert_equal(header_and_shortids.prefilled_txn[0].index, 0)
# Check that all prefilled_txn entries match what's in the block.
for entry in header_and_shortids.prefilled_txn:
entry.tx.calc_sha256()
# This checks the non-witness parts of the tx agree
assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256)
# And this checks the witness
wtxid = entry.tx.calc_sha256(True)
if version == 2:
assert_equal(wtxid, block.vtx[entry.index].calc_sha256(True))
else:
# Shouldn't have received a witness
assert entry.tx.wit.is_null()
# Check that the cmpctblock message announced all the transactions.
assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx))
# And now check that all the shortids are as expected as well.
# Determine the siphash keys to use.
[k0, k1] = header_and_shortids.get_siphash_keys()
index = 0
while index < len(block.vtx):
if (len(header_and_shortids.prefilled_txn) > 0 and
header_and_shortids.prefilled_txn[0].index == index):
# Already checked prefilled transactions above
header_and_shortids.prefilled_txn.pop(0)
else:
tx_hash = block.vtx[index].sha256
if version == 2:
tx_hash = block.vtx[index].calc_sha256(True)
shortid = calculate_shortid(k0, k1, tx_hash)
assert_equal(shortid, header_and_shortids.shortids[0])
header_and_shortids.shortids.pop(0)
index += 1
# Test that bitcoind requests compact blocks when we announce new blocks
# via header or inv, and that responding to getblocktxn causes the block
# to be successfully reconstructed.
# Post-segwit: upgraded nodes would only make this request of cb-version-2,
# NODE_WITNESS peers. Unupgraded nodes would still make this request of
# any cb-version-1-supporting peer.
def test_compactblock_requests(self, test_node, segwit=True):
version = test_node.cmpct_version
node = self.nodes[0]
# Try announcing a block with an inv or header, expect a compactblock
# request
for announce in ["inv", "header"]:
block = self.build_block_on_tip(node, segwit=segwit)
with mininode_lock:
test_node.last_message.pop("getdata", None)
if announce == "inv":
test_node.send_message(msg_inv([CInv(2, block.sha256)]))
wait_until(lambda: "getheaders" in test_node.last_message, timeout=30, lock=mininode_lock)
test_node.send_header_for_blocks([block])
else:
test_node.send_header_for_blocks([block])
wait_until(lambda: "getdata" in test_node.last_message, timeout=30, lock=mininode_lock)
assert_equal(len(test_node.last_message["getdata"].inv), 1)
assert_equal(test_node.last_message["getdata"].inv[0].type, 4)
assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)
# Send back a compactblock message that omits the coinbase
comp_block = HeaderAndShortIDs()
comp_block.header = CBlockHeader(block)
comp_block.nonce = 0
[k0, k1] = comp_block.get_siphash_keys()
coinbase_hash = block.vtx[0].sha256
if version == 2:
coinbase_hash = block.vtx[0].calc_sha256(True)
comp_block.shortids = [calculate_shortid(k0, k1, coinbase_hash)]
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
# Expect a getblocktxn message.
with mininode_lock:
assert "getblocktxn" in test_node.last_message
absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, [0]) # should be a coinbase request
# Send the coinbase, and verify that the tip advances.
if version == 2:
msg = msg_blocktxn()
else:
msg = msg_no_witness_blocktxn()
msg.block_transactions.blockhash = block.sha256
msg.block_transactions.transactions = [block.vtx[0]]
test_node.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
# Create a chain of transactions from given utxo, and add to a new block.
def build_block_with_transactions(self, node, utxo, num_transactions):
block = self.build_block_on_tip(node)
for i in range(num_transactions):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b''))
tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.rehash()
utxo = [tx.sha256, 0, tx.vout[0].nValue]
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
return block
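    # Illustrative note (not in the original test): the loop above chains
    # spends, so block.vtx holds a dependent chain tx1 -> tx2 -> ... in which
    # each input spends the previous transaction's output 0. Callers in this
    # file typically record the final output as a fresh utxo, e.g.:
    #
    #     block = self.build_block_with_transactions(node, utxo, 5)
    #     self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])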
# Test that we only receive getblocktxn requests for transactions that the
# node needs, and that responding to them causes the block to be
# reconstructed.
def test_getblocktxn_requests(self, test_node):
version = test_node.cmpct_version
node = self.nodes[0]
with_witness = (version == 2)
def test_getblocktxn_response(compact_block, peer, expected_result):
msg = msg_cmpctblock(compact_block.to_p2p())
peer.send_and_ping(msg)
with mininode_lock:
assert "getblocktxn" in peer.last_message
absolute_indexes = peer.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, expected_result)
def test_tip_after_message(node, peer, msg, tip):
peer.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), tip)
# First try announcing compactblocks that won't reconstruct, and verify
# that we receive getblocktxn messages back.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5])
msg_bt = msg_no_witness_blocktxn()
if with_witness:
msg_bt = msg_blocktxn() # serialize with witnesses
msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Now try interspersing the prefilled transactions
comp_block.initialize_from_block(block, prefill_list=[0, 1, 5], use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [2, 3, 4])
msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
# Now try giving one transaction ahead of time.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
test_node.send_and_ping(msg_tx(block.vtx[1]))
assert block.vtx[1].hash in node.getrawmempool()
# Prefill 4 out of the 6 transactions, and verify that only the one
# that was not in the mempool is requested.
comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4], use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [5])
msg_bt.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
# Now provide all transactions to the node before the block is
# announced and verify reconstruction happens immediately.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
for tx in block.vtx[1:]:
test_node.send_message(msg_tx(tx))
test_node.sync_with_ping()
# Make sure all transactions were accepted.
mempool = node.getrawmempool()
for tx in block.vtx[1:]:
assert tx.hash in mempool
# Clear out last request.
with mininode_lock:
test_node.last_message.pop("getblocktxn", None)
# Send compact block
comp_block.initialize_from_block(block, prefill_list=[0], use_witness=with_witness)
test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256)
with mininode_lock:
# Shouldn't have gotten a request for any transaction
assert "getblocktxn" not in test_node.last_message
# Incorrectly responding to a getblocktxn shouldn't cause the block to be
# permanently failed.
def test_incorrect_blocktxn_response(self, test_node):
version = test_node.cmpct_version
node = self.nodes[0]
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Relay the first 5 transactions from the block in advance
for tx in block.vtx[1:6]:
test_node.send_message(msg_tx(tx))
test_node.sync_with_ping()
# Make sure all transactions were accepted.
mempool = node.getrawmempool()
for tx in block.vtx[1:6]:
assert tx.hash in mempool
# Send compact block
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, prefill_list=[0], use_witness=(version == 2))
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
absolute_indexes = []
with mininode_lock:
assert "getblocktxn" in test_node.last_message
absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, [6, 7, 8, 9, 10])
# Now give an incorrect response.
# Note that it's possible for bitcoind to be smart enough to know we're
# lying, since it could check to see if the shortid matches what we're
# sending, and eg disconnect us for misbehavior. If that behavior
# change was made, we could just modify this test by having a
# different peer provide the block further down, so that we're still
# verifying that the block isn't marked bad permanently. This is good
# enough for now.
msg = msg_no_witness_blocktxn()
if version == 2:
msg = msg_blocktxn()
msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:])
test_node.send_and_ping(msg)
# Tip should not have updated
assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
# We should receive a getdata request
wait_until(lambda: "getdata" in test_node.last_message, timeout=10, lock=mininode_lock)
assert_equal(len(test_node.last_message["getdata"].inv), 1)
assert test_node.last_message["getdata"].inv[0].type == 2 or test_node.last_message["getdata"].inv[0].type == 2 | MSG_WITNESS_FLAG
assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)
# Deliver the block
if version == 2:
test_node.send_and_ping(msg_block(block))
else:
test_node.send_and_ping(msg_no_witness_block(block))
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def test_getblocktxn_handler(self, test_node):
version = test_node.cmpct_version
node = self.nodes[0]
# bitcoind will not send blocktxn responses for blocks whose height is
# more than 10 blocks deep.
MAX_GETBLOCKTXN_DEPTH = 10
chain_height = node.getblockcount()
current_height = chain_height
while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH):
block_hash = node.getblockhash(current_height)
block = FromHex(CBlock(), node.getblock(block_hash, False))
msg = msg_getblocktxn()
msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [])
num_to_request = random.randint(1, len(block.vtx))
msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request)))
test_node.send_message(msg)
wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10, lock=mininode_lock)
[tx.calc_sha256() for tx in block.vtx]
with mininode_lock:
assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int(block_hash, 16))
all_indices = msg.block_txn_request.to_absolute()
for index in all_indices:
tx = test_node.last_message["blocktxn"].block_transactions.transactions.pop(0)
tx.calc_sha256()
assert_equal(tx.sha256, block.vtx[index].sha256)
if version == 1:
# Witnesses should have been stripped
assert tx.wit.is_null()
else:
# Check that the witness matches
assert_equal(tx.calc_sha256(True), block.vtx[index].calc_sha256(True))
test_node.last_message.pop("blocktxn", None)
current_height -= 1
# Next request should send a full block response, as we're past the
# allowed depth for a blocktxn response.
block_hash = node.getblockhash(current_height)
msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0])
with mininode_lock:
test_node.last_message.pop("block", None)
test_node.last_message.pop("blocktxn", None)
test_node.send_and_ping(msg)
with mininode_lock:
test_node.last_message["block"].block.calc_sha256()
assert_equal(test_node.last_message["block"].block.sha256, int(block_hash, 16))
assert "blocktxn" not in test_node.last_message
def test_compactblocks_not_at_tip(self, test_node):
node = self.nodes[0]
# Test that requesting old compactblocks doesn't work.
MAX_CMPCTBLOCK_DEPTH = 5
new_blocks = []
for i in range(MAX_CMPCTBLOCK_DEPTH + 1):
test_node.clear_block_announcement()
new_blocks.append(node.generate(1)[0])
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
node.generate(1)
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
with mininode_lock:
test_node.last_message.pop("block", None)
test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
wait_until(lambda: "block" in test_node.last_message, timeout=30, lock=mininode_lock)
with mininode_lock:
test_node.last_message["block"].block.calc_sha256()
assert_equal(test_node.last_message["block"].block.sha256, int(new_blocks[0], 16))
# Generate an old compactblock, and verify that it's not accepted.
cur_height = node.getblockcount()
hashPrevBlock = int(node.getblockhash(cur_height - 5), 16)
block = self.build_block_on_tip(node)
block.hashPrevBlock = hashPrevBlock
block.solve()
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block)
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
tips = node.getchaintips()
found = False
for x in tips:
if x["hash"] == block.hash:
assert_equal(x["status"], "headers-only")
found = True
break
assert found
# Requesting this block via getblocktxn should silently fail
# (to avoid fingerprinting attacks).
msg = msg_getblocktxn()
msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
with mininode_lock:
test_node.last_message.pop("blocktxn", None)
test_node.send_and_ping(msg)
with mininode_lock:
assert "blocktxn" not in test_node.last_message
def test_end_to_end_block_relay(self, listeners):
node = self.nodes[0]
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
[l.clear_block_announcement() for l in listeners]
# ToHex() won't serialize with witness, but this block has no witnesses
# anyway. TODO: repeat this test with witness tx's to a segwit node.
node.submitblock(ToHex(block))
for l in listeners:
wait_until(lambda: l.received_block_announcement(), timeout=30, lock=mininode_lock)
with mininode_lock:
for l in listeners:
assert "cmpctblock" in l.last_message
l.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
assert_equal(l.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256)
# Test that we don't get disconnected if we relay a compact block with valid header,
# but invalid transactions.
def test_invalid_tx_in_compactblock(self, test_node, use_segwit=True):
node = self.nodes[0]
assert len(self.utxos)
utxo = self.utxos[0]
block = self.build_block_with_transactions(node, utxo, 5)
del block.vtx[3]
block.hashMerkleRoot = block.calc_merkle_root()
if use_segwit:
# If we're testing with segwit, also drop the coinbase witness,
# but include the witness commitment.
add_witness_commitment(block)
block.vtx[0].wit.vtxinwit = []
block.solve()
# Now send the compact block with all transactions prefilled, and
# verify that we don't get disconnected.
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4], use_witness=use_segwit)
msg = msg_cmpctblock(comp_block.to_p2p())
test_node.send_and_ping(msg)
# Check that the tip didn't advance
        assert int(node.getbestblockhash(), 16) != block.sha256
test_node.sync_with_ping()
# Helper for enabling cb announcements
# Send the sendcmpct request and sync headers
def request_cb_announcements(self, peer):
node = self.nodes[0]
tip = node.getbestblockhash()
peer.get_headers(locator=[int(tip, 16)], hashstop=0)
msg = msg_sendcmpct()
msg.version = peer.cmpct_version
msg.announce = True
peer.send_and_ping(msg)
def test_compactblock_reconstruction_multiple_peers(self, stalling_peer, delivery_peer):
node = self.nodes[0]
assert len(self.utxos)
def announce_cmpct_block(node, peer):
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
cmpct_block = HeaderAndShortIDs()
cmpct_block.initialize_from_block(block)
msg = msg_cmpctblock(cmpct_block.to_p2p())
peer.send_and_ping(msg)
with mininode_lock:
assert "getblocktxn" in peer.last_message
return block, cmpct_block
block, cmpct_block = announce_cmpct_block(node, stalling_peer)
for tx in block.vtx[1:]:
delivery_peer.send_message(msg_tx(tx))
delivery_peer.sync_with_ping()
mempool = node.getrawmempool()
for tx in block.vtx[1:]:
assert tx.hash in mempool
delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Now test that delivering an invalid compact block won't break relay
block, cmpct_block = announce_cmpct_block(node, stalling_peer)
for tx in block.vtx[1:]:
delivery_peer.send_message(msg_tx(tx))
delivery_peer.sync_with_ping()
cmpct_block.prefilled_txn[0].tx.wit.vtxinwit = [CTxInWitness()]
cmpct_block.prefilled_txn[0].tx.wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
cmpct_block.use_witness = True
delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
assert int(node.getbestblockhash(), 16) != block.sha256
msg = msg_no_witness_blocktxn()
msg.block_transactions.blockhash = block.sha256
msg.block_transactions.transactions = block.vtx[1:]
stalling_peer.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def run_test(self):
# Setup the p2p connections
self.segwit_node = self.nodes[0].add_p2p_connection(TestP2PConn(cmpct_version=2))
self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(cmpct_version=1), services=NODE_NETWORK)
self.additional_segwit_node = self.nodes[0].add_p2p_connection(TestP2PConn(cmpct_version=2))
# We will need UTXOs to construct transactions in later tests.
self.make_utxos()
assert softfork_active(self.nodes[0], "segwit")
self.log.info("Testing SENDCMPCT p2p message... ")
self.test_sendcmpct(self.segwit_node, old_node=self.old_node)
self.test_sendcmpct(self.additional_segwit_node)
self.log.info("Testing compactblock construction...")
self.test_compactblock_construction(self.old_node)
self.test_compactblock_construction(self.segwit_node)
self.log.info("Testing compactblock requests (segwit node)... ")
self.test_compactblock_requests(self.segwit_node)
self.log.info("Testing getblocktxn requests (segwit node)...")
self.test_getblocktxn_requests(self.segwit_node)
self.log.info("Testing getblocktxn handler (segwit node should return witnesses)...")
self.test_getblocktxn_handler(self.segwit_node)
self.test_getblocktxn_handler(self.old_node)
self.log.info("Testing compactblock requests/announcements not at chain tip...")
self.test_compactblocks_not_at_tip(self.segwit_node)
self.test_compactblocks_not_at_tip(self.old_node)
self.log.info("Testing handling of incorrect blocktxn responses...")
self.test_incorrect_blocktxn_response(self.segwit_node)
self.log.info("Testing reconstructing compact blocks from all peers...")
self.test_compactblock_reconstruction_multiple_peers(self.segwit_node, self.additional_segwit_node)
# Test that if we submitblock to node1, we'll get a compact block
# announcement to all peers.
# (Post-segwit activation, blocks won't propagate from node0 to node1
# automatically, so don't bother testing a block announced to node0.)
self.log.info("Testing end-to-end block relay...")
self.request_cb_announcements(self.old_node)
self.request_cb_announcements(self.segwit_node)
self.test_end_to_end_block_relay([self.segwit_node, self.old_node])
self.log.info("Testing handling of invalid compact blocks...")
self.test_invalid_tx_in_compactblock(self.segwit_node)
self.test_invalid_tx_in_compactblock(self.old_node)
self.log.info("Testing invalid index in cmpctblock message...")
self.test_invalid_cmpctblock_message()
if __name__ == '__main__':
CompactBlocksTest().main()
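# Sketch of a typical invocation (assumed, not from this file): run directly
# from a built source tree, or via the functional test runner:
#
#     test/functional/p2p_compactblocks.py
#     test/functional/test_runner.py p2p_compactblocks.py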
avg_line_length: 46.568421 | max_line_length: 500 | alphanum_fraction: 0.672092
import random
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment
from test_framework.messages import BlockTransactions, BlockTransactionsRequest, calculate_shortid, CBlock, CBlockHeader, CInv, COutPoint, CTransaction, CTxIn, CTxInWitness, CTxOut, FromHex, HeaderAndShortIDs, msg_no_witness_block, msg_no_witness_blocktxn, msg_cmpctblock, msg_getblocktxn, msg_getdata, msg_getheaders, msg_headers, msg_inv, msg_sendcmpct, msg_sendheaders, msg_tx, msg_block, msg_blocktxn, MSG_WITNESS_FLAG, NODE_NETWORK, P2PHeaderAndShortIDs, PrefilledTransaction, ser_uint256, ToHex
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.script import CScript, OP_TRUE, OP_DROP
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, wait_until, softfork_active
class TestP2PConn(P2PInterface):
def __init__(self, cmpct_version):
super().__init__()
self.last_sendcmpct = []
self.block_announced = False
# This is for synchronizing the p2p message traffic,
# so we can eg wait until a particular block is announced.
self.announced_blockhashes = set()
self.cmpct_version = cmpct_version
def on_sendcmpct(self, message):
self.last_sendcmpct.append(message)
def on_cmpctblock(self, message):
self.block_announced = True
self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
self.announced_blockhashes.add(self.last_message["cmpctblock"].header_and_shortids.header.sha256)
def on_headers(self, message):
self.block_announced = True
for x in self.last_message["headers"].headers:
x.calc_sha256()
self.announced_blockhashes.add(x.sha256)
def on_inv(self, message):
for x in self.last_message["inv"].inv:
if x.type == 2:
self.block_announced = True
self.announced_blockhashes.add(x.hash)
# Requires caller to hold mininode_lock
def received_block_announcement(self):
return self.block_announced
def clear_block_announcement(self):
with mininode_lock:
self.block_announced = False
self.last_message.pop("inv", None)
self.last_message.pop("headers", None)
self.last_message.pop("cmpctblock", None)
def get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
self.send_message(msg)
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [CBlockHeader(b) for b in new_blocks]
self.send_message(headers_message)
def request_headers_and_sync(self, locator, hashstop=0):
self.clear_block_announcement()
self.get_headers(locator, hashstop)
wait_until(self.received_block_announcement, timeout=30, lock=mininode_lock)
self.clear_block_announcement()
# Block until a block announcement for a particular block hash is
# received.
def wait_for_block_announcement(self, block_hash, timeout=30):
def received_hash():
return (block_hash in self.announced_blockhashes)
wait_until(received_hash, timeout=timeout, lock=mininode_lock)
def send_await_disconnect(self, message, timeout=30):
self.send_message(message)
wait_until(lambda: not self.is_connected, timeout=timeout, lock=mininode_lock)
class CompactBlocksTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [[
"-acceptnonstdtxn=1",
]]
self.utxos = []
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def build_block_on_tip(self, node, segwit=False):
height = node.getblockcount()
tip = node.getbestblockhash()
mtp = node.getblockheader(tip)['mediantime']
block = create_block(int(tip, 16), create_coinbase(height + 1), mtp + 1)
block.nVersion = 4
if segwit:
add_witness_commitment(block)
block.solve()
return block
# Create 10 more anyone-can-spend utxo's for testing.
def make_utxos(self):
block = self.build_block_on_tip(self.nodes[0])
self.segwit_node.send_and_ping(msg_no_witness_block(block))
assert int(self.nodes[0].getbestblockhash(), 16) == block.sha256
self.nodes[0].generatetoaddress(100, self.nodes[0].getnewaddress(address_type="bech32"))
total_value = block.vtx[0].vout[0].nValue
out_value = total_value // 10
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b''))
for i in range(10):
tx.vout.append(CTxOut(out_value, CScript([OP_TRUE])))
tx.rehash()
block2 = self.build_block_on_tip(self.nodes[0])
block2.vtx.append(tx)
block2.hashMerkleRoot = block2.calc_merkle_root()
block2.solve()
self.segwit_node.send_and_ping(msg_no_witness_block(block2))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256)
self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)])
def test_sendcmpct(self, test_node, old_node=None):
preferred_version = test_node.cmpct_version
node = self.nodes[0]
def received_sendcmpct():
return (len(test_node.last_sendcmpct) > 0)
wait_until(received_sendcmpct, timeout=30, lock=mininode_lock)
with mininode_lock:
assert_equal(test_node.last_sendcmpct[0].version, preferred_version)
assert_equal(test_node.last_sendcmpct[-1].version, 1)
test_node.last_sendcmpct = []
tip = int(node.getbestblockhash(), 16)
def check_announcement_of_new_block(node, peer, predicate):
peer.clear_block_announcement()
block_hash = int(node.generate(1)[0], 16)
peer.wait_for_block_announcement(block_hash, timeout=30)
assert peer.block_announced
with mininode_lock:
assert predicate(peer), (
"block_hash={!r}, cmpctblock={!r}, inv={!r}".format(
block_hash, peer.last_message.get("cmpctblock", None), peer.last_message.get("inv", None)))
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Try one more time, this time after requesting headers.
test_node.request_headers_and_sync(locator=[tip])
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "inv" in p.last_message)
# Test a few ways of using sendcmpct that should NOT
# result in compact block announcements.
# Before each test, sync the headers chain.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with too-high version
sendcmpct = msg_sendcmpct()
sendcmpct.version = preferred_version + 1
sendcmpct.announce = True
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with valid version, but announce=False
sendcmpct.version = preferred_version
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Finally, try a SENDCMPCT message with announce=True
sendcmpct.version = preferred_version
sendcmpct.announce = True
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time (no headers sync should be needed!)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time, after turning on sendheaders
test_node.send_and_ping(msg_sendheaders())
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time, after sending a version-1, announce=false message.
sendcmpct.version = preferred_version - 1
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Now turn off announcements
sendcmpct.version = preferred_version
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "headers" in p.last_message)
if old_node is not None:
# Verify that a peer using an older protocol version can receive
# announcements from this node.
sendcmpct.version = preferred_version - 1
sendcmpct.announce = True
old_node.send_and_ping(sendcmpct)
# Header sync
old_node.request_headers_and_sync(locator=[tip])
check_announcement_of_new_block(node, old_node, lambda p: "cmpctblock" in p.last_message)
# This test actually causes bitcoind to (reasonably!) disconnect us, so do this last.
def test_invalid_cmpctblock_message(self):
self.nodes[0].generate(101)
block = self.build_block_on_tip(self.nodes[0])
cmpct_block = P2PHeaderAndShortIDs()
cmpct_block.header = CBlockHeader(block)
cmpct_block.prefilled_txn_length = 1
# This index will be too high
prefilled_txn = PrefilledTransaction(1, block.vtx[0])
cmpct_block.prefilled_txn = [prefilled_txn]
self.segwit_node.send_await_disconnect(msg_cmpctblock(cmpct_block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock)
# Compare the generated shortids to what we expect based on BIP 152, given
# bitcoind's choice of nonce.
def test_compactblock_construction(self, test_node, use_witness_address=True):
version = test_node.cmpct_version
node = self.nodes[0]
node.generate(101)
num_transactions = 25
address = node.getnewaddress()
segwit_tx_generated = False
for i in range(num_transactions):
txid = node.sendtoaddress(address, 0.1)
hex_tx = node.gettransaction(txid)["hex"]
tx = FromHex(CTransaction(), hex_tx)
if not tx.wit.is_null():
segwit_tx_generated = True
if use_witness_address:
assert segwit_tx_generated
tip = int(node.getbestblockhash(), 16)
test_node.wait_for_block_announcement(tip)
# Make sure we will receive a fast-announce compact block
self.request_cb_announcements(test_node)
# Now mine a block, and look at the resulting compact block.
test_node.clear_block_announcement()
block_hash = int(node.generate(1)[0], 16)
# Store the raw block in our internal format.
block = FromHex(CBlock(), node.getblock("%064x" % block_hash, False))
for tx in block.vtx:
tx.calc_sha256()
block.rehash()
# Wait until the block was announced (via compact blocks)
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
# Now fetch and check the compact block
header_and_shortids = None
with mininode_lock:
assert "cmpctblock" in test_node.last_message
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
# Now fetch the compact block using a normal non-announce getdata
with mininode_lock:
test_node.clear_block_announcement()
inv = CInv(4, block_hash) # 4 == "CompactBlock"
test_node.send_message(msg_getdata([inv]))
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
# Now fetch and check the compact block
header_and_shortids = None
with mininode_lock:
assert "cmpctblock" in test_node.last_message
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
def check_compactblock_construction_from_block(self, version, header_and_shortids, block_hash, block):
# Check that we got the right block!
header_and_shortids.header.calc_sha256()
assert_equal(header_and_shortids.header.sha256, block_hash)
# Make sure the prefilled_txn appears to have included the coinbase
assert len(header_and_shortids.prefilled_txn) >= 1
assert_equal(header_and_shortids.prefilled_txn[0].index, 0)
# Check that all prefilled_txn entries match what's in the block.
for entry in header_and_shortids.prefilled_txn:
entry.tx.calc_sha256()
assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256)
wtxid = entry.tx.calc_sha256(True)
if version == 2:
assert_equal(wtxid, block.vtx[entry.index].calc_sha256(True))
else:
assert entry.tx.wit.is_null()
# Check that the cmpctblock message announced all the transactions.
assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx))
# And now check that all the shortids are as expected as well.
# Determine the siphash keys to use.
[k0, k1] = header_and_shortids.get_siphash_keys()
index = 0
while index < len(block.vtx):
if (len(header_and_shortids.prefilled_txn) > 0 and
header_and_shortids.prefilled_txn[0].index == index):
# Already checked prefilled transactions above
header_and_shortids.prefilled_txn.pop(0)
else:
tx_hash = block.vtx[index].sha256
if version == 2:
tx_hash = block.vtx[index].calc_sha256(True)
shortid = calculate_shortid(k0, k1, tx_hash)
assert_equal(shortid, header_and_shortids.shortids[0])
header_and_shortids.shortids.pop(0)
index += 1
# Test that bitcoind requests compact blocks when we announce new blocks
# via header or inv, and that responding to getblocktxn causes the block
# to be successfully reconstructed.
# Post-segwit: upgraded nodes would only make this request of cb-version-2,
# NODE_WITNESS peers. Unupgraded nodes would still make this request of
# any cb-version-1-supporting peer.
def test_compactblock_requests(self, test_node, segwit=True):
version = test_node.cmpct_version
node = self.nodes[0]
# Try announcing a block with an inv or header, expect a compactblock
# request
for announce in ["inv", "header"]:
block = self.build_block_on_tip(node, segwit=segwit)
with mininode_lock:
test_node.last_message.pop("getdata", None)
if announce == "inv":
test_node.send_message(msg_inv([CInv(2, block.sha256)]))
wait_until(lambda: "getheaders" in test_node.last_message, timeout=30, lock=mininode_lock)
test_node.send_header_for_blocks([block])
else:
test_node.send_header_for_blocks([block])
wait_until(lambda: "getdata" in test_node.last_message, timeout=30, lock=mininode_lock)
assert_equal(len(test_node.last_message["getdata"].inv), 1)
assert_equal(test_node.last_message["getdata"].inv[0].type, 4)
assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)
# Send back a compactblock message that omits the coinbase
comp_block = HeaderAndShortIDs()
comp_block.header = CBlockHeader(block)
comp_block.nonce = 0
[k0, k1] = comp_block.get_siphash_keys()
coinbase_hash = block.vtx[0].sha256
if version == 2:
coinbase_hash = block.vtx[0].calc_sha256(True)
comp_block.shortids = [calculate_shortid(k0, k1, coinbase_hash)]
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
# Expect a getblocktxn message.
with mininode_lock:
assert "getblocktxn" in test_node.last_message
absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, [0]) # should be a coinbase request
# Send the coinbase, and verify that the tip advances.
if version == 2:
msg = msg_blocktxn()
else:
msg = msg_no_witness_blocktxn()
msg.block_transactions.blockhash = block.sha256
msg.block_transactions.transactions = [block.vtx[0]]
test_node.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
# Create a chain of transactions from given utxo, and add to a new block.
def build_block_with_transactions(self, node, utxo, num_transactions):
block = self.build_block_on_tip(node)
for i in range(num_transactions):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b''))
tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.rehash()
utxo = [tx.sha256, 0, tx.vout[0].nValue]
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
return block
# Test that we only receive getblocktxn requests for transactions that the
# node needs, and that responding to them causes the block to be
# reconstructed.
def test_getblocktxn_requests(self, test_node):
version = test_node.cmpct_version
node = self.nodes[0]
with_witness = (version == 2)
def test_getblocktxn_response(compact_block, peer, expected_result):
msg = msg_cmpctblock(compact_block.to_p2p())
peer.send_and_ping(msg)
with mininode_lock:
assert "getblocktxn" in peer.last_message
absolute_indexes = peer.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, expected_result)
def test_tip_after_message(node, peer, msg, tip):
peer.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), tip)
# First try announcing compactblocks that won't reconstruct, and verify
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5])
msg_bt = msg_no_witness_blocktxn()
if with_witness:
msg_bt = msg_blocktxn()
msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
comp_block.initialize_from_block(block, prefill_list=[0, 1, 5], use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [2, 3, 4])
msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
test_node.send_and_ping(msg_tx(block.vtx[1]))
assert block.vtx[1].hash in node.getrawmempool()
comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4], use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [5])
msg_bt.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
for tx in block.vtx[1:]:
test_node.send_message(msg_tx(tx))
test_node.sync_with_ping()
mempool = node.getrawmempool()
for tx in block.vtx[1:]:
assert tx.hash in mempool
with mininode_lock:
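            # Clear any stale getblocktxn first, so the assertion below really
            # checks that this block triggered no new request.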
test_node.last_message.pop("getblocktxn", None)
comp_block.initialize_from_block(block, prefill_list=[0], use_witness=with_witness)
test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256)
with mininode_lock:
assert "getblocktxn" not in test_node.last_message
    # Incorrectly responding to a getblocktxn shouldn't cause the block to be
    # permanently failed.
def test_incorrect_blocktxn_response(self, test_node):
version = test_node.cmpct_version
node = self.nodes[0]
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
for tx in block.vtx[1:6]:
test_node.send_message(msg_tx(tx))
test_node.sync_with_ping()
mempool = node.getrawmempool()
for tx in block.vtx[1:6]:
assert tx.hash in mempool
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, prefill_list=[0], use_witness=(version == 2))
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
absolute_indexes = []
with mininode_lock:
assert "getblocktxn" in test_node.last_message
absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, [6, 7, 8, 9, 10])
        # Now give an incorrect response.
        # Note that it's possible for bitcoind to be smart enough to know we're
        # lying, since it could check to see if the shortid matches what we're
        # sending, and eg disconnect us for misbehavior.  If that behavior
        # change was made, we could just modify this test by having a
        # different peer provide the block further down, so that we're still
        # verifying that the block isn't marked bad permanently. This is good
        # enough for now.
msg = msg_no_witness_blocktxn()
if version == 2:
msg = msg_blocktxn()
msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:])
test_node.send_and_ping(msg)
# Tip should not have updated
assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
# We should receive a getdata request
wait_until(lambda: "getdata" in test_node.last_message, timeout=10, lock=mininode_lock)
assert_equal(len(test_node.last_message["getdata"].inv), 1)
assert test_node.last_message["getdata"].inv[0].type == 2 or test_node.last_message["getdata"].inv[0].type == 2 | MSG_WITNESS_FLAG
assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)
# Deliver the block
if version == 2:
test_node.send_and_ping(msg_block(block))
else:
test_node.send_and_ping(msg_no_witness_block(block))
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def test_getblocktxn_handler(self, test_node):
version = test_node.cmpct_version
node = self.nodes[0]
# bitcoind will not send blocktxn responses for blocks whose height is
# more than 10 blocks deep.
MAX_GETBLOCKTXN_DEPTH = 10
chain_height = node.getblockcount()
current_height = chain_height
while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH):
block_hash = node.getblockhash(current_height)
block = FromHex(CBlock(), node.getblock(block_hash, False))
msg = msg_getblocktxn()
msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [])
num_to_request = random.randint(1, len(block.vtx))
msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request)))
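        # (BIP 152 encodes the requested indexes differentially on the wire;
        # from_absolute() performs that conversion.)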
test_node.send_message(msg)
wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10, lock=mininode_lock)
[tx.calc_sha256() for tx in block.vtx]
with mininode_lock:
assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int(block_hash, 16))
all_indices = msg.block_txn_request.to_absolute()
for index in all_indices:
tx = test_node.last_message["blocktxn"].block_transactions.transactions.pop(0)
tx.calc_sha256()
assert_equal(tx.sha256, block.vtx[index].sha256)
if version == 1:
# Witnesses should have been stripped
assert tx.wit.is_null()
else:
# Check that the witness matches
assert_equal(tx.calc_sha256(True), block.vtx[index].calc_sha256(True))
test_node.last_message.pop("blocktxn", None)
current_height -= 1
        # Next request should send a full block response, as we're past the
        # allowed depth for a blocktxn response.
block_hash = node.getblockhash(current_height)
msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0])
with mininode_lock:
test_node.last_message.pop("block", None)
test_node.last_message.pop("blocktxn", None)
test_node.send_and_ping(msg)
with mininode_lock:
test_node.last_message["block"].block.calc_sha256()
assert_equal(test_node.last_message["block"].block.sha256, int(block_hash, 16))
assert "blocktxn" not in test_node.last_message
def test_compactblocks_not_at_tip(self, test_node):
node = self.nodes[0]
MAX_CMPCTBLOCK_DEPTH = 5
new_blocks = []
for i in range(MAX_CMPCTBLOCK_DEPTH + 1):
test_node.clear_block_announcement()
new_blocks.append(node.generate(1)[0])
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
node.generate(1)
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
with mininode_lock:
test_node.last_message.pop("block", None)
test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
wait_until(lambda: "block" in test_node.last_message, timeout=30, lock=mininode_lock)
with mininode_lock:
test_node.last_message["block"].block.calc_sha256()
assert_equal(test_node.last_message["block"].block.sha256, int(new_blocks[0], 16))
# Generate an old compactblock, and verify that it's not accepted.
cur_height = node.getblockcount()
hashPrevBlock = int(node.getblockhash(cur_height - 5), 16)
block = self.build_block_on_tip(node)
block.hashPrevBlock = hashPrevBlock
block.solve()
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block)
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
tips = node.getchaintips()
found = False
for x in tips:
if x["hash"] == block.hash:
assert_equal(x["status"], "headers-only")
found = True
break
assert found
msg = msg_getblocktxn()
msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
with mininode_lock:
test_node.last_message.pop("blocktxn", None)
test_node.send_and_ping(msg)
with mininode_lock:
assert "blocktxn" not in test_node.last_message
def test_end_to_end_block_relay(self, listeners):
node = self.nodes[0]
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
[l.clear_block_announcement() for l in listeners]
        # ToHex() won't serialize with witness, but this block has no witnesses
        # anyway. TODO: repeat this test with witness tx's to a segwit node.
node.submitblock(ToHex(block))
for l in listeners:
wait_until(lambda: l.received_block_announcement(), timeout=30, lock=mininode_lock)
with mininode_lock:
for l in listeners:
assert "cmpctblock" in l.last_message
l.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
assert_equal(l.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256)
    # Test that we don't get disconnected if we relay a compact block with
    # valid header, but invalid transactions.
def test_invalid_tx_in_compactblock(self, test_node, use_segwit=True):
node = self.nodes[0]
assert len(self.utxos)
utxo = self.utxos[0]
block = self.build_block_with_transactions(node, utxo, 5)
del block.vtx[3]
block.hashMerkleRoot = block.calc_merkle_root()
if use_segwit:
            # If we're testing with segwit, also drop the coinbase witness,
            # but keep the witness commitment: the commitment will then fail
            # to validate against the (now empty) coinbase witness.
add_witness_commitment(block)
block.vtx[0].wit.vtxinwit = []
block.solve()
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4], use_witness=use_segwit)
msg = msg_cmpctblock(comp_block.to_p2p())
test_node.send_and_ping(msg)
# Check that the tip didn't advance
        assert int(node.getbestblockhash(), 16) != block.sha256
test_node.sync_with_ping()
def request_cb_announcements(self, peer):
node = self.nodes[0]
tip = node.getbestblockhash()
peer.get_headers(locator=[int(tip, 16)], hashstop=0)
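        # announce=True requests BIP 152 high-bandwidth mode: the node will
        # push future blocks to this peer as cmpctblock messages.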
msg = msg_sendcmpct()
msg.version = peer.cmpct_version
msg.announce = True
peer.send_and_ping(msg)
def test_compactblock_reconstruction_multiple_peers(self, stalling_peer, delivery_peer):
node = self.nodes[0]
assert len(self.utxos)
def announce_cmpct_block(node, peer):
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
cmpct_block = HeaderAndShortIDs()
cmpct_block.initialize_from_block(block)
msg = msg_cmpctblock(cmpct_block.to_p2p())
peer.send_and_ping(msg)
with mininode_lock:
assert "getblocktxn" in peer.last_message
return block, cmpct_block
block, cmpct_block = announce_cmpct_block(node, stalling_peer)
for tx in block.vtx[1:]:
delivery_peer.send_message(msg_tx(tx))
delivery_peer.sync_with_ping()
mempool = node.getrawmempool()
for tx in block.vtx[1:]:
assert tx.hash in mempool
delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
block, cmpct_block = announce_cmpct_block(node, stalling_peer)
for tx in block.vtx[1:]:
delivery_peer.send_message(msg_tx(tx))
delivery_peer.sync_with_ping()
cmpct_block.prefilled_txn[0].tx.wit.vtxinwit = [CTxInWitness()]
cmpct_block.prefilled_txn[0].tx.wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
cmpct_block.use_witness = True
delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
assert int(node.getbestblockhash(), 16) != block.sha256
msg = msg_no_witness_blocktxn()
msg.block_transactions.blockhash = block.sha256
msg.block_transactions.transactions = block.vtx[1:]
stalling_peer.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def run_test(self):
# Setup the p2p connections
self.segwit_node = self.nodes[0].add_p2p_connection(TestP2PConn(cmpct_version=2))
self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(cmpct_version=1), services=NODE_NETWORK)
self.additional_segwit_node = self.nodes[0].add_p2p_connection(TestP2PConn(cmpct_version=2))
# We will need UTXOs to construct transactions in later tests.
self.make_utxos()
assert softfork_active(self.nodes[0], "segwit")
self.log.info("Testing SENDCMPCT p2p message... ")
self.test_sendcmpct(self.segwit_node, old_node=self.old_node)
self.test_sendcmpct(self.additional_segwit_node)
self.log.info("Testing compactblock construction...")
self.test_compactblock_construction(self.old_node)
self.test_compactblock_construction(self.segwit_node)
self.log.info("Testing compactblock requests (segwit node)... ")
self.test_compactblock_requests(self.segwit_node)
self.log.info("Testing getblocktxn requests (segwit node)...")
self.test_getblocktxn_requests(self.segwit_node)
self.log.info("Testing getblocktxn handler (segwit node should return witnesses)...")
self.test_getblocktxn_handler(self.segwit_node)
self.test_getblocktxn_handler(self.old_node)
self.log.info("Testing compactblock requests/announcements not at chain tip...")
self.test_compactblocks_not_at_tip(self.segwit_node)
self.test_compactblocks_not_at_tip(self.old_node)
self.log.info("Testing handling of incorrect blocktxn responses...")
self.test_incorrect_blocktxn_response(self.segwit_node)
self.log.info("Testing reconstructing compact blocks from all peers...")
self.test_compactblock_reconstruction_multiple_peers(self.segwit_node, self.additional_segwit_node)
        # Test that a block submitted via submitblock is announced as a
        # compact block to peers that requested announcements, so don't
        # bother testing a block announced by another peer.
self.log.info("Testing end-to-end block relay...")
self.request_cb_announcements(self.old_node)
self.request_cb_announcements(self.segwit_node)
self.test_end_to_end_block_relay([self.segwit_node, self.old_node])
self.log.info("Testing handling of invalid compact blocks...")
self.test_invalid_tx_in_compactblock(self.segwit_node)
self.test_invalid_tx_in_compactblock(self.old_node)
self.log.info("Testing invalid index in cmpctblock message...")
self.test_invalid_cmpctblock_message()
if __name__ == '__main__':
CompactBlocksTest().main()
is_comment_constant_removed true | is_sharp_comment_removed true
hexsha 7905cf6747a6dad7faa66fb3e21b6a8f8787f32a | size 9591 | ext py | lang Python
max_stars_repo: pyBAScloudAPI/docs/conf.py | bascloud/BASCloudAPI | 6a06d430720e99204f84f5362b4f22d7d4a72b76 | ["MIT"] | count 3 | 2021-04-30T07:44:11.000Z .. 2021-05-03T06:35:01.000Z
max_issues_repo: pyBAScloudAPI/docs/conf.py | bascloud/BASCloudAPI | 6a06d430720e99204f84f5362b4f22d7d4a72b76 | ["MIT"] | count 9 | 2021-06-23T04:21:51.000Z .. 2022-01-17T04:15:06.000Z
max_forks_repo: pyBAScloudAPI/docs/conf.py | bascloud/BAScloudAPI | 6a06d430720e99204f84f5362b4f22d7d4a72b76 | ["MIT"] | count null | null .. null
# -*- coding: utf-8 -*-
#
# python_example documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 26 00:29:33 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.autosummary',
'sphinx.ext.napoleon',
]
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyBAScloudAPI'
copyright = u'2021, ProFM Facility & Project Management GmbH'
author = u'ProFM Facility & Project Management GmbH'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.2.0'
# The full version, including alpha/beta/rc tags.
release = u'0.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyBAScloudAPIdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pyBAScloudAPI.tex', u'pyBAScloudAPI Documentation',
u'ProFM Facility & Project Management GmbH', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyBAScloudAPI', u'pyBAScloudAPI Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pyBAScloudAPI', u'pyBAScloudAPI Documentation',
author, 'pyBAScloudAPI', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
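# (Newer Sphinx releases prefer the named form, e.g.
# intersphinx_mapping = {'python': ('https://docs.python.org/', None)}.)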
intersphinx_mapping = {'https://docs.python.org/': None}
avg_line_length 32.511864 | max_line_length 79 | alphanum_fraction 0.719737 | is_comment_constant_removed true | is_sharp_comment_removed true
hexsha 7905d01ce3d05f9088ae98cc3cdcfc1316a720cc | size 753 | ext py | lang Python
max_stars_repo: mod/models/user.py | HeraldStudio/herald_auth | f6aa59f830ce3c7edd6afabcdf8f583ffad52582 | ["MIT"] | count 6 | 2015-09-10T06:44:25.000Z .. 2017-02-22T00:13:07.000Z
max_issues_repo: mod/models/user.py | HeraldStudio/herald_auth | f6aa59f830ce3c7edd6afabcdf8f583ffad52582 | ["MIT"] | count null | null .. null
max_forks_repo: mod/models/user.py | HeraldStudio/herald_auth | f6aa59f830ce3c7edd6afabcdf8f583ffad52582 | ["MIT"] | count 3 | 2017-03-22T07:50:31.000Z .. 2018-05-19T13:58:14.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2014-10-26 13:02:58
# @Author : yml_bright@163.com
from sqlalchemy import Column, String, Integer
from sqlalchemy.ext.declarative import declarative_base
from db import dbengine, Base
class User(Base):
__tablename__ = 'user'
cardnum = Column(String(10), primary_key=True)
number = Column(String(50), nullable=True)
password = Column(String(50), nullable=False)
pe_password = Column(String(50), nullable=True)
lib_username = Column(String(50), nullable=True)
lib_password = Column(String(50), nullable=True)
card_query_pwd = Column(String(50), nullable=True)
card_consume_pwd = Column(String(50), nullable=True)
state = Column(Integer, nullable=False)
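# A minimal usage sketch (hypothetical values; assumes `dbengine` imported
# above is a configured SQLAlchemy engine):
#
#     from sqlalchemy.orm import sessionmaker
#     Base.metadata.create_all(dbengine)
#     session = sessionmaker(bind=dbengine)()
#     session.add(User(cardnum='213170000', password='secret', state=0))
#     session.commit()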
avg_line_length 35.857143 | max_line_length 56 | alphanum_fraction 0.713147 | is_comment_constant_removed true | is_sharp_comment_removed true
hexsha 7905d0bcd302b5858170c94d003ee28831928a26 | size 2464 | ext py | lang Python
max_stars_repo: master/searx-master/searx/engines/duckduckgo_images.py | AlexRogalskiy/DevArtifacts | 931aabb8cbf27656151c54856eb2ea7d1153203a | ["MIT"] | count 4 | 2018-09-07T15:35:24.000Z .. 2019-03-27T09:48:12.000Z
max_issues_repo: master/searx-master/searx/engines/duckduckgo_images.py | AlexRogalskiy/DevArtifacts | 931aabb8cbf27656151c54856eb2ea7d1153203a | ["MIT"] | count 371 | 2020-03-04T21:51:56.000Z .. 2022-03-31T20:59:11.000Z
max_forks_repo: master/searx-master/searx/engines/duckduckgo_images.py | AlexRogalskiy/DevArtifacts | 931aabb8cbf27656151c54856eb2ea7d1153203a | ["MIT"] | count 3 | 2019-06-18T19:57:17.000Z .. 2020-11-06T03:55:08.000Z
"""
DuckDuckGo (Images)
@website https://duckduckgo.com/
@provide-api yes (https://duckduckgo.com/api),
but images are not supported
@using-api no
@results JSON (site requires js to get images)
@stable no (JSON can change)
@parse url, title, img_src
@todo avoid extra request
"""
from json import loads
from searx.engines.xpath import extract_text
from searx.engines.duckduckgo import (
_fetch_supported_languages, supported_languages_url,
get_region_code, language_aliases
)
from searx.poolrequests import get
from searx.url_utils import urlencode
# engine dependent config
categories = ['images']
paging = True
language_support = True
safesearch = True
# search-url
images_url = 'https://duckduckgo.com/i.js?{query}&s={offset}&p={safesearch}&o=json&vqd={vqd}'
site_url = 'https://duckduckgo.com/?{query}&iar=images&iax=1&ia=images'
# run query in site to get vqd number needed for requesting images
# TODO: find a way to get this number without an extra request (is it a hash of the query?)
def get_vqd(query):
res = get(site_url.format(query=urlencode({'q': query})))
content = res.text
vqd = content[content.find('vqd=\'') + 5:]
vqd = vqd[:vqd.find('\'')]
return vqd
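# e.g. get_vqd('cats') fetches the regular results page and scrapes the token
# embedded as vqd='...'; this is fragile and breaks if the markup changes.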
# do search-request
def request(query, params):
# to avoid running actual external requests when testing
if 'is_test' not in params:
vqd = get_vqd(query)
else:
vqd = '12345'
offset = (params['pageno'] - 1) * 50
safesearch = params['safesearch'] - 1
region_code = get_region_code(params['language'], lang_list=supported_languages)
params['url'] = images_url.format(
query=urlencode({'q': query, 'l': region_code}), offset=offset, safesearch=safesearch, vqd=vqd)
return params
# get response from search-request
def response(resp):
results = []
content = resp.text
try:
res_json = loads(content)
    except ValueError:
        # invalid JSON (site layout changed or request blocked): no results
        return []
# parse results
for result in res_json['results']:
title = result['title']
url = result['url']
thumbnail = result['thumbnail']
image = result['image']
# append result
results.append({'template': 'images.html',
'title': title,
'content': '',
'thumbnail_src': thumbnail,
'img_src': image,
'url': url})
return results
avg_line_length 27.076923 | max_line_length 103 | alphanum_fraction 0.631088 | is_comment_constant_removed true | is_sharp_comment_removed true
hexsha 7905d121152c9a5dbbb1d610c539c4ff89d3458a | size 7942 | ext py | lang Python
max_stars_repo: config/settings/production.py | clairempr/bureau | c9fd114e637829b4e9ff643459d15602cc2efc2f | ["Apache-2.0"] | count 1 | 2019-02-15T09:05:35.000Z .. 2019-02-15T09:05:35.000Z
max_issues_repo: config/settings/production.py | clairempr/bureau | c9fd114e637829b4e9ff643459d15602cc2efc2f | ["Apache-2.0"] | count null | null .. null
max_forks_repo: config/settings/production.py | clairempr/bureau | c9fd114e637829b4e9ff643459d15602cc2efc2f | ["Apache-2.0"] | count null | null .. null
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env('DJANGO_SECRET_KEY')
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['127.0.0.1'])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES['default'] = env.db('DATABASE_URL') # noqa F405
DATABASES['default']['ATOMIC_REQUESTS'] = True # noqa F405
DATABASES['default']['CONN_MAX_AGE'] = env.int('CONN_MAX_AGE', default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': env('REDIS_URL'),
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
# Mimicing memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
'IGNORE_EXCEPTIONS': True,
}
}
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool('DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool('DJANGO_SECURE_HSTS_PRELOAD', default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool('DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
# STORAGES
# ------------------------------------------------------------------------------
# https://django-storages.readthedocs.io/en/latest/#installation
INSTALLED_APPS += ['storages'] # noqa F405
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_QUERYSTRING_AUTH = False
# DO NOT change these unless you know what you're doing.
_AWS_EXPIRY = 60 * 60 * 24 * 7
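# 60 * 60 * 24 * 7 seconds = 604800 seconds, i.e. one week of caching.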
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
'CacheControl': f'max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate',
}
# STATIC
# ------------------------
STATICFILES_STORAGE = 'config.settings.production.StaticRootS3Boto3Storage'
STATIC_URL = f'https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/static/'
# MEDIA
# ------------------------------------------------------------------------------
# region http://stackoverflow.com/questions/10390244/
# Full-fledge class: https://stackoverflow.com/a/18046120/104731
from storages.backends.s3boto3 import S3Boto3Storage # noqa E402
class StaticRootS3Boto3Storage(S3Boto3Storage):
location = 'static'
class MediaRootS3Boto3Storage(S3Boto3Storage):
location = 'media'
file_overwrite = False
# endregion
DEFAULT_FILE_STORAGE = 'config.settings.production.MediaRootS3Boto3Storage'
MEDIA_URL = f'https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/media/'
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]['OPTIONS']['loaders'] = [ # noqa F405
(
'django.template.loaders.cached.Loader',
[
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
),
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
'DJANGO_DEFAULT_FROM_EMAIL',
default='The Bureau <noreply@127.0.0.1>'
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[The Bureau]')
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Anymail (Mailgun)
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ['anymail'] # noqa F405
EMAIL_BACKEND = 'anymail.backends.mailgun.EmailBackend'
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
ANYMAIL = {
'MAILGUN_API_KEY': env('MAILGUN_API_KEY'),
'MAILGUN_SENDER_DOMAIN': env('MAILGUN_DOMAIN')
}
# Gunicorn
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['gunicorn'] # noqa F405
# Collectfast
# ------------------------------------------------------------------------------
# https://github.com/antonagestam/collectfast#installation
INSTALLED_APPS = ['collectfast'] + INSTALLED_APPS # noqa F405
AWS_PRELOAD_METADATA = True
# LOGGING
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins'],
'propagate': True
}
}
}
# Your stuff...
# ------------------------------------------------------------------------------
avg_line_length 39.316832 | max_line_length 96 | alphanum_fraction 0.605515 | is_comment_constant_removed true | is_sharp_comment_removed true
hexsha 7905d1b96e157fc5ccef56121e70b3a3436ece19 | size 51 | ext py | lang Python
max_stars_repo: sync_lingq/_version.py | CiaranCurran/auto-sync-lingq | d501a1faebf216114126936df1aa33b2facc9109 | ["MIT"] | count 6 | 2021-03-03T16:12:42.000Z .. 2022-02-09T23:05:10.000Z
max_issues_repo: sync_lingq/_version.py | CiaranCurran/auto-sync-lingq | d501a1faebf216114126936df1aa33b2facc9109 | ["MIT"] | count 3 | 2021-05-24T04:05:30.000Z .. 2021-10-31T19:59:29.000Z
max_forks_repo: sync_lingq/_version.py | CiaranCurran/auto-sync-lingq | d501a1faebf216114126936df1aa33b2facc9109 | ["MIT"] | count null | null .. null
"""
Version information
"""
__version__ = "1.0.0"
avg_line_length 8.5 | max_line_length 21 | alphanum_fraction 0.627451 | is_comment_constant_removed true | is_sharp_comment_removed true
__version__ = "1.0.0"
| true
| true
|
hexsha 7905d37a3b847296ee879088d177e5c67cc3a518 | size 1357 | ext py | lang Python
max_stars_repo: 1101-1200/1189-Minesweeper/1189-Minesweeper.py | jiadaizhao/LintCode | a8aecc65c47a944e9debad1971a7bc6b8776e48b | ["MIT"] | count 77 | 2017-12-30T13:33:37.000Z .. 2022-01-16T23:47:08.000Z
max_issues_repo: 1101-1200/1189-Minesweeper/1189-Minesweeper.py | jxhangithub/LintCode-1 | a8aecc65c47a944e9debad1971a7bc6b8776e48b | ["MIT"] | count 1 | 2018-05-14T14:15:40.000Z .. 2018-05-14T14:15:40.000Z
max_forks_repo: 1101-1200/1189-Minesweeper/1189-Minesweeper.py | jxhangithub/LintCode-1 | a8aecc65c47a944e9debad1971a7bc6b8776e48b | ["MIT"] | count 39 | 2017-12-07T14:36:25.000Z .. 2022-03-10T23:05:37.000Z
import collections
class Solution:
"""
@param board: a board
@param click: the position
@return: the new board
"""
def updateBoard(self, board, click):
# Write your code here
b = []
for s in board:
temp = []
for c in s:
temp.append(c)
b.append(temp)
row, col = click
if b[row][col] == 'M':
b[row][col] = 'X'
else:
m, n = len(board), len(board[0])
Q = collections.deque([(row, col)])
b[row][col] = 'B'
while Q:
r, c = Q.popleft()
count = 0
for nr, nc in (r-1, c-1), (r-1, c), (r-1, c+1), (r, c-1), (r, c+1), (r+1, c-1), (r+1, c), (r+1, c+1):
if 0 <= nr < m and 0 <= nc < n and b[nr][nc] == 'M':
count += 1
if count > 0:
b[r][c] = str(count)
else:
for nr, nc in (r-1, c-1), (r-1, c), (r-1, c+1), (r, c-1), (r, c+1), (r+1, c-1), (r+1, c), (r+1, c+1):
if 0 <= nr < m and 0 <= nc < n and b[nr][nc] == 'E':
Q.append((nr, nc))
b[nr][nc] = 'B'
return [''.join(row) for row in b]
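# A small worked example, hand-traced through the BFS above:
#
#     Solution().updateBoard(["EEM", "EEE"], [0, 0])  # -> ["B1M", "B1E"]
#
# The click floods the mine-free corner, the two cells adjacent to the mine
# become '1', and expansion stops at numbered cells, so the last 'E' stays.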
avg_line_length 34.794872 | max_line_length 121 | alphanum_fraction 0.342668 | is_comment_constant_removed true | is_sharp_comment_removed true
hexsha 7905d38cd81a9cd50dd87fd8ca7b86b6aa30c57b | size 59333 | ext py | lang Python
max_stars_repo: numpy/core/tests/test_regression.py | dlax/numpy | c869d124d08665b703539c9b2f14ec6ffbae0be0 | ["BSD-3-Clause"] | count null | null .. null
max_issues_repo: numpy/core/tests/test_regression.py | dlax/numpy | c869d124d08665b703539c9b2f14ec6ffbae0be0 | ["BSD-3-Clause"] | count null | null .. null
max_forks_repo: numpy/core/tests/test_regression.py | dlax/numpy | c869d124d08665b703539c9b2f14ec6ffbae0be0 | ["BSD-3-Clause"] | count null | null .. null
import pickle
import sys
import platform
import gc
import copy
import warnings
import tempfile
from StringIO import StringIO
from os import path
import numpy as np
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_equal,
assert_almost_equal, assert_array_equal, assert_array_almost_equal,
assert_raises, assert_warns, dec
)
from numpy.testing.utils import _assert_valid_refcount, WarningManager
from numpy.compat import asbytes, asunicode, asbytes_nested
if sys.version_info[0] >= 3:
import io
StringIO = io.BytesIO
rlevel = 1
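# `rlevel` is passed to each test via the `level=rlevel` default argument;
# numpy's old test framework used these levels to select test thoroughness.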
class TestRegression(TestCase):
def test_invalid_round(self,level=rlevel):
"""Ticket #3"""
v = 4.7599999999999998
assert_array_equal(np.array([v]),np.array(v))
def test_mem_empty(self,level=rlevel):
"""Ticket #7"""
np.empty((1,),dtype=[('x',np.int64)])
def test_pickle_transposed(self,level=rlevel):
"""Ticket #16"""
a = np.transpose(np.array([[2,9],[7,0],[3,8]]))
f = StringIO()
pickle.dump(a,f)
f.seek(0)
b = pickle.load(f)
f.close()
assert_array_equal(a,b)
def test_typeNA(self,level=rlevel):
"""Ticket #31"""
assert_equal(np.typeNA[np.int64],'Int64')
assert_equal(np.typeNA[np.uint64],'UInt64')
def test_dtype_names(self,level=rlevel):
"""Ticket #35"""
dt = np.dtype([(('name','label'),np.int32,3)])
def test_reduce(self,level=rlevel):
"""Ticket #40"""
assert_almost_equal(np.add.reduce([1.,.5],dtype=None), 1.5)
def test_zeros_order(self,level=rlevel):
"""Ticket #43"""
np.zeros([3], int, 'C')
np.zeros([3], order='C')
np.zeros([3], int, order='C')
def test_sort_bigendian(self,level=rlevel):
"""Ticket #47"""
a = np.linspace(0, 10, 11)
c = a.astype(np.dtype('<f8'))
c.sort()
assert_array_almost_equal(c, a)
def test_negative_nd_indexing(self,level=rlevel):
"""Ticket #49"""
c = np.arange(125).reshape((5,5,5))
origidx = np.array([-1, 0, 1])
idx = np.array(origidx)
c[idx]
assert_array_equal(idx, origidx)
def test_char_dump(self,level=rlevel):
"""Ticket #50"""
f = StringIO()
ca = np.char.array(np.arange(1000,1010),itemsize=4)
ca.dump(f)
f.seek(0)
ca = np.load(f)
f.close()
def test_noncontiguous_fill(self,level=rlevel):
"""Ticket #58."""
a = np.zeros((5,3))
b = a[:,:2,]
def rs():
b.shape = (10,)
self.assertRaises(AttributeError,rs)
def test_bool(self,level=rlevel):
"""Ticket #60"""
x = np.bool_(1)
def test_indexing1(self,level=rlevel):
"""Ticket #64"""
descr = [('x', [('y', [('z', 'c16', (2,)),]),]),]
buffer = ((([6j,4j],),),)
h = np.array(buffer, dtype=descr)
h['x']['y']['z']
def test_indexing2(self,level=rlevel):
"""Ticket #65"""
descr = [('x', 'i4', (2,))]
buffer = ([3,2],)
h = np.array(buffer, dtype=descr)
h['x']
def test_round(self,level=rlevel):
"""Ticket #67"""
x = np.array([1+2j])
assert_almost_equal(x**(-1), [1/(1+2j)])
def test_scalar_compare(self,level=rlevel):
"""Ticket #72"""
a = np.array(['test', 'auto'])
assert_array_equal(a == 'auto', np.array([False,True]))
self.assertTrue(a[1] == 'auto')
self.assertTrue(a[0] != 'auto')
b = np.linspace(0, 10, 11)
self.assertTrue(b != 'auto')
self.assertTrue(b[0] != 'auto')
def test_unicode_swapping(self,level=rlevel):
"""Ticket #79"""
ulen = 1
ucs_value = u'\U0010FFFF'
ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen)
ua2 = ua.newbyteorder()
def test_object_array_fill(self,level=rlevel):
"""Ticket #86"""
x = np.zeros(1, 'O')
x.fill([])
def test_mem_dtype_align(self,level=rlevel):
"""Ticket #93"""
self.assertRaises(TypeError,np.dtype,
{'names':['a'],'formats':['foo']},align=1)
@dec.knownfailureif((sys.version_info[0] >= 3) or
(sys.platform == "win32" and platform.architecture()[0] == "64bit"),
"numpy.intp('0xff', 16) not supported on Py3, "
"as it does not inherit from Python int")
def test_intp(self,level=rlevel):
"""Ticket #99"""
i_width = np.int_(0).nbytes*2 - 1
np.intp('0x' + 'f'*i_width,16)
self.assertRaises(OverflowError,np.intp,'0x' + 'f'*(i_width+1),16)
self.assertRaises(ValueError,np.intp,'0x1',32)
assert_equal(255,np.intp('0xFF',16))
assert_equal(1024,np.intp(1024))
def test_endian_bool_indexing(self,level=rlevel):
"""Ticket #105"""
a = np.arange(10.,dtype='>f8')
b = np.arange(10.,dtype='<f8')
xa = np.where((a>2) & (a<6))
xb = np.where((b>2) & (b<6))
ya = ((a>2) & (a<6))
yb = ((b>2) & (b<6))
assert_array_almost_equal(xa,ya.nonzero())
assert_array_almost_equal(xb,yb.nonzero())
assert_(np.all(a[ya] > 0.5))
assert_(np.all(b[yb] > 0.5))
def test_mem_dot(self,level=rlevel):
"""Ticket #106"""
x = np.random.randn(0,1)
y = np.random.randn(10,1)
z = np.dot(x, np.transpose(y))
def test_arange_endian(self,level=rlevel):
"""Ticket #111"""
ref = np.arange(10)
x = np.arange(10,dtype='<f8')
assert_array_equal(ref,x)
x = np.arange(10,dtype='>f8')
assert_array_equal(ref,x)
# Longfloat support is not consistent enough across
# platforms for this test to be meaningful.
# def test_longfloat_repr(self,level=rlevel):
# """Ticket #112"""
# if np.longfloat(0).itemsize > 8:
# a = np.exp(np.array([1000],dtype=np.longfloat))
# assert_(str(a)[1:9] == str(a[0])[:8])
def test_argmax(self,level=rlevel):
"""Ticket #119"""
a = np.random.normal(0,1,(4,5,6,7,8))
for i in xrange(a.ndim):
aargmax = a.argmax(i)
def test_mem_divmod(self,level=rlevel):
"""Ticket #126"""
for i in range(10):
divmod(np.array([i])[0],10)
def test_hstack_invalid_dims(self,level=rlevel):
"""Ticket #128"""
x = np.arange(9).reshape((3,3))
y = np.array([0,0,0])
self.assertRaises(ValueError,np.hstack,(x,y))
def test_squeeze_type(self,level=rlevel):
"""Ticket #133"""
a = np.array([3])
b = np.array(3)
assert_(type(a.squeeze()) is np.ndarray)
assert_(type(b.squeeze()) is np.ndarray)
def test_add_identity(self,level=rlevel):
"""Ticket #143"""
assert_equal(0,np.add.identity)
def test_binary_repr_0(self,level=rlevel):
"""Ticket #151"""
assert_equal('0',np.binary_repr(0))
def test_rec_iterate(self,level=rlevel):
"""Ticket #160"""
descr = np.dtype([('i',int),('f',float),('s','|S3')])
x = np.rec.array([(1,1.1,'1.0'),
(2,2.2,'2.0')],dtype=descr)
x[0].tolist()
[i for i in x[0]]
def test_unicode_string_comparison(self,level=rlevel):
"""Ticket #190"""
a = np.array('hello',np.unicode_)
b = np.array('world')
a == b
def test_tostring_FORTRANORDER_discontiguous(self,level=rlevel):
"""Fix in r2836"""
# Create discontiguous Fortran-ordered array
x = np.array(np.random.rand(3,3),order='F')[:,:2]
assert_array_almost_equal(x.ravel(),np.fromstring(x.tostring()))
def test_flat_assignment(self,level=rlevel):
"""Correct behaviour of ticket #194"""
x = np.empty((3,1))
x.flat = np.arange(3)
assert_array_almost_equal(x,[[0],[1],[2]])
x.flat = np.arange(3,dtype=float)
assert_array_almost_equal(x,[[0],[1],[2]])
def test_broadcast_flat_assignment(self,level=rlevel):
"""Ticket #194"""
x = np.empty((3,1))
def bfa(): x[:] = np.arange(3)
def bfb(): x[:] = np.arange(3,dtype=float)
self.assertRaises(ValueError, bfa)
self.assertRaises(ValueError, bfb)
def test_unpickle_dtype_with_object(self,level=rlevel):
"""Implemented in r2840"""
dt = np.dtype([('x',int),('y',np.object_),('z','O')])
f = StringIO()
pickle.dump(dt,f)
f.seek(0)
dt_ = pickle.load(f)
f.close()
assert_equal(dt,dt_)
def test_mem_array_creation_invalid_specification(self,level=rlevel):
"""Ticket #196"""
dt = np.dtype([('x',int),('y',np.object_)])
# Wrong way
self.assertRaises(ValueError, np.array, [1,'object'], dt)
# Correct way
np.array([(1,'object')],dt)
def test_recarray_single_element(self,level=rlevel):
"""Ticket #202"""
a = np.array([1,2,3],dtype=np.int32)
b = a.copy()
r = np.rec.array(a,shape=1,formats=['3i4'],names=['d'])
assert_array_equal(a,b)
assert_equal(a,r[0][0])
def test_zero_sized_array_indexing(self,level=rlevel):
"""Ticket #205"""
tmp = np.array([])
def index_tmp(): tmp[np.array(10)]
self.assertRaises(IndexError, index_tmp)
def test_chararray_rstrip(self,level=rlevel):
"""Ticket #222"""
x = np.chararray((1,),5)
x[0] = asbytes('a ')
x = x.rstrip()
assert_equal(x[0], asbytes('a'))
def test_object_array_shape(self,level=rlevel):
"""Ticket #239"""
assert_equal(np.array([[1,2],3,4],dtype=object).shape, (3,))
assert_equal(np.array([[1,2],[3,4]],dtype=object).shape, (2,2))
assert_equal(np.array([(1,2),(3,4)],dtype=object).shape, (2,2))
assert_equal(np.array([],dtype=object).shape, (0,))
assert_equal(np.array([[],[],[]],dtype=object).shape, (3,0))
assert_equal(np.array([[3,4],[5,6],None],dtype=object).shape, (3,))
def test_mem_around(self,level=rlevel):
"""Ticket #243"""
x = np.zeros((1,))
y = [0]
decimal = 6
np.around(abs(x-y),decimal) <= 10.0**(-decimal)
def test_character_array_strip(self,level=rlevel):
"""Ticket #246"""
x = np.char.array(("x","x ","x "))
for c in x: assert_equal(c,"x")
def test_lexsort(self,level=rlevel):
"""Lexsort memory error"""
v = np.array([1,2,3,4,5,6,7,8,9,10])
assert_equal(np.lexsort(v),0)
def test_pickle_dtype(self,level=rlevel):
"""Ticket #251"""
import pickle
pickle.dumps(np.float)
def test_swap_real(self, level=rlevel):
"""Ticket #265"""
assert_equal(np.arange(4,dtype='>c8').imag.max(),0.0)
assert_equal(np.arange(4,dtype='<c8').imag.max(),0.0)
assert_equal(np.arange(4,dtype='>c8').real.max(),3.0)
assert_equal(np.arange(4,dtype='<c8').real.max(),3.0)
def test_object_array_from_list(self, level=rlevel):
"""Ticket #270"""
a = np.array([1,'A',None])
def test_multiple_assign(self, level=rlevel):
"""Ticket #273"""
a = np.zeros((3,1),int)
a[[1,2]] = 1
def test_empty_array_type(self, level=rlevel):
assert_equal(np.array([]).dtype, np.zeros(0).dtype)
def test_void_copyswap(self, level=rlevel):
dt = np.dtype([('one', '<i4'),('two', '<i4')])
x = np.array((1,2), dtype=dt)
x = x.byteswap()
assert_(x['one'] > 1 and x['two'] > 2)
def test_method_args(self, level=rlevel):
# Make sure methods and functions have same default axis
# keyword and arguments
funcs1= ['argmax', 'argmin', 'sum', ('product', 'prod'),
('sometrue', 'any'),
('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'),
'ptp', 'cumprod', 'prod', 'std', 'var', 'mean',
'round', 'min', 'max', 'argsort', 'sort']
funcs2 = ['compress', 'take', 'repeat']
for func in funcs1:
arr = np.random.rand(8,7)
arr2 = arr.copy()
if isinstance(func, tuple):
func_meth = func[1]
func = func[0]
else:
func_meth = func
res1 = getattr(arr, func_meth)()
res2 = getattr(np, func)(arr2)
if res1 is None:
assert_(abs(arr-res2).max() < 1e-8, func)
else:
assert_(abs(res1-res2).max() < 1e-8, func)
for func in funcs2:
arr1 = np.random.rand(8,7)
arr2 = np.random.rand(8,7)
res1 = None
if func == 'compress':
arr1 = arr1.ravel()
res1 = getattr(arr2, func)(arr1)
else:
arr2 = (15*arr2).astype(int).ravel()
if res1 is None:
res1 = getattr(arr1, func)(arr2)
res2 = getattr(np, func)(arr1, arr2)
assert_(abs(res1-res2).max() < 1e-8, func)
def test_mem_lexsort_strings(self, level=rlevel):
"""Ticket #298"""
lst = ['abc','cde','fgh']
np.lexsort((lst,))
def test_fancy_index(self, level=rlevel):
"""Ticket #302"""
x = np.array([1,2])[np.array([0])]
assert_equal(x.shape,(1,))
def test_recarray_copy(self, level=rlevel):
"""Ticket #312"""
dt = [('x',np.int16),('y',np.float64)]
ra = np.array([(1,2.3)], dtype=dt)
rb = np.rec.array(ra, dtype=dt)
rb['x'] = 2.
assert_(ra['x'] != rb['x'])
def test_rec_fromarray(self, level=rlevel):
"""Ticket #322"""
x1 = np.array([[1,2],[3,4],[5,6]])
x2 = np.array(['a','dd','xyz'])
x3 = np.array([1.1,2,3])
np.rec.fromarrays([x1,x2,x3], formats="(2,)i4,a3,f8")
def test_object_array_assign(self, level=rlevel):
x = np.empty((2,2),object)
x.flat[2] = (1,2,3)
assert_equal(x.flat[2],(1,2,3))
def test_ndmin_float64(self, level=rlevel):
"""Ticket #324"""
x = np.array([1,2,3],dtype=np.float64)
assert_equal(np.array(x,dtype=np.float32,ndmin=2).ndim,2)
assert_equal(np.array(x,dtype=np.float64,ndmin=2).ndim,2)
def test_mem_axis_minimization(self, level=rlevel):
"""Ticket #327"""
data = np.arange(5)
data = np.add.outer(data,data)
def test_mem_float_imag(self, level=rlevel):
"""Ticket #330"""
np.float64(1.0).imag
def test_dtype_tuple(self, level=rlevel):
"""Ticket #334"""
assert_(np.dtype('i4') == np.dtype(('i4',())))
def test_dtype_posttuple(self, level=rlevel):
"""Ticket #335"""
np.dtype([('col1', '()i4')])
def test_numeric_carray_compare(self, level=rlevel):
"""Ticket #341"""
assert_equal(np.array(['X'], 'c'), asbytes('X'))
def test_string_array_size(self, level=rlevel):
"""Ticket #342"""
self.assertRaises(ValueError,
np.array,[['X'],['X','X','X']],'|S1')
def test_dtype_repr(self, level=rlevel):
"""Ticket #344"""
dt1=np.dtype(('uint32', 2))
dt2=np.dtype(('uint32', (2,)))
assert_equal(dt1.__repr__(), dt2.__repr__())
def test_reshape_order(self, level=rlevel):
"""Make sure reshape order works."""
a = np.arange(6).reshape(2,3,order='F')
assert_equal(a,[[0,2,4],[1,3,5]])
a = np.array([[1,2],[3,4],[5,6],[7,8]])
b = a[:,1]
assert_equal(b.reshape(2,2,order='F'), [[2,6],[4,8]])
def test_repeat_discont(self, level=rlevel):
"""Ticket #352"""
a = np.arange(12).reshape(4,3)[:,2]
assert_equal(a.repeat(3), [2,2,2,5,5,5,8,8,8,11,11,11])
def test_array_index(self, level=rlevel):
"""Make sure optimization is not called in this case."""
a = np.array([1,2,3])
a2 = np.array([[1,2,3]])
assert_equal(a[np.where(a==3)], a2[np.where(a2==3)])
def test_object_argmax(self, level=rlevel):
a = np.array([1,2,3],dtype=object)
assert_(a.argmax() == 2)
def test_recarray_fields(self, level=rlevel):
"""Ticket #372"""
dt0 = np.dtype([('f0','i4'),('f1','i4')])
dt1 = np.dtype([('f0','i8'),('f1','i8')])
for a in [np.array([(1,2),(3,4)],"i4,i4"),
np.rec.array([(1,2),(3,4)],"i4,i4"),
np.rec.array([(1,2),(3,4)]),
np.rec.fromarrays([(1,2),(3,4)],"i4,i4"),
np.rec.fromarrays([(1,2),(3,4)])]:
assert_(a.dtype in [dt0,dt1])
def test_random_shuffle(self, level=rlevel):
"""Ticket #374"""
a = np.arange(5).reshape((5,1))
b = a.copy()
np.random.shuffle(b)
assert_equal(np.sort(b, axis=0),a)
def test_refcount_vdot(self, level=rlevel):
"""Changeset #3443"""
_assert_valid_refcount(np.vdot)
def test_startswith(self, level=rlevel):
ca = np.char.array(['Hi','There'])
assert_equal(ca.startswith('H'),[True,False])
def test_noncommutative_reduce_accumulate(self, level=rlevel):
"""Ticket #413"""
tosubtract = np.arange(5)
todivide = np.array([2.0, 0.5, 0.25])
assert_equal(np.subtract.reduce(tosubtract), -10)
assert_equal(np.divide.reduce(todivide), 16.0)
assert_array_equal(np.subtract.accumulate(tosubtract),
np.array([0, -1, -3, -6, -10]))
assert_array_equal(np.divide.accumulate(todivide),
np.array([2., 4., 16.]))
def test_convolve_empty(self, level=rlevel):
"""Convolve should raise an error for empty input array."""
self.assertRaises(ValueError,np.convolve,[],[1])
self.assertRaises(ValueError,np.convolve,[1],[])
def test_multidim_byteswap(self, level=rlevel):
"""Ticket #449"""
r=np.array([(1,(0,1,2))], dtype="i2,3i2")
assert_array_equal(r.byteswap(),
np.array([(256,(0,256,512))],r.dtype))
def test_string_NULL(self, level=rlevel):
"""Changeset 3557"""
assert_equal(np.array("a\x00\x0b\x0c\x00").item(),
'a\x00\x0b\x0c')
def test_junk_in_string_fields_of_recarray(self, level=rlevel):
"""Ticket #483"""
r = np.array([[asbytes('abc')]], dtype=[('var1', '|S20')])
assert_(asbytes(r['var1'][0][0]) == asbytes('abc'))
def test_take_output(self, level=rlevel):
"""Ensure that 'take' honours output parameter."""
x = np.arange(12).reshape((3,4))
a = np.take(x,[0,2],axis=1)
b = np.zeros_like(a)
np.take(x,[0,2],axis=1,out=b)
assert_array_equal(a,b)
def test_array_str_64bit(self, level=rlevel):
"""Ticket #501"""
s = np.array([1, np.nan],dtype=np.float64)
errstate = np.seterr(all='raise')
try:
sstr = np.array_str(s)
finally:
np.seterr(**errstate)
def test_frompyfunc_endian(self, level=rlevel):
"""Ticket #503"""
from math import radians
uradians = np.frompyfunc(radians, 1, 1)
big_endian = np.array([83.4, 83.5], dtype='>f8')
little_endian = np.array([83.4, 83.5], dtype='<f8')
assert_almost_equal(uradians(big_endian).astype(float),
uradians(little_endian).astype(float))
def test_mem_string_arr(self, level=rlevel):
"""Ticket #514"""
s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
t = []
np.hstack((t, s ))
def test_arr_transpose(self, level=rlevel):
"""Ticket #516"""
x = np.random.rand(*(2,)*16)
y = x.transpose(range(16))
def test_string_mergesort(self, level=rlevel):
"""Ticket #540"""
x = np.array(['a']*32)
assert_array_equal(x.argsort(kind='m'), np.arange(32))
def test_argmax_byteorder(self, level=rlevel):
"""Ticket #546"""
a = np.arange(3, dtype='>f')
assert_(a[a.argmax()] == a.max())
def test_rand_seed(self, level=rlevel):
"""Ticket #555"""
for l in np.arange(4):
np.random.seed(l)
def test_mem_deallocation_leak(self, level=rlevel):
"""Ticket #562"""
a = np.zeros(5,dtype=float)
b = np.array(a,dtype=float)
del a, b
def test_mem_on_invalid_dtype(self):
"Ticket #583"
self.assertRaises(ValueError, np.fromiter, [['12',''],['13','']], str)
def test_dot_negative_stride(self, level=rlevel):
"""Ticket #588"""
x = np.array([[1,5,25,125.,625]])
y = np.array([[20.],[160.],[640.],[1280.],[1024.]])
z = y[::-1].copy()
y2 = y[::-1]
assert_equal(np.dot(x,z),np.dot(x,y2))
def test_object_casting(self, level=rlevel):
# This used to trigger the object-type version of
# the bitwise_or operation, because float64 -> object
# casting succeeds
def rs():
x = np.ones([484,286])
y = np.zeros([484,286])
x |= y
self.assertRaises(TypeError,rs)
def test_unicode_scalar(self, level=rlevel):
"""Ticket #600"""
import cPickle
x = np.array(["DROND", "DROND1"], dtype="U6")
el = x[1]
new = cPickle.loads(cPickle.dumps(el))
assert_equal(new, el)
def test_arange_non_native_dtype(self, level=rlevel):
"""Ticket #616"""
for T in ('>f4','<f4'):
dt = np.dtype(T)
assert_equal(np.arange(0,dtype=dt).dtype,dt)
assert_equal(np.arange(0.5,dtype=dt).dtype,dt)
assert_equal(np.arange(5,dtype=dt).dtype,dt)
def test_bool_indexing_invalid_nr_elements(self, level=rlevel):
s = np.ones(10,dtype=float)
x = np.array((15,),dtype=float)
def ia(x,s,v): x[(s>0)]=v
self.assertRaises(ValueError,ia,x,s,np.zeros(9,dtype=float))
self.assertRaises(ValueError,ia,x,s,np.zeros(11,dtype=float))
def test_mem_scalar_indexing(self, level=rlevel):
"""Ticket #603"""
x = np.array([0],dtype=float)
index = np.array(0,dtype=np.int32)
x[index]
def test_binary_repr_0_width(self, level=rlevel):
assert_equal(np.binary_repr(0,width=3),'000')
def test_fromstring(self, level=rlevel):
assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"),
[12,9,9])
def test_searchsorted_variable_length(self, level=rlevel):
x = np.array(['a','aa','b'])
y = np.array(['d','e'])
assert_equal(x.searchsorted(y), [3,3])
def test_string_argsort_with_zeros(self, level=rlevel):
"""Check argsort for strings containing zeros."""
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
assert_array_equal(x.argsort(kind='m'), np.array([1,0]))
assert_array_equal(x.argsort(kind='q'), np.array([1,0]))
def test_string_sort_with_zeros(self, level=rlevel):
"""Check sort for strings containing zeros."""
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
y = np.fromstring("\x00\x01\x00\x02", dtype="|S2")
assert_array_equal(np.sort(x, kind="q"), y)
def test_copy_detection_zero_dim(self, level=rlevel):
"""Ticket #658"""
np.indices((0,3,4)).T.reshape(-1,3)
def test_flat_byteorder(self, level=rlevel):
"""Ticket #657"""
x = np.arange(10)
assert_array_equal(x.astype('>i4'),x.astype('<i4').flat[:])
assert_array_equal(x.astype('>i4').flat[:],x.astype('<i4'))
def test_uint64_from_negative(self, level=rlevel) :
assert_equal(np.uint64(-2), np.uint64(18446744073709551614))
def test_sign_bit(self, level=rlevel):
x = np.array([0,-0.0,0])
assert_equal(str(np.abs(x)),'[ 0. 0. 0.]')
def test_flat_index_byteswap(self, level=rlevel):
for dt in (np.dtype('<i4'),np.dtype('>i4')):
x = np.array([-1,0,1],dtype=dt)
assert_equal(x.flat[0].dtype, x[0].dtype)
def test_copy_detection_corner_case(self, level=rlevel):
"""Ticket #658"""
np.indices((0,3,4)).T.reshape(-1,3)
def test_copy_detection_corner_case2(self, level=rlevel):
"""Ticket #771: strides are not set correctly when reshaping 0-sized
arrays"""
b = np.indices((0,3,4)).T.reshape(-1,3)
assert_equal(b.strides, (3 * b.itemsize, b.itemsize))
def test_object_array_refcounting(self, level=rlevel):
"""Ticket #633"""
if not hasattr(sys, 'getrefcount'):
return
# NB. this is probably CPython-specific
cnt = sys.getrefcount
a = object()
b = object()
c = object()
cnt0_a = cnt(a)
cnt0_b = cnt(b)
cnt0_c = cnt(c)
# -- 0d -> 1d broadcasted slice assignment
arr = np.zeros(5, dtype=np.object_)
arr[:] = a
assert_equal(cnt(a), cnt0_a + 5)
arr[:] = b
assert_equal(cnt(a), cnt0_a)
assert_equal(cnt(b), cnt0_b + 5)
arr[:2] = c
assert_equal(cnt(b), cnt0_b + 3)
assert_equal(cnt(c), cnt0_c + 2)
del arr
# -- 1d -> 2d broadcasted slice assignment
arr = np.zeros((5, 2), dtype=np.object_)
arr0 = np.zeros(2, dtype=np.object_)
arr0[0] = a
assert_(cnt(a) == cnt0_a + 1)
arr0[1] = b
assert_(cnt(b) == cnt0_b + 1)
arr[:,:] = arr0
assert_(cnt(a) == cnt0_a + 6)
assert_(cnt(b) == cnt0_b + 6)
arr[:,0] = None
assert_(cnt(a) == cnt0_a + 1)
del arr, arr0
# -- 2d copying + flattening
arr = np.zeros((5, 2), dtype=np.object_)
arr[:,0] = a
arr[:,1] = b
assert_(cnt(a) == cnt0_a + 5)
assert_(cnt(b) == cnt0_b + 5)
arr2 = arr.copy()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 10)
arr2 = arr[:,0].copy()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 5)
arr2 = arr.flatten()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 10)
del arr, arr2
# -- concatenate, repeat, take, choose
arr1 = np.zeros((5, 1), dtype=np.object_)
arr2 = np.zeros((5, 1), dtype=np.object_)
arr1[...] = a
arr2[...] = b
assert_(cnt(a) == cnt0_a + 5)
assert_(cnt(b) == cnt0_b + 5)
arr3 = np.concatenate((arr1, arr2))
assert_(cnt(a) == cnt0_a + 5 + 5)
assert_(cnt(b) == cnt0_b + 5 + 5)
arr3 = arr1.repeat(3, axis=0)
assert_(cnt(a) == cnt0_a + 5 + 3*5)
arr3 = arr1.take([1,2,3], axis=0)
assert_(cnt(a) == cnt0_a + 5 + 3)
x = np.array([[0],[1],[0],[1],[1]], int)
arr3 = x.choose(arr1, arr2)
assert_(cnt(a) == cnt0_a + 5 + 2)
assert_(cnt(b) == cnt0_b + 5 + 3)
def test_mem_custom_float_to_array(self, level=rlevel):
"""Ticket 702"""
class MyFloat(object):
def __float__(self):
return 1.0
tmp = np.atleast_1d([MyFloat()])
tmp2 = tmp.astype(float)
def test_object_array_refcount_self_assign(self, level=rlevel):
"""Ticket #711"""
class VictimObject(object):
deleted = False
def __del__(self):
self.deleted = True
d = VictimObject()
arr = np.zeros(5, dtype=np.object_)
arr[:] = d
del d
arr[:] = arr # refcount of 'd' might hit zero here
assert_(not arr[0].deleted)
arr[:] = arr # trying to induce a segfault by doing it again...
assert_(not arr[0].deleted)
def test_mem_fromiter_invalid_dtype_string(self, level=rlevel):
x = [1,2,3]
self.assertRaises(ValueError,
np.fromiter, [xi for xi in x], dtype='S')
def test_reduce_big_object_array(self, level=rlevel):
"""Ticket #713"""
oldsize = np.setbufsize(10*16)
a = np.array([None]*161, object)
assert_(not np.any(a))
np.setbufsize(oldsize)
def test_mem_0d_array_index(self, level=rlevel):
"""Ticket #714"""
np.zeros(10)[np.array(0)]
def test_floats_from_string(self, level=rlevel):
"""Ticket #640, floats from string"""
fsingle = np.single('1.234')
fdouble = np.double('1.234')
flongdouble = np.longdouble('1.234')
assert_almost_equal(fsingle, 1.234)
assert_almost_equal(fdouble, 1.234)
assert_almost_equal(flongdouble, 1.234)
def test_nonnative_endian_fill(self, level=rlevel):
""" Non-native endian arrays were incorrectly filled with scalars before
r5034.
"""
if sys.byteorder == 'little':
dtype = np.dtype('>i4')
else:
dtype = np.dtype('<i4')
x = np.empty([1], dtype=dtype)
x.fill(1)
assert_equal(x, np.array([1], dtype=dtype))
def test_dot_alignment_sse2(self, level=rlevel):
"""Test for ticket #551, changeset r5140"""
x = np.zeros((30,40))
y = pickle.loads(pickle.dumps(x))
# y is now typically not aligned on a 8-byte boundary
z = np.ones((1, y.shape[0]))
# This shouldn't cause a segmentation fault:
np.dot(z, y)
def test_astype_copy(self, level=rlevel):
"""Ticket #788, changeset r5155"""
# The test data file was generated by scipy.io.savemat.
# The dtype is float64, but the isbuiltin attribute is 0.
data_dir = path.join(path.dirname(__file__), 'data')
filename = path.join(data_dir, "astype_copy.pkl")
if sys.version_info[0] >= 3:
f = open(filename, 'rb')
xp = pickle.load(f, encoding='latin1')
f.close()
else:
f = open(filename)
xp = pickle.load(f)
f.close()
xpd = xp.astype(np.float64)
assert_((xp.__array_interface__['data'][0] !=
xpd.__array_interface__['data'][0]))
def test_compress_small_type(self, level=rlevel):
"""Ticket #789, changeset 5217.
"""
# compress with out argument segfaulted if cannot cast safely
import numpy as np
a = np.array([[1, 2], [3, 4]])
b = np.zeros((2, 1), dtype = np.single)
try:
a.compress([True, False], axis = 1, out = b)
raise AssertionError("compress with an out which cannot be " \
"safely casted should not return "\
"successfully")
except TypeError:
pass
def test_attributes(self, level=rlevel):
"""Ticket #791
"""
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, 'info', '')
dat = TestArray([[1,2,3,4],[5,6,7,8]],'jubba')
assert_(dat.info == 'jubba')
dat.resize((4,2))
assert_(dat.info == 'jubba')
dat.sort()
assert_(dat.info == 'jubba')
dat.fill(2)
assert_(dat.info == 'jubba')
dat.put([2,3,4],[6,3,4])
assert_(dat.info == 'jubba')
dat.setfield(4, np.int32,0)
assert_(dat.info == 'jubba')
dat.setflags()
assert_(dat.info == 'jubba')
assert_(dat.all(1).info == 'jubba')
assert_(dat.any(1).info == 'jubba')
assert_(dat.argmax(1).info == 'jubba')
assert_(dat.argmin(1).info == 'jubba')
assert_(dat.argsort(1).info == 'jubba')
assert_(dat.astype(TestArray).info == 'jubba')
assert_(dat.byteswap().info == 'jubba')
assert_(dat.clip(2,7).info == 'jubba')
assert_(dat.compress([0,1,1]).info == 'jubba')
assert_(dat.conj().info == 'jubba')
assert_(dat.conjugate().info == 'jubba')
assert_(dat.copy().info == 'jubba')
dat2 = TestArray([2, 3, 1, 0],'jubba')
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
assert_(dat2.choose(choices).info == 'jubba')
assert_(dat.cumprod(1).info == 'jubba')
assert_(dat.cumsum(1).info == 'jubba')
assert_(dat.diagonal().info == 'jubba')
assert_(dat.flatten().info == 'jubba')
assert_(dat.getfield(np.int32,0).info == 'jubba')
assert_(dat.imag.info == 'jubba')
assert_(dat.max(1).info == 'jubba')
assert_(dat.mean(1).info == 'jubba')
assert_(dat.min(1).info == 'jubba')
assert_(dat.newbyteorder().info == 'jubba')
assert_(dat.nonzero()[0].info == 'jubba')
assert_(dat.nonzero()[1].info == 'jubba')
assert_(dat.prod(1).info == 'jubba')
assert_(dat.ptp(1).info == 'jubba')
assert_(dat.ravel().info == 'jubba')
assert_(dat.real.info == 'jubba')
assert_(dat.repeat(2).info == 'jubba')
assert_(dat.reshape((2,4)).info == 'jubba')
assert_(dat.round().info == 'jubba')
assert_(dat.squeeze().info == 'jubba')
assert_(dat.std(1).info == 'jubba')
assert_(dat.sum(1).info == 'jubba')
assert_(dat.swapaxes(0,1).info == 'jubba')
assert_(dat.take([2,3,5]).info == 'jubba')
assert_(dat.transpose().info == 'jubba')
assert_(dat.T.info == 'jubba')
assert_(dat.var(1).info == 'jubba')
assert_(dat.view(TestArray).info == 'jubba')
def test_recarray_tolist(self, level=rlevel):
"""Ticket #793, changeset r5215
"""
# Comparisons fail for NaN, so we can't use random memory
# for the test.
buf = np.zeros(40, dtype=np.int8)
a = np.recarray(2, formats="i4,f8,f8", names="id,x,y", buf=buf)
b = a.tolist()
assert_( a[0].tolist() == b[0])
assert_( a[1].tolist() == b[1])
def test_char_array_creation(self, level=rlevel):
a = np.array('123', dtype='c')
b = np.array(asbytes_nested(['1','2','3']))
assert_equal(a,b)
def test_unaligned_unicode_access(self, level=rlevel) :
"""Ticket #825"""
for i in range(1,9) :
msg = 'unicode offset: %d chars'%i
t = np.dtype([('a','S%d'%i),('b','U2')])
x = np.array([(asbytes('a'),u'b')], dtype=t)
if sys.version_info[0] >= 3:
assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg)
else:
assert_equal(str(x), "[('a', u'b')]", err_msg=msg)
def test_sign_for_complex_nan(self, level=rlevel):
"""Ticket 794."""
olderr = np.seterr(invalid='ignore')
try:
C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])
have = np.sign(C)
want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])
assert_equal(have, want)
finally:
np.seterr(**olderr)
def test_for_equal_names(self, level=rlevel):
"""Ticket #674"""
dt = np.dtype([('foo', float), ('bar', float)])
a = np.zeros(10, dt)
b = list(a.dtype.names)
b[0] = "notfoo"
a.dtype.names = b
assert_(a.dtype.names[0] == "notfoo")
assert_(a.dtype.names[1] == "bar")
def test_for_object_scalar_creation(self, level=rlevel):
"""Ticket #816"""
a = np.object_()
b = np.object_(3)
b2 = np.object_(3.0)
c = np.object_([4,5])
d = np.object_([None, {}, []])
assert_(a is None)
assert_(type(b) is int)
assert_(type(b2) is float)
assert_(type(c) is np.ndarray)
assert_(c.dtype == object)
assert_(d.dtype == object)
def test_array_resize_method_system_error(self):
"""Ticket #840 - order should be an invalid keyword."""
x = np.array([[0,1],[2,3]])
self.assertRaises(TypeError, x.resize, (2,2), order='C')
def test_for_zero_length_in_choose(self, level=rlevel):
"Ticket #882"
a = np.array(1)
self.assertRaises(ValueError, lambda x: x.choose([]), a)
def test_array_ndmin_overflow(self):
"Ticket #947."
self.assertRaises(ValueError, lambda: np.array([1], ndmin=33))
def test_errobj_reference_leak(self, level=rlevel):
"""Ticket #955"""
old_err = np.seterr(all="ignore")
try:
z = int(0)
p = np.int32(-1)
gc.collect()
n_before = len(gc.get_objects())
z**p # this shouldn't leak a reference to errobj
gc.collect()
n_after = len(gc.get_objects())
assert_(n_before >= n_after, (n_before, n_after))
finally:
np.seterr(**old_err)
def test_void_scalar_with_titles(self, level=rlevel):
"""No ticket"""
data = [('john', 4), ('mary', 5)]
dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]
arr = np.array(data, dtype=dtype1)
assert_(arr[0][0] == 'john')
assert_(arr[0][1] == 4)
def test_blasdot_uninitialized_memory(self):
"""Ticket #950"""
for m in [0, 1, 2]:
for n in [0, 1, 2]:
for k in xrange(3):
# Try to ensure that x->data contains non-zero floats
x = np.array([123456789e199], dtype=np.float64)
x.resize((m, 0))
y = np.array([123456789e199], dtype=np.float64)
y.resize((0, n))
# `dot` should just return zero (m,n) matrix
z = np.dot(x, y)
assert_(np.all(z == 0))
assert_(z.shape == (m, n))
def test_zeros(self):
"""Regression test for #1061."""
# Set a size which cannot fit into a 64 bits signed integer
sz = 2 ** 64
good = 'Maximum allowed dimension exceeded'
try:
np.empty(sz)
except ValueError, e:
if not str(e) == good:
self.fail("Got msg '%s', expected '%s'" % (e, good))
except Exception, e:
self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_huge_arange(self):
"""Regression test for #1062."""
# Set a size which cannot fit into a 64 bits signed integer
sz = 2 ** 64
good = 'Maximum allowed size exceeded'
try:
a = np.arange(sz)
self.assertTrue(np.size(a) == sz)
except ValueError, e:
if not str(e) == good:
self.fail("Got msg '%s', expected '%s'" % (e, good))
except Exception, e:
self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_fromiter_bytes(self):
"""Ticket #1058"""
a = np.fromiter(range(10), dtype='b')
b = np.fromiter(range(10), dtype='B')
assert_(np.alltrue(a == np.array([0,1,2,3,4,5,6,7,8,9])))
assert_(np.alltrue(b == np.array([0,1,2,3,4,5,6,7,8,9])))
def test_array_from_sequence_scalar_array(self):
"""Ticket #1078: segfaults when creating an array with a sequence of 0d
arrays."""
a = np.array((np.ones(2), np.array(2)))
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], np.ones(2))
assert_equal(a[1], np.array(2))
a = np.array(((1,), np.array(1)))
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], (1,))
assert_equal(a[1], np.array(1))
def test_array_from_sequence_scalar_array2(self):
"""Ticket #1081: weird array with strange input..."""
t = np.array([np.array([]), np.array(0, object)])
assert_equal(t.shape, (2,))
assert_equal(t.dtype, np.dtype(object))
def test_array_too_big(self):
"""Ticket #1080."""
assert_raises(ValueError, np.zeros, [975]*7, np.int8)
assert_raises(ValueError, np.zeros, [26244]*5, np.int8)
def test_dtype_keyerrors_(self):
"""Ticket #1106."""
dt = np.dtype([('f1', np.uint)])
assert_raises(KeyError, dt.__getitem__, "f2")
assert_raises(IndexError, dt.__getitem__, 1)
assert_raises(ValueError, dt.__getitem__, 0.0)
def test_lexsort_buffer_length(self):
"""Ticket #1217, don't segfault."""
a = np.ones(100, dtype=np.int8)
b = np.ones(100, dtype=np.int32)
i = np.lexsort((a[::-1], b))
assert_equal(i, np.arange(100, dtype=np.int))
def test_object_array_to_fixed_string(self):
"""Ticket #1235."""
a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_)
b = np.array(a, dtype=(np.str_, 8))
assert_equal(a, b)
c = np.array(a, dtype=(np.str_, 5))
assert_equal(c, np.array(['abcde', 'ijklm']))
d = np.array(a, dtype=(np.str_, 12))
assert_equal(a, d)
e = np.empty((2, ), dtype=(np.str_, 8))
e[:] = a[:]
assert_equal(a, e)
def test_unicode_to_string_cast(self):
"""Ticket #1240."""
a = np.array([[u'abc', u'\u03a3'], [u'asdf', u'erw']], dtype='U')
def fail():
b = np.array(a, 'S4')
self.assertRaises(UnicodeEncodeError, fail)
def test_mixed_string_unicode_array_creation(self):
a = np.array(['1234', u'123'])
assert_(a.itemsize == 16)
a = np.array([u'123', '1234'])
assert_(a.itemsize == 16)
a = np.array(['1234', u'123', '12345'])
assert_(a.itemsize == 20)
a = np.array([u'123', '1234', u'12345'])
assert_(a.itemsize == 20)
a = np.array([u'123', '1234', u'1234'])
assert_(a.itemsize == 16)
def test_misaligned_objects_segfault(self):
"""Ticket #1198 and #1267"""
a1 = np.zeros((10,), dtype='O,c')
a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10')
a1['f0'] = a2
r = repr(a1)
np.argmax(a1['f0'])
a1['f0'][1] = "FOO"
a1['f0'] = "FOO"
a3 = np.array(a1['f0'], dtype='S')
np.nonzero(a1['f0'])
a1.sort()
a4 = copy.deepcopy(a1)
def test_misaligned_scalars_segfault(self):
"""Ticket #1267"""
s1 = np.array(('a', 'Foo'), dtype='c,O')
s2 = np.array(('b', 'Bar'), dtype='c,O')
s1['f1'] = s2['f1']
s1['f1'] = 'Baz'
def test_misaligned_dot_product_objects(self):
"""Ticket #1267"""
# This didn't require a fix, but it's worth testing anyway, because
# it may fail if .dot stops enforcing the arrays to be BEHAVED
a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c')
b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c')
np.dot(a['f0'], b['f0'])
def test_byteswap_complex_scalar(self):
"""Ticket #1259"""
z = np.array([-1j], '<c8')
x = z[0] # always native-endian
y = x.byteswap()
if x.dtype.byteorder == z.dtype.byteorder:
# little-endian machine
assert_equal(x, np.fromstring(y.tostring(), dtype='>c8'))
else:
# big-endian machine
assert_equal(x, np.fromstring(y.tostring(), dtype='<c8'))
def test_structured_arrays_with_objects1(self):
"""Ticket #1299"""
stra = 'aaaa'
strb = 'bbbb'
x = np.array([[(0,stra),(1,strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert_(x[0,1] == x[0,0])
def test_structured_arrays_with_objects2(self):
"""Ticket #1299 second test"""
stra = 'aaaa'
strb = 'bbbb'
numb = sys.getrefcount(strb)
numa = sys.getrefcount(stra)
x = np.array([[(0,stra),(1,strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert_(sys.getrefcount(strb) == numb)
assert_(sys.getrefcount(stra) == numa + 2)
def test_duplicate_title_and_name(self):
"""Ticket #1254"""
def func():
x = np.dtype([(('a', 'a'), 'i'), ('b', 'i')])
self.assertRaises(ValueError, func)
def test_signed_integer_division_overflow(self):
"""Ticket #1317."""
def test_type(t):
min = np.array([np.iinfo(t).min])
min //= -1
old_err = np.seterr(divide="ignore")
try:
for t in (np.int8, np.int16, np.int32, np.int64, np.int, np.long):
test_type(t)
finally:
np.seterr(**old_err)
def test_buffer_hashlib(self):
try:
from hashlib import md5
except ImportError:
from md5 import new as md5
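# ndarrays support the buffer interface, so md5 can hash the raw array
# data directly.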
x = np.array([1,2,3], dtype=np.dtype('<i4'))
assert_equal(md5(x).hexdigest(), '2a1dd1e1e59d0a384c26951e316cd7e6')
def test_numeric_handleError(self):
"""Ticket #1405"""
from numpy import numarray
# Just make sure this doesn't throw an exception
numarray.handleError(0, "")
def test_0d_string_scalar(self):
# Bug #1436; the following should succeed
np.asarray('x', '>c')
def test_log1p_compiler_shenanigans(self):
# Check if log1p is behaving on 32 bit intel systems.
assert_(np.isfinite(np.log1p(np.exp2(-53))))
def test_fromiter_comparison(self, level=rlevel):
a = np.fromiter(range(10), dtype='b')
b = np.fromiter(range(10), dtype='B')
assert_(np.alltrue(a == np.array([0,1,2,3,4,5,6,7,8,9])))
assert_(np.alltrue(b == np.array([0,1,2,3,4,5,6,7,8,9])))
def test_fromstring_crash(self):
# Ticket #1345: the following should not cause a crash
np.fromstring(asbytes('aa, aa, 1.0'), sep=',')
def test_ticket_1539(self):
dtypes = [x for x in np.typeDict.values()
if (issubclass(x, np.number)
and not issubclass(x, np.timedelta64))]
a = np.array([], dtypes[0])
failures = []
for x in dtypes:
b = a.astype(x)
for y in dtypes:
c = a.astype(y)
try:
np.dot(b, c)
except TypeError, e:
failures.append((x, y))
if failures:
raise AssertionError("Failures: %r" % failures)
def test_ticket_1538(self):
x = np.finfo(np.float32)
for name in 'eps epsneg max min resolution tiny'.split():
assert_equal(type(getattr(x, name)), np.float32,
err_msg=name)
def test_ticket_1434(self):
# Check that the out= argument in var and std has an effect
data = np.array(((1,2,3),(4,5,6),(7,8,9)))
out = np.zeros((3,))
ret = data.var(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.var(axis=1))
ret = data.std(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.std(axis=1))
def test_complex_nan_maximum(self):
cnan = complex(0, np.nan)
assert_equal(np.maximum(1, cnan), cnan)
def test_subclass_int_tuple_assignment(self):
# ticket #1563
class Subclass(np.ndarray):
def __new__(cls,i):
return np.ones((i,)).view(cls)
x = Subclass(5)
x[(0,)] = 2 # shouldn't raise an exception
assert_equal(x[0], 2)
def test_ufunc_no_unnecessary_views(self):
# ticket #1548
class Subclass(np.ndarray):
pass
x = np.array([1,2,3]).view(Subclass)
y = np.add(x, x, x)
assert_equal(id(x), id(y))
def test_take_refcount(self):
# ticket #939
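# Whether or not take() raises here, the refcount of the out= array
# must be unchanged afterwards.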
a = np.arange(16, dtype=np.float)
a.shape = (4,4)
lut = np.ones((5 + 3, 4), np.float)
rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)
c1 = sys.getrefcount(rgba)
try:
lut.take(a, axis=0, mode='clip', out=rgba)
except TypeError:
pass
c2 = sys.getrefcount(rgba)
assert_equal(c1, c2)
def test_fromfile_tofile_seeks(self):
# On Python 3, tofile/fromfile used to get (#1610) the Python
# file handle out of sync
f0 = tempfile.NamedTemporaryFile()
f = f0.file
f.write(np.arange(255, dtype='u1').tostring())
f.seek(20)
ret = np.fromfile(f, count=4, dtype='u1')
assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1'))
assert_equal(f.tell(), 24)
f.seek(40)
np.array([1, 2, 3], dtype='u1').tofile(f)
assert_equal(f.tell(), 43)
f.seek(40)
data = f.read(3)
assert_equal(data, asbytes("\x01\x02\x03"))
f.seek(80)
f.read(4)
data = np.fromfile(f, dtype='u1', count=4)
assert_equal(data, np.array([84, 85, 86, 87], dtype='u1'))
f.close()
def test_complex_scalar_warning(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_warns(np.ComplexWarning, float, x)
warn_ctx = WarningManager()
warn_ctx.__enter__()
try:
warnings.simplefilter('ignore')
assert_equal(float(x), float(x.real))
finally:
warn_ctx.__exit__()
def test_complex_scalar_complex_cast(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_equal(complex(x), 1+2j)
def test_uint_int_conversion(self):
x = 2**64 - 1
assert_equal(int(np.uint64(x)), x)
def test_duplicate_field_names_assign(self):
ra = np.fromiter(((i*3, i*2) for i in xrange(10)), dtype='i8,f8')
ra.dtype.names = ('f1', 'f2')
rep = repr(ra) # should not cause a segmentation fault
assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1'))
def test_eq_string_and_object_array(self):
# From e-mail thread "__eq__ with str and object" (Keith Goodman)
a1 = np.array(['a', 'b'], dtype=object)
a2 = np.array(['a', 'c'])
assert_array_equal(a1 == a2, [True, False])
assert_array_equal(a2 == a1, [True, False])
def test_nonzero_byteswap(self):
a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32)
a.dtype = np.float32
assert_equal(a.nonzero()[0], [1])
a = a.byteswap().newbyteorder()
assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap
def test_find_common_type_boolean(self):
# Ticket #1695
assert_(np.find_common_type([],['?','?']) == '?')
def test_empty_mul(self):
a = np.array([1.])
a[1:1] *= 2
assert_equal(a, [1.])
def test_array_side_effect(self):
assert_equal(np.dtype('S10').itemsize, 10)
A = np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_)
# This was throwing an exception because in ctors.c,
# discover_itemsize was calling PyObject_Length without checking
# the return code. This failed to get the length of the number 2,
# and the exception hung around until something checked
# PyErr_Occurred() and returned an error.
assert_equal(np.dtype('S10').itemsize, 10)
def test_any_float(self):
# all and any for floats
a = np.array([0.1, 0.9])
assert_(np.any(a))
assert_(np.all(a))
def test_large_float_sum(self):
a = np.arange(10000, dtype='f')
assert_equal(a.sum(dtype='d'), a.astype('d').sum())
def test_ufunc_casting_out(self):
a = np.array(1.0, dtype=np.float32)
b = np.array(1.0, dtype=np.float64)
c = np.array(1.0, dtype=np.float32)
np.add(a, b, out=c)
assert_equal(c, 2.0)
def test_array_scalar_contiguous(self):
# Array scalars are both C and Fortran contiguous
assert_(np.array(1.0).flags.c_contiguous)
assert_(np.array(1.0).flags.f_contiguous)
assert_(np.array(np.float32(1.0)).flags.c_contiguous)
assert_(np.array(np.float32(1.0)).flags.f_contiguous)
def test_object_array_self_reference(self):
# Object arrays with references to themselves can cause problems
a = np.array(0, dtype=object)
a[()] = a
assert_raises(TypeError, int, a)
assert_raises(TypeError, long, a)
assert_raises(TypeError, float, a)
assert_raises(TypeError, oct, a)
assert_raises(TypeError, hex, a)
# This was causing a to become like the above
a = np.array(0, dtype=object)
a[...] += 1
assert_equal(a, 1)
def test_zerosize_accumulate(self):
"Ticket #1733"
x = np.array([[42, 0]], dtype=np.uint32)
assert_equal(np.add.accumulate(x[:-1,0]), [])
def test_objectarray_setfield(self):
# Setfield directly manipulates the raw array data,
# so is invalid for object arrays.
x = np.array([1,2,3], dtype=object)
assert_raises(RuntimeError, x.setfield, 4, np.int32, 0)
def test_setting_rank0_string(self):
"Ticket #1736"
s1 = asbytes("hello1")
s2 = asbytes("hello2")
a = np.zeros((), dtype="S10")
a[()] = s1
assert_equal(a, np.array(s1))
a[()] = np.array(s2)
assert_equal(a, np.array(s2))
a = np.zeros((), dtype='f4')
a[()] = 3
assert_equal(a, np.array(3))
a[()] = np.array(4)
assert_equal(a, np.array(4))
def test_string_astype(self):
"Ticket #1748"
s1 = asbytes('black')
s2 = asbytes('white')
s3 = asbytes('other')
a = np.array([[s1],[s2],[s3]])
assert_equal(a.dtype, np.dtype('S5'))
b = a.astype(np.dtype('S0'))
assert_equal(b.dtype, np.dtype('S5'))
def test_ticket_1756(self):
"""Ticket #1756 """
s = asbytes('0123456789abcdef')
a = np.array([s]*5)
for i in range(1,17):
a1 = np.array(a, "|S%d"%i)
a2 = np.array([s[:i]]*5)
assert_equal(a1, a2)
def test_fields_strides(self):
"Ticket #1760"
r=np.fromstring('abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2')
assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2])
assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1'])
assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()])
assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides)
def test_ticket_1770(self):
"Should not segfault on python 3k"
import numpy as np
try:
a = np.zeros((1,), dtype=[('f1', 'f')])
a['f1'] = 1
a['f2'] = 1
except ValueError:
pass
except:
raise AssertionError
def test_ticket_1608(self):
"x.flat shouldn't modify data"
x = np.array([[1,2],[3,4]]).T
y = np.array(x.flat)
assert_equal(x, [[1,3],[2,4]])
def test_pickle_string_overwrite(self):
import re
data = np.array([1], dtype='b')
blob = pickle.dumps(data, protocol=1)
data = pickle.loads(blob)
# Check that loads does not clobber interned strings
s = re.sub("a(.)", "\x01\\1", "a_")
assert_equal(s[0], "\x01")
data[0] = 0xbb
s = re.sub("a(.)", "\x01\\1", "a_")
assert_equal(s[0], "\x01")
def test_structured_type_to_object(self):
a_rec = np.array([(0,1), (3,2)], dtype='i4,i8')
a_obj = np.empty((2,), dtype=object)
a_obj[0] = (0,1)
a_obj[1] = (3,2)
# astype records -> object
assert_equal(a_rec.astype(object), a_obj)
# '=' records -> object
b = np.empty_like(a_obj)
b[...] = a_rec
assert_equal(b, a_obj)
# '=' object -> records
b = np.empty_like(a_rec)
b[...] = a_obj
assert_equal(b, a_rec)
def test_assign_obj_listoflists(self):
# Ticket # 1870
# The inner list should get assigned to the object elements
a = np.zeros(4, dtype=object)
b = a.copy()
a[0] = [1]
a[1] = [2]
a[2] = [3]
a[3] = [4]
b[...] = [[1], [2], [3], [4]]
assert_equal(a, b)
# The first dimension should get broadcast
a = np.zeros((2,2), dtype=object)
a[...] = [[1,2]]
assert_equal(a, [[1,2], [1,2]])
def test_memoryleak(self):
# Ticket #1917 - ensure that array data doesn't leak
for i in range(1000):
# 100MB times 1000 would give 100GB of memory usage if it leaks
a = np.empty((100000000,), dtype='i1')
del a
def test_ufunc_reduce_memoryleak(self):
a = np.arange(6)
acnt = sys.getrefcount(a)
res = np.add.reduce(a)
assert_equal(sys.getrefcount(a), acnt)
def test_search_sorted_invalid_arguments(self):
# Ticket #2021, should not segfault.
x = np.arange(0, 4, dtype='datetime64[D]')
assert_raises(TypeError, x.searchsorted, 1)
def test_string_truncation(self):
# Ticket #1990 - Data can be truncated in creation of an array from a
# mixed sequence of numeric values and strings
for val in [True, 1234, 123.4, complex(1, 234)]:
for tostr in [asunicode, asbytes]:
b = np.array([val, tostr('xx')])
assert_equal(tostr(b[0]), tostr(val))
b = np.array([tostr('xx'), val])
assert_equal(tostr(b[1]), tostr(val))
# test also with longer strings
b = np.array([val, tostr('xxxxxxxxxx')])
assert_equal(tostr(b[0]), tostr(val))
b = np.array([tostr('xxxxxxxxxx'), val])
assert_equal(tostr(b[1]), tostr(val))
def test_string_truncation_ucs2(self):
# Ticket #2081. Python compiled with two byte unicode
# can lead to truncation if itemsize is not properly
# adjusted for Numpy's four byte unicode.
if sys.version_info[0] >= 3:
a = np.array(['abcd'])
else:
a = np.array([u'abcd'])
assert_equal(a.dtype.itemsize, 16)
def test_unique_stable(self):
# Ticket #2063 must always choose stable sort for argsort to
# get consistent results
v = np.array(([0]*5 + [1]*6 + [2]*6)*4)
res = np.unique(v, return_index=True)
tgt = (np.array([0, 1, 2]), np.array([ 0, 5, 11]))
assert_equal(res, tgt)
def test_unicode_alloc_dealloc_match(self):
# Ticket #1578, the mismatch only showed up when running
# python-debug for python versions >= 2.7, and then as
# a core dump and error message.
a = np.array(['abc'], dtype=np.unicode)[0]
del a
def test_maskna_deallocation(self):
# This caused a segfault when running under python-debug
a = np.array([1]).view(maskna=True)
del a
if __name__ == "__main__":
run_module_suite()
[12,9,9])
def test_searchsorted_variable_length(self, level=rlevel):
x = np.array(['a','aa','b'])
y = np.array(['d','e'])
assert_equal(x.searchsorted(y), [3,3])
def test_string_argsort_with_zeros(self, level=rlevel):
"""Check argsort for strings containing zeros."""
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
assert_array_equal(x.argsort(kind='m'), np.array([1,0]))
assert_array_equal(x.argsort(kind='q'), np.array([1,0]))
def test_string_sort_with_zeros(self, level=rlevel):
"""Check sort for strings containing zeros."""
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
y = np.fromstring("\x00\x01\x00\x02", dtype="|S2")
assert_array_equal(np.sort(x, kind="q"), y)
def test_copy_detection_zero_dim(self, level=rlevel):
"""Ticket #658"""
np.indices((0,3,4)).T.reshape(-1,3)
def test_flat_byteorder(self, level=rlevel):
"""Ticket #657"""
x = np.arange(10)
assert_array_equal(x.astype('>i4'),x.astype('<i4').flat[:])
assert_array_equal(x.astype('>i4').flat[:],x.astype('<i4'))
def test_uint64_from_negative(self, level=rlevel) :
assert_equal(np.uint64(-2), np.uint64(18446744073709551614))
def test_sign_bit(self, level=rlevel):
x = np.array([0,-0.0,0])
assert_equal(str(np.abs(x)),'[ 0. 0. 0.]')
def test_flat_index_byteswap(self, level=rlevel):
for dt in (np.dtype('<i4'),np.dtype('>i4')):
x = np.array([-1,0,1],dtype=dt)
assert_equal(x.flat[0].dtype, x[0].dtype)
def test_copy_detection_corner_case(self, level=rlevel):
"""Ticket #658"""
np.indices((0,3,4)).T.reshape(-1,3)
def test_copy_detection_corner_case2(self, level=rlevel):
"""Ticket #771: strides are not set correctly when reshaping 0-sized
arrays"""
b = np.indices((0,3,4)).T.reshape(-1,3)
assert_equal(b.strides, (3 * b.itemsize, b.itemsize))
def test_object_array_refcounting(self, level=rlevel):
"""Ticket #633"""
if not hasattr(sys, 'getrefcount'):
return
cnt = sys.getrefcount
a = object()
b = object()
c = object()
cnt0_a = cnt(a)
cnt0_b = cnt(b)
cnt0_c = cnt(c)
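        # -- 0d -> 1-d broadcast slice assignment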
arr = np.zeros(5, dtype=np.object_)
arr[:] = a
assert_equal(cnt(a), cnt0_a + 5)
arr[:] = b
assert_equal(cnt(a), cnt0_a)
assert_equal(cnt(b), cnt0_b + 5)
arr[:2] = c
assert_equal(cnt(b), cnt0_b + 3)
assert_equal(cnt(c), cnt0_c + 2)
del arr
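        # -- 1-d -> 2-d broadcast slice assignment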
arr = np.zeros((5, 2), dtype=np.object_)
arr0 = np.zeros(2, dtype=np.object_)
arr0[0] = a
assert_(cnt(a) == cnt0_a + 1)
arr0[1] = b
assert_(cnt(b) == cnt0_b + 1)
arr[:,:] = arr0
assert_(cnt(a) == cnt0_a + 6)
assert_(cnt(b) == cnt0_b + 6)
arr[:,0] = None
assert_(cnt(a) == cnt0_a + 1)
del arr, arr0
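        # -- 2-d copying and flattening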
arr = np.zeros((5, 2), dtype=np.object_)
arr[:,0] = a
arr[:,1] = b
assert_(cnt(a) == cnt0_a + 5)
assert_(cnt(b) == cnt0_b + 5)
arr2 = arr.copy()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 10)
arr2 = arr[:,0].copy()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 5)
arr2 = arr.flatten()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 10)
del arr, arr2
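        # -- concatenate, repeat, take, choose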
arr1 = np.zeros((5, 1), dtype=np.object_)
arr2 = np.zeros((5, 1), dtype=np.object_)
arr1[...] = a
arr2[...] = b
assert_(cnt(a) == cnt0_a + 5)
assert_(cnt(b) == cnt0_b + 5)
arr3 = np.concatenate((arr1, arr2))
assert_(cnt(a) == cnt0_a + 5 + 5)
assert_(cnt(b) == cnt0_b + 5 + 5)
arr3 = arr1.repeat(3, axis=0)
assert_(cnt(a) == cnt0_a + 5 + 3*5)
arr3 = arr1.take([1,2,3], axis=0)
assert_(cnt(a) == cnt0_a + 5 + 3)
x = np.array([[0],[1],[0],[1],[1]], int)
arr3 = x.choose(arr1, arr2)
assert_(cnt(a) == cnt0_a + 5 + 2)
assert_(cnt(b) == cnt0_b + 5 + 3)
def test_mem_custom_float_to_array(self, level=rlevel):
"""Ticket 702"""
class MyFloat(object):
def __float__(self):
return 1.0
tmp = np.atleast_1d([MyFloat()])
tmp2 = tmp.astype(float)
def test_object_array_refcount_self_assign(self, level=rlevel):
"""Ticket #711"""
class VictimObject(object):
deleted = False
def __del__(self):
self.deleted = True
d = VictimObject()
arr = np.zeros(5, dtype=np.object_)
arr[:] = d
del d
arr[:] = arr
assert_(not arr[0].deleted)
arr[:] = arr
assert_(not arr[0].deleted)
def test_mem_fromiter_invalid_dtype_string(self, level=rlevel):
x = [1,2,3]
self.assertRaises(ValueError,
np.fromiter, [xi for xi in x], dtype='S')
def test_reduce_big_object_array(self, level=rlevel):
"""Ticket #713"""
oldsize = np.setbufsize(10*16)
a = np.array([None]*161, object)
assert_(not np.any(a))
np.setbufsize(oldsize)
def test_mem_0d_array_index(self, level=rlevel):
"""Ticket #714"""
np.zeros(10)[np.array(0)]
def test_floats_from_string(self, level=rlevel):
"""Ticket #640, floats from string"""
fsingle = np.single('1.234')
fdouble = np.double('1.234')
flongdouble = np.longdouble('1.234')
assert_almost_equal(fsingle, 1.234)
assert_almost_equal(fdouble, 1.234)
assert_almost_equal(flongdouble, 1.234)
def test_nonnative_endian_fill(self, level=rlevel):
""" Non-native endian arrays were incorrectly filled with scalars before
r5034.
"""
if sys.byteorder == 'little':
dtype = np.dtype('>i4')
else:
dtype = np.dtype('<i4')
x = np.empty([1], dtype=dtype)
x.fill(1)
assert_equal(x, np.array([1], dtype=dtype))
def test_dot_alignment_sse2(self, level=rlevel):
"""Test for ticket #551, changeset r5140"""
x = np.zeros((30,40))
y = pickle.loads(pickle.dumps(x))
z = np.ones((1, y.shape[0]))
np.dot(z, y)
def test_astype_copy(self, level=rlevel):
"""Ticket #788, changeset r5155"""
# The test data file was generated by scipy.io.savemat.
# The dtype is float64, but the isbuiltin attribute is 0.
data_dir = path.join(path.dirname(__file__), 'data')
filename = path.join(data_dir, "astype_copy.pkl")
if sys.version_info[0] >= 3:
f = open(filename, 'rb')
xp = pickle.load(f, encoding='latin1')
f.close()
else:
f = open(filename)
xp = pickle.load(f)
f.close()
xpd = xp.astype(np.float64)
assert_((xp.__array_interface__['data'][0] !=
xpd.__array_interface__['data'][0]))
def test_compress_small_type(self, level=rlevel):
"""Ticket #789, changeset 5217.
"""
# compress with out argument segfaulted if cannot cast safely
import numpy as np
a = np.array([[1, 2], [3, 4]])
b = np.zeros((2, 1), dtype = np.single)
try:
a.compress([True, False], axis = 1, out = b)
raise AssertionError("compress with an out which cannot be " \
"safely casted should not return "\
"successfully")
except TypeError:
pass
def test_attributes(self, level=rlevel):
"""Ticket #791
"""
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, 'info', '')
dat = TestArray([[1,2,3,4],[5,6,7,8]],'jubba')
assert_(dat.info == 'jubba')
dat.resize((4,2))
assert_(dat.info == 'jubba')
dat.sort()
assert_(dat.info == 'jubba')
dat.fill(2)
assert_(dat.info == 'jubba')
dat.put([2,3,4],[6,3,4])
assert_(dat.info == 'jubba')
dat.setfield(4, np.int32,0)
assert_(dat.info == 'jubba')
dat.setflags()
assert_(dat.info == 'jubba')
assert_(dat.all(1).info == 'jubba')
assert_(dat.any(1).info == 'jubba')
assert_(dat.argmax(1).info == 'jubba')
assert_(dat.argmin(1).info == 'jubba')
assert_(dat.argsort(1).info == 'jubba')
assert_(dat.astype(TestArray).info == 'jubba')
assert_(dat.byteswap().info == 'jubba')
assert_(dat.clip(2,7).info == 'jubba')
assert_(dat.compress([0,1,1]).info == 'jubba')
assert_(dat.conj().info == 'jubba')
assert_(dat.conjugate().info == 'jubba')
assert_(dat.copy().info == 'jubba')
dat2 = TestArray([2, 3, 1, 0],'jubba')
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
assert_(dat2.choose(choices).info == 'jubba')
assert_(dat.cumprod(1).info == 'jubba')
assert_(dat.cumsum(1).info == 'jubba')
assert_(dat.diagonal().info == 'jubba')
assert_(dat.flatten().info == 'jubba')
assert_(dat.getfield(np.int32,0).info == 'jubba')
assert_(dat.imag.info == 'jubba')
assert_(dat.max(1).info == 'jubba')
assert_(dat.mean(1).info == 'jubba')
assert_(dat.min(1).info == 'jubba')
assert_(dat.newbyteorder().info == 'jubba')
assert_(dat.nonzero()[0].info == 'jubba')
assert_(dat.nonzero()[1].info == 'jubba')
assert_(dat.prod(1).info == 'jubba')
assert_(dat.ptp(1).info == 'jubba')
assert_(dat.ravel().info == 'jubba')
assert_(dat.real.info == 'jubba')
assert_(dat.repeat(2).info == 'jubba')
assert_(dat.reshape((2,4)).info == 'jubba')
assert_(dat.round().info == 'jubba')
assert_(dat.squeeze().info == 'jubba')
assert_(dat.std(1).info == 'jubba')
assert_(dat.sum(1).info == 'jubba')
assert_(dat.swapaxes(0,1).info == 'jubba')
assert_(dat.take([2,3,5]).info == 'jubba')
assert_(dat.transpose().info == 'jubba')
assert_(dat.T.info == 'jubba')
assert_(dat.var(1).info == 'jubba')
assert_(dat.view(TestArray).info == 'jubba')
def test_recarray_tolist(self, level=rlevel):
"""Ticket #793, changeset r5215
"""
# Comparisons fail for NaN, so we can't use random memory
buf = np.zeros(40, dtype=np.int8)
a = np.recarray(2, formats="i4,f8,f8", names="id,x,y", buf=buf)
b = a.tolist()
assert_( a[0].tolist() == b[0])
assert_( a[1].tolist() == b[1])
def test_char_array_creation(self, level=rlevel):
a = np.array('123', dtype='c')
b = np.array(asbytes_nested(['1','2','3']))
assert_equal(a,b)
def test_unaligned_unicode_access(self, level=rlevel) :
"""Ticket #825"""
for i in range(1,9) :
msg = 'unicode offset: %d chars'%i
t = np.dtype([('a','S%d'%i),('b','U2')])
x = np.array([(asbytes('a'),u'b')], dtype=t)
if sys.version_info[0] >= 3:
assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg)
else:
assert_equal(str(x), "[('a', u'b')]", err_msg=msg)
def test_sign_for_complex_nan(self, level=rlevel):
"""Ticket 794."""
olderr = np.seterr(invalid='ignore')
try:
C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])
have = np.sign(C)
want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])
assert_equal(have, want)
finally:
np.seterr(**olderr)
def test_for_equal_names(self, level=rlevel):
"""Ticket #674"""
dt = np.dtype([('foo', float), ('bar', float)])
a = np.zeros(10, dt)
b = list(a.dtype.names)
b[0] = "notfoo"
a.dtype.names = b
assert_(a.dtype.names[0] == "notfoo")
assert_(a.dtype.names[1] == "bar")
def test_for_object_scalar_creation(self, level=rlevel):
"""Ticket #816"""
a = np.object_()
b = np.object_(3)
b2 = np.object_(3.0)
c = np.object_([4,5])
d = np.object_([None, {}, []])
assert_(a is None)
assert_(type(b) is int)
assert_(type(b2) is float)
assert_(type(c) is np.ndarray)
assert_(c.dtype == object)
assert_(d.dtype == object)
def test_array_resize_method_system_error(self):
"""Ticket #840 - order should be an invalid keyword."""
x = np.array([[0,1],[2,3]])
self.assertRaises(TypeError, x.resize, (2,2), order='C')
def test_for_zero_length_in_choose(self, level=rlevel):
"Ticket #882"
a = np.array(1)
self.assertRaises(ValueError, lambda x: x.choose([]), a)
def test_array_ndmin_overflow(self):
"Ticket #947."
self.assertRaises(ValueError, lambda: np.array([1], ndmin=33))
def test_errobj_reference_leak(self, level=rlevel):
"""Ticket #955"""
old_err = np.seterr(all="ignore")
try:
z = int(0)
p = np.int32(-1)
gc.collect()
n_before = len(gc.get_objects())
z**p
gc.collect()
n_after = len(gc.get_objects())
assert_(n_before >= n_after, (n_before, n_after))
finally:
np.seterr(**old_err)
def test_void_scalar_with_titles(self, level=rlevel):
"""No ticket"""
data = [('john', 4), ('mary', 5)]
dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]
arr = np.array(data, dtype=dtype1)
assert_(arr[0][0] == 'john')
assert_(arr[0][1] == 4)
def test_blasdot_uninitialized_memory(self):
"""Ticket #950"""
for m in [0, 1, 2]:
for n in [0, 1, 2]:
for k in xrange(3):
# Try to ensure that x->data contains non-zero floats
x = np.array([123456789e199], dtype=np.float64)
x.resize((m, 0))
y = np.array([123456789e199], dtype=np.float64)
y.resize((0, n))
# `dot` should just return zero (m,n) matrix
z = np.dot(x, y)
assert_(np.all(z == 0))
assert_(z.shape == (m, n))
def test_zeros(self):
"""Regression test for #1061."""
# Set a size which cannot fit into a 64 bits signed integer
sz = 2 ** 64
good = 'Maximum allowed dimension exceeded'
try:
np.empty(sz)
except ValueError, e:
if not str(e) == good:
self.fail("Got msg '%s', expected '%s'" % (e, good))
except Exception, e:
self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_huge_arange(self):
"""Regression test for #1062."""
# Set a size which cannot fit into a 64 bits signed integer
sz = 2 ** 64
good = 'Maximum allowed size exceeded'
try:
a = np.arange(sz)
            self.assertTrue(a.size == sz)
except ValueError, e:
if not str(e) == good:
self.fail("Got msg '%s', expected '%s'" % (e, good))
except Exception, e:
self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_fromiter_bytes(self):
"""Ticket #1058"""
a = np.fromiter(range(10), dtype='b')
b = np.fromiter(range(10), dtype='B')
assert_(np.alltrue(a == np.array([0,1,2,3,4,5,6,7,8,9])))
assert_(np.alltrue(b == np.array([0,1,2,3,4,5,6,7,8,9])))
def test_array_from_sequence_scalar_array(self):
"""Ticket #1078: segfaults when creating an array with a sequence of 0d
arrays."""
a = np.array((np.ones(2), np.array(2)))
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], np.ones(2))
assert_equal(a[1], np.array(2))
a = np.array(((1,), np.array(1)))
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], (1,))
assert_equal(a[1], np.array(1))
def test_array_from_sequence_scalar_array2(self):
"""Ticket #1081: weird array with strange input..."""
t = np.array([np.array([]), np.array(0, object)])
assert_equal(t.shape, (2,))
assert_equal(t.dtype, np.dtype(object))
def test_array_too_big(self):
"""Ticket #1080."""
assert_raises(ValueError, np.zeros, [975]*7, np.int8)
assert_raises(ValueError, np.zeros, [26244]*5, np.int8)
def test_dtype_keyerrors_(self):
"""Ticket #1106."""
dt = np.dtype([('f1', np.uint)])
assert_raises(KeyError, dt.__getitem__, "f2")
assert_raises(IndexError, dt.__getitem__, 1)
assert_raises(ValueError, dt.__getitem__, 0.0)
def test_lexsort_buffer_length(self):
"""Ticket #1217, don't segfault."""
a = np.ones(100, dtype=np.int8)
b = np.ones(100, dtype=np.int32)
i = np.lexsort((a[::-1], b))
assert_equal(i, np.arange(100, dtype=np.int))
def test_object_array_to_fixed_string(self):
"""Ticket #1235."""
a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_)
b = np.array(a, dtype=(np.str_, 8))
assert_equal(a, b)
c = np.array(a, dtype=(np.str_, 5))
assert_equal(c, np.array(['abcde', 'ijklm']))
d = np.array(a, dtype=(np.str_, 12))
assert_equal(a, d)
e = np.empty((2, ), dtype=(np.str_, 8))
e[:] = a[:]
assert_equal(a, e)
def test_unicode_to_string_cast(self):
"""Ticket #1240."""
a = np.array([[u'abc', u'\u03a3'], [u'asdf', u'erw']], dtype='U')
def fail():
b = np.array(a, 'S4')
self.assertRaises(UnicodeEncodeError, fail)
def test_mixed_string_unicode_array_creation(self):
a = np.array(['1234', u'123'])
assert_(a.itemsize == 16)
a = np.array([u'123', '1234'])
assert_(a.itemsize == 16)
a = np.array(['1234', u'123', '12345'])
assert_(a.itemsize == 20)
a = np.array([u'123', '1234', u'12345'])
assert_(a.itemsize == 20)
a = np.array([u'123', '1234', u'1234'])
assert_(a.itemsize == 16)
def test_misaligned_objects_segfault(self):
"""Ticket #1198 and #1267"""
a1 = np.zeros((10,), dtype='O,c')
a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10')
a1['f0'] = a2
r = repr(a1)
np.argmax(a1['f0'])
a1['f0'][1] = "FOO"
a1['f0'] = "FOO"
a3 = np.array(a1['f0'], dtype='S')
np.nonzero(a1['f0'])
a1.sort()
a4 = copy.deepcopy(a1)
def test_misaligned_scalars_segfault(self):
"""Ticket #1267"""
s1 = np.array(('a', 'Foo'), dtype='c,O')
s2 = np.array(('b', 'Bar'), dtype='c,O')
s1['f1'] = s2['f1']
s1['f1'] = 'Baz'
def test_misaligned_dot_product_objects(self):
"""Ticket #1267"""
a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c')
b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c')
np.dot(a['f0'], b['f0'])
def test_byteswap_complex_scalar(self):
"""Ticket #1259"""
z = np.array([-1j], '<c8')
x = z[0]
y = x.byteswap()
if x.dtype.byteorder == z.dtype.byteorder:
assert_equal(x, np.fromstring(y.tostring(), dtype='>c8'))
else:
assert_equal(x, np.fromstring(y.tostring(), dtype='<c8'))
def test_structured_arrays_with_objects1(self):
"""Ticket #1299"""
stra = 'aaaa'
strb = 'bbbb'
x = np.array([[(0,stra),(1,strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert_(x[0,1] == x[0,0])
def test_structured_arrays_with_objects2(self):
"""Ticket #1299 second test"""
stra = 'aaaa'
strb = 'bbbb'
numb = sys.getrefcount(strb)
numa = sys.getrefcount(stra)
x = np.array([[(0,stra),(1,strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert_(sys.getrefcount(strb) == numb)
assert_(sys.getrefcount(stra) == numa + 2)
def test_duplicate_title_and_name(self):
"""Ticket #1254"""
def func():
x = np.dtype([(('a', 'a'), 'i'), ('b', 'i')])
self.assertRaises(ValueError, func)
def test_signed_integer_division_overflow(self):
"""Ticket #1317."""
def test_type(t):
min = np.array([np.iinfo(t).min])
min //= -1
old_err = np.seterr(divide="ignore")
try:
for t in (np.int8, np.int16, np.int32, np.int64, np.int, np.long):
test_type(t)
finally:
np.seterr(**old_err)
def test_buffer_hashlib(self):
try:
from hashlib import md5
except ImportError:
from md5 import new as md5
x = np.array([1,2,3], dtype=np.dtype('<i4'))
assert_equal(md5(x).hexdigest(), '2a1dd1e1e59d0a384c26951e316cd7e6')
def test_numeric_handleError(self):
"""Ticket #1405"""
from numpy import numarray
numarray.handleError(0, "")
def test_0d_string_scalar(self):
# Bug #1436; the following should succeed
np.asarray('x', '>c')
def test_log1p_compiler_shenanigans(self):
# Check if log1p is behaving on 32 bit intel systems.
assert_(np.isfinite(np.log1p(np.exp2(-53))))
def test_fromiter_comparison(self, level=rlevel):
a = np.fromiter(range(10), dtype='b')
b = np.fromiter(range(10), dtype='B')
assert_(np.alltrue(a == np.array([0,1,2,3,4,5,6,7,8,9])))
assert_(np.alltrue(b == np.array([0,1,2,3,4,5,6,7,8,9])))
def test_fromstring_crash(self):
# Ticket #1345: the following should not cause a crash
np.fromstring(asbytes('aa, aa, 1.0'), sep=',')
def test_ticket_1539(self):
dtypes = [x for x in np.typeDict.values()
if (issubclass(x, np.number)
and not issubclass(x, np.timedelta64))]
a = np.array([], dtypes[0])
failures = []
for x in dtypes:
b = a.astype(x)
for y in dtypes:
c = a.astype(y)
try:
np.dot(b, c)
except TypeError, e:
failures.append((x, y))
if failures:
raise AssertionError("Failures: %r" % failures)
def test_ticket_1538(self):
x = np.finfo(np.float32)
for name in 'eps epsneg max min resolution tiny'.split():
assert_equal(type(getattr(x, name)), np.float32,
err_msg=name)
def test_ticket_1434(self):
# Check that the out= argument in var and std has an effect
data = np.array(((1,2,3),(4,5,6),(7,8,9)))
out = np.zeros((3,))
ret = data.var(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.var(axis=1))
ret = data.std(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.std(axis=1))
def test_complex_nan_maximum(self):
cnan = complex(0, np.nan)
assert_equal(np.maximum(1, cnan), cnan)
def test_subclass_int_tuple_assignment(self):
# ticket #1563
class Subclass(np.ndarray):
def __new__(cls,i):
return np.ones((i,)).view(cls)
x = Subclass(5)
x[(0,)] = 2 # shouldn't raise an exception
assert_equal(x[0], 2)
def test_ufunc_no_unnecessary_views(self):
class Subclass(np.ndarray):
pass
x = np.array([1,2,3]).view(Subclass)
y = np.add(x, x, x)
assert_equal(id(x), id(y))
def test_take_refcount(self):
a = np.arange(16, dtype=np.float)
a.shape = (4,4)
lut = np.ones((5 + 3, 4), np.float)
rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)
c1 = sys.getrefcount(rgba)
try:
lut.take(a, axis=0, mode='clip', out=rgba)
except TypeError:
pass
c2 = sys.getrefcount(rgba)
assert_equal(c1, c2)
def test_fromfile_tofile_seeks(self):
f0 = tempfile.NamedTemporaryFile()
f = f0.file
f.write(np.arange(255, dtype='u1').tostring())
f.seek(20)
ret = np.fromfile(f, count=4, dtype='u1')
assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1'))
assert_equal(f.tell(), 24)
f.seek(40)
np.array([1, 2, 3], dtype='u1').tofile(f)
assert_equal(f.tell(), 43)
f.seek(40)
data = f.read(3)
assert_equal(data, asbytes("\x01\x02\x03"))
f.seek(80)
f.read(4)
data = np.fromfile(f, dtype='u1', count=4)
assert_equal(data, np.array([84, 85, 86, 87], dtype='u1'))
f.close()
def test_complex_scalar_warning(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_warns(np.ComplexWarning, float, x)
warn_ctx = WarningManager()
warn_ctx.__enter__()
try:
warnings.simplefilter('ignore')
assert_equal(float(x), float(x.real))
finally:
warn_ctx.__exit__()
def test_complex_scalar_complex_cast(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_equal(complex(x), 1+2j)
def test_uint_int_conversion(self):
x = 2**64 - 1
assert_equal(int(np.uint64(x)), x)
def test_duplicate_field_names_assign(self):
ra = np.fromiter(((i*3, i*2) for i in xrange(10)), dtype='i8,f8')
ra.dtype.names = ('f1', 'f2')
rep = repr(ra)
assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1'))
def test_eq_string_and_object_array(self):
a1 = np.array(['a', 'b'], dtype=object)
a2 = np.array(['a', 'c'])
assert_array_equal(a1 == a2, [True, False])
assert_array_equal(a2 == a1, [True, False])
def test_nonzero_byteswap(self):
a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32)
a.dtype = np.float32
assert_equal(a.nonzero()[0], [1])
a = a.byteswap().newbyteorder()
assert_equal(a.nonzero()[0], [1])
def test_find_common_type_boolean(self):
assert_(np.find_common_type([],['?','?']) == '?')
def test_empty_mul(self):
a = np.array([1.])
a[1:1] *= 2
assert_equal(a, [1.])
def test_array_side_effect(self):
assert_equal(np.dtype('S10').itemsize, 10)
A = np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_)
assert_equal(np.dtype('S10').itemsize, 10)
def test_any_float(self):
a = np.array([0.1, 0.9])
assert_(np.any(a))
assert_(np.all(a))
def test_large_float_sum(self):
a = np.arange(10000, dtype='f')
assert_equal(a.sum(dtype='d'), a.astype('d').sum())
def test_ufunc_casting_out(self):
a = np.array(1.0, dtype=np.float32)
b = np.array(1.0, dtype=np.float64)
c = np.array(1.0, dtype=np.float32)
np.add(a, b, out=c)
assert_equal(c, 2.0)
def test_array_scalar_contiguous(self):
assert_(np.array(1.0).flags.c_contiguous)
assert_(np.array(1.0).flags.f_contiguous)
assert_(np.array(np.float32(1.0)).flags.c_contiguous)
assert_(np.array(np.float32(1.0)).flags.f_contiguous)
def test_object_array_self_reference(self):
a = np.array(0, dtype=object)
a[()] = a
assert_raises(TypeError, int, a)
assert_raises(TypeError, long, a)
assert_raises(TypeError, float, a)
assert_raises(TypeError, oct, a)
assert_raises(TypeError, hex, a)
a = np.array(0, dtype=object)
a[...] += 1
assert_equal(a, 1)
def test_zerosize_accumulate(self):
"Ticket #1733"
x = np.array([[42, 0]], dtype=np.uint32)
assert_equal(np.add.accumulate(x[:-1,0]), [])
def test_objectarray_setfield(self):
x = np.array([1,2,3], dtype=object)
assert_raises(RuntimeError, x.setfield, 4, np.int32, 0)
def test_setting_rank0_string(self):
"Ticket #1736"
s1 = asbytes("hello1")
s2 = asbytes("hello2")
a = np.zeros((), dtype="S10")
a[()] = s1
assert_equal(a, np.array(s1))
a[()] = np.array(s2)
assert_equal(a, np.array(s2))
a = np.zeros((), dtype='f4')
a[()] = 3
assert_equal(a, np.array(3))
a[()] = np.array(4)
assert_equal(a, np.array(4))
def test_string_astype(self):
"Ticket #1748"
s1 = asbytes('black')
s2 = asbytes('white')
s3 = asbytes('other')
a = np.array([[s1],[s2],[s3]])
assert_equal(a.dtype, np.dtype('S5'))
b = a.astype(np.dtype('S0'))
assert_equal(b.dtype, np.dtype('S5'))
def test_ticket_1756(self):
"""Ticket #1756 """
s = asbytes('0123456789abcdef')
a = np.array([s]*5)
for i in range(1,17):
a1 = np.array(a, "|S%d"%i)
a2 = np.array([s[:i]]*5)
assert_equal(a1, a2)
def test_fields_strides(self):
"Ticket #1760"
r=np.fromstring('abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2')
assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2])
assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1'])
assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()])
assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides)
def test_ticket_1770(self):
"Should not segfault on python 3k"
import numpy as np
try:
a = np.zeros((1,), dtype=[('f1', 'f')])
a['f1'] = 1
a['f2'] = 1
except ValueError:
pass
except:
raise AssertionError
def test_ticket_1608(self):
"x.flat shouldn't modify data"
x = np.array([[1,2],[3,4]]).T
y = np.array(x.flat)
assert_equal(x, [[1,3],[2,4]])
def test_pickle_string_overwrite(self):
import re
data = np.array([1], dtype='b')
blob = pickle.dumps(data, protocol=1)
data = pickle.loads(blob)
# Check that loads does not clobber interned strings
s = re.sub("a(.)", "\x01\\1", "a_")
assert_equal(s[0], "\x01")
data[0] = 0xbb
s = re.sub("a(.)", "\x01\\1", "a_")
assert_equal(s[0], "\x01")
def test_structured_type_to_object(self):
a_rec = np.array([(0,1), (3,2)], dtype='i4,i8')
a_obj = np.empty((2,), dtype=object)
a_obj[0] = (0,1)
a_obj[1] = (3,2)
# astype records -> object
assert_equal(a_rec.astype(object), a_obj)
# '=' records -> object
b = np.empty_like(a_obj)
b[...] = a_rec
assert_equal(b, a_obj)
# '=' object -> records
b = np.empty_like(a_rec)
b[...] = a_obj
assert_equal(b, a_rec)
def test_assign_obj_listoflists(self):
# Ticket # 1870
# The inner list should get assigned to the object elements
a = np.zeros(4, dtype=object)
b = a.copy()
a[0] = [1]
a[1] = [2]
a[2] = [3]
a[3] = [4]
b[...] = [[1], [2], [3], [4]]
assert_equal(a, b)
# The first dimension should get broadcast
a = np.zeros((2,2), dtype=object)
a[...] = [[1,2]]
assert_equal(a, [[1,2], [1,2]])
def test_memoryleak(self):
# Ticket #1917 - ensure that array data doesn't leak
for i in range(1000):
a = np.empty((100000000,), dtype='i1')
del a
def test_ufunc_reduce_memoryleak(self):
a = np.arange(6)
acnt = sys.getrefcount(a)
res = np.add.reduce(a)
assert_equal(sys.getrefcount(a), acnt)
    def test_search_sorted_invalid_arguments(self):
        # searchsorted with an invalid argument should raise, not segfault
        x = np.arange(0, 4, dtype='datetime64[D]')
        assert_raises(TypeError, x.searchsorted, 1)
    def test_string_truncation(self):
        # casting mixed values to string arrays should not truncate them
        for val in [True, 1234, 123.4, complex(1, 234)]:
for tostr in [asunicode, asbytes]:
b = np.array([val, tostr('xx')])
assert_equal(tostr(b[0]), tostr(val))
b = np.array([tostr('xx'), val])
assert_equal(tostr(b[1]), tostr(val))
b = np.array([val, tostr('xxxxxxxxxx')])
assert_equal(tostr(b[0]), tostr(val))
b = np.array([tostr('xxxxxxxxxx'), val])
assert_equal(tostr(b[1]), tostr(val))
    def test_string_truncation_ucs2(self):
        # unicode itemsize should be four bytes per character, even on
        # two-byte (UCS2) Python builds
        if sys.version_info[0] >= 3:
a = np.array(['abcd'])
else:
a = np.array([u'abcd'])
assert_equal(a.dtype.itemsize, 16)
def test_unique_stable(self):
# Ticket #2063 must always choose stable sort for argsort to
# get consistent results
v = np.array(([0]*5 + [1]*6 + [2]*6)*4)
res = np.unique(v, return_index=True)
tgt = (np.array([0, 1, 2]), np.array([ 0, 5, 11]))
assert_equal(res, tgt)
def test_unicode_alloc_dealloc_match(self):
# Ticket #1578, the mismatch only showed up when running
# python-debug for python versions >= 2.7, and then as
# a core dump and error message.
a = np.array(['abc'], dtype=np.unicode)[0]
del a
def test_maskna_deallocation(self):
# This caused a segfault when running under python-debug
a = np.array([1]).view(maskna=True)
del a
if __name__ == "__main__":
run_module_suite()
# Copyright 2018 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that exectation values are correctly computed in the plugin devices"""
import pytest
import numpy as np
import pennylane as qml
from contextlib import contextmanager
from conftest import U, U2, A, B
np.random.seed(42)
@contextmanager
def mimic_execution_for_expval(device):
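    """Reset the device and run the body inside its execution context;
    afterwards, regenerate samples for finite-shot devices."""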
device.reset()
with device.execution_context():
yield
    if device.shots is not None:
device._samples = device.generate_samples()
@pytest.mark.parametrize("shots", [None, 8192])
class TestExpval:
"""Test expectation values"""
def test_identity_expectation(self, device, shots, tol):
"""Test that identity expectation value (i.e. the trace) is 1"""
theta = 0.432
phi = 0.123
dev = device(2)
with mimic_execution_for_expval(dev):
dev.apply(
[qml.RX(theta, wires=[0]), qml.RX(phi, wires=[1]), qml.CNOT(wires=[0, 1]),]
)
O = qml.Identity
name = "Identity"
dev._obs_queue = [O(wires=[0], do_queue=False), O(wires=[1], do_queue=False)]
res = np.array(
[dev.expval(O(wires=[0], do_queue=False)), dev.expval(O(wires=[1], do_queue=False)),]
)
assert np.allclose(res, np.array([1, 1]), **tol)
def test_pauliz_expectation(self, device, shots, tol):
"""Test that PauliZ expectation value is correct"""
theta = 0.432
phi = 0.123
dev = device(2)
with mimic_execution_for_expval(dev):
dev.apply(
[qml.RX(theta, wires=[0]), qml.RX(phi, wires=[1]), qml.CNOT(wires=[0, 1]),]
)
O = qml.PauliZ
name = "PauliZ"
dev._obs_queue = [O(wires=[0], do_queue=False), O(wires=[1], do_queue=False)]
res = np.array(
[dev.expval(O(wires=[0], do_queue=False)), dev.expval(O(wires=[1], do_queue=False)),]
)
assert np.allclose(res, np.array([np.cos(theta), np.cos(theta) * np.cos(phi)]), **tol)
def test_paulix_expectation(self, device, shots, tol):
"""Test that PauliX expectation value is correct"""
theta = 0.432
phi = 0.123
dev = device(2)
O = qml.PauliX
with mimic_execution_for_expval(dev):
dev.apply(
[qml.RY(theta, wires=[0]), qml.RY(phi, wires=[1]), qml.CNOT(wires=[0, 1]),],
rotations=O(wires=[0], do_queue=False).diagonalizing_gates()
+ O(wires=[1], do_queue=False).diagonalizing_gates(),
)
dev._obs_queue = [O(wires=[0], do_queue=False), O(wires=[1], do_queue=False)]
res = np.array(
[dev.expval(O(wires=[0], do_queue=False)), dev.expval(O(wires=[1], do_queue=False)),]
)
assert np.allclose(res, np.array([np.sin(theta) * np.sin(phi), np.sin(phi)]), **tol)
def test_pauliy_expectation(self, device, shots, tol):
"""Test that PauliY expectation value is correct"""
theta = 0.432
phi = 0.123
dev = device(2)
O = qml.PauliY
with mimic_execution_for_expval(dev):
dev.apply(
[qml.RX(theta, wires=[0]), qml.RX(phi, wires=[1]), qml.CNOT(wires=[0, 1]),],
rotations=O(wires=[0], do_queue=False).diagonalizing_gates()
+ O(wires=[1], do_queue=False).diagonalizing_gates(),
)
dev._obs_queue = [O(wires=[0], do_queue=False), O(wires=[1], do_queue=False)]
res = np.array(
[dev.expval(O(wires=[0], do_queue=False)), dev.expval(O(wires=[1], do_queue=False)),]
)
assert np.allclose(res, np.array([0, -(np.cos(theta)) * np.sin(phi)]), **tol)
def test_hadamard_expectation(self, device, shots, tol):
"""Test that Hadamard expectation value is correct"""
theta = 0.432
phi = 0.123
dev = device(2)
O = qml.Hadamard
with mimic_execution_for_expval(dev):
dev.apply(
[qml.RY(theta, wires=[0]), qml.RY(phi, wires=[1]), qml.CNOT(wires=[0, 1]),],
rotations=O(wires=[0], do_queue=False).diagonalizing_gates()
+ O(wires=[1], do_queue=False).diagonalizing_gates(),
)
dev._obs_queue = [O(wires=[0], do_queue=False), O(wires=[1], do_queue=False)]
res = np.array(
[dev.expval(O(wires=[0], do_queue=False)), dev.expval(O(wires=[1], do_queue=False)),]
)
expected = np.array(
[
np.sin(theta) * np.sin(phi) + np.cos(theta),
np.cos(theta) * np.cos(phi) + np.sin(phi),
]
) / np.sqrt(2)
assert np.allclose(res, expected, **tol)
def test_hermitian_expectation(self, device, shots, tol):
"""Test that arbitrary Hermitian expectation values are correct"""
theta = 0.432
phi = 0.123
dev = device(2)
O = qml.Hermitian
with mimic_execution_for_expval(dev):
dev.apply(
[qml.RY(theta, wires=[0]), qml.RY(phi, wires=[1]), qml.CNOT(wires=[0, 1]),],
rotations=O(A, wires=[0], do_queue=False).diagonalizing_gates()
+ O(A, wires=[1], do_queue=False).diagonalizing_gates(),
)
dev._obs_queue = [
O(A, wires=[0], do_queue=False),
O(A, wires=[1], do_queue=False),
]
res = np.array(
[
dev.expval(O(A, wires=[0], do_queue=False)),
dev.expval(O(A, wires=[1], do_queue=False)),
]
)
a = A[0, 0]
re_b = A[0, 1].real
d = A[1, 1]
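        # analytic expectation values of A on wires 0 and 1 after RY/RY + CNOT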
ev1 = ((a - d) * np.cos(theta) + 2 * re_b * np.sin(theta) * np.sin(phi) + a + d) / 2
ev2 = ((a - d) * np.cos(theta) * np.cos(phi) + 2 * re_b * np.sin(phi) + a + d) / 2
expected = np.array([ev1, ev2])
assert np.allclose(res, expected, **tol)
def test_multi_mode_hermitian_expectation(self, device, shots, tol):
"""Test that arbitrary multi-mode Hermitian expectation values are correct"""
theta = 0.432
phi = 0.123
dev = device(2)
O = qml.Hermitian
with mimic_execution_for_expval(dev):
dev.apply(
[qml.RY(theta, wires=[0]), qml.RY(phi, wires=[1]), qml.CNOT(wires=[0, 1]),],
rotations=O(B, wires=[0, 1], do_queue=False).diagonalizing_gates(),
)
dev._obs_queue = [O(B, wires=[0, 1], do_queue=False)]
res = np.array([dev.expval(O(B, wires=[0, 1], do_queue=False))])
# below is the analytic expectation value for this circuit with arbitrary
# Hermitian observable B
expected = 0.5 * (
6 * np.cos(theta) * np.sin(phi)
- np.sin(theta) * (8 * np.sin(phi) + 7 * np.cos(phi) + 3)
- 2 * np.sin(phi)
- 6 * np.cos(phi)
- 6
)
assert np.allclose(res, expected, **tol)
@pytest.mark.parametrize("shots", [None, 8192])
class TestTensorExpval:
"""Test tensor expectation values"""
def test_paulix_pauliy(self, device, shots, tol):
"""Test that a tensor product involving PauliX and PauliY works correctly"""
theta = 0.432
phi = 0.123
varphi = -0.543
dev = device(3)
obs = qml.PauliX(wires=[0], do_queue=False) @ qml.PauliY(wires=[2], do_queue=False)
with mimic_execution_for_expval(dev):
dev.apply(
[
qml.RX(theta, wires=[0]),
qml.RX(phi, wires=[1]),
qml.RX(varphi, wires=[2]),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[1, 2]),
],
rotations=obs.diagonalizing_gates(),
)
res = dev.expval(obs)
expected = np.sin(theta) * np.sin(phi) * np.sin(varphi)
assert np.allclose(res, expected, **tol)
def test_pauliz_hadamard(self, device, shots, tol):
"""Test that a tensor product involving PauliZ and PauliY and hadamard works correctly"""
theta = 0.432
phi = 0.123
varphi = -0.543
dev = device(3)
obs = (
qml.PauliZ(wires=[0], do_queue=False)
@ qml.Hadamard(wires=[1], do_queue=False)
@ qml.PauliY(wires=[2], do_queue=False)
)
with mimic_execution_for_expval(dev):
dev.apply(
[
qml.RX(theta, wires=[0]),
qml.RX(phi, wires=[1]),
qml.RX(varphi, wires=[2]),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[1, 2]),
],
rotations=obs.diagonalizing_gates(),
)
res = dev.expval(obs)
expected = -(np.cos(varphi) * np.sin(phi) + np.sin(varphi) * np.cos(theta)) / np.sqrt(2)
assert np.allclose(res, expected, **tol)
def test_hermitian(self, device, shots, tol):
"""Test that a tensor product involving qml.Hermitian works correctly"""
theta = 0.432
phi = 0.123
varphi = -0.543
dev = device(3)
A = np.array(
[
[-6, 2 + 1j, -3, -5 + 2j],
[2 - 1j, 0, 2 - 1j, -5 + 4j],
[-3, 2 + 1j, 0, -4 + 3j],
[-5 - 2j, -5 - 4j, -4 - 3j, -6],
]
)
obs = qml.PauliZ(wires=[0], do_queue=False) @ qml.Hermitian(A, wires=[1, 2], do_queue=False)
with mimic_execution_for_expval(dev):
dev.apply(
[
qml.RX(theta, wires=[0]),
qml.RX(phi, wires=[1]),
qml.RX(varphi, wires=[2]),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[1, 2]),
],
rotations=obs.diagonalizing_gates(),
)
res = dev.expval(obs)
expected = 0.5 * (
-6 * np.cos(theta) * (np.cos(varphi) + 1)
- 2 * np.sin(varphi) * (np.cos(theta) + np.sin(phi) - 2 * np.cos(phi))
+ 3 * np.cos(varphi) * np.sin(phi)
+ np.sin(phi)
)
assert np.allclose(res, expected, **tol)
# Copyright (c) 2014 eBay Software Foundation
# Copyright 2015 HP Software, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.project import dashboard
class Clusters(horizon.Panel):
name = _("Clusters")
slug = 'database_clusters'
permissions = ('openstack.services.database',
'openstack.services.object-store',)
dashboard.Project.register(Clusters)
import os as _os
from glob import glob as _glob
import functools as _functools
from concurrent.futures import ProcessPoolExecutor as _Executor
import tempfile as _tempfile
from six import string_types as _string_types
import tqdm as _tqdm
# expose these two exceptions as part of the API. Everything else should feed into these.
from .exceptions import ConversionError, InvalidArchiveError # NOQA
from .tarball import CondaTarBZ2 as _CondaTarBZ2, libarchive_enabled # NOQA
from .conda_fmt import CondaFormat_v2 as _CondaFormat_v2
from .utils import TemporaryDirectory as _TemporaryDirectory, rm_rf as _rm_rf
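# maps each supported archive extension to its format handler class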
SUPPORTED_EXTENSIONS = {'.tar.bz2': _CondaTarBZ2,
'.conda': _CondaFormat_v2}
def _collect_paths(prefix):
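    """Walk ``prefix`` and return file paths relative to it, plus any
    directories that contain no files, so empty directories are preserved."""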
dir_paths, file_paths = [], []
for dp, dn, filenames in _os.walk(prefix):
for f in filenames:
file_paths.append(_os.path.relpath(_os.path.join(dp, f), prefix))
dir_paths.extend(_os.path.relpath(_os.path.join(dp, _), prefix) for _ in dn)
file_list = file_paths + [dp for dp in dir_paths
if not any(f.startswith(dp) for f in file_paths)]
return file_list
def get_default_extracted_folder(in_file):
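    """Derive the default extraction folder from ``in_file`` by stripping a
    supported archive extension; None if the extension is unrecognized."""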
dirname = None
for ext in SUPPORTED_EXTENSIONS:
if in_file.endswith(ext):
dirname = _os.path.basename(in_file)[:-len(ext)]
if not _os.path.isabs(dirname):
dirname = _os.path.normpath(_os.path.join(_os.getcwd(), dirname))
return dirname
def extract(fn, dest_dir=None, components=None):
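    """Extract archive ``fn`` into ``dest_dir`` (created if needed), using
    the handler that matches the file extension."""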
if dest_dir:
if not _os.path.isabs(dest_dir):
dest_dir = _os.path.normpath(_os.path.join(_os.getcwd(), dest_dir))
if not _os.path.isdir(dest_dir):
_os.makedirs(dest_dir)
else:
dest_dir = get_default_extracted_folder(fn)
for ext in SUPPORTED_EXTENSIONS:
if fn.endswith(ext):
SUPPORTED_EXTENSIONS[ext].extract(fn, dest_dir, components=components)
break
else:
raise ValueError("Didn't recognize extension for file '{}'. Supported extensions are: {}"
.format(fn, list(SUPPORTED_EXTENSIONS.keys())))
def create(prefix, file_list, out_fn, out_folder=None, **kw):
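    """Create archive ``out_fn`` from files under ``prefix``. ``file_list``
    may be None (collect everything), a list of paths, or the path to a text
    file listing one entry per line; extra keywords go to the handler."""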
if not out_folder:
out_folder = _os.getcwd()
if file_list is None:
file_list = _collect_paths(prefix)
    elif isinstance(file_list, _string_types):
        # a path to a text file listing one entry per line
        with open(file_list) as f:
            file_list = [line.strip() for line in f.readlines()]
    for ext in SUPPORTED_EXTENSIONS:
        if out_fn.endswith(ext):
            try:
                out = SUPPORTED_EXTENSIONS[ext].create(prefix, file_list, out_fn, out_folder, **kw)
            except:
                # don't leave broken files around, then re-raise
                if _os.path.isfile(out_fn):
                    _rm_rf(out_fn)
                raise
            return out
def _convert(fn, out_ext, out_folder, **kw):
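    """Convert a single package file to ``out_ext`` by extracting and
    repacking it, then validating that the converted contents match;
    skips files whose output already exists. Returns
    (input filename, output filename, error string)."""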
basename = get_default_extracted_folder(fn)
from .validate import validate_converted_files_match
if not basename:
print("Input file %s doesn't have a supported extension (%s), skipping it"
% (fn, SUPPORTED_EXTENSIONS))
return
out_fn = _os.path.join(out_folder, basename + out_ext)
errors = ""
if not _os.path.lexists(out_fn):
with _TemporaryDirectory(prefix=out_folder) as tmp:
try:
extract(fn, dest_dir=tmp)
file_list = _collect_paths(tmp)
create(tmp, file_list, _os.path.basename(out_fn), out_folder=out_folder, **kw)
_, missing_files, mismatching_sizes = validate_converted_files_match(
tmp, _os.path.join(out_folder, fn))
if missing_files or mismatching_sizes:
errors = str(ConversionError(missing_files, mismatching_sizes))
except Exception as e:
errors = str(e)
return fn, out_fn, errors
def transmute(in_file, out_ext, out_folder=None, processes=None, **kw):
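    """Convert all packages matching the ``in_file`` glob to ``out_ext`` in
    parallel; returns a dict of {filename: error message} for failures."""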
if not out_folder:
out_folder = _os.path.dirname(in_file) or _os.getcwd()
flist = set(_glob(in_file))
if in_file.endswith('.tar.bz2'):
flist = flist - set(_glob(in_file.replace('.tar.bz2', out_ext)))
elif in_file.endswith('.conda'):
flist = flist - set(_glob(in_file.replace('.conda', out_ext)))
failed_files = {}
with _tqdm.tqdm(total=len(flist), leave=False) as t:
with _Executor(max_workers=processes) as executor:
convert_f = _functools.partial(_convert, out_ext=out_ext,
out_folder=out_folder, **kw)
for fn, out_fn, errors in executor.map(convert_f, flist):
t.set_description("Converted: %s" % fn)
t.update()
if errors:
failed_files[fn] = errors
_rm_rf(out_fn)
return failed_files
def verify_conversion(glob_pattern, target_dir, reference_ext,
tmpdir_root=_tempfile.gettempdir(), processes=None):
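    """Cross-check converted packages against the ``reference_ext``
    originals; returns a dict of {filename: error message} for mismatches."""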
from .validate import validate_converted_files_match
if not glob_pattern.endswith(reference_ext):
glob_pattern = glob_pattern + reference_ext
file_sets_by_ext = {ext: _glob(_os.path.join(target_dir, glob_pattern + ext))
for ext in SUPPORTED_EXTENSIONS}
matches = {path.replace(ext, "") for ext, path in file_sets_by_ext[reference_ext]}
for ext, paths in file_sets_by_ext.items():
if ext == reference_ext:
continue
matches &= {path.replace(ext, "") for ext, path in paths}
other_exts = set(SUPPORTED_EXTENSIONS) - {reference_ext, }
errors = {}
    with _tqdm.tqdm(total=len(matches) * (len(SUPPORTED_EXTENSIONS) - 1), leave=False) as t:
with _Executor(max_workers=processes) as executor:
for other_ext in other_exts:
verify_fn = lambda fn: validate_converted_files_match(ref_ext=reference_ext,
subject=fn + other_ext)
for fn, missing, mismatching in executor.map(verify_fn, matches):
t.set_description("Validating %s" % fn)
t.update()
if missing or mismatching:
errors[fn] = str(ConversionError(missing, mismatching))
return errors
def get_pkg_details(in_file):
"""For the new pkg format, we return the size and hashes of the inner pkg part of the file"""
for ext in SUPPORTED_EXTENSIONS:
if in_file.endswith(ext):
details = SUPPORTED_EXTENSIONS[ext].get_pkg_details(in_file)
break
else:
raise ValueError("Don't know what to do with file {}".format(in_file))
return details
hexsha: 7905d5bc39ac0185a8b575225ed8ad192aa5acc0
size: 68865
ext: py
lang: Python
max_stars_repo_path: python/jsbeautifier/__init__.py
max_stars_repo_name: fedmich/js-beautify
max_stars_repo_head_hexsha: 3d611fc0b2fcb81ca0019f82499dc19709178255
max_stars_repo_licenses: ["MIT"]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2019-07-18T12:00:30.000Z
max_stars_repo_stars_event_max_datetime: 2019-07-18T12:00:30.000Z
max_issues_repo_path: python/jsbeautifier/__init__.py
max_issues_repo_name: fedmich/js-beautify
max_issues_repo_head_hexsha: 3d611fc0b2fcb81ca0019f82499dc19709178255
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_forks_repo_path: python/jsbeautifier/__init__.py
max_forks_repo_name: fedmich/js-beautify
max_forks_repo_head_hexsha: 3d611fc0b2fcb81ca0019f82499dc19709178255
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
from __future__ import print_function
import sys
import os
import getopt
import re
import string
import errno
import six
from jsbeautifier.__version__ import __version__
#
# The MIT License (MIT)
# Copyright (c) 2007-2013 Einar Lielmanis and contributors.
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Originally written by Einar Lielmanis et al.,
# Conversion to python by Einar Lielmanis, einar@jsbeautifier.org,
# Parsing improvement for brace-less and semicolon-less statements
# by Liam Newman <bitwiseman@gmail.com>
# Python is not my native language, feel free to push things around.
#
# Use either from command line (script displays its usage when run
# without any parameters),
#
#
# or, alternatively, use it as a module:
#
# import jsbeautifier
# res = jsbeautifier.beautify('your javascript string')
# res = jsbeautifier.beautify_file('some_file.js')
#
# you may specify some options:
#
# opts = jsbeautifier.default_options()
# opts.indent_size = 2
# res = jsbeautifier.beautify('some javascript', opts)
#
#
# Here are the available options: (read source)
class BeautifierOptions:
def __init__(self):
self.indent_size = 4
self.indent_char = ' '
self.indent_with_tabs = False
self.preserve_newlines = True
self.max_preserve_newlines = 10
self.space_in_paren = False
self.space_in_empty_paren = False
self.e4x = False
self.jslint_happy = False
self.brace_style = 'collapse'
self.keep_array_indentation = False
self.keep_function_indentation = False
self.eval_code = False
self.unescape_strings = False
self.wrap_line_length = 0
self.break_chained_methods = False
def __repr__(self):
return \
"""indent_size = %d
indent_char = [%s]
preserve_newlines = %s
max_preserve_newlines = %d
space_in_paren = %s
jslint_happy = %s
indent_with_tabs = %s
brace_style = %s
keep_array_indentation = %s
eval_code = %s
wrap_line_length = %s
unescape_strings = %s
""" % ( self.indent_size,
self.indent_char,
self.preserve_newlines,
self.max_preserve_newlines,
self.space_in_paren,
self.jslint_happy,
self.indent_with_tabs,
self.brace_style,
self.keep_array_indentation,
self.eval_code,
self.wrap_line_length,
self.unescape_strings,
)
class BeautifierFlags:
def __init__(self, mode):
self.mode = mode
self.parent = None
self.last_text = ''
self.last_word = ''
self.declaration_statement = False
self.declaration_assignment = False
self.in_html_comment = False
self.multiline_frame = False
self.if_block = False
self.else_block = False
self.do_block = False
self.do_while = False
self.in_case = False
self.in_case_statement = False
self.case_body = False
self.indentation_level = 0
self.line_indent_level = 0
self.start_line_index = 0
self.ternary_depth = 0
self.had_comment = False
def apply_base(self, flags_base, added_newline):
next_indent_level = flags_base.indentation_level;
if not added_newline and \
flags_base.line_indent_level > next_indent_level:
next_indent_level = flags_base.line_indent_level;
self.parent = flags_base;
self.last_text = flags_base.last_text
self.last_word = flags_base.last_word
self.indentation_level = next_indent_level
# Using object instead of string to allow for later expansion of info about each line
class OutputLine:
def __init__(self):
self.text = []
class Acorn:
def __init__(self):
# This section of code was translated to python from acorn (javascript).
#
# Acorn was written by Marijn Haverbeke and released under an MIT
# license. The Unicode regexps (for identifiers and whitespace) were
# taken from [Esprima](http://esprima.org) by Ariya Hidayat.
#
# Git repositories for Acorn are available at
#
# http://marijnhaverbeke.nl/git/acorn
# https://github.com/marijnh/acorn.git
# ## Character categories
# Big ugly regular expressions that match characters in the
# whitespace, identifier, and identifier-start categories. These
# are only applied when a character is found to actually have a
# code point above 128.
self.nonASCIIwhitespace = re.compile(six.u("[\u1680\u180e\u2000-\u200a\u202f\u205f\u3000\ufeff]"))
        self.nonASCIIidentifierStartChars = six.u("\xaa\xb5\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0370-\u0374\u0376\u0377\u037a-\u037d\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u048a-\u0527\u0531-\u0556\u0559\u0561-\u0587\u05d0-\u05ea\u05f0-\u05f2\u0620-\u064a\u066e\u066f\u0671-\u06d3\u06d5\u06e5\u06e6\u06ee\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u07f4\u07f5\u07fa\u0800-\u0815\u081a\u0824\u0828\u0840-\u0858\u08a0\u08a2-\u08ac\u0904-\u0939\u093d\u0950\u0958-\u0961\u0971-\u0977\u0979-\u097f\u0985-\u098c\u098f\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc\u09dd\u09df-\u09e1\u09f0\u09f1\u0a05-\u0a0a\u0a0f\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32\u0a33\u0a35\u0a36\u0a38\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0\u0ae1\u0b05-\u0b0c\u0b0f\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32\u0b33\u0b35-\u0b39\u0b3d\u0b5c\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99\u0b9a\u0b9c\u0b9e\u0b9f\u0ba3\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c33\u0c35-\u0c39\u0c3d\u0c58\u0c59\u0c60\u0c61\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0\u0ce1\u0cf1\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d60\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32\u0e33\u0e40-\u0e46\u0e81\u0e82\u0e84\u0e87\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa\u0eab\u0ead-\u0eb0\u0eb2\u0eb3\u0ebd\u0ec0-\u0ec4\u0ec6\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065\u1066\u106e-\u1070\u1075-\u1081\u108e\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f4\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f0\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17d7\u17dc\u1820-\u1877\u1880-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191c\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19c1-\u19c7\u1a00-\u1a16\u1a20-\u1a54\u1aa7\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c7d\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5\u1cf6\u1d00-\u1dbf\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u2071\u207f\u2090-\u209c\u2102\u2107\u210a-\u2113\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u212f-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cee\u2cf2\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2e2f\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303c\u3041-\u3096\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312d\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fcc\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua61f\ua62a\ua62b\ua640-\ua66e\ua67f-\ua697\ua6a0-\ua6ef\ua717-\ua71f\ua722-\ua788\ua78b-\ua78e\ua790-\ua793\ua7a0-\ua7aa\ua7f8-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9cf\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa76\uaa7a\uaa80-\uaaaf\uaab1\uaab5\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadd\uaae0-\uaaea\uaaf2-\uaaf4\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40\ufb41\ufb43\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe70-\ufe74\ufe76-\ufefc\uff21-\uff3a\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc")
self.nonASCIIidentifierChars = six.u("\u0300-\u036f\u0483-\u0487\u0591-\u05bd\u05bf\u05c1\u05c2\u05c4\u05c5\u05c7\u0610-\u061a\u0620-\u0649\u0672-\u06d3\u06e7-\u06e8\u06fb-\u06fc\u0730-\u074a\u0800-\u0814\u081b-\u0823\u0825-\u0827\u0829-\u082d\u0840-\u0857\u08e4-\u08fe\u0900-\u0903\u093a-\u093c\u093e-\u094f\u0951-\u0957\u0962-\u0963\u0966-\u096f\u0981-\u0983\u09bc\u09be-\u09c4\u09c7\u09c8\u09d7\u09df-\u09e0\u0a01-\u0a03\u0a3c\u0a3e-\u0a42\u0a47\u0a48\u0a4b-\u0a4d\u0a51\u0a66-\u0a71\u0a75\u0a81-\u0a83\u0abc\u0abe-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ae2-\u0ae3\u0ae6-\u0aef\u0b01-\u0b03\u0b3c\u0b3e-\u0b44\u0b47\u0b48\u0b4b-\u0b4d\u0b56\u0b57\u0b5f-\u0b60\u0b66-\u0b6f\u0b82\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd7\u0be6-\u0bef\u0c01-\u0c03\u0c46-\u0c48\u0c4a-\u0c4d\u0c55\u0c56\u0c62-\u0c63\u0c66-\u0c6f\u0c82\u0c83\u0cbc\u0cbe-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5\u0cd6\u0ce2-\u0ce3\u0ce6-\u0cef\u0d02\u0d03\u0d46-\u0d48\u0d57\u0d62-\u0d63\u0d66-\u0d6f\u0d82\u0d83\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0df2\u0df3\u0e34-\u0e3a\u0e40-\u0e45\u0e50-\u0e59\u0eb4-\u0eb9\u0ec8-\u0ecd\u0ed0-\u0ed9\u0f18\u0f19\u0f20-\u0f29\u0f35\u0f37\u0f39\u0f41-\u0f47\u0f71-\u0f84\u0f86-\u0f87\u0f8d-\u0f97\u0f99-\u0fbc\u0fc6\u1000-\u1029\u1040-\u1049\u1067-\u106d\u1071-\u1074\u1082-\u108d\u108f-\u109d\u135d-\u135f\u170e-\u1710\u1720-\u1730\u1740-\u1750\u1772\u1773\u1780-\u17b2\u17dd\u17e0-\u17e9\u180b-\u180d\u1810-\u1819\u1920-\u192b\u1930-\u193b\u1951-\u196d\u19b0-\u19c0\u19c8-\u19c9\u19d0-\u19d9\u1a00-\u1a15\u1a20-\u1a53\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1b46-\u1b4b\u1b50-\u1b59\u1b6b-\u1b73\u1bb0-\u1bb9\u1be6-\u1bf3\u1c00-\u1c22\u1c40-\u1c49\u1c5b-\u1c7d\u1cd0-\u1cd2\u1d00-\u1dbe\u1e01-\u1f15\u200c\u200d\u203f\u2040\u2054\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2d81-\u2d96\u2de0-\u2dff\u3021-\u3028\u3099\u309a\ua640-\ua66d\ua674-\ua67d\ua69f\ua6f0-\ua6f1\ua7f8-\ua800\ua806\ua80b\ua823-\ua827\ua880-\ua881\ua8b4-\ua8c4\ua8d0-\ua8d9\ua8f3-\ua8f7\ua900-\ua909\ua926-\ua92d\ua930-\ua945\ua980-\ua983\ua9b3-\ua9c0\uaa00-\uaa27\uaa40-\uaa41\uaa4c-\uaa4d\uaa50-\uaa59\uaa7b\uaae0-\uaae9\uaaf2-\uaaf3\uabc0-\uabe1\uabec\uabed\uabf0-\uabf9\ufb20-\ufb28\ufe00-\ufe0f\ufe20-\ufe26\ufe33\ufe34\ufe4d-\ufe4f\uff10-\uff19\uff3f")
self.nonASCIIidentifierStart = re.compile("[" + self.nonASCIIidentifierStartChars + "]")
self.nonASCIIidentifier = re.compile("[" + self.nonASCIIidentifierStartChars + self.nonASCIIidentifierChars + "]")
# Whether a single character denotes a newline.
self.newline = re.compile(six.u("[\n\r\u2028\u2029]"))
# Matches a whole line break (where CRLF is considered a single
# line break). Used to count lines.
self.lineBreak = re.compile(six.u("\r\n|[\n\r\u2028\u2029]"))
# Test whether a given character code starts an identifier.
def isIdentifierStart(self, code):
if code < 65:
return code == 36
if code < 91:
return True
if code < 97:
return code == 95
if code < 123:
return True;
return code >= 0xaa and self.nonASCIIidentifierStart.match(six.unichr(code)) != None;
# Test whether a given character is part of an identifier.
def isIdentifierChar(self, code):
if code < 48:
return code == 36;
if code < 58:
return True;
if code < 65:
return False;
if code < 91:
return True;
if code < 97:
return code == 95;
if code < 123:
return True;
return code >= 0xaa and self.nonASCIIidentifier.match(six.unichr(code)) != None;
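# --- Illustrative sketch, not part of the original file: exercises the Acorn
# predicates defined above. The assertions document the ASCII fast paths and
# the non-ASCII regex fallback.
def _acorn_examples():
    acorn = Acorn()
    assert acorn.isIdentifierStart(ord('$'))       # code 36 is allowed
    assert not acorn.isIdentifierStart(ord('1'))   # a digit cannot start a name
    assert acorn.isIdentifierChar(ord('1'))        # ...but may continue one
    assert acorn.isIdentifierStart(0xe9)           # 'é' via the non-ASCII regex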
def default_options():
return BeautifierOptions()
def beautify(string, opts = default_options() ):
b = Beautifier()
return b.beautify(string, opts)
def beautify_file(file_name, opts = default_options() ):
if file_name == '-': # stdin
stream = sys.stdin
else:
stream = open(file_name)
return beautify(''.join(stream.readlines()), opts);
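# --- Illustrative sketch, not part of the original file: mirrors the module
# usage described in the header comment. The input snippet is a placeholder.
def _beautify_example():
    opts = default_options()
    opts.indent_size = 2
    opts.brace_style = 'end-expand'
    return beautify('if(x){y()}else{z()}', opts)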
def usage(stream=sys.stdout):
print("jsbeautifier.py@" + __version__ + """
Javascript beautifier (http://jsbeautifier.org/)
Usage: jsbeautifier.py [options] <infile>
<infile> can be "-", which means stdin.
<outfile> defaults to stdout
Input options:
-i, --stdin read input from stdin
Output options:
-s, --indent-size=NUMBER indentation size. (default 4).
-c, --indent-char=CHAR character to indent with. (default space).
-t, --indent-with-tabs Indent with tabs, overrides -s and -c
-d, --disable-preserve-newlines do not preserve existing line breaks.
-P, --space-in-paren add padding spaces within paren, ie. f( a, b )
-E, --space-in-empty-paren Add a single space inside empty paren, ie. f( )
-j, --jslint-happy more jslint-compatible output
-b, --brace-style=collapse brace style (collapse, expand, end-expand)
-k, --keep-array-indentation keep array indentation.
-o, --outfile=FILE specify a file to output to (default stdout)
-f, --keep-function-indentation Do not re-indent function bodies defined in var lines.
-x, --unescape-strings Decode printable chars encoded in \\xNN notation.
-X, --e4x Pass E4X xml literals through untouched
-w, --wrap-line-length Attempt to wrap line when it exceeds this length.
NOTE: Line continues until next wrap point is found.
Rarely needed options:
--eval-code evaluate code if a JS interpreter is
installed. May be useful with some obfuscated
script but poses a potential security issue.
-l, --indent-level=NUMBER initial indentation level. (default 0).
-h, --help, --usage prints this help statement.
-v, --version Show the version
""", file=stream)
if stream == sys.stderr:
return 1
else:
return 0
class MODE:
BlockStatement, Statement, ObjectLiteral, ArrayLiteral, \
ForInitializer, Conditional, Expression = range(7)
class Beautifier:
def __init__(self, opts = default_options() ):
self.opts = opts
self.blank_state()
self.acorn = Acorn();
def blank_state(self):
# internal flags
self.flags = None
self.previous_flags = None
self.flag_store = []
self.input_wanted_newline = False
if self.opts.indent_with_tabs:
self.opts.indent_char = "\t"
self.opts.indent_size = 1
self.indent_string = self.opts.indent_char * self.opts.indent_size
self.preindent_string = ''
self.last_type = 'TK_START_BLOCK' # last token type
self.last_last_text = '' # pre-last token text
self.input = None
self.output_lines = [ OutputLine() ]
self.output_space_before_token = False
self.whitespace_before_token = []
self.whitespace = ["\n", "\r", "\t", " "]
self.wordchar = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_$'
self.digits = '0123456789'
self.punct = '+ - * / % & ++ -- = += -= *= /= %= == === != !== > < >= <= >> << >>> >>>= >>= <<= && &= | || ! ~ , : ? ^ ^= |= :: =>'
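        # the additions below keep PHP/ASP-style template delimiters together,
        # so the tokenizer emits '<?=' as one token instead of '<' '?' '='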
self.punct += ' <?= <? ?> <%= <% %>'
self.punct = self.punct.split(' ')
# Words which always should start on a new line
self.line_starters = 'continue,try,throw,return,var,let,const,if,switch,case,default,for,while,break,function,yield'.split(',')
self.reserved_words = self.line_starters + ['do', 'in', 'else', 'get', 'set', 'new', 'catch', 'finally', 'typeof'];
self.set_mode(MODE.BlockStatement)
self.parser_pos = 0
def beautify(self, s, opts = None ):
if opts != None:
self.opts = opts
if self.opts.brace_style not in ['expand', 'collapse', 'end-expand']:
raise(Exception('opts.brace_style must be "expand", "collapse" or "end-expand".'))
self.blank_state()
while s and s[0] in [' ', '\t']:
self.preindent_string += s[0]
s = s[1:]
self.input = self.unpack(s, self.opts.eval_code)
self.parser_pos = 0
handlers = {
'TK_START_EXPR': self.handle_start_expr,
'TK_END_EXPR': self.handle_end_expr,
'TK_START_BLOCK': self.handle_start_block,
'TK_END_BLOCK': self.handle_end_block,
'TK_WORD': self.handle_word,
'TK_RESERVED': self.handle_word,
'TK_SEMICOLON': self.handle_semicolon,
'TK_STRING': self.handle_string,
'TK_EQUALS': self.handle_equals,
'TK_OPERATOR': self.handle_operator,
'TK_COMMA': self.handle_comma,
'TK_BLOCK_COMMENT': self.handle_block_comment,
'TK_INLINE_COMMENT': self.handle_inline_comment,
'TK_COMMENT': self.handle_comment,
'TK_DOT': self.handle_dot,
'TK_UNKNOWN': self.handle_unknown,
}
while True:
self.token_text, self.token_type = self.get_next_token()
#print (token_text, self.token_type, self.flags.mode)
if self.token_type == 'TK_EOF':
while self.flags.mode == MODE.Statement:
self.restore_mode();
break
keep_whitespace = self.opts.keep_array_indentation and self.is_array(self.flags.mode)
self.input_wanted_newline = self.n_newlines > 0
if keep_whitespace:
for i in range(self.n_newlines):
self.append_newline(i > 0)
else: # not keep_whitespace
if self.opts.max_preserve_newlines != 0 and self.n_newlines > self.opts.max_preserve_newlines:
self.n_newlines = self.opts.max_preserve_newlines
if self.opts.preserve_newlines and self.n_newlines > 1:
for i in range(self.n_newlines):
self.append_newline(i != 0)
handlers[self.token_type](self.token_text)
# The cleanest handling of inline comments is to treat them as though they aren't there.
# Just continue formatting and the behavior should be logical.
if self.token_type != 'TK_INLINE_COMMENT' and self.token_type != 'TK_COMMENT' and self.token_type != 'TK_BLOCK_COMMENT' and self.token_type != 'TK_UNKNOWN':
self.last_last_text = self.flags.last_text
self.last_type = self.token_type
self.flags.last_text = self.token_text
self.flags.had_comment = self.token_type in ['TK_COMMENT', 'TK_INLINE_COMMENT', 'TK_BLOCK_COMMENT']
sweet_code = ''.join(self.output_lines[0].text)
if len(self.output_lines) > 1:
for line_index in range(1, len(self.output_lines)):
sweet_code += '\n' + ''.join(self.output_lines[line_index].text);
sweet_code = re.sub('[\n ]+$', '', sweet_code)
return sweet_code
def unpack(self, source, evalcode=False):
import jsbeautifier.unpackers as unpackers
try:
return unpackers.run(source, evalcode)
except unpackers.UnpackingError as error:
print('error:', error)
return ''
def trim_output(self, eat_newlines = False):
self.trim_output_line(self.output_lines[-1])
while eat_newlines and len(self.output_lines) > 1 and \
len(self.output_lines[-1].text) == 0:
self.output_lines.pop()
self.trim_output_line(self.output_lines[-1])
def trim_output_line(self, line):
while len(line.text) \
and (
line.text[-1] == ' '\
or line.text[-1] == self.indent_string \
or line.text[-1] == self.preindent_string):
line.text.pop()
def is_special_word(self, s):
return s in ['case', 'return', 'do', 'if', 'throw', 'else']
def is_array(self, mode):
return mode == MODE.ArrayLiteral
def is_expression(self, mode):
return mode in [MODE.Expression, MODE.ForInitializer, MODE.Conditional]
def just_added_newline(self):
line = self.output_lines[-1]
return len(line.text) == 0
def just_added_blankline(self):
if self.just_added_newline():
if len(self.output_lines) == 1:
return True
line = self.output_lines[-2]
return len(line.text) == 0
return False
def allow_wrap_or_preserved_newline(self, token_text, force_linewrap = False):
if self.opts.wrap_line_length > 0 and not force_linewrap:
line = self.output_lines[-1]
# never wrap the first token of a line.
if len(line.text) > 0:
proposed_line_length = len(''.join(line.text)) + len(token_text)
if self.output_space_before_token:
proposed_line_length += 1
if proposed_line_length >= self.opts.wrap_line_length:
force_linewrap = True
if ((self.opts.preserve_newlines and self.input_wanted_newline) or force_linewrap) and not self.just_added_newline():
self.append_newline(preserve_statement_flags = True)
def append_newline(self, force_newline = False, preserve_statement_flags = False):
self.output_space_before_token = False
if not preserve_statement_flags:
if self.flags.last_text != ';' and self.flags.last_text != ',' and self.flags.last_text != '=' and self.last_type != 'TK_OPERATOR':
while self.flags.mode == MODE.Statement and not self.flags.if_block and not self.flags.do_block:
self.restore_mode();
if len(self.output_lines) == 1 and self.just_added_newline():
# no newline on start of file
return
if force_newline or not self.just_added_newline():
self.flags.multiline_frame = True
self.output_lines.append(OutputLine())
def append_token_line_indentation(self):
if self.just_added_newline():
line = self.output_lines[-1]
if self.opts.keep_array_indentation and self.is_array(self.flags.mode) and self.input_wanted_newline:
# prevent removing of this whitespace as redundant
line.text.append('');
for item in self.whitespace_before_token:
line.text.append(item)
else:
if self.preindent_string != '':
line.text.append(self.preindent_string)
level = self.flags.indentation_level;
self.append_indent_string(level)
def append_indent_string(self, level):
# Never indent your first output indent at the start of the file
if len(self.output_lines) > 1:
line = self.output_lines[-1]
self.flags.line_indent_level = level
for i in range(level):
line.text.append(self.indent_string)
def append_token_space_before(self):
# make sure only single space gets drawn
line = self.output_lines[-1]
if self.output_space_before_token and len(line.text) and line.text[-1] not in [' ', self.indent_string]:
line.text.append(' ')
def append_token(self, s):
self.append_token_line_indentation()
self.append_token_space_before()
self.output_space_before_token = False
self.output_lines[-1].text.append(s)
def indent(self):
self.flags.indentation_level += 1
def deindent(self):
allow_deindent = self.flags.indentation_level > 0 and ((self.flags.parent == None) or self.flags.indentation_level > self.flags.parent.indentation_level)
if allow_deindent:
self.flags.indentation_level -= 1
def remove_redundant_indentation(self, frame):
# This implementation is effective but has some issues:
# - less than great performance due to array splicing
# - can cause line wrap to happen too soon due to indent removal
# after wrap points are calculated
# These issues are minor compared to ugly indentation.
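        # e.g. an indent added for a statement that never actually wrapped
        # onto further lines is redundant, so one indent_string is stripped
        # from each output line recorded since the frame's start_line_index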
if frame.multiline_frame:
return
# remove one indent from each line inside this section
index = frame.start_line_index
splice_index = 0
while index < len(self.output_lines):
line = self.output_lines[index]
index += 1
# skip empty lines
if len(line.text) == 0:
continue
# skip the preindent string if present
if self.preindent_string != '' and \
line.text[0] == self.preindent_string:
splice_index = 1
else:
splice_index = 0
# remove one indent, if present
if line.text[splice_index] == self.indent_string:
del line.text[splice_index]
def set_mode(self, mode):
if self.flags:
self.flag_store.append(self.flags)
self.previous_flags = self.flags
else:
self.previous_flags = BeautifierFlags(mode)
self.flags = BeautifierFlags(mode)
self.flags.apply_base(self.previous_flags, self.just_added_newline());
self.flags.start_line_index = len(self.output_lines)
def restore_mode(self):
if len(self.flag_store) > 0:
self.previous_flags = self.flags
self.flags = self.flag_store.pop()
if self.previous_flags.mode == MODE.Statement:
self.remove_redundant_indentation(self.previous_flags)
def start_of_object_property(self):
return self.flags.parent.mode == MODE.ObjectLiteral and self.flags.mode == MODE.Statement and self.flags.last_text == ':' and \
self.flags.ternary_depth == 0
def start_of_statement(self):
if (
(self.last_type == 'TK_RESERVED' and self.flags.last_text in ['var', 'let', 'const'] and self.token_type == 'TK_WORD') \
or (self.last_type == 'TK_RESERVED' and self.flags.last_text== 'do') \
or (self.last_type == 'TK_RESERVED' and self.flags.last_text== 'return' and not self.input_wanted_newline) \
or (self.last_type == 'TK_RESERVED' and self.flags.last_text == 'else' and not (self.token_type == 'TK_RESERVED' and self.token_text == 'if' )) \
or (self.last_type == 'TK_END_EXPR' and (self.previous_flags.mode == MODE.ForInitializer or self.previous_flags.mode == MODE.Conditional)) \
or (self.last_type == 'TK_WORD' and self.flags.mode == MODE.BlockStatement \
and not self.flags.in_case
and not (self.token_text == '--' or self.token_text == '++')
and self.token_type != 'TK_WORD' and self.token_type != 'TK_RESERVED') \
or (self.flags.mode == MODE.ObjectLiteral and self.flags.last_text == ':' and self.flags.ternary_depth == 0) \
):
self.set_mode(MODE.Statement);
self.indent();
if self.last_type == 'TK_RESERVED' and self.flags.last_text in ['var', 'let', 'const'] and self.token_type == 'TK_WORD':
self.flags.declaration_statement = True
# Issue #276:
# If starting a new statement with [if, for, while, do], push to a new line.
# if (a) if (b) if(c) d(); else e(); else f();
if not self.start_of_object_property():
self.allow_wrap_or_preserved_newline(self.token_text, self.token_type == 'TK_RESERVED' and self.token_text in ['do', 'for', 'if', 'while']);
return True
else:
return False
def is_next(self, find):
local_pos = self.parser_pos
if local_pos >= len(self.input):
return False
c = self.input[local_pos]
while (c in self.whitespace) and c != find:
local_pos+= 1
if local_pos >= len(self.input):
return False
c = self.input[local_pos]
return c == find
def get_next_token(self):
self.n_newlines = 0
if self.parser_pos >= len(self.input):
return '', 'TK_EOF'
self.input_wanted_newline = False
self.whitespace_before_token = []
c = self.input[self.parser_pos]
self.parser_pos += 1
while c in self.whitespace:
if c == '\n':
self.n_newlines += 1
self.whitespace_before_token = []
elif c == self.indent_string:
self.whitespace_before_token.append(self.indent_string)
elif c != '\r':
self.whitespace_before_token.append(' ')
if self.parser_pos >= len(self.input):
return '', 'TK_EOF'
c = self.input[self.parser_pos]
self.parser_pos += 1
# NOTE: because beautifier doesn't fully parse, it doesn't use acorn.isIdentifierStart.
# It just treats all identifiers and numbers and such the same.
if self.acorn.isIdentifierChar(ord(self.input[self.parser_pos-1])):
if self.parser_pos < len(self.input):
while self.acorn.isIdentifierChar(ord(self.input[self.parser_pos])):
c = c + self.input[self.parser_pos]
self.parser_pos += 1
if self.parser_pos == len(self.input):
break
# small and surprisingly unugly hack for IE-10 representation
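            # e.g. "1e+12" first lexes as the word "1e"; the sign and the
            # following token are glued back on so it stays a single TK_WORD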
if self.parser_pos != len(self.input) and self.input[self.parser_pos] in '+-' \
and re.match('^[0-9]+[Ee]$', c):
sign = self.input[self.parser_pos]
self.parser_pos += 1
t = self.get_next_token()
c += sign + t[0]
return c, 'TK_WORD'
if not (self.last_type == 'TK_DOT' \
or (self.last_type == 'TK_RESERVED' and self.flags.last_text in ['set', 'get'])) \
and c in self.reserved_words:
if c == 'in': # in is an operator, need to hack
return c, 'TK_OPERATOR'
return c, 'TK_RESERVED'
return c, 'TK_WORD'
if c in '([':
return c, 'TK_START_EXPR'
if c in ')]':
return c, 'TK_END_EXPR'
if c == '{':
return c, 'TK_START_BLOCK'
if c == '}':
return c, 'TK_END_BLOCK'
if c == ';':
return c, 'TK_SEMICOLON'
if c == '/':
comment = ''
inline_comment = True
if self.input[self.parser_pos] == '*': # peek /* .. */ comment
self.parser_pos += 1
if self.parser_pos < len(self.input):
while not (self.input[self.parser_pos] == '*' and \
self.parser_pos + 1 < len(self.input) and \
self.input[self.parser_pos + 1] == '/')\
and self.parser_pos < len(self.input):
c = self.input[self.parser_pos]
comment += c
if c in '\r\n':
inline_comment = False
self.parser_pos += 1
if self.parser_pos >= len(self.input):
break
self.parser_pos += 2
if inline_comment and self.n_newlines == 0:
return '/*' + comment + '*/', 'TK_INLINE_COMMENT'
else:
return '/*' + comment + '*/', 'TK_BLOCK_COMMENT'
if self.input[self.parser_pos] == '/': # peek // comment
comment = c
while self.input[self.parser_pos] not in '\r\n':
comment += self.input[self.parser_pos]
self.parser_pos += 1
if self.parser_pos >= len(self.input):
break
return comment, 'TK_COMMENT'
if c == '`' or c == "'" or c == '"' or \
( \
(c == '/') or \
(self.opts.e4x and c == "<" and re.match('^<(!\[CDATA\[[\s\S]*?\]\]|[-a-zA-Z:0-9_.]+|\{[^{}]*\})\s*([-a-zA-Z:0-9_.]+=(\{[^{}]*\}|"[^"]*"|\'[^\']*\')\s*)*\/?\s*>', self.input[self.parser_pos - 1:])) \
) and ( \
(self.last_type == 'TK_RESERVED' and self.is_special_word(self.flags.last_text)) or \
(self.last_type == 'TK_END_EXPR' and self.previous_flags.mode in [MODE.Conditional, MODE.ForInitializer]) or \
(self.last_type in ['TK_COMMENT', 'TK_START_EXPR', 'TK_START_BLOCK', 'TK_END_BLOCK', 'TK_OPERATOR', \
'TK_EQUALS', 'TK_EOF', 'TK_SEMICOLON', 'TK_COMMA'])):
sep = c
esc = False
esc1 = 0
esc2 = 0
resulting_string = c
in_char_class = False
if self.parser_pos < len(self.input):
if sep == '/':
# handle regexp
in_char_class = False
while esc or in_char_class or self.input[self.parser_pos] != sep:
resulting_string += self.input[self.parser_pos]
if not esc:
esc = self.input[self.parser_pos] == '\\'
if self.input[self.parser_pos] == '[':
in_char_class = True
elif self.input[self.parser_pos] == ']':
in_char_class = False
else:
esc = False
self.parser_pos += 1
if self.parser_pos >= len(self.input):
# incomplete regex when end-of-file reached
                            # bail out with what has been received so far
return resulting_string, 'TK_STRING'
elif self.opts.e4x and sep == '<':
# handle e4x xml literals
xmlRegExp = re.compile('<(\/?)(!\[CDATA\[[\s\S]*?\]\]|[-a-zA-Z:0-9_.]+|\{[^{}]*\})\s*([-a-zA-Z:0-9_.]+=(\{[^{}]*\}|"[^"]*"|\'[^\']*\')\s*)*(\/?)\s*>')
xmlStr = self.input[self.parser_pos - 1:]
match = xmlRegExp.match(xmlStr)
if match:
rootTag = match.group(2)
depth = 0
while (match):
isEndTag = match.group(1)
tagName = match.group(2)
isSingletonTag = (match.groups()[-1] != "") or (match.group(2)[0:8] == "![CDATA[")
if tagName == rootTag and not isSingletonTag:
if isEndTag:
depth -= 1
else:
depth += 1
if depth <= 0:
break
match = xmlRegExp.search(xmlStr, match.end())
if match:
xmlLength = match.end() # + len(match.group())
else:
xmlLength = len(xmlStr)
self.parser_pos += xmlLength - 1
return xmlStr[:xmlLength], 'TK_STRING'
else:
# handle string
while esc or self.input[self.parser_pos] != sep:
resulting_string += self.input[self.parser_pos]
if esc1 and esc1 >= esc2:
try:
esc1 = int(resulting_string[-esc2:], 16)
except Exception:
esc1 = False
if esc1 and esc1 >= 0x20 and esc1 <= 0x7e:
esc1 = chr(esc1)
resulting_string = resulting_string[:-2 - esc2]
if esc1 == sep or esc1 == '\\':
resulting_string += '\\'
resulting_string += esc1
esc1 = 0
if esc1:
esc1 += 1
elif not esc:
esc = self.input[self.parser_pos] == '\\'
else:
esc = False
if self.opts.unescape_strings:
if self.input[self.parser_pos] == 'x':
esc1 += 1
esc2 = 2
elif self.input[self.parser_pos] == 'u':
esc1 += 1
esc2 = 4
self.parser_pos += 1
if self.parser_pos >= len(self.input):
# incomplete string when end-of-file reached
                            # bail out with what has been received so far
return resulting_string, 'TK_STRING'
self.parser_pos += 1
resulting_string += sep
if sep == '/':
# regexps may have modifiers /regexp/MOD, so fetch those too
while self.parser_pos < len(self.input) and self.input[self.parser_pos] in self.wordchar:
resulting_string += self.input[self.parser_pos]
self.parser_pos += 1
return resulting_string, 'TK_STRING'
if c == '#':
# she-bang
if len(self.output_lines) == 1 and len(self.output_lines[0].text) == 0 and \
len(self.input) > self.parser_pos and self.input[self.parser_pos] == '!':
resulting_string = c
while self.parser_pos < len(self.input) and c != '\n':
c = self.input[self.parser_pos]
resulting_string += c
self.parser_pos += 1
return resulting_string.strip() + '\n', 'TK_UNKNOWN'
# Spidermonkey-specific sharp variables for circular references
# https://developer.mozilla.org/En/Sharp_variables_in_JavaScript
# http://mxr.mozilla.org/mozilla-central/source/js/src/jsscan.cpp around line 1935
sharp = '#'
if self.parser_pos < len(self.input) and self.input[self.parser_pos] in self.digits:
while True:
c = self.input[self.parser_pos]
sharp += c
self.parser_pos += 1
if self.parser_pos >= len(self.input) or c == '#' or c == '=':
break
if c == '#' or self.parser_pos >= len(self.input):
pass
elif self.input[self.parser_pos] == '[' and self.input[self.parser_pos + 1] == ']':
sharp += '[]'
self.parser_pos += 2
elif self.input[self.parser_pos] == '{' and self.input[self.parser_pos + 1] == '}':
sharp += '{}'
self.parser_pos += 2
return sharp, 'TK_WORD'
if c == '<' and self.input[self.parser_pos - 1 : self.parser_pos + 3] == '<!--':
self.parser_pos += 3
c = '<!--'
while self.parser_pos < len(self.input) and self.input[self.parser_pos] != '\n':
c += self.input[self.parser_pos]
self.parser_pos += 1
self.flags.in_html_comment = True
return c, 'TK_COMMENT'
if c == '-' and self.flags.in_html_comment and self.input[self.parser_pos - 1 : self.parser_pos + 2] == '-->':
self.flags.in_html_comment = False
self.parser_pos += 2
return '-->', 'TK_COMMENT'
if c == '.':
return c, 'TK_DOT'
if c in self.punct:
while self.parser_pos < len(self.input) and c + self.input[self.parser_pos] in self.punct:
c += self.input[self.parser_pos]
self.parser_pos += 1
if self.parser_pos >= len(self.input):
break
if c == ',':
return c, 'TK_COMMA'
if c == '=':
return c, 'TK_EQUALS'
return c, 'TK_OPERATOR'
return c, 'TK_UNKNOWN'
def handle_start_expr(self, token_text):
if self.start_of_statement():
# The conditional starts the statement if appropriate.
pass
next_mode = MODE.Expression
if token_text == '[':
if self.last_type == 'TK_WORD' or self.flags.last_text == ')':
if self.last_type == 'TK_RESERVED' and self.flags.last_text in self.line_starters:
self.output_space_before_token = True
self.set_mode(next_mode)
self.append_token(token_text)
self.indent()
if self.opts.space_in_paren:
self.output_space_before_token = True
return
next_mode = MODE.ArrayLiteral
if self.is_array(self.flags.mode):
if self.flags.last_text == '[' or (
self.flags.last_text == ',' and (self.last_last_text == ']' or self.last_last_text == '}')):
# ], [ goes to a new line
# }, [ goes to a new line
if not self.opts.keep_array_indentation:
self.append_newline()
else:
if self.last_type == 'TK_RESERVED' and self.flags.last_text == 'for':
next_mode = MODE.ForInitializer
elif self.last_type == 'TK_RESERVED' and self.flags.last_text in ['if', 'while']:
next_mode = MODE.Conditional
else:
next_mode = MODE.Expression
if self.flags.last_text == ';' or self.last_type == 'TK_START_BLOCK':
self.append_newline()
elif self.last_type in ['TK_END_EXPR', 'TK_START_EXPR', 'TK_END_BLOCK'] or self.flags.last_text == '.':
# do nothing on (( and )( and ][ and ]( and .(
# TODO: Consider whether forcing this is required. Review failing tests when removed.
self.allow_wrap_or_preserved_newline(token_text, self.input_wanted_newline);
elif not (self.last_type == 'TK_RESERVED' and token_text == '(') and self.last_type not in ['TK_WORD', 'TK_OPERATOR']:
self.output_space_before_token = True
elif (self.last_type == 'TK_RESERVED' and (self.flags.last_word == 'function' or self.flags.last_word == 'typeof')) or \
(self.flags.last_text == '*' and self.last_last_text =='function'):
# function() vs function (), typeof() vs typeof ()
if self.opts.jslint_happy:
self.output_space_before_token = True
elif self.last_type == 'TK_RESERVED' and (self.flags.last_text in self.line_starters or self.flags.last_text == 'catch'):
# TODO: option space_before_conditional
self.output_space_before_token = True
# Support of this kind of newline preservation:
# a = (b &&
# (c || d));
if self.last_type in ['TK_EQUALS', 'TK_OPERATOR']:
if not self.start_of_object_property():
self.allow_wrap_or_preserved_newline(token_text)
self.set_mode(next_mode)
self.append_token(token_text)
if self.opts.space_in_paren:
self.output_space_before_token = True
# In all cases, if we newline while inside an expression it should be indented.
self.indent()
def handle_end_expr(self, token_text):
# statements inside expressions are not valid syntax, but...
# statements must all be closed when their container closes
while self.flags.mode == MODE.Statement:
self.restore_mode()
if self.flags.multiline_frame:
self.allow_wrap_or_preserved_newline(self.token_text, self.token_text == ']' and self.is_array(self.flags.mode) and not self.opts.keep_array_indentation)
if self.opts.space_in_paren:
if self.last_type == 'TK_START_EXPR' and not self.opts.space_in_empty_paren:
# empty parens are always "()" and "[]", not "( )" or "[ ]"
self.output_space_before_token = False
self.trim_output()
else:
self.output_space_before_token = True
if self.token_text == ']' and self.opts.keep_array_indentation:
self.append_token(token_text)
self.restore_mode()
else:
self.restore_mode()
self.append_token(token_text)
self.remove_redundant_indentation(self.previous_flags);
# do {} while () // no statement required after
if self.flags.do_while and self.previous_flags.mode == MODE.Conditional:
self.previous_flags.mode = MODE.Expression
self.flags.do_block = False
self.flags.do_while = False
def handle_start_block(self, token_text):
self.set_mode(MODE.BlockStatement)
empty_braces = self.is_next('}')
empty_anonymous_function = empty_braces and self.flags.last_word == 'function' and \
self.last_type == 'TK_END_EXPR'
if self.opts.brace_style == 'expand':
if self.last_type != 'TK_OPERATOR' and \
(empty_anonymous_function or
self.last_type == 'TK_EQUALS' or
(self.last_type == 'TK_RESERVED' and self.is_special_word(self.flags.last_text) and self.flags.last_text != 'else')):
self.output_space_before_token = True
else:
self.append_newline(preserve_statement_flags = True)
else: # collapse
if self.last_type not in ['TK_OPERATOR', 'TK_START_EXPR']:
if self.last_type == 'TK_START_BLOCK':
self.append_newline()
else:
self.output_space_before_token = True
else:
# if TK_OPERATOR or TK_START_EXPR
if self.is_array(self.previous_flags.mode) and self.flags.last_text == ',':
if self.last_last_text == '}':
self.output_space_before_token = True
else:
self.append_newline()
self.append_token(token_text)
self.indent()
def handle_end_block(self, token_text):
# statements must all be closed when their container closes
while self.flags.mode == MODE.Statement:
self.restore_mode()
empty_braces = self.last_type == 'TK_START_BLOCK';
if self.opts.brace_style == 'expand':
if not empty_braces:
self.append_newline()
else:
# skip {}
if not empty_braces:
if self.is_array(self.flags.mode) and self.opts.keep_array_indentation:
self.opts.keep_array_indentation = False
self.append_newline()
self.opts.keep_array_indentation = True
else:
self.append_newline()
self.restore_mode()
self.append_token(token_text)
def handle_word(self, token_text):
if self.start_of_statement():
# The conditional starts the statement if appropriate.
pass
elif self.input_wanted_newline and \
not self.is_expression(self.flags.mode) and \
(self.last_type != 'TK_OPERATOR' or (self.flags.last_text == '--' or self.flags.last_text == '++')) and \
self.last_type != 'TK_EQUALS' and \
(self.opts.preserve_newlines or not (self.last_type == 'TK_RESERVED' and self.flags.last_text in ['var', 'let', 'const', 'set', 'get'])):
self.append_newline()
if self.flags.do_block and not self.flags.do_while:
if self.token_type == 'TK_RESERVED' and token_text == 'while':
# do {} ## while ()
self.output_space_before_token = True
self.append_token(token_text)
self.output_space_before_token = True
self.flags.do_while = True
return
else:
# do {} should always have while as the next word.
# if we don't see the expected while, recover
self.append_newline()
self.flags.do_block = False
# if may be followed by else, or not
# Bare/inline ifs are tricky
# Need to unwind the modes correctly: if (a) if (b) c(); else d(); else e();
if self.flags.if_block:
if (not self.flags.else_block) and (self.token_type == 'TK_RESERVED' and token_text == 'else'):
self.flags.else_block = True
else:
while self.flags.mode == MODE.Statement:
self.restore_mode()
self.flags.if_block = False;
if self.token_type == 'TK_RESERVED' and (token_text == 'case' or (token_text == 'default' and self.flags.in_case_statement)):
self.append_newline()
if self.flags.case_body or self.opts.jslint_happy:
self.flags.case_body = False
self.deindent()
self.append_token(token_text)
self.flags.in_case = True
self.flags.in_case_statement = True
return
if self.token_type == 'TK_RESERVED' and token_text == 'function':
if self.flags.last_text in ['}', ';'] or (self.just_added_newline() and not self.flags.last_text in ['{', ':', '=', ',']):
# make sure there is a nice clean space of at least one blank line
# before a new function definition, except in arrays
if not self.just_added_blankline() and not self.flags.had_comment:
self.append_newline()
self.append_newline(True)
if self.last_type == 'TK_RESERVED' or self.last_type == 'TK_WORD':
if self.last_type == 'TK_RESERVED' and self.flags.last_text in ['get', 'set', 'new', 'return']:
self.output_space_before_token = True
else:
self.append_newline()
elif self.last_type == 'TK_OPERATOR' or self.flags.last_text == '=':
# foo = function
self.output_space_before_token = True
elif self.is_expression(self.flags.mode):
# (function
pass
else:
self.append_newline()
if self.last_type in ['TK_COMMA', 'TK_START_EXPR', 'TK_EQUALS', 'TK_OPERATOR']:
if not self.start_of_object_property():
self.allow_wrap_or_preserved_newline(token_text)
if self.token_type == 'TK_RESERVED' and token_text == 'function':
self.append_token(token_text)
self.flags.last_word = token_text
return
prefix = 'NONE'
if self.last_type == 'TK_END_BLOCK':
if not (self.token_type == 'TK_RESERVED' and token_text in ['else', 'catch', 'finally']):
prefix = 'NEWLINE'
else:
if self.opts.brace_style in ['expand', 'end-expand']:
prefix = 'NEWLINE'
else:
prefix = 'SPACE'
self.output_space_before_token = True
elif self.last_type == 'TK_SEMICOLON' and self.flags.mode == MODE.BlockStatement:
# TODO: Should this be for STATEMENT as well?
prefix = 'NEWLINE'
elif self.last_type == 'TK_SEMICOLON' and self.is_expression(self.flags.mode):
prefix = 'SPACE'
elif self.last_type == 'TK_STRING':
prefix = 'NEWLINE'
elif self.last_type == 'TK_RESERVED' or self.last_type == 'TK_WORD' or \
(self.flags.last_text == '*' and self.last_last_text == 'function'):
prefix = 'SPACE'
elif self.last_type == 'TK_START_BLOCK':
prefix = 'NEWLINE'
elif self.last_type == 'TK_END_EXPR':
self.output_space_before_token = True
prefix = 'NEWLINE'
if self.token_type == 'TK_RESERVED' and token_text in self.line_starters and self.flags.last_text != ')':
if self.flags.last_text == 'else':
prefix = 'SPACE'
else:
prefix = 'NEWLINE'
if self.token_type == 'TK_RESERVED' and token_text in ['else', 'catch', 'finally']:
if self.last_type != 'TK_END_BLOCK' \
or self.opts.brace_style == 'expand' \
or self.opts.brace_style == 'end-expand':
self.append_newline()
else:
self.trim_output(True)
line = self.output_lines[-1]
# If we trimmed and there's something other than a close block before us
# put a newline back in. Handles '} // comment' scenario.
if line.text[-1] != '}':
self.append_newline()
self.output_space_before_token = True
elif prefix == 'NEWLINE':
if self.last_type == 'TK_RESERVED' and self.is_special_word(self.flags.last_text):
                # no newline between 'return' and the returned expression
self.output_space_before_token = True
elif self.last_type != 'TK_END_EXPR':
if (self.last_type != 'TK_START_EXPR' or not (self.token_type == 'TK_RESERVED' and token_text in ['var', 'let', 'const'])) and self.flags.last_text != ':':
# no need to force newline on VAR -
# for (var x = 0...
if self.token_type == 'TK_RESERVED' and token_text == 'if' and self.flags.last_word == 'else' and self.flags.last_text != '{':
self.output_space_before_token = True
else:
self.append_newline()
elif self.token_type == 'TK_RESERVED' and token_text in self.line_starters and self.flags.last_text != ')':
self.append_newline()
elif self.is_array(self.flags.mode) and self.flags.last_text == ',' and self.last_last_text == '}':
self.append_newline() # }, in lists get a newline
elif prefix == 'SPACE':
self.output_space_before_token = True
self.append_token(token_text)
self.flags.last_word = token_text
if self.token_type == 'TK_RESERVED' and token_text == 'do':
self.flags.do_block = True
if self.token_type == 'TK_RESERVED' and token_text == 'if':
self.flags.if_block = True
def handle_semicolon(self, token_text):
if self.start_of_statement():
# The conditional starts the statement if appropriate.
# Semicolon can be the start (and end) of a statement
self.output_space_before_token = False
while self.flags.mode == MODE.Statement and not self.flags.if_block and not self.flags.do_block:
self.restore_mode()
self.append_token(token_text)
if self.flags.mode == MODE.ObjectLiteral:
# OBJECT mode is weird and doesn't get reset too well.
self.flags.mode = MODE.BlockStatement
def handle_string(self, token_text):
if self.start_of_statement():
# The conditional starts the statement if appropriate.
# One difference - strings want at least a space before
self.output_space_before_token = True
elif self.last_type == 'TK_RESERVED' or self.last_type == 'TK_WORD':
self.output_space_before_token = True
elif self.last_type in ['TK_COMMA', 'TK_START_EXPR', 'TK_EQUALS', 'TK_OPERATOR']:
if not self.start_of_object_property():
self.allow_wrap_or_preserved_newline(token_text)
else:
self.append_newline()
self.append_token(token_text)
def handle_equals(self, token_text):
if self.start_of_statement():
# The conditional starts the statement if appropriate.
pass
if self.flags.declaration_statement:
# just got an '=' in a var-line, different line breaking rules will apply
self.flags.declaration_assignment = True
self.output_space_before_token = True
self.append_token(token_text)
self.output_space_before_token = True
def handle_comma(self, token_text):
if self.flags.declaration_statement:
if self.is_expression(self.flags.parent.mode):
# do not break on comma, for ( var a = 1, b = 2
self.flags.declaration_assignment = False
self.append_token(token_text)
if self.flags.declaration_assignment:
self.flags.declaration_assignment = False
self.append_newline(preserve_statement_flags = True)
else:
self.output_space_before_token = True
return
self.append_token(token_text)
if self.flags.mode == MODE.ObjectLiteral \
or (self.flags.mode == MODE.Statement and self.flags.parent.mode == MODE.ObjectLiteral):
if self.flags.mode == MODE.Statement:
self.restore_mode()
self.append_newline()
else:
# EXPR or DO_BLOCK
self.output_space_before_token = True
def handle_operator(self, token_text):
        # Check if this is a BlockStatement that should be treated as an ObjectLiteral
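        # e.g. in "x = {a: 1}" the ':' right after the first key means the
        # braces open an object literal, not a code block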
if self.token_text == ':' and self.flags.mode == MODE.BlockStatement and \
self.last_last_text == '{' and \
(self.last_type == 'TK_WORD' or self.last_type == 'TK_RESERVED'):
self.flags.mode = MODE.ObjectLiteral
if self.start_of_statement():
# The conditional starts the statement if appropriate.
pass
space_before = True
space_after = True
if self.last_type == 'TK_RESERVED' and self.is_special_word(self.flags.last_text):
# return had a special handling in TK_WORD
self.output_space_before_token = True
self.append_token(token_text)
return
# hack for actionscript's import .*;
if token_text == '*' and self.last_type == 'TK_DOT' and not self.last_last_text.isdigit():
self.append_token(token_text)
return
if token_text == ':' and self.flags.in_case:
self.flags.case_body = True
self.indent()
self.append_token(token_text)
self.append_newline()
self.flags.in_case = False
return
if token_text == '::':
# no spaces around the exotic namespacing syntax operator
self.append_token(token_text)
return
# http://www.ecma-international.org/ecma-262/5.1/#sec-7.9.1
# if there is a newline between -- or ++ and anything else we should preserve it.
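        # e.g. "i\n++\nj" parses as "i; ++j" under automatic semicolon
        # insertion, so the line break before the "++" must survive formatting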
if self.input_wanted_newline and (token_text == '--' or token_text == '++'):
self.append_newline()
# Allow line wrapping between operators in an expression
if self.last_type == 'TK_OPERATOR':
self.allow_wrap_or_preserved_newline(token_text)
if token_text in ['--', '++', '!', '~'] \
or (token_text in ['+', '-'] \
and (self.last_type in ['TK_START_BLOCK', 'TK_START_EXPR', 'TK_EQUALS', 'TK_OPERATOR'] \
or self.flags.last_text in self.line_starters or self.flags.last_text == ',')):
space_before = False
space_after = False
if self.flags.last_text == ';' and self.is_expression(self.flags.mode):
# for (;; ++i)
# ^^
space_before = True
if self.last_type == 'TK_RESERVED':
space_before = True
if self.flags.mode == MODE.BlockStatement and self.flags.last_text in ['{', ';']:
# { foo: --i }
# foo(): --bar
self.append_newline()
elif token_text == ':':
if self.flags.ternary_depth == 0:
if self.flags.mode == MODE.BlockStatement:
self.flags.mode = MODE.ObjectLiteral
space_before = False
else:
self.flags.ternary_depth -= 1
elif token_text == '?':
self.flags.ternary_depth += 1
elif self.token_text == '*' and self.last_type == 'TK_RESERVED' and self.flags.last_text == 'function':
space_before = False
space_after = False
if space_before:
self.output_space_before_token = True
self.append_token(token_text)
if space_after:
self.output_space_before_token = True
def handle_block_comment(self, token_text):
lines = token_text.replace('\x0d', '').split('\x0a')
javadoc = False
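        # a block comment counts as javadoc-style when every continuation line
        # is non-empty and starts with '*', e.g. "/**\n * ...\n */"; such lines
        # are re-indented below, anything else is passed through raw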
# block comment starts with a new line
self.append_newline(preserve_statement_flags = True)
if len(lines) > 1:
if not any(l for l in lines[1:] if ( l.strip() == '' or (l.lstrip())[0] != '*')):
javadoc = True
# first line always indented
self.append_token(lines[0])
for line in lines[1:]:
self.append_newline(preserve_statement_flags = True)
if javadoc:
# javadoc: reformat and re-indent
self.append_token(' ' + line.strip())
else:
# normal comments output raw
self.output_lines[-1].text.append(line)
self.append_newline(preserve_statement_flags = True)
def handle_inline_comment(self, token_text):
self.output_space_before_token = True
self.append_token(token_text)
self.output_space_before_token = True
def handle_comment(self, token_text):
if self.input_wanted_newline:
self.append_newline(preserve_statement_flags = True)
if not self.input_wanted_newline:
self.trim_output(True)
self.output_space_before_token = True
self.append_token(token_text)
self.append_newline(preserve_statement_flags = True)
def handle_dot(self, token_text):
if self.start_of_statement():
# The conditional starts the statement if appropriate.
pass
if self.last_type == 'TK_RESERVED' and self.is_special_word(self.flags.last_text):
self.output_space_before_token = True
else:
# allow preserved newlines before dots in general
# force newlines on dots after close paren when break_chained - for bar().baz()
self.allow_wrap_or_preserved_newline(token_text,
self.flags.last_text == ')' and self.opts.break_chained_methods)
self.append_token(token_text)
def handle_unknown(self, token_text):
self.append_token(token_text)
if token_text[len(token_text) - 1] == '\n':
self.append_newline()
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
def main():
argv = sys.argv[1:]
try:
        opts, args = getopt.getopt(argv, "s:c:o:dEPjb:kil:xhtfvXw:",
            ['indent-size=','indent-char=','outfile=', 'disable-preserve-newlines',
            'space-in-paren', 'space-in-empty-paren', 'jslint-happy', 'brace-style=',
            'keep-array-indentation', 'indent-level=', 'unescape-strings', 'help', 'usage',
            'stdin', 'eval-code', 'indent-with-tabs', 'keep-function-indentation', 'version',
            'e4x', 'wrap-line-length='])
except getopt.GetoptError as ex:
print(ex, file=sys.stderr)
return usage(sys.stderr)
js_options = default_options()
file = None
outfile = 'stdout'
if len(args) == 1:
file = args[0]
for opt, arg in opts:
if opt in ('--keep-array-indentation', '-k'):
js_options.keep_array_indentation = True
        elif opt in ('--keep-function-indentation','-f'):
js_options.keep_function_indentation = True
elif opt in ('--outfile', '-o'):
outfile = arg
elif opt in ('--indent-size', '-s'):
js_options.indent_size = int(arg)
elif opt in ('--indent-char', '-c'):
js_options.indent_char = arg
elif opt in ('--indent-with-tabs', '-t'):
js_options.indent_with_tabs = True
elif opt in ('--disable-preserve-newlines', '-d'):
js_options.preserve_newlines = False
elif opt in ('--space-in-paren', '-P'):
js_options.space_in_paren = True
elif opt in ('--space-in-empty-paren', '-E'):
js_options.space_in_empty_paren = True
elif opt in ('--jslint-happy', '-j'):
js_options.jslint_happy = True
        elif opt in ('--eval-code',):
js_options.eval_code = True
elif opt in ('--brace-style', '-b'):
js_options.brace_style = arg
elif opt in ('--unescape-strings', '-x'):
js_options.unescape_strings = True
elif opt in ('--e4x', '-X'):
js_options.e4x = True
        elif opt in ('--wrap-line-length', '-w'):
js_options.wrap_line_length = int(arg)
elif opt in ('--stdin', '-i'):
file = '-'
elif opt in ('--version', '-v'):
return print(__version__)
elif opt in ('--help', '--usage', '-h'):
return usage()
if not file:
print("Must define at least one file.", file=sys.stderr)
return usage(sys.stderr)
else:
try:
if outfile == 'stdout':
print(beautify_file(file, js_options))
else:
mkdir_p(os.path.dirname(outfile))
with open(outfile, 'w') as f:
f.write(beautify_file(file, js_options) + '\n')
except Exception as ex:
print(ex, file=sys.stderr)
return 1
# Success
return 0
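# --- Not part of the original file: a conventional entry-point guard, added on
# the assumption that the module may be executed directly; historically main()
# was invoked from a separate wrapper script.
if __name__ == '__main__':
    sys.exit(main())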
| 44.059501
| 4,249
| 0.583228
|
from __future__ import print_function
import sys
import os
import getopt
import re
import string
import errno
import six
from jsbeautifier.__version__ import __version__
class BeautifierOptions:
def __init__(self):
self.indent_size = 4
self.indent_char = ' '
self.indent_with_tabs = False
self.preserve_newlines = True
self.max_preserve_newlines = 10
self.space_in_paren = False
self.space_in_empty_paren = False
self.e4x = False
self.jslint_happy = False
self.brace_style = 'collapse'
self.keep_array_indentation = False
self.keep_function_indentation = False
self.eval_code = False
self.unescape_strings = False
self.wrap_line_length = 0
self.break_chained_methods = False
def __repr__(self):
return \
"""indent_size = %d
indent_char = [%s]
preserve_newlines = %s
max_preserve_newlines = %d
space_in_paren = %s
jslint_happy = %s
indent_with_tabs = %s
brace_style = %s
keep_array_indentation = %s
eval_code = %s
wrap_line_length = %s
unescape_strings = %s
""" % ( self.indent_size,
self.indent_char,
self.preserve_newlines,
self.max_preserve_newlines,
self.space_in_paren,
self.jslint_happy,
self.indent_with_tabs,
self.brace_style,
self.keep_array_indentation,
self.eval_code,
self.wrap_line_length,
self.unescape_strings,
)
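# Illustrative sketch (not part of the original source): configuring a
# BeautifierOptions instance. Note that when indent_with_tabs is set,
# Beautifier.blank_state() later overrides indent_char/indent_size with a tab.
def _example_options():
    opts = BeautifierOptions()
    opts.indent_with_tabs = True
    opts.max_preserve_newlines = 2
    return repr(opts)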
class BeautifierFlags:
def __init__(self, mode):
self.mode = mode
self.parent = None
self.last_text = ''
self.last_word = ''
self.declaration_statement = False
self.declaration_assignment = False
self.in_html_comment = False
self.multiline_frame = False
self.if_block = False
self.else_block = False
self.do_block = False
self.do_while = False
self.in_case = False
self.in_case_statement = False
self.case_body = False
self.indentation_level = 0
self.line_indent_level = 0
self.start_line_index = 0
self.ternary_depth = 0
self.had_comment = False
def apply_base(self, flags_base, added_newline):
next_indent_level = flags_base.indentation_level;
if not added_newline and \
flags_base.line_indent_level > next_indent_level:
next_indent_level = flags_base.line_indent_level;
self.parent = flags_base;
self.last_text = flags_base.last_text
self.last_word = flags_base.last_word
self.indentation_level = next_indent_level
class OutputLine:
def __init__(self):
self.text = []
class Acorn:
def __init__(self):
self.nonASCIIwhitespace = re.compile(six.u("[\u1680\u180e\u2000-\u200a\u202f\u205f\u3000\ufeff]"))
        self.nonASCIIidentifierStartChars = six.u("\xaa\xb5\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0370-\u0374\u0376\u0377\u037a-\u037d\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u048a-\u0527\u0531-\u0556\u0559\u0561-\u0587\u05d0-\u05ea\u05f0-\u05f2\u0620-\u064a\u066e\u066f\u0671-\u06d3\u06d5\u06e5\u06e6\u06ee\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u07f4\u07f5\u07fa\u0800-\u0815\u081a\u0824\u0828\u0840-\u0858\u08a0\u08a2-\u08ac\u0904-\u0939\u093d\u0950\u0958-\u0961\u0971-\u0977\u0979-\u097f\u0985-\u098c\u098f\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc\u09dd\u09df-\u09e1\u09f0\u09f1\u0a05-\u0a0a\u0a0f\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32\u0a33\u0a35\u0a36\u0a38\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0\u0ae1\u0b05-\u0b0c\u0b0f\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32\u0b33\u0b35-\u0b39\u0b3d\u0b5c\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99\u0b9a\u0b9c\u0b9e\u0b9f\u0ba3\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c33\u0c35-\u0c39\u0c3d\u0c58\u0c59\u0c60\u0c61\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0\u0ce1\u0cf1\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d60\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32\u0e33\u0e40-\u0e46\u0e81\u0e82\u0e84\u0e87\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa\u0eab\u0ead-\u0eb0\u0eb2\u0eb3\u0ebd\u0ec0-\u0ec4\u0ec6\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065\u1066\u106e-\u1070\u1075-\u1081\u108e\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f4\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f0\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17d7\u17dc\u1820-\u1877\u1880-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191c\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19c1-\u19c7\u1a00-\u1a16\u1a20-\u1a54\u1aa7\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c7d\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5\u1cf6\u1d00-\u1dbf\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u2071\u207f\u2090-\u209c\u2102\u2107\u210a-\u2113\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u212f-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cee\u2cf2\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2e2f\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303c\u3041-\u3096\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312d\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fcc\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua61f\ua62a\ua62b\ua640-\ua66e\ua67f-\ua697\ua6a0-\ua6ef\ua717-\ua71f\ua722-\ua788\ua78b-\ua78e\ua790-\ua793\ua7a0-\ua7aa\ua7f8-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9cf\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa76\uaa7a\uaa80-\uaaaf\uaab1\uaab5\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadd\uaae0-\uaaea\uaaf2-\uaaf4\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40\ufb41\ufb43\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe70-\ufe74\ufe76-\ufefc\uff21-\uff3a\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc")
self.nonASCIIidentifierChars = six.u("\u0300-\u036f\u0483-\u0487\u0591-\u05bd\u05bf\u05c1\u05c2\u05c4\u05c5\u05c7\u0610-\u061a\u0620-\u0649\u0672-\u06d3\u06e7-\u06e8\u06fb-\u06fc\u0730-\u074a\u0800-\u0814\u081b-\u0823\u0825-\u0827\u0829-\u082d\u0840-\u0857\u08e4-\u08fe\u0900-\u0903\u093a-\u093c\u093e-\u094f\u0951-\u0957\u0962-\u0963\u0966-\u096f\u0981-\u0983\u09bc\u09be-\u09c4\u09c7\u09c8\u09d7\u09df-\u09e0\u0a01-\u0a03\u0a3c\u0a3e-\u0a42\u0a47\u0a48\u0a4b-\u0a4d\u0a51\u0a66-\u0a71\u0a75\u0a81-\u0a83\u0abc\u0abe-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ae2-\u0ae3\u0ae6-\u0aef\u0b01-\u0b03\u0b3c\u0b3e-\u0b44\u0b47\u0b48\u0b4b-\u0b4d\u0b56\u0b57\u0b5f-\u0b60\u0b66-\u0b6f\u0b82\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd7\u0be6-\u0bef\u0c01-\u0c03\u0c46-\u0c48\u0c4a-\u0c4d\u0c55\u0c56\u0c62-\u0c63\u0c66-\u0c6f\u0c82\u0c83\u0cbc\u0cbe-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5\u0cd6\u0ce2-\u0ce3\u0ce6-\u0cef\u0d02\u0d03\u0d46-\u0d48\u0d57\u0d62-\u0d63\u0d66-\u0d6f\u0d82\u0d83\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0df2\u0df3\u0e34-\u0e3a\u0e40-\u0e45\u0e50-\u0e59\u0eb4-\u0eb9\u0ec8-\u0ecd\u0ed0-\u0ed9\u0f18\u0f19\u0f20-\u0f29\u0f35\u0f37\u0f39\u0f41-\u0f47\u0f71-\u0f84\u0f86-\u0f87\u0f8d-\u0f97\u0f99-\u0fbc\u0fc6\u1000-\u1029\u1040-\u1049\u1067-\u106d\u1071-\u1074\u1082-\u108d\u108f-\u109d\u135d-\u135f\u170e-\u1710\u1720-\u1730\u1740-\u1750\u1772\u1773\u1780-\u17b2\u17dd\u17e0-\u17e9\u180b-\u180d\u1810-\u1819\u1920-\u192b\u1930-\u193b\u1951-\u196d\u19b0-\u19c0\u19c8-\u19c9\u19d0-\u19d9\u1a00-\u1a15\u1a20-\u1a53\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1b46-\u1b4b\u1b50-\u1b59\u1b6b-\u1b73\u1bb0-\u1bb9\u1be6-\u1bf3\u1c00-\u1c22\u1c40-\u1c49\u1c5b-\u1c7d\u1cd0-\u1cd2\u1d00-\u1dbe\u1e01-\u1f15\u200c\u200d\u203f\u2040\u2054\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2d81-\u2d96\u2de0-\u2dff\u3021-\u3028\u3099\u309a\ua640-\ua66d\ua674-\ua67d\ua69f\ua6f0-\ua6f1\ua7f8-\ua800\ua806\ua80b\ua823-\ua827\ua880-\ua881\ua8b4-\ua8c4\ua8d0-\ua8d9\ua8f3-\ua8f7\ua900-\ua909\ua926-\ua92d\ua930-\ua945\ua980-\ua983\ua9b3-\ua9c0\uaa00-\uaa27\uaa40-\uaa41\uaa4c-\uaa4d\uaa50-\uaa59\uaa7b\uaae0-\uaae9\uaaf2-\uaaf3\uabc0-\uabe1\uabec\uabed\uabf0-\uabf9\ufb20-\ufb28\ufe00-\ufe0f\ufe20-\ufe26\ufe33\ufe34\ufe4d-\ufe4f\uff10-\uff19\uff3f")
self.nonASCIIidentifierStart = re.compile("[" + self.nonASCIIidentifierStartChars + "]")
self.nonASCIIidentifier = re.compile("[" + self.nonASCIIidentifierStartChars + self.nonASCIIidentifierChars + "]")
self.newline = re.compile(six.u("[\n\r\u2028\u2029]"))
self.lineBreak = re.compile(six.u("\r\n|[\n\r\u2028\u2029]"))
def isIdentifierStart(self, code):
if code < 65:
return code == 36
if code < 91:
return True
if code < 97:
return code == 95
if code < 123:
return True;
return code >= 0xaa and self.nonASCIIidentifierStart.match(six.unichr(code)) != None;
def isIdentifierChar(self, code):
if code < 48:
return code == 36;
if code < 58:
return True;
if code < 65:
return False;
if code < 91:
return True;
if code < 97:
return code == 95;
if code < 123:
return True;
return code >= 0xaa and self.nonASCIIidentifier.match(six.unichr(code)) != None;
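# Illustrative sketch (not part of the original source): the predicates above
# follow JavaScript identifier rules -- '$' (36) and '_' (95) may start an
# identifier, while digits are only valid as continuation characters.
def _example_identifier_chars():
    acorn = Acorn()
    assert acorn.isIdentifierStart(ord('$'))
    assert acorn.isIdentifierStart(ord('_'))
    assert not acorn.isIdentifierStart(ord('1'))
    assert acorn.isIdentifierChar(ord('1'))
    return True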
def default_options():
return BeautifierOptions()
def beautify(string, opts = default_options() ):
b = Beautifier()
return b.beautify(string, opts)
def beautify_file(file_name, opts = default_options() ):
if file_name == '-':
stream = sys.stdin
else:
stream = open(file_name)
return beautify(''.join(stream.readlines()), opts);
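# Illustrative sketch (not part of the original source): beautify() with a
# wrap_line_length budget wraps long expressions at operator boundaries. The
# sample input is hypothetical.
def _example_wrap():
    opts = default_options()
    opts.wrap_line_length = 40
    return beautify("var total = aaa + bbb + ccc + ddd + eee + fff;", opts)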
def usage(stream=sys.stdout):
print("jsbeautifier.py@" + __version__ + """
Javascript beautifier (http://jsbeautifier.org/)
Usage: jsbeautifier.py [options] <infile>
<infile> can be "-", which means stdin.
<outfile> defaults to stdout
Input options:
-i, --stdin read input from stdin
Output options:
-s, --indent-size=NUMBER indentation size. (default 4).
-c, --indent-char=CHAR character to indent with. (default space).
-t, --indent-with-tabs Indent with tabs, overrides -s and -c
-d, --disable-preserve-newlines do not preserve existing line breaks.
-P, --space-in-paren add padding spaces within paren, i.e. f( a, b )
-E, --space-in-empty-paren Add a single space inside empty paren, i.e. f( )
-j, --jslint-happy more jslint-compatible output
-b, --brace-style=collapse brace style (collapse, expand, end-expand)
-k, --keep-array-indentation keep array indentation.
-o, --outfile=FILE specify a file to output to (default stdout)
-f, --keep-function-indentation Do not re-indent function bodies defined in var lines.
-x, --unescape-strings Decode printable chars encoded in \\xNN notation.
-X, --e4x Pass E4X xml literals through untouched
-w, --wrap-line-length Attempt to wrap line when it exceeds this length.
NOTE: Line continues until next wrap point is found.
Rarely needed options:
--eval-code evaluate code if a JS interpreter is
installed. May be useful with some obfuscated
script but poses a potential security issue.
-l, --indent-level=NUMBER initial indentation level. (default 0).
-h, --help, --usage prints this help statement.
-v, --version Show the version
""", file=stream)
if stream == sys.stderr:
return 1
else:
return 0
class MODE:
BlockStatement, Statement, ObjectLiteral, ArrayLiteral, \
ForInitializer, Conditional, Expression = range(7)
class Beautifier:
def __init__(self, opts = default_options() ):
self.opts = opts
self.blank_state()
self.acorn = Acorn();
def blank_state(self):
self.flags = None
self.previous_flags = None
self.flag_store = []
self.input_wanted_newline = False
if self.opts.indent_with_tabs:
self.opts.indent_char = "\t"
self.opts.indent_size = 1
self.indent_string = self.opts.indent_char * self.opts.indent_size
self.preindent_string = ''
self.last_type = 'TK_START_BLOCK'
self.last_last_text = ''
self.input = None
self.output_lines = [ OutputLine() ]
self.output_space_before_token = False
self.whitespace_before_token = []
self.whitespace = ["\n", "\r", "\t", " "]
self.wordchar = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_$'
self.digits = '0123456789'
self.punct = '+ - * / % & ++ -- = += -= *= /= %= == === != !== > < >= <= >> << >>> >>>= >>= <<= && &= | || ! ~ , : ? ^ ^= |= :: =>'
self.punct += ' <?= <? ?> <%= <% %>'
self.punct = self.punct.split(' ')
self.line_starters = 'continue,try,throw,return,var,let,const,if,switch,case,default,for,while,break,function,yield'.split(',')
self.reserved_words = self.line_starters + ['do', 'in', 'else', 'get', 'set', 'new', 'catch', 'finally', 'typeof'];
self.set_mode(MODE.BlockStatement)
self.parser_pos = 0
def beautify(self, s, opts = None ):
if opts != None:
self.opts = opts
if self.opts.brace_style not in ['expand', 'collapse', 'end-expand']:
raise(Exception('opts.brace_style must be "expand", "collapse" or "end-expand".'))
self.blank_state()
while s and s[0] in [' ', '\t']:
self.preindent_string += s[0]
s = s[1:]
self.input = self.unpack(s, self.opts.eval_code)
self.parser_pos = 0
handlers = {
'TK_START_EXPR': self.handle_start_expr,
'TK_END_EXPR': self.handle_end_expr,
'TK_START_BLOCK': self.handle_start_block,
'TK_END_BLOCK': self.handle_end_block,
'TK_WORD': self.handle_word,
'TK_RESERVED': self.handle_word,
'TK_SEMICOLON': self.handle_semicolon,
'TK_STRING': self.handle_string,
'TK_EQUALS': self.handle_equals,
'TK_OPERATOR': self.handle_operator,
'TK_COMMA': self.handle_comma,
'TK_BLOCK_COMMENT': self.handle_block_comment,
'TK_INLINE_COMMENT': self.handle_inline_comment,
'TK_COMMENT': self.handle_comment,
'TK_DOT': self.handle_dot,
'TK_UNKNOWN': self.handle_unknown,
}
while True:
self.token_text, self.token_type = self.get_next_token()
if self.token_type == 'TK_EOF':
while self.flags.mode == MODE.Statement:
self.restore_mode();
break
keep_whitespace = self.opts.keep_array_indentation and self.is_array(self.flags.mode)
self.input_wanted_newline = self.n_newlines > 0
if keep_whitespace:
for i in range(self.n_newlines):
self.append_newline(i > 0)
else:
if self.opts.max_preserve_newlines != 0 and self.n_newlines > self.opts.max_preserve_newlines:
self.n_newlines = self.opts.max_preserve_newlines
if self.opts.preserve_newlines and self.n_newlines > 1:
for i in range(self.n_newlines):
self.append_newline(i != 0)
handlers[self.token_type](self.token_text)
# Just continue formatting and the behavior should be logical.
if self.token_type != 'TK_INLINE_COMMENT' and self.token_type != 'TK_COMMENT' and self.token_type != 'TK_BLOCK_COMMENT' and self.token_type != 'TK_UNKNOWN':
self.last_last_text = self.flags.last_text
self.last_type = self.token_type
self.flags.last_text = self.token_text
self.flags.had_comment = self.token_type in ['TK_COMMENT', 'TK_INLINE_COMMENT', 'TK_BLOCK_COMMENT']
sweet_code = ''.join(self.output_lines[0].text)
if len(self.output_lines) > 1:
for line_index in range(1, len(self.output_lines)):
sweet_code += '\n' + ''.join(self.output_lines[line_index].text);
sweet_code = re.sub('[\n ]+$', '', sweet_code)
return sweet_code
def unpack(self, source, evalcode=False):
import jsbeautifier.unpackers as unpackers
try:
return unpackers.run(source, evalcode)
except unpackers.UnpackingError as error:
print('error:', error)
return ''
def trim_output(self, eat_newlines = False):
self.trim_output_line(self.output_lines[-1])
while eat_newlines and len(self.output_lines) > 1 and \
len(self.output_lines[-1].text) == 0:
self.output_lines.pop()
self.trim_output_line(self.output_lines[-1])
def trim_output_line(self, line):
while len(line.text) \
and (
line.text[-1] == ' '\
or line.text[-1] == self.indent_string \
or line.text[-1] == self.preindent_string):
line.text.pop()
def is_special_word(self, s):
return s in ['case', 'return', 'do', 'if', 'throw', 'else']
def is_array(self, mode):
return mode == MODE.ArrayLiteral
def is_expression(self, mode):
return mode in [MODE.Expression, MODE.ForInitializer, MODE.Conditional]
def just_added_newline(self):
line = self.output_lines[-1]
return len(line.text) == 0
def just_added_blankline(self):
if self.just_added_newline():
if len(self.output_lines) == 1:
return True
line = self.output_lines[-2]
return len(line.text) == 0
return False
def allow_wrap_or_preserved_newline(self, token_text, force_linewrap = False):
if self.opts.wrap_line_length > 0 and not force_linewrap:
line = self.output_lines[-1]
# never wrap the first token of a line.
if len(line.text) > 0:
proposed_line_length = len(''.join(line.text)) + len(token_text)
if self.output_space_before_token:
proposed_line_length += 1
if proposed_line_length >= self.opts.wrap_line_length:
force_linewrap = True
if ((self.opts.preserve_newlines and self.input_wanted_newline) or force_linewrap) and not self.just_added_newline():
self.append_newline(preserve_statement_flags = True)
def append_newline(self, force_newline = False, preserve_statement_flags = False):
self.output_space_before_token = False
if not preserve_statement_flags:
if self.flags.last_text != ';' and self.flags.last_text != ',' and self.flags.last_text != '=' and self.last_type != 'TK_OPERATOR':
while self.flags.mode == MODE.Statement and not self.flags.if_block and not self.flags.do_block:
self.restore_mode();
if len(self.output_lines) == 1 and self.just_added_newline():
# no newline on start of file
return
if force_newline or not self.just_added_newline():
self.flags.multiline_frame = True
self.output_lines.append(OutputLine())
def append_token_line_indentation(self):
if self.just_added_newline():
line = self.output_lines[-1]
if self.opts.keep_array_indentation and self.is_array(self.flags.mode) and self.input_wanted_newline:
# prevent removing of this whitespace as redundant
line.text.append('');
for item in self.whitespace_before_token:
line.text.append(item)
else:
if self.preindent_string != '':
line.text.append(self.preindent_string)
level = self.flags.indentation_level;
self.append_indent_string(level)
def append_indent_string(self, level):
# Never indent your first output indent at the start of the file
if len(self.output_lines) > 1:
line = self.output_lines[-1]
self.flags.line_indent_level = level
for i in range(level):
line.text.append(self.indent_string)
def append_token_space_before(self):
# make sure only single space gets drawn
line = self.output_lines[-1]
if self.output_space_before_token and len(line.text) and line.text[-1] not in [' ', self.indent_string]:
line.text.append(' ')
def append_token(self, s):
self.append_token_line_indentation()
self.append_token_space_before()
self.output_space_before_token = False
self.output_lines[-1].text.append(s)
def indent(self):
self.flags.indentation_level += 1
def deindent(self):
allow_deindent = self.flags.indentation_level > 0 and ((self.flags.parent == None) or self.flags.indentation_level > self.flags.parent.indentation_level)
if allow_deindent:
self.flags.indentation_level -= 1
def remove_redundant_indentation(self, frame):
# This implementation is effective but has some issues:
# - less than great performance due to array splicing
# - can cause line wrap to happen too soon due to indent removal
# after wrap points are calculated
# These issues are minor compared to ugly indentation.
if frame.multiline_frame:
return
# remove one indent from each line inside this section
index = frame.start_line_index
splice_index = 0
while index < len(self.output_lines):
line = self.output_lines[index]
index += 1
# skip empty lines
if len(line.text) == 0:
continue
# skip the preindent string if present
if self.preindent_string != '' and \
line.text[0] == self.preindent_string:
splice_index = 1
else:
splice_index = 0
# remove one indent, if present
if line.text[splice_index] == self.indent_string:
del line.text[splice_index]
def set_mode(self, mode):
if self.flags:
self.flag_store.append(self.flags)
self.previous_flags = self.flags
else:
self.previous_flags = BeautifierFlags(mode)
self.flags = BeautifierFlags(mode)
self.flags.apply_base(self.previous_flags, self.just_added_newline());
self.flags.start_line_index = len(self.output_lines)
def restore_mode(self):
if len(self.flag_store) > 0:
self.previous_flags = self.flags
self.flags = self.flag_store.pop()
if self.previous_flags.mode == MODE.Statement:
self.remove_redundant_indentation(self.previous_flags)
def start_of_object_property(self):
return self.flags.parent.mode == MODE.ObjectLiteral and self.flags.mode == MODE.Statement and self.flags.last_text == ':' and \
self.flags.ternary_depth == 0
def start_of_statement(self):
if (
(self.last_type == 'TK_RESERVED' and self.flags.last_text in ['var', 'let', 'const'] and self.token_type == 'TK_WORD') \
or (self.last_type == 'TK_RESERVED' and self.flags.last_text== 'do') \
or (self.last_type == 'TK_RESERVED' and self.flags.last_text== 'return' and not self.input_wanted_newline) \
or (self.last_type == 'TK_RESERVED' and self.flags.last_text == 'else' and not (self.token_type == 'TK_RESERVED' and self.token_text == 'if' )) \
or (self.last_type == 'TK_END_EXPR' and (self.previous_flags.mode == MODE.ForInitializer or self.previous_flags.mode == MODE.Conditional)) \
or (self.last_type == 'TK_WORD' and self.flags.mode == MODE.BlockStatement \
and not self.flags.in_case
and not (self.token_text == '--' or self.token_text == '++')
and self.token_type != 'TK_WORD' and self.token_type != 'TK_RESERVED') \
or (self.flags.mode == MODE.ObjectLiteral and self.flags.last_text == ':' and self.flags.ternary_depth == 0) \
):
self.set_mode(MODE.Statement);
self.indent();
if self.last_type == 'TK_RESERVED' and self.flags.last_text in ['var', 'let', 'const'] and self.token_type == 'TK_WORD':
self.flags.declaration_statement = True
# Issue #276:
# If starting a new statement with [if, for, while, do], push to a new line.
# if (a) if (b) if(c) d(); else e(); else f();
if not self.start_of_object_property():
self.allow_wrap_or_preserved_newline(self.token_text, self.token_type == 'TK_RESERVED' and self.token_text in ['do', 'for', 'if', 'while']);
return True
else:
return False
def is_next(self, find):
local_pos = self.parser_pos
if local_pos >= len(self.input):
return False
c = self.input[local_pos]
while (c in self.whitespace) and c != find:
local_pos+= 1
if local_pos >= len(self.input):
return False
c = self.input[local_pos]
return c == find
def get_next_token(self):
self.n_newlines = 0
if self.parser_pos >= len(self.input):
return '', 'TK_EOF'
self.input_wanted_newline = False
self.whitespace_before_token = []
c = self.input[self.parser_pos]
self.parser_pos += 1
while c in self.whitespace:
if c == '\n':
self.n_newlines += 1
self.whitespace_before_token = []
elif c == self.indent_string:
self.whitespace_before_token.append(self.indent_string)
elif c != '\r':
self.whitespace_before_token.append(' ')
if self.parser_pos >= len(self.input):
return '', 'TK_EOF'
c = self.input[self.parser_pos]
self.parser_pos += 1
# NOTE: because beautifier doesn't fully parse, it doesn't use acorn.isIdentifierStart.
# It just treats all identifiers and numbers and such the same.
if self.acorn.isIdentifierChar(ord(self.input[self.parser_pos-1])):
if self.parser_pos < len(self.input):
while self.acorn.isIdentifierChar(ord(self.input[self.parser_pos])):
c = c + self.input[self.parser_pos]
self.parser_pos += 1
if self.parser_pos == len(self.input):
break
# small and surprisingly unugly hack for IE-10 representation
if self.parser_pos != len(self.input) and self.input[self.parser_pos] in '+-' \
and re.match('^[0-9]+[Ee]$', c):
sign = self.input[self.parser_pos]
self.parser_pos += 1
t = self.get_next_token()
c += sign + t[0]
return c, 'TK_WORD'
if not (self.last_type == 'TK_DOT' \
or (self.last_type == 'TK_RESERVED' and self.flags.last_text in ['set', 'get'])) \
and c in self.reserved_words:
if c == 'in': # in is an operator, need to hack
return c, 'TK_OPERATOR'
return c, 'TK_RESERVED'
return c, 'TK_WORD'
if c in '([':
return c, 'TK_START_EXPR'
if c in ')]':
return c, 'TK_END_EXPR'
if c == '{':
return c, 'TK_START_BLOCK'
if c == '}':
return c, 'TK_END_BLOCK'
if c == ';':
return c, 'TK_SEMICOLON'
if c == '/':
comment = ''
inline_comment = True
if self.input[self.parser_pos] == '*': # peek /* .. */ comment
self.parser_pos += 1
if self.parser_pos < len(self.input):
while not (self.input[self.parser_pos] == '*' and \
self.parser_pos + 1 < len(self.input) and \
self.input[self.parser_pos + 1] == '/')\
and self.parser_pos < len(self.input):
c = self.input[self.parser_pos]
comment += c
if c in '\r\n':
inline_comment = False
self.parser_pos += 1
if self.parser_pos >= len(self.input):
break
self.parser_pos += 2
if inline_comment and self.n_newlines == 0:
return '/*' + comment + '*/', 'TK_INLINE_COMMENT'
else:
return '/*' + comment + '*/', 'TK_BLOCK_COMMENT'
if self.input[self.parser_pos] == '/': # peek // comment
comment = c
while self.input[self.parser_pos] not in '\r\n':
comment += self.input[self.parser_pos]
self.parser_pos += 1
if self.parser_pos >= len(self.input):
break
return comment, 'TK_COMMENT'
if c == '`' or c == "'" or c == '"' or \
( \
(c == '/') or \
(self.opts.e4x and c == "<" and re.match('^<(!\[CDATA\[[\s\S]*?\]\]|[-a-zA-Z:0-9_.]+|\{[^{}]*\})\s*([-a-zA-Z:0-9_.]+=(\{[^{}]*\}|"[^"]*"|\'[^\']*\')\s*)*\/?\s*>', self.input[self.parser_pos - 1:])) \
) and ( \
(self.last_type == 'TK_RESERVED' and self.is_special_word(self.flags.last_text)) or \
(self.last_type == 'TK_END_EXPR' and self.previous_flags.mode in [MODE.Conditional, MODE.ForInitializer]) or \
(self.last_type in ['TK_COMMENT', 'TK_START_EXPR', 'TK_START_BLOCK', 'TK_END_BLOCK', 'TK_OPERATOR', \
'TK_EQUALS', 'TK_EOF', 'TK_SEMICOLON', 'TK_COMMA'])):
sep = c
esc = False
esc1 = 0
esc2 = 0
resulting_string = c
in_char_class = False
if self.parser_pos < len(self.input):
if sep == '/':
# handle regexp
in_char_class = False
while esc or in_char_class or self.input[self.parser_pos] != sep:
resulting_string += self.input[self.parser_pos]
if not esc:
esc = self.input[self.parser_pos] == '\\'
if self.input[self.parser_pos] == '[':
in_char_class = True
elif self.input[self.parser_pos] == ']':
in_char_class = False
else:
esc = False
self.parser_pos += 1
if self.parser_pos >= len(self.input):
# incomplete regex when end-of-file reached
                            # bail out with what has been received so far
return resulting_string, 'TK_STRING'
elif self.opts.e4x and sep == '<':
# handle e4x xml literals
xmlRegExp = re.compile('<(\/?)(!\[CDATA\[[\s\S]*?\]\]|[-a-zA-Z:0-9_.]+|\{[^{}]*\})\s*([-a-zA-Z:0-9_.]+=(\{[^{}]*\}|"[^"]*"|\'[^\']*\')\s*)*(\/?)\s*>')
xmlStr = self.input[self.parser_pos - 1:]
match = xmlRegExp.match(xmlStr)
if match:
rootTag = match.group(2)
depth = 0
while (match):
isEndTag = match.group(1)
tagName = match.group(2)
isSingletonTag = (match.groups()[-1] != "") or (match.group(2)[0:8] == "![CDATA[")
if tagName == rootTag and not isSingletonTag:
if isEndTag:
depth -= 1
else:
depth += 1
if depth <= 0:
break
match = xmlRegExp.search(xmlStr, match.end())
if match:
xmlLength = match.end() # + len(match.group())
else:
xmlLength = len(xmlStr)
self.parser_pos += xmlLength - 1
return xmlStr[:xmlLength], 'TK_STRING'
else:
                    # handle string. When unescape_strings is enabled, esc2
                    # holds the expected number of hex digits of a \xNN or
                    # \uNNNN escape and esc1 counts progress through it.
while esc or self.input[self.parser_pos] != sep:
resulting_string += self.input[self.parser_pos]
if esc1 and esc1 >= esc2:
try:
esc1 = int(resulting_string[-esc2:], 16)
except Exception:
esc1 = False
if esc1 and esc1 >= 0x20 and esc1 <= 0x7e:
esc1 = chr(esc1)
resulting_string = resulting_string[:-2 - esc2]
if esc1 == sep or esc1 == '\\':
resulting_string += '\\'
resulting_string += esc1
esc1 = 0
if esc1:
esc1 += 1
elif not esc:
esc = self.input[self.parser_pos] == '\\'
else:
esc = False
if self.opts.unescape_strings:
if self.input[self.parser_pos] == 'x':
esc1 += 1
esc2 = 2
elif self.input[self.parser_pos] == 'u':
esc1 += 1
esc2 = 4
self.parser_pos += 1
if self.parser_pos >= len(self.input):
# incomplete string when end-of-file reached
                            # bail out with what has been received so far
return resulting_string, 'TK_STRING'
self.parser_pos += 1
resulting_string += sep
if sep == '/':
# regexps may have modifiers /regexp/MOD, so fetch those too
while self.parser_pos < len(self.input) and self.input[self.parser_pos] in self.wordchar:
resulting_string += self.input[self.parser_pos]
self.parser_pos += 1
return resulting_string, 'TK_STRING'
if c == '#':
# she-bang
if len(self.output_lines) == 1 and len(self.output_lines[0].text) == 0 and \
len(self.input) > self.parser_pos and self.input[self.parser_pos] == '!':
resulting_string = c
while self.parser_pos < len(self.input) and c != '\n':
c = self.input[self.parser_pos]
resulting_string += c
self.parser_pos += 1
return resulting_string.strip() + '\n', 'TK_UNKNOWN'
# Spidermonkey-specific sharp variables for circular references
# https://developer.mozilla.org/En/Sharp_variables_in_JavaScript
# http://mxr.mozilla.org/mozilla-central/source/js/src/jsscan.cpp around line 1935
sharp = '#'
if self.parser_pos < len(self.input) and self.input[self.parser_pos] in self.digits:
while True:
c = self.input[self.parser_pos]
sharp += c
self.parser_pos += 1
if self.parser_pos >= len(self.input) or c == '#' or c == '=':
break
if c == '#' or self.parser_pos >= len(self.input):
pass
elif self.input[self.parser_pos] == '[' and self.input[self.parser_pos + 1] == ']':
sharp += '[]'
self.parser_pos += 2
elif self.input[self.parser_pos] == '{' and self.input[self.parser_pos + 1] == '}':
sharp += '{}'
self.parser_pos += 2
return sharp, 'TK_WORD'
if c == '<' and self.input[self.parser_pos - 1 : self.parser_pos + 3] == '<!--':
self.parser_pos += 3
c = '<!--'
while self.parser_pos < len(self.input) and self.input[self.parser_pos] != '\n':
c += self.input[self.parser_pos]
self.parser_pos += 1
self.flags.in_html_comment = True
return c, 'TK_COMMENT'
if c == '-' and self.flags.in_html_comment and self.input[self.parser_pos - 1 : self.parser_pos + 2] == '-->':
self.flags.in_html_comment = False
self.parser_pos += 2
return '-->', 'TK_COMMENT'
if c == '.':
return c, 'TK_DOT'
if c in self.punct:
while self.parser_pos < len(self.input) and c + self.input[self.parser_pos] in self.punct:
c += self.input[self.parser_pos]
self.parser_pos += 1
if self.parser_pos >= len(self.input):
break
if c == ',':
return c, 'TK_COMMA'
if c == '=':
return c, 'TK_EQUALS'
return c, 'TK_OPERATOR'
return c, 'TK_UNKNOWN'
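    # Illustrative sketch (not part of the original source): driving
    # get_next_token() directly on a tiny hypothetical input. For
    # 'var x = 1;' this yields ('var', 'TK_RESERVED'), ('x', 'TK_WORD'),
    # ('=', 'TK_EQUALS'), ('1', 'TK_WORD'), (';', 'TK_SEMICOLON').
    def _example_token_stream(self):
        self.blank_state()
        self.input = 'var x = 1;'
        self.parser_pos = 0
        tokens = []
        while True:
            text, ttype = self.get_next_token()
            if ttype == 'TK_EOF':
                break
            tokens.append((text, ttype))
        return tokens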
def handle_start_expr(self, token_text):
if self.start_of_statement():
# The conditional starts the statement if appropriate.
pass
next_mode = MODE.Expression
if token_text == '[':
if self.last_type == 'TK_WORD' or self.flags.last_text == ')':
if self.last_type == 'TK_RESERVED' and self.flags.last_text in self.line_starters:
self.output_space_before_token = True
self.set_mode(next_mode)
self.append_token(token_text)
self.indent()
if self.opts.space_in_paren:
self.output_space_before_token = True
return
next_mode = MODE.ArrayLiteral
if self.is_array(self.flags.mode):
if self.flags.last_text == '[' or (
self.flags.last_text == ',' and (self.last_last_text == ']' or self.last_last_text == '}')):
# ], [ goes to a new line
# }, [ goes to a new line
if not self.opts.keep_array_indentation:
self.append_newline()
else:
if self.last_type == 'TK_RESERVED' and self.flags.last_text == 'for':
next_mode = MODE.ForInitializer
elif self.last_type == 'TK_RESERVED' and self.flags.last_text in ['if', 'while']:
next_mode = MODE.Conditional
else:
next_mode = MODE.Expression
if self.flags.last_text == ';' or self.last_type == 'TK_START_BLOCK':
self.append_newline()
elif self.last_type in ['TK_END_EXPR', 'TK_START_EXPR', 'TK_END_BLOCK'] or self.flags.last_text == '.':
# do nothing on (( and )( and ][ and ]( and .(
# TODO: Consider whether forcing this is required. Review failing tests when removed.
self.allow_wrap_or_preserved_newline(token_text, self.input_wanted_newline);
elif not (self.last_type == 'TK_RESERVED' and token_text == '(') and self.last_type not in ['TK_WORD', 'TK_OPERATOR']:
self.output_space_before_token = True
elif (self.last_type == 'TK_RESERVED' and (self.flags.last_word == 'function' or self.flags.last_word == 'typeof')) or \
(self.flags.last_text == '*' and self.last_last_text =='function'):
# function() vs function (), typeof() vs typeof ()
if self.opts.jslint_happy:
self.output_space_before_token = True
elif self.last_type == 'TK_RESERVED' and (self.flags.last_text in self.line_starters or self.flags.last_text == 'catch'):
# TODO: option space_before_conditional
self.output_space_before_token = True
# Support of this kind of newline preservation:
# a = (b &&
# (c || d));
if self.last_type in ['TK_EQUALS', 'TK_OPERATOR']:
if not self.start_of_object_property():
self.allow_wrap_or_preserved_newline(token_text)
self.set_mode(next_mode)
self.append_token(token_text)
if self.opts.space_in_paren:
self.output_space_before_token = True
# In all cases, if we newline while inside an expression it should be indented.
self.indent()
def handle_end_expr(self, token_text):
# statements inside expressions are not valid syntax, but...
# statements must all be closed when their container closes
while self.flags.mode == MODE.Statement:
self.restore_mode()
if self.flags.multiline_frame:
self.allow_wrap_or_preserved_newline(self.token_text, self.token_text == ']' and self.is_array(self.flags.mode) and not self.opts.keep_array_indentation)
if self.opts.space_in_paren:
if self.last_type == 'TK_START_EXPR' and not self.opts.space_in_empty_paren:
# empty parens are always "()" and "[]", not "( )" or "[ ]"
self.output_space_before_token = False
self.trim_output()
else:
self.output_space_before_token = True
if self.token_text == ']' and self.opts.keep_array_indentation:
self.append_token(token_text)
self.restore_mode()
else:
self.restore_mode()
self.append_token(token_text)
self.remove_redundant_indentation(self.previous_flags);
# do {} while () // no statement required after
if self.flags.do_while and self.previous_flags.mode == MODE.Conditional:
self.previous_flags.mode = MODE.Expression
self.flags.do_block = False
self.flags.do_while = False
def handle_start_block(self, token_text):
self.set_mode(MODE.BlockStatement)
empty_braces = self.is_next('}')
empty_anonymous_function = empty_braces and self.flags.last_word == 'function' and \
self.last_type == 'TK_END_EXPR'
if self.opts.brace_style == 'expand':
if self.last_type != 'TK_OPERATOR' and \
(empty_anonymous_function or
self.last_type == 'TK_EQUALS' or
(self.last_type == 'TK_RESERVED' and self.is_special_word(self.flags.last_text) and self.flags.last_text != 'else')):
self.output_space_before_token = True
else:
self.append_newline(preserve_statement_flags = True)
else: # collapse
if self.last_type not in ['TK_OPERATOR', 'TK_START_EXPR']:
if self.last_type == 'TK_START_BLOCK':
self.append_newline()
else:
self.output_space_before_token = True
else:
# if TK_OPERATOR or TK_START_EXPR
if self.is_array(self.previous_flags.mode) and self.flags.last_text == ',':
if self.last_last_text == '}':
self.output_space_before_token = True
else:
self.append_newline()
self.append_token(token_text)
self.indent()
def handle_end_block(self, token_text):
# statements must all be closed when their container closes
while self.flags.mode == MODE.Statement:
self.restore_mode()
empty_braces = self.last_type == 'TK_START_BLOCK';
if self.opts.brace_style == 'expand':
if not empty_braces:
self.append_newline()
else:
# skip {}
if not empty_braces:
if self.is_array(self.flags.mode) and self.opts.keep_array_indentation:
self.opts.keep_array_indentation = False
self.append_newline()
self.opts.keep_array_indentation = True
else:
self.append_newline()
self.restore_mode()
self.append_token(token_text)
def handle_word(self, token_text):
if self.start_of_statement():
# The conditional starts the statement if appropriate.
pass
elif self.input_wanted_newline and \
not self.is_expression(self.flags.mode) and \
(self.last_type != 'TK_OPERATOR' or (self.flags.last_text == '--' or self.flags.last_text == '++')) and \
self.last_type != 'TK_EQUALS' and \
(self.opts.preserve_newlines or not (self.last_type == 'TK_RESERVED' and self.flags.last_text in ['var', 'let', 'const', 'set', 'get'])):
self.append_newline()
if self.flags.do_block and not self.flags.do_while:
if self.token_type == 'TK_RESERVED' and token_text == 'while':
# do {} ## while ()
self.output_space_before_token = True
self.append_token(token_text)
self.output_space_before_token = True
self.flags.do_while = True
return
else:
# do {} should always have while as the next word.
# if we don't see the expected while, recover
self.append_newline()
self.flags.do_block = False
# if may be followed by else, or not
# Bare/inline ifs are tricky
# Need to unwind the modes correctly: if (a) if (b) c(); else d(); else e();
if self.flags.if_block:
if (not self.flags.else_block) and (self.token_type == 'TK_RESERVED' and token_text == 'else'):
self.flags.else_block = True
else:
while self.flags.mode == MODE.Statement:
self.restore_mode()
self.flags.if_block = False;
if self.token_type == 'TK_RESERVED' and (token_text == 'case' or (token_text == 'default' and self.flags.in_case_statement)):
self.append_newline()
if self.flags.case_body or self.opts.jslint_happy:
self.flags.case_body = False
self.deindent()
self.append_token(token_text)
self.flags.in_case = True
self.flags.in_case_statement = True
return
if self.token_type == 'TK_RESERVED' and token_text == 'function':
if self.flags.last_text in ['}', ';'] or (self.just_added_newline() and not self.flags.last_text in ['{', ':', '=', ',']):
# make sure there is a nice clean space of at least one blank line
# before a new function definition, except in arrays
if not self.just_added_blankline() and not self.flags.had_comment:
self.append_newline()
self.append_newline(True)
if self.last_type == 'TK_RESERVED' or self.last_type == 'TK_WORD':
if self.last_type == 'TK_RESERVED' and self.flags.last_text in ['get', 'set', 'new', 'return']:
self.output_space_before_token = True
else:
self.append_newline()
elif self.last_type == 'TK_OPERATOR' or self.flags.last_text == '=':
# foo = function
self.output_space_before_token = True
elif self.is_expression(self.flags.mode):
# (function
pass
else:
self.append_newline()
if self.last_type in ['TK_COMMA', 'TK_START_EXPR', 'TK_EQUALS', 'TK_OPERATOR']:
if not self.start_of_object_property():
self.allow_wrap_or_preserved_newline(token_text)
if self.token_type == 'TK_RESERVED' and token_text == 'function':
self.append_token(token_text)
self.flags.last_word = token_text
return
prefix = 'NONE'
if self.last_type == 'TK_END_BLOCK':
if not (self.token_type == 'TK_RESERVED' and token_text in ['else', 'catch', 'finally']):
prefix = 'NEWLINE'
else:
if self.opts.brace_style in ['expand', 'end-expand']:
prefix = 'NEWLINE'
else:
prefix = 'SPACE'
self.output_space_before_token = True
elif self.last_type == 'TK_SEMICOLON' and self.flags.mode == MODE.BlockStatement:
# TODO: Should this be for STATEMENT as well?
prefix = 'NEWLINE'
elif self.last_type == 'TK_SEMICOLON' and self.is_expression(self.flags.mode):
prefix = 'SPACE'
elif self.last_type == 'TK_STRING':
prefix = 'NEWLINE'
elif self.last_type == 'TK_RESERVED' or self.last_type == 'TK_WORD' or \
(self.flags.last_text == '*' and self.last_last_text == 'function'):
prefix = 'SPACE'
elif self.last_type == 'TK_START_BLOCK':
prefix = 'NEWLINE'
elif self.last_type == 'TK_END_EXPR':
self.output_space_before_token = True
prefix = 'NEWLINE'
if self.token_type == 'TK_RESERVED' and token_text in self.line_starters and self.flags.last_text != ')':
if self.flags.last_text == 'else':
prefix = 'SPACE'
else:
prefix = 'NEWLINE'
if self.token_type == 'TK_RESERVED' and token_text in ['else', 'catch', 'finally']:
if self.last_type != 'TK_END_BLOCK' \
or self.opts.brace_style == 'expand' \
or self.opts.brace_style == 'end-expand':
self.append_newline()
else:
self.trim_output(True)
line = self.output_lines[-1]
# If we trimmed and there's something other than a close block before us
# put a newline back in. Handles '} // comment' scenario.
if line.text[-1] != '}':
self.append_newline()
self.output_space_before_token = True
elif prefix == 'NEWLINE':
if self.last_type == 'TK_RESERVED' and self.is_special_word(self.flags.last_text):
# no newline between return nnn
self.output_space_before_token = True
elif self.last_type != 'TK_END_EXPR':
if (self.last_type != 'TK_START_EXPR' or not (self.token_type == 'TK_RESERVED' and token_text in ['var', 'let', 'const'])) and self.flags.last_text != ':':
# no need to force newline on VAR -
# for (var x = 0...
if self.token_type == 'TK_RESERVED' and token_text == 'if' and self.flags.last_word == 'else' and self.flags.last_text != '{':
self.output_space_before_token = True
else:
self.append_newline()
elif self.token_type == 'TK_RESERVED' and token_text in self.line_starters and self.flags.last_text != ')':
self.append_newline()
elif self.is_array(self.flags.mode) and self.flags.last_text == ',' and self.last_last_text == '}':
self.append_newline() # }, in lists get a newline
elif prefix == 'SPACE':
self.output_space_before_token = True
self.append_token(token_text)
self.flags.last_word = token_text
if self.token_type == 'TK_RESERVED' and token_text == 'do':
self.flags.do_block = True
if self.token_type == 'TK_RESERVED' and token_text == 'if':
self.flags.if_block = True
def handle_semicolon(self, token_text):
if self.start_of_statement():
# The conditional starts the statement if appropriate.
# Semicolon can be the start (and end) of a statement
self.output_space_before_token = False
while self.flags.mode == MODE.Statement and not self.flags.if_block and not self.flags.do_block:
self.restore_mode()
self.append_token(token_text)
if self.flags.mode == MODE.ObjectLiteral:
# OBJECT mode is weird and doesn't get reset too well.
self.flags.mode = MODE.BlockStatement
def handle_string(self, token_text):
if self.start_of_statement():
# The conditional starts the statement if appropriate.
# One difference - strings want at least a space before
self.output_space_before_token = True
elif self.last_type == 'TK_RESERVED' or self.last_type == 'TK_WORD':
self.output_space_before_token = True
elif self.last_type in ['TK_COMMA', 'TK_START_EXPR', 'TK_EQUALS', 'TK_OPERATOR']:
if not self.start_of_object_property():
self.allow_wrap_or_preserved_newline(token_text)
else:
self.append_newline()
self.append_token(token_text)
def handle_equals(self, token_text):
if self.start_of_statement():
# The conditional starts the statement if appropriate.
pass
if self.flags.declaration_statement:
# just got an '=' in a var-line, different line breaking rules will apply
self.flags.declaration_assignment = True
self.output_space_before_token = True
self.append_token(token_text)
self.output_space_before_token = True
def handle_comma(self, token_text):
if self.flags.declaration_statement:
if self.is_expression(self.flags.parent.mode):
# do not break on comma, for ( var a = 1, b = 2
self.flags.declaration_assignment = False
self.append_token(token_text)
if self.flags.declaration_assignment:
self.flags.declaration_assignment = False
self.append_newline(preserve_statement_flags = True)
else:
self.output_space_before_token = True
return
self.append_token(token_text)
if self.flags.mode == MODE.ObjectLiteral \
or (self.flags.mode == MODE.Statement and self.flags.parent.mode == MODE.ObjectLiteral):
if self.flags.mode == MODE.Statement:
self.restore_mode()
self.append_newline()
else:
# EXPR or DO_BLOCK
self.output_space_before_token = True
def handle_operator(self, token_text):
# Check if this is a BlockStatement that should be treated as a ObjectLiteral
if self.token_text == ':' and self.flags.mode == MODE.BlockStatement and \
self.last_last_text == '{' and \
(self.last_type == 'TK_WORD' or self.last_type == 'TK_RESERVED'):
self.flags.mode = MODE.ObjectLiteral
if self.start_of_statement():
# The conditional starts the statement if appropriate.
pass
space_before = True
space_after = True
if self.last_type == 'TK_RESERVED' and self.is_special_word(self.flags.last_text):
# return had a special handling in TK_WORD
self.output_space_before_token = True
self.append_token(token_text)
return
# hack for actionscript's import .*;
if token_text == '*' and self.last_type == 'TK_DOT' and not self.last_last_text.isdigit():
self.append_token(token_text)
return
if token_text == ':' and self.flags.in_case:
self.flags.case_body = True
self.indent()
self.append_token(token_text)
self.append_newline()
self.flags.in_case = False
return
if token_text == '::':
# no spaces around the exotic namespacing syntax operator
self.append_token(token_text)
return
# http://www.ecma-international.org/ecma-262/5.1/#sec-7.9.1
# if there is a newline between -- or ++ and anything else we should preserve it.
if self.input_wanted_newline and (token_text == '--' or token_text == '++'):
self.append_newline()
# Allow line wrapping between operators in an expression
if self.last_type == 'TK_OPERATOR':
self.allow_wrap_or_preserved_newline(token_text)
if token_text in ['--', '++', '!', '~'] \
or (token_text in ['+', '-'] \
and (self.last_type in ['TK_START_BLOCK', 'TK_START_EXPR', 'TK_EQUALS', 'TK_OPERATOR'] \
or self.flags.last_text in self.line_starters or self.flags.last_text == ',')):
space_before = False
space_after = False
if self.flags.last_text == ';' and self.is_expression(self.flags.mode):
# for (;; ++i)
# ^^
space_before = True
if self.last_type == 'TK_RESERVED':
space_before = True
if self.flags.mode == MODE.BlockStatement and self.flags.last_text in ['{', ';']:
# { foo: --i }
# foo(): --bar
self.append_newline()
elif token_text == ':':
if self.flags.ternary_depth == 0:
if self.flags.mode == MODE.BlockStatement:
self.flags.mode = MODE.ObjectLiteral
space_before = False
else:
self.flags.ternary_depth -= 1
elif token_text == '?':
self.flags.ternary_depth += 1
elif self.token_text == '*' and self.last_type == 'TK_RESERVED' and self.flags.last_text == 'function':
space_before = False
space_after = False
if space_before:
self.output_space_before_token = True
self.append_token(token_text)
if space_after:
self.output_space_before_token = True
def handle_block_comment(self, token_text):
lines = token_text.replace('\x0d', '').split('\x0a')
javadoc = False
# block comment starts with a new line
self.append_newline(preserve_statement_flags = True)
if len(lines) > 1:
if not any(l for l in lines[1:] if ( l.strip() == '' or (l.lstrip())[0] != '*')):
javadoc = True
# first line always indented
self.append_token(lines[0])
for line in lines[1:]:
self.append_newline(preserve_statement_flags = True)
if javadoc:
# javadoc: reformat and re-indent
self.append_token(' ' + line.strip())
else:
# normal comments output raw
self.output_lines[-1].text.append(line)
self.append_newline(preserve_statement_flags = True)
def handle_inline_comment(self, token_text):
self.output_space_before_token = True
self.append_token(token_text)
self.output_space_before_token = True
def handle_comment(self, token_text):
if self.input_wanted_newline:
self.append_newline(preserve_statement_flags = True)
if not self.input_wanted_newline:
self.trim_output(True)
self.output_space_before_token = True
self.append_token(token_text)
self.append_newline(preserve_statement_flags = True)
def handle_dot(self, token_text):
if self.start_of_statement():
# The conditional starts the statement if appropriate.
pass
if self.last_type == 'TK_RESERVED' and self.is_special_word(self.flags.last_text):
self.output_space_before_token = True
else:
# allow preserved newlines before dots in general
# force newlines on dots after close paren when break_chained - for bar().baz()
self.allow_wrap_or_preserved_newline(token_text,
self.flags.last_text == ')' and self.opts.break_chained_methods)
self.append_token(token_text)
def handle_unknown(self, token_text):
self.append_token(token_text)
if token_text[len(token_text) - 1] == '\n':
self.append_newline()
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
def main():
argv = sys.argv[1:]
try:
opts, args = getopt.getopt(argv, "s:c:o:dEPjbkil:xhtfvXw:",
['indent-size=','indent-char=','outfile=', 'disable-preserve-newlines',
'space-in-paren', 'space-in-empty-paren', 'jslint-happy', 'brace-style=',
'keep-array-indentation', 'indent-level=', 'unescape-strings', 'help', 'usage',
'stdin', 'eval-code', 'indent-with-tabs', 'keep-function-indentation', 'version',
'e4x', 'wrap-line-length'])
except getopt.GetoptError as ex:
print(ex, file=sys.stderr)
return usage(sys.stderr)
js_options = default_options()
file = None
outfile = 'stdout'
if len(args) == 1:
file = args[0]
for opt, arg in opts:
if opt in ('--keep-array-indentation', '-k'):
js_options.keep_array_indentation = True
        elif opt in ('--keep-function-indentation', '-f'):
js_options.keep_function_indentation = True
elif opt in ('--outfile', '-o'):
outfile = arg
elif opt in ('--indent-size', '-s'):
js_options.indent_size = int(arg)
elif opt in ('--indent-char', '-c'):
js_options.indent_char = arg
elif opt in ('--indent-with-tabs', '-t'):
js_options.indent_with_tabs = True
elif opt in ('--disable-preserve-newlines', '-d'):
js_options.preserve_newlines = False
elif opt in ('--space-in-paren', '-P'):
js_options.space_in_paren = True
elif opt in ('--space-in-empty-paren', '-E'):
js_options.space_in_empty_paren = True
elif opt in ('--jslint-happy', '-j'):
js_options.jslint_happy = True
        elif opt in ('--eval-code',):
js_options.eval_code = True
elif opt in ('--brace-style', '-b'):
js_options.brace_style = arg
elif opt in ('--unescape-strings', '-x'):
js_options.unescape_strings = True
elif opt in ('--e4x', '-X'):
js_options.e4x = True
        elif opt in ('--wrap-line-length', '-w'):
js_options.wrap_line_length = int(arg)
elif opt in ('--stdin', '-i'):
file = '-'
elif opt in ('--version', '-v'):
return print(__version__)
elif opt in ('--help', '--usage', '-h'):
return usage()
if not file:
print("Must define at least one file.", file=sys.stderr)
return usage(sys.stderr)
else:
try:
if outfile == 'stdout':
print(beautify_file(file, js_options))
else:
mkdir_p(os.path.dirname(outfile))
with open(outfile, 'w') as f:
f.write(beautify_file(file, js_options) + '\n')
except Exception as ex:
print(ex, file=sys.stderr)
return 1
# Success
return 0
| true
| true
|
7905d6456da47510edf5e40dc15e2a37bf7dd362
| 4,448
|
py
|
Python
|
scripts/csv_xml.py
|
markendr/RAJAPerf
|
265ad5b98c0ca5a7f0ba996f90220d0b232d1bac
|
[
"BSD-3-Clause"
] | 69
|
2017-09-25T20:23:16.000Z
|
2022-03-05T09:04:15.000Z
|
scripts/csv_xml.py
|
markendr/RAJAPerf
|
265ad5b98c0ca5a7f0ba996f90220d0b232d1bac
|
[
"BSD-3-Clause"
] | 148
|
2017-09-27T21:41:16.000Z
|
2022-03-31T20:27:36.000Z
|
scripts/csv_xml.py
|
markendr/RAJAPerf
|
265ad5b98c0ca5a7f0ba996f90220d0b232d1bac
|
[
"BSD-3-Clause"
] | 31
|
2017-11-22T08:23:32.000Z
|
2022-02-10T03:03:56.000Z
|
#!/bin/env python
import csv
from datetime import datetime
import os
import xml.etree.ElementTree as ET
import xml
# https://stackabuse.com/reading-and-writing-xml-files-in-python/
# xmlformatter:
# https://www.freeformatter.com/xml-formatter.html#ad-output
infile = "./RAJAPerf-timing.csv"
def read_infile(infile):
"""STUB"""
with open(infile) as csvfile:
rps_reader = csv.reader(csvfile, delimiter=',')
def get_date():
"""STUB"""
date = datetime.now().strftime("%-Y-%m-%dT%H:%M:%S")
return date
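# Illustrative sketch (not part of the original source): get_date() returns a
# local ISO-8601-style timestamp such as "2020-12-16T14:34:40".
def example_date_shape():
    stamp = get_date()
    assert "T" in stamp
    return stamp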
date = get_date()
perf_report = ET.Element("performance-report")
name ="RAJAPerf" + date + ".xml"
time_units="seconds"
perf_report.set("date", date)
perf_report.set("name", name)
perf_report.set("time-units", time_units)
perf_root = ET.SubElement(perf_report, 'timing')
perf_root.set("end-time",date)
perf_root.set("name", "kokkos_perf_suite")
#print(ET.tostring(perf_report))
# b'<performance-report time-units="seconds" date="2020-12-16T14:34:40"
# name="RAJAPerf-timing.csv"><timing end-time="2020-12-16T14:34:40"
# name="kokkos_perf_suite" /></performance-report>'
# metadata TBD
# create hierarchy
test_suite_list = []
with open(infile) as csvfile:
rps_reader = csv.reader(csvfile, delimiter=',')
for row in rps_reader:
test_suite_list.append(row)
suite_names_set = set([x[0][:x[0].find("_")] for x in test_suite_list[2:]])
#suite_names_set
#Out[135]: {'Basic', 'KokkosMechanics'}
heirarch_dict = dict()
for name in suite_names_set:
heirarch_dict[name] = []
# heirarch_dict
# Out[137]: {'KokkosMechanics': [], 'Basic': []}
for item in test_suite_list[2:]:
key = item[0][:item[0].find("_")]
heirarch_dict[key].append(item)
#print(item)
#NEXT STEPS: For the main test categories, Basic and KokkosMechanics, sum
# the test times over all of the kernels for each of their variants
col_meanings_dict = dict()
for index, item in enumerate(test_suite_list[1]):
#print(index, item)
col_meanings_dict[index] = item
#col_meanings_dict
# Out[152]:
# {0: 'Kernel ',
# 1: ' Base_Seq ',
# 2: ' Lambda_Seq ',
# 3: ' RAJA_Seq ',
# 4: ' Base_CUDA ',
# 5: ' RAJA_CUDA ',
# 6: ' Kokkos_Lambda_Seq ',
# 7: ' Kokkos_Functor_Seq ',
# 8: ' Kokkos_Lambda_CUDA ',
# 9: ' Kokkos_Functor_CUDA'}
def associate_timings_with_xml(xml_element, timing_dict, suite_or_test_name):
"""STUB -- xml_element will be an element of perf_report;
timing_dict = a map of variant names to test run times
"""
for key, value in timing_dict.items():
xml_element.set(key.lower(), str(value))
xml_element.set("name", suite_or_test_name.strip())
def create_RPS_xml_report(suite_name, suite_data_list):
"""STUB - suite_name is a string = Basic, KokkosMechanics, etc.;
suite_data_list will be the values for a key, Basic or KokkosMechanics
"""
aggregate_results_dict = dict()
#print(suite_data_list)
for list_item in suite_data_list:
for index, timing in enumerate(list_item[1:]):
if "Not run" in timing:
continue
variant_name = col_meanings_dict[index + 1]
if variant_name not in aggregate_results_dict:
aggregate_results_dict[variant_name] = 0.0
# sums values of all the basic kernels
aggregate_results_dict[variant_name] += float(timing)
#print(aggregate_results_dict)
suite_root = ET.SubElement(perf_root, "timing")
associate_timings_with_xml(suite_root, aggregate_results_dict, suite_name)
for list_item in suite_data_list:
test_timings_dict = dict()
for index, timing in enumerate(list_item[1:]):
if "Not run" in timing:
continue
variant_name = col_meanings_dict[index + 1]
test_timings_dict[variant_name] = float(timing)
xml_element_for_a_kernel_test = ET.SubElement(suite_root, "timing")
associate_timings_with_xml(xml_element_for_a_kernel_test,
test_timings_dict, list_item[0])
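# Illustrative sketch (not part of the original source): pushing two fake
# kernel rows through create_RPS_xml_report(). Timings are looked up through
# col_meanings_dict, so this assumes the CSV header was already read, and it
# mutates the module-level perf_root tree. "Not run" entries are skipped.
def example_suite_report():
    fake_rows = [["Basic_ADD", "0.5"], ["Basic_MUL", "Not run"]]
    create_RPS_xml_report("Basic", fake_rows)
    # perf_root gains a <timing name="Basic" ...> node with one child per row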
def run():
"""STUB"""
read_infile(infile)
#create_RPS_xml_report("Basic", heirarch_dict["Basic"])
for key in heirarch_dict.keys():
create_RPS_xml_report(key, heirarch_dict[key])
# Aided in debugging
#print(heirarch_dict["KokkosMechanics"])
# Prints xml to screen as string
#print(ET.tostring(perf_report))
ET.dump(perf_report)
if __name__ == "__main__":
run()
| 25.563218
| 78
| 0.678507
|
import csv
from datetime import datetime
import os
import xml.etree.ElementTree as ET
import xml
= "./RAJAPerf-timing.csv"
def read_infile(infile):
with open(infile) as csvfile:
rps_reader = csv.reader(csvfile, delimiter=',')
def get_date():
date = datetime.now().strftime("%-Y-%m-%dT%H:%M:%S")
return date
date = get_date()
perf_report = ET.Element("performance-report")
name ="RAJAPerf" + date + ".xml"
time_units="seconds"
perf_report.set("date", date)
perf_report.set("name", name)
perf_report.set("time-units", time_units)
perf_root = ET.SubElement(perf_report, 'timing')
perf_root.set("end-time",date)
perf_root.set("name", "kokkos_perf_suite")
# name="RAJAPerf-timing.csv"><timing end-time="2020-12-16T14:34:40"
# name="kokkos_perf_suite" /></performance-report>'
test_suite_list = []
with open(infile) as csvfile:
rps_reader = csv.reader(csvfile, delimiter=',')
for row in rps_reader:
test_suite_list.append(row)
suite_names_set = set([x[0][:x[0].find("_")] for x in test_suite_list[2:]])
heirarch_dict = dict()
for name in suite_names_set:
heirarch_dict[name] = []
for item in test_suite_list[2:]:
key = item[0][:item[0].find("_")]
heirarch_dict[key].append(item)
col_meanings_dict = dict()
for index, item in enumerate(test_suite_list[1]):
col_meanings_dict[index] = item
def associate_timings_with_xml(xml_element, timing_dict, suite_or_test_name):
for key, value in timing_dict.items():
xml_element.set(key.lower(), str(value))
xml_element.set("name", suite_or_test_name.strip())
def create_RPS_xml_report(suite_name, suite_data_list):
aggregate_results_dict = dict()
for list_item in suite_data_list:
for index, timing in enumerate(list_item[1:]):
if "Not run" in timing:
continue
variant_name = col_meanings_dict[index + 1]
if variant_name not in aggregate_results_dict:
aggregate_results_dict[variant_name] = 0.0
aggregate_results_dict[variant_name] += float(timing)
suite_root = ET.SubElement(perf_root, "timing")
associate_timings_with_xml(suite_root, aggregate_results_dict, suite_name)
for list_item in suite_data_list:
test_timings_dict = dict()
for index, timing in enumerate(list_item[1:]):
if "Not run" in timing:
continue
variant_name = col_meanings_dict[index + 1]
test_timings_dict[variant_name] = float(timing)
xml_element_for_a_kernel_test = ET.SubElement(suite_root, "timing")
associate_timings_with_xml(xml_element_for_a_kernel_test,
test_timings_dict, list_item[0])
def run():
read_infile(infile)
for key in heirarch_dict.keys():
create_RPS_xml_report(key, heirarch_dict[key])
ET.dump(perf_report)
if __name__ == "__main__":
run()
| true
| true
|
7905d6df40815cc1d1694afa95957d417f187151
| 2,044
|
py
|
Python
|
eva_storage/jvc/jvc_runner_v2.py
|
jaehobang/cs7643_project
|
7ec3d1934fc462ba230bd0615c939e7de00dc10c
|
[
"Apache-2.0"
] | null | null | null |
eva_storage/jvc/jvc_runner_v2.py
|
jaehobang/cs7643_project
|
7ec3d1934fc462ba230bd0615c939e7de00dc10c
|
[
"Apache-2.0"
] | null | null | null |
eva_storage/jvc/jvc_runner_v2.py
|
jaehobang/cs7643_project
|
7ec3d1934fc462ba230bd0615c939e7de00dc10c
|
[
"Apache-2.0"
] | null | null | null |
"""
This is the runner of the entire eva.jvc system.
Version 1,
the steps for the entire pipeline are as follows:
1. preprocessor -- get rep indices, save children metadata
2. encoder -- encode video by forcing i-frames (also modify the i-frame skip rate)
3. decoder -- using metadata, select the i-frames you want to decode.
If the user wants more frames than the number of i frames, then I guess we have to decode the entire video??
@Jaeho Bang
"""
import os
from eva_storage.jvc.preprocessor import *
from eva_storage.jvc.encoder import *
from eva_storage.jvc.decoder import *
from loaders.seattle_loader import SeattleLoader
from timer import Timer
class JVCRunner_v2:
def __init__(self):
self.preprocessor = Preprocessor()
self.compressor = Compressor()
self.decompressor = Decompressor()
self.video_loader = SeattleLoader()
def encode(self, path_to_video):
video_filename = os.path.basename(path_to_video)
        # TODO: use os.path.splitext to strip the extension more robustly
        video_filename = video_filename.split('.')[0]
images, metadata = self.video_loader.load_images(
path_to_video) ## we might need metadata such as fps, frame_width, frame_height, fourcc from here
rep_indices = self.preprocessor.run(images, video_filename)
self.compressor.run(images, rep_indices, metadata)
def decode(self, path_to_video, number_of_samples=None):
images = self.decompressor.run(path_to_video, number_of_samples)
return images
if __name__ == "__main__":
timer = Timer() ##TODO: use the timer to run the pipeline
preprocessor = Preprocessor()
compressor = Compressor()
decompressor = Decompressor()
video_loader = SeattleLoader()
images = video_loader.load_images()
meta_data = preprocessor.run(images)
save_directory = compressor.run(images, meta_data)
number_of_frames = 100 ## we can change this to whatever number we want
images_jvc = decompressor.run(save_directory, number_of_frames)
| 31.9375
| 122
| 0.716732
|
import os
from eva_storage.jvc.preprocessor import *
from eva_storage.jvc.encoder import *
from eva_storage.jvc.decoder import *
from loaders.seattle_loader import SeattleLoader
from timer import Timer
class JVCRunner_v2:
def __init__(self):
self.preprocessor = Preprocessor()
self.compressor = Compressor()
self.decompressor = Decompressor()
self.video_loader = SeattleLoader()
def encode(self, path_to_video):
video_filename = os.path.basename(path_to_video)
        video_filename = video_filename.split('.')[0]
        images, metadata = self.video_loader.load_images(path_to_video)
        rep_indices = self.preprocessor.run(images, video_filename)
        self.compressor.run(images, rep_indices, metadata)
def decode(self, path_to_video, number_of_samples=None):
images = self.decompressor.run(path_to_video, number_of_samples)
return images
if __name__ == "__main__":
    timer = Timer()
    preprocessor = Preprocessor()
    compressor = Compressor()
decompressor = Decompressor()
video_loader = SeattleLoader()
images = video_loader.load_images()
meta_data = preprocessor.run(images)
save_directory = compressor.run(images, meta_data)
    number_of_frames = 100
    images_jvc = decompressor.run(save_directory, number_of_frames)
| true
| true
|
7905d726f2f8b79ec4d8395beedb4cad2e0e70eb
| 2,580
|
py
|
Python
|
01_profiling/memory_profiler/julia1_memoryprofiler2.py
|
siddheshmhatre/high_performance_python
|
4c308dddb400fcd4c4ab7c4d59dc56eb5dd58d1d
|
[
"RSA-MD"
] | 698
|
2015-01-06T14:10:26.000Z
|
2022-03-29T03:05:09.000Z
|
01_profiling/memory_profiler/julia1_memoryprofiler2.py
|
siddheshmhatre/high_performance_python
|
4c308dddb400fcd4c4ab7c4d59dc56eb5dd58d1d
|
[
"RSA-MD"
] | 6
|
2015-01-12T18:03:24.000Z
|
2021-06-02T13:05:20.000Z
|
01_profiling/memory_profiler/julia1_memoryprofiler2.py
|
siddheshmhatre/high_performance_python
|
4c308dddb400fcd4c4ab7c4d59dc56eb5dd58d1d
|
[
"RSA-MD"
] | 260
|
2015-01-16T13:58:57.000Z
|
2022-03-28T16:49:34.000Z
|
"""Julia set generator without optional PIL-based image drawing"""
import time
#from memory_profiler import profile
# area of complex space to investigate
x1, x2, y1, y2 = -1.8, 1.8, -1.8, 1.8
c_real, c_imag = -0.62772, -.42193
@profile
def calculate_z_serial_purepython(maxiter, zs, cs):
"""Calculate output list using Julia update rule"""
with profile.timestamp("create_output_list"):
output = [0] * len(zs)
time.sleep(1)
with profile.timestamp("create_range_of_zs"):
iterations = range(len(zs))
with profile.timestamp("calculate_output"):
for i in iterations:
n = 0
z = zs[i]
c = cs[i]
while n < maxiter and abs(z) < 2:
z = z * z + c
n += 1
output[i] = n
return output
@profile
def calc_pure_python(draw_output, desired_width, max_iterations):
"""Create a list of complex co-ordinates (zs) and complex parameters (cs), build Julia set and display"""
x_step = (float(x2 - x1) / float(desired_width))
y_step = (float(y1 - y2) / float(desired_width))
x = []
y = []
ycoord = y2
while ycoord > y1:
y.append(ycoord)
ycoord += y_step
xcoord = x1
while xcoord < x2:
x.append(xcoord)
xcoord += x_step
# set width and height to the generated pixel counts, rather than the
# pre-rounding desired width and height
# build a list of co-ordinates and the initial condition for each cell.
# Note that our initial condition is a constant and could easily be removed,
# we use it to simulate a real-world scenario with several inputs to our
# function
zs = []
cs = []
for ycoord in y:
for xcoord in x:
zs.append(complex(xcoord, ycoord))
cs.append(complex(c_real, c_imag))
print "Length of x:", len(x)
print "Total elements:", len(zs)
start_time = time.time()
output = calculate_z_serial_purepython(max_iterations, zs, cs)
end_time = time.time()
secs = end_time - start_time
print calculate_z_serial_purepython.func_name + " took", secs, "seconds"
# this sum is expected for 1000^2 grid with 300 iterations
assert sum(output) == 33219980
# Calculate the Julia set using a pure Python solution with
# reasonable defaults for a laptop
# set draw_output to True to use PIL to draw an image
calc_pure_python(draw_output=False, desired_width=1000, max_iterations=300)
#calc_pure_python(draw_output=False, desired_width=10, max_iterations=300)
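# How to run this profiling script (assumptions: Python 2, since it uses
# print statements and func_name, and the memory_profiler package, which
# injects the `profile` builtin when the script is launched through it):
#
#   python2 -m memory_profiler julia1_memoryprofiler2.py
#
# The profile.timestamp(...) context managers label spans of the run in
# memory_profiler's timestamped output.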
| 34.4
| 109
| 0.642248
|
"""Julia set generator without optional PIL-based image drawing"""
import time
x1, x2, y1, y2 = -1.8, 1.8, -1.8, 1.8
c_real, c_imag = -0.62772, -.42193
@profile
def calculate_z_serial_purepython(maxiter, zs, cs):
"""Calculate output list using Julia update rule"""
with profile.timestamp("create_output_list"):
output = [0] * len(zs)
time.sleep(1)
with profile.timestamp("create_range_of_zs"):
iterations = range(len(zs))
with profile.timestamp("calculate_output"):
for i in iterations:
n = 0
z = zs[i]
c = cs[i]
while n < maxiter and abs(z) < 2:
z = z * z + c
n += 1
output[i] = n
return output
@profile
def calc_pure_python(draw_output, desired_width, max_iterations):
"""Create a list of complex co-ordinates (zs) and complex parameters (cs), build Julia set and display"""
x_step = (float(x2 - x1) / float(desired_width))
y_step = (float(y1 - y2) / float(desired_width))
x = []
y = []
ycoord = y2
while ycoord > y1:
y.append(ycoord)
ycoord += y_step
xcoord = x1
while xcoord < x2:
x.append(xcoord)
xcoord += x_step
zs = []
cs = []
for ycoord in y:
for xcoord in x:
zs.append(complex(xcoord, ycoord))
cs.append(complex(c_real, c_imag))
print "Length of x:", len(x)
print "Total elements:", len(zs)
start_time = time.time()
output = calculate_z_serial_purepython(max_iterations, zs, cs)
end_time = time.time()
secs = end_time - start_time
print calculate_z_serial_purepython.func_name + " took", secs, "seconds"
assert sum(output) == 33219980
calc_pure_python(draw_output=False, desired_width=1000, max_iterations=300)
| false
| true
|
7905d75a01200606d03c1c0b340145adee5d236d
| 15,669
|
py
|
Python
|
reading_comprehension/reading_comprehension_run.py
|
Atul-Anand-Jha/reading_comprehension_tf
|
9d45ff62aa4004c466e4fe6b6639cec754199b2b
|
[
"Apache-2.0"
] | 1
|
2019-05-15T09:13:50.000Z
|
2019-05-15T09:13:50.000Z
|
reading_comprehension/reading_comprehension_run.py
|
Atul-Anand-Jha/reading_comprehension_tf
|
9d45ff62aa4004c466e4fe6b6639cec754199b2b
|
[
"Apache-2.0"
] | null | null | null |
reading_comprehension/reading_comprehension_run.py
|
Atul-Anand-Jha/reading_comprehension_tf
|
9d45ff62aa4004c466e4fe6b6639cec754199b2b
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import os.path
import time
import numpy as np
import tensorflow as tf
from tensorflow.python import debug as tf_debug
from util.default_util import *
from util.param_util import *
from util.model_util import *
from util.eval_util import *
from util.debug_logger import *
from util.train_logger import *
from util.eval_logger import *
from util.summary_writer import *
def add_arguments(parser):
parser.add_argument("--mode", help="mode to run", required=True)
parser.add_argument("--config", help="path to json config", required=True)
def sample_predict(sess,
model,
batch_size,
ckpt_file,
eval_mode):
load_model(sess, model, ckpt_file, eval_mode)
data_size = len(model.input_data)
feed_dict, data_dict = generate_feed_dict(model, data_size, batch_size)
sess.run(model.data_pipeline.initializer, feed_dict=feed_dict)
predict_span = []
while True:
try:
infer_result = model.model.infer(sess, model.word_embedding)
predict_span.extend(infer_result.predict)
except tf.errors.OutOfRangeError:
break
predict_size = len(predict_span)
if data_size != predict_size:
raise ValueError("input data size {0} and output data size {1} is not the same".format(data_size, predict_size))
sample_result = []
for i in range(data_size):
sample_id = data_dict["input_data"][i]["id"]
context = data_dict["input_context"][i]
context_tokens = context.split(" ")
predict_start = int(predict_span[i][0])
predict_end = int(predict_span[i][1])
predict = " ".join(context_tokens[predict_start:predict_end+1])
sample_result.append({
"id": sample_id,
"context": context,
"predict": {
"text": predict,
"start": predict_start,
"end": predict_end
},
"answers": []
})
for answer in data_dict["input_data"][i]["answers"]:
label_start = int(answer["start"])
label_end = int(answer["end"])
label = " ".join(context_tokens[label_start:label_end+1])
sample_result[-1]["answers"].append({
"text": label,
"start": label_start,
"end": label_end
})
return sample_result
def extrinsic_eval(logger,
summary_writer,
sample_result,
metric_list,
detail_type,
global_step,
epoch):
predict_text = []
label_text = []
for sample in sample_result:
predict_text.append(sample["predict"]["text"])
label_text.append([])
for answer in sample["answers"]:
label_text[-1].append(answer["text"])
eval_result_list = []
sample_output = sample_result
for metric in metric_list:
score = evaluate_from_data(predict_text, label_text, metric)
summary_writer.add_value_summary(metric, score, global_step)
eval_result = ExtrinsicEvalLog(metric=metric,
score=score, sample_output=None, sample_size=len(sample_output))
eval_result_list.append(eval_result)
if detail_type == "simplified":
sample_output = { sample["id"]: sample["predict"]["text"] for sample in sample_output }
eval_result_detail = ExtrinsicEvalLog(metric="detail",
score=0.0, sample_output=sample_output, sample_size=len(sample_output))
basic_info = BasicInfoEvalLog(epoch=epoch, global_step=global_step)
logger.update_extrinsic_eval(eval_result_list, basic_info)
logger.update_extrinsic_eval_detail(eval_result_detail, basic_info)
logger.check_extrinsic_eval()
logger.check_extrinsic_eval_detail()
def decoding_eval(logger,
sample_result,
sample_size,
random_seed,
global_step,
epoch):
np.random.seed(random_seed)
    sample_ids = np.random.randint(0, len(sample_result), size=sample_size)
sample_data = [sample_result[sample_id] for sample_id in sample_ids]
eval_result_list = []
for sample in sample_data:
sample_input = sample
sample_output = sample["predict"]["text"]
sample_reference_list = []
for answer in sample["answers"]:
sample_reference = answer["text"]
sample_reference_list.append(sample_reference)
eval_result = DecodingEvalLog(sample_input=sample_input,
sample_output=sample_output, sample_reference=sample_reference_list)
eval_result_list.append(eval_result)
basic_info = BasicInfoEvalLog(epoch=epoch, global_step=global_step)
logger.update_decoding_eval(eval_result_list, basic_info)
logger.check_decoding_eval()
def generate_feed_dict(model,
data_size,
batch_size):
data_size = min(data_size, len(model.input_data))
input_data = model.input_data[:data_size]
input_answer = model.input_answer[:data_size]
input_question = model.input_question[:data_size]
input_question_word = model.input_question_word[:data_size] if model.input_question_word is not None else None
input_question_subword = model.input_question_subword[:data_size] if model.input_question_subword is not None else None
input_question_char = model.input_question_char[:data_size] if model.input_question_char is not None else None
input_context = model.input_context[:data_size]
input_context_word = model.input_context_word[:data_size] if model.input_context_word is not None else None
input_context_subword = model.input_context_subword[:data_size] if model.input_context_subword is not None else None
input_context_char = model.input_context_char[:data_size] if model.input_context_char is not None else None
data_dict = {
"data_size": data_size,
"input_data": input_data,
"input_answer": input_answer,
"input_question": input_question,
"input_question_word": input_question_word,
"input_question_subword": input_question_subword,
"input_question_char": input_question_char,
"input_context": input_context,
"input_context_word": input_context_word,
"input_context_subword": input_context_subword,
"input_context_char": input_context_char
}
feed_dict = {
model.data_pipeline.data_size_placeholder: data_size,
model.data_pipeline.batch_size_placeholder: batch_size
}
if model.data_pipeline.input_answer_placeholder is not None and input_answer is not None:
feed_dict[model.data_pipeline.input_answer_placeholder] = input_answer
if model.data_pipeline.input_question_placeholder is not None and input_question is not None:
feed_dict[model.data_pipeline.input_question_placeholder] = input_question
if model.data_pipeline.input_question_word_placeholder is not None and input_question_word is not None:
feed_dict[model.data_pipeline.input_question_word_placeholder] = input_question_word
if model.data_pipeline.input_question_subword_placeholder is not None and input_question_subword is not None:
feed_dict[model.data_pipeline.input_question_subword_placeholder] = input_question_subword
if model.data_pipeline.input_question_char_placeholder is not None and input_question_char is not None:
feed_dict[model.data_pipeline.input_question_char_placeholder] = input_question_char
if model.data_pipeline.input_context_placeholder is not None and input_context is not None:
feed_dict[model.data_pipeline.input_context_placeholder] = input_context
if model.data_pipeline.input_context_word_placeholder is not None and input_context_word is not None:
feed_dict[model.data_pipeline.input_context_word_placeholder] = input_context_word
if model.data_pipeline.input_context_subword_placeholder is not None and input_context_subword is not None:
feed_dict[model.data_pipeline.input_context_subword_placeholder] = input_context_subword
if model.data_pipeline.input_context_char_placeholder is not None and input_context_char is not None:
feed_dict[model.data_pipeline.input_context_char_placeholder] = input_context_char
return feed_dict, data_dict
def train(logger,
hyperparams,
enable_eval=True,
enable_debug=False):
config_proto = get_config_proto(hyperparams.device_log_device_placement,
hyperparams.device_allow_soft_placement, hyperparams.device_allow_growth,
hyperparams.device_per_process_gpu_memory_fraction)
summary_output_dir = hyperparams.train_summary_output_dir
if not tf.gfile.Exists(summary_output_dir):
tf.gfile.MakeDirs(summary_output_dir)
logger.log_print("##### create train model #####")
train_model = create_train_model(logger, hyperparams)
train_sess = tf.Session(config=config_proto, graph=train_model.graph)
if enable_debug == True:
train_sess = tf_debug.LocalCLIDebugWrapperSession(train_sess)
train_summary_writer = SummaryWriter(train_model.graph, os.path.join(summary_output_dir, "train"))
init_model(train_sess, train_model)
train_logger = TrainLogger(hyperparams.data_log_output_dir)
if enable_eval == True:
logger.log_print("##### create infer model #####")
infer_model = create_infer_model(logger, hyperparams)
infer_sess = tf.Session(config=config_proto, graph=infer_model.graph)
if enable_debug == True:
infer_sess = tf_debug.LocalCLIDebugWrapperSession(infer_sess)
infer_summary_writer = SummaryWriter(infer_model.graph, os.path.join(summary_output_dir, "infer"))
init_model(infer_sess, infer_model)
eval_logger = EvalLogger(hyperparams.data_log_output_dir)
logger.log_print("##### start training #####")
global_step = 0
for epoch in range(hyperparams.train_num_epoch):
feed_dict, data_dict = generate_feed_dict(train_model, len(train_model.input_answer), hyperparams.train_batch_size)
train_sess.run(train_model.data_pipeline.initializer, feed_dict=feed_dict)
step_in_epoch = 0
while True:
try:
start_time = time.time()
train_result = train_model.model.train(train_sess, train_model.word_embedding)
end_time = time.time()
global_step = train_result.global_step
step_in_epoch += 1
train_logger.update(train_result, epoch, step_in_epoch, end_time-start_time)
if step_in_epoch % hyperparams.train_step_per_stat == 0:
train_logger.check()
train_summary_writer.add_summary(train_result.summary, global_step)
if step_in_epoch % hyperparams.train_step_per_ckpt == 0:
train_model.model.save(train_sess, global_step, "debug")
if step_in_epoch % hyperparams.train_step_per_eval == 0 and enable_eval == True:
ckpt_file = infer_model.model.get_latest_ckpt("debug")
sample_result = sample_predict(infer_sess, infer_model, hyperparams.train_eval_batch_size, ckpt_file, "debug")
extrinsic_eval(eval_logger, infer_summary_writer, sample_result,
hyperparams.train_eval_metric, hyperparams.train_eval_detail_type, global_step, epoch)
decoding_eval(eval_logger, sample_result, hyperparams.train_decoding_sample_size,
hyperparams.train_random_seed + global_step, global_step, epoch)
except tf.errors.OutOfRangeError:
train_logger.check()
train_summary_writer.add_summary(train_result.summary, global_step)
train_model.model.save(train_sess, global_step, "epoch")
if enable_eval == True:
ckpt_file = infer_model.model.get_latest_ckpt("epoch")
sample_result = sample_predict(infer_sess, infer_model, hyperparams.train_eval_batch_size, ckpt_file, "epoch")
extrinsic_eval(eval_logger, infer_summary_writer, sample_result,
hyperparams.train_eval_metric, hyperparams.train_eval_detail_type, global_step, epoch)
decoding_eval(eval_logger, sample_result, hyperparams.train_decoding_sample_size,
hyperparams.train_random_seed + global_step, global_step, epoch)
break
train_summary_writer.close_writer()
if enable_eval == True:
infer_summary_writer.close_writer()
logger.log_print("##### finish training #####")
def evaluate(logger,
hyperparams,
enable_debug=False):
config_proto = get_config_proto(hyperparams.device_log_device_placement,
hyperparams.device_allow_soft_placement, hyperparams.device_allow_growth,
hyperparams.device_per_process_gpu_memory_fraction)
summary_output_dir = hyperparams.train_summary_output_dir
if not tf.gfile.Exists(summary_output_dir):
tf.gfile.MakeDirs(summary_output_dir)
logger.log_print("##### create infer model #####")
infer_model = create_infer_model(logger, hyperparams)
infer_sess = tf.Session(config=config_proto, graph=infer_model.graph)
if enable_debug == True:
infer_sess = tf_debug.LocalCLIDebugWrapperSession(infer_sess)
infer_summary_writer = SummaryWriter(infer_model.graph, os.path.join(summary_output_dir, "infer"))
init_model(infer_sess, infer_model)
eval_logger = EvalLogger(hyperparams.data_log_output_dir)
logger.log_print("##### start evaluation #####")
global_step = 0
eval_mode = "debug" if enable_debug == True else "epoch"
ckpt_file_list = infer_model.model.get_ckpt_list(eval_mode)
for i, ckpt_file in enumerate(ckpt_file_list):
sample_result = sample_predict(infer_sess, infer_model, hyperparams.train_eval_batch_size, ckpt_file, eval_mode)
extrinsic_eval(eval_logger, infer_summary_writer, sample_result,
hyperparams.train_eval_metric, hyperparams.train_eval_detail_type, global_step, i)
decoding_eval(eval_logger, sample_result,
hyperparams.train_decoding_sample_size, hyperparams.train_random_seed, global_step, i)
infer_summary_writer.close_writer()
logger.log_print("##### finish evaluation #####")
def main(args):
hyperparams = load_hyperparams(args.config)
logger = DebugLogger(hyperparams.data_log_output_dir)
tf_version = check_tensorflow_version()
logger.log_print("# tensorflow verison is {0}".format(tf_version))
if (args.mode == 'train'):
train(logger, hyperparams, enable_eval=False, enable_debug=False)
elif (args.mode == 'train_eval'):
train(logger, hyperparams, enable_eval=True, enable_debug=False)
elif (args.mode == 'train_debug'):
train(logger, hyperparams, enable_eval=False, enable_debug=True)
elif (args.mode == 'eval'):
evaluate(logger, hyperparams, enable_debug=False)
elif (args.mode == 'eval_debug'):
evaluate(logger, hyperparams, enable_debug=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
add_arguments(parser)
args = parser.parse_args()
main(args)
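# Example invocation (the config path is hypothetical); --mode must be one
# of train, train_eval, train_debug, eval or eval_debug, as dispatched in
# main() above:
#
#   python reading_comprehension_run.py --mode train_eval --config config/mrc.json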
| 45.682216
| 130
| 0.689195
|
import argparse
import os.path
import time
import numpy as np
import tensorflow as tf
from tensorflow.python import debug as tf_debug
from util.default_util import *
from util.param_util import *
from util.model_util import *
from util.eval_util import *
from util.debug_logger import *
from util.train_logger import *
from util.eval_logger import *
from util.summary_writer import *
def add_arguments(parser):
parser.add_argument("--mode", help="mode to run", required=True)
parser.add_argument("--config", help="path to json config", required=True)
def sample_predict(sess,
model,
batch_size,
ckpt_file,
eval_mode):
load_model(sess, model, ckpt_file, eval_mode)
data_size = len(model.input_data)
feed_dict, data_dict = generate_feed_dict(model, data_size, batch_size)
sess.run(model.data_pipeline.initializer, feed_dict=feed_dict)
predict_span = []
while True:
try:
infer_result = model.model.infer(sess, model.word_embedding)
predict_span.extend(infer_result.predict)
except tf.errors.OutOfRangeError:
break
predict_size = len(predict_span)
if data_size != predict_size:
raise ValueError("input data size {0} and output data size {1} is not the same".format(data_size, predict_size))
sample_result = []
for i in range(data_size):
sample_id = data_dict["input_data"][i]["id"]
context = data_dict["input_context"][i]
context_tokens = context.split(" ")
predict_start = int(predict_span[i][0])
predict_end = int(predict_span[i][1])
predict = " ".join(context_tokens[predict_start:predict_end+1])
sample_result.append({
"id": sample_id,
"context": context,
"predict": {
"text": predict,
"start": predict_start,
"end": predict_end
},
"answers": []
})
for answer in data_dict["input_data"][i]["answers"]:
label_start = int(answer["start"])
label_end = int(answer["end"])
label = " ".join(context_tokens[label_start:label_end+1])
sample_result[-1]["answers"].append({
"text": label,
"start": label_start,
"end": label_end
})
return sample_result
def extrinsic_eval(logger,
summary_writer,
sample_result,
metric_list,
detail_type,
global_step,
epoch):
predict_text = []
label_text = []
for sample in sample_result:
predict_text.append(sample["predict"]["text"])
label_text.append([])
for answer in sample["answers"]:
label_text[-1].append(answer["text"])
eval_result_list = []
sample_output = sample_result
for metric in metric_list:
score = evaluate_from_data(predict_text, label_text, metric)
summary_writer.add_value_summary(metric, score, global_step)
eval_result = ExtrinsicEvalLog(metric=metric,
score=score, sample_output=None, sample_size=len(sample_output))
eval_result_list.append(eval_result)
if detail_type == "simplified":
sample_output = { sample["id"]: sample["predict"]["text"] for sample in sample_output }
eval_result_detail = ExtrinsicEvalLog(metric="detail",
score=0.0, sample_output=sample_output, sample_size=len(sample_output))
basic_info = BasicInfoEvalLog(epoch=epoch, global_step=global_step)
logger.update_extrinsic_eval(eval_result_list, basic_info)
logger.update_extrinsic_eval_detail(eval_result_detail, basic_info)
logger.check_extrinsic_eval()
logger.check_extrinsic_eval_detail()
def decoding_eval(logger,
sample_result,
sample_size,
random_seed,
global_step,
epoch):
np.random.seed(random_seed)
    sample_ids = np.random.randint(0, len(sample_result), size=sample_size)
sample_data = [sample_result[sample_id] for sample_id in sample_ids]
eval_result_list = []
for sample in sample_data:
sample_input = sample
sample_output = sample["predict"]["text"]
sample_reference_list = []
for answer in sample["answers"]:
sample_reference = answer["text"]
sample_reference_list.append(sample_reference)
eval_result = DecodingEvalLog(sample_input=sample_input,
sample_output=sample_output, sample_reference=sample_reference_list)
eval_result_list.append(eval_result)
basic_info = BasicInfoEvalLog(epoch=epoch, global_step=global_step)
logger.update_decoding_eval(eval_result_list, basic_info)
logger.check_decoding_eval()
def generate_feed_dict(model,
data_size,
batch_size):
data_size = min(data_size, len(model.input_data))
input_data = model.input_data[:data_size]
input_answer = model.input_answer[:data_size]
input_question = model.input_question[:data_size]
input_question_word = model.input_question_word[:data_size] if model.input_question_word is not None else None
input_question_subword = model.input_question_subword[:data_size] if model.input_question_subword is not None else None
input_question_char = model.input_question_char[:data_size] if model.input_question_char is not None else None
input_context = model.input_context[:data_size]
input_context_word = model.input_context_word[:data_size] if model.input_context_word is not None else None
input_context_subword = model.input_context_subword[:data_size] if model.input_context_subword is not None else None
input_context_char = model.input_context_char[:data_size] if model.input_context_char is not None else None
data_dict = {
"data_size": data_size,
"input_data": input_data,
"input_answer": input_answer,
"input_question": input_question,
"input_question_word": input_question_word,
"input_question_subword": input_question_subword,
"input_question_char": input_question_char,
"input_context": input_context,
"input_context_word": input_context_word,
"input_context_subword": input_context_subword,
"input_context_char": input_context_char
}
feed_dict = {
model.data_pipeline.data_size_placeholder: data_size,
model.data_pipeline.batch_size_placeholder: batch_size
}
if model.data_pipeline.input_answer_placeholder is not None and input_answer is not None:
feed_dict[model.data_pipeline.input_answer_placeholder] = input_answer
if model.data_pipeline.input_question_placeholder is not None and input_question is not None:
feed_dict[model.data_pipeline.input_question_placeholder] = input_question
if model.data_pipeline.input_question_word_placeholder is not None and input_question_word is not None:
feed_dict[model.data_pipeline.input_question_word_placeholder] = input_question_word
if model.data_pipeline.input_question_subword_placeholder is not None and input_question_subword is not None:
feed_dict[model.data_pipeline.input_question_subword_placeholder] = input_question_subword
if model.data_pipeline.input_question_char_placeholder is not None and input_question_char is not None:
feed_dict[model.data_pipeline.input_question_char_placeholder] = input_question_char
if model.data_pipeline.input_context_placeholder is not None and input_context is not None:
feed_dict[model.data_pipeline.input_context_placeholder] = input_context
if model.data_pipeline.input_context_word_placeholder is not None and input_context_word is not None:
feed_dict[model.data_pipeline.input_context_word_placeholder] = input_context_word
if model.data_pipeline.input_context_subword_placeholder is not None and input_context_subword is not None:
feed_dict[model.data_pipeline.input_context_subword_placeholder] = input_context_subword
if model.data_pipeline.input_context_char_placeholder is not None and input_context_char is not None:
feed_dict[model.data_pipeline.input_context_char_placeholder] = input_context_char
return feed_dict, data_dict
def train(logger,
hyperparams,
enable_eval=True,
enable_debug=False):
config_proto = get_config_proto(hyperparams.device_log_device_placement,
hyperparams.device_allow_soft_placement, hyperparams.device_allow_growth,
hyperparams.device_per_process_gpu_memory_fraction)
summary_output_dir = hyperparams.train_summary_output_dir
if not tf.gfile.Exists(summary_output_dir):
tf.gfile.MakeDirs(summary_output_dir)
logger.log_print("##### create train model #####")
train_model = create_train_model(logger, hyperparams)
train_sess = tf.Session(config=config_proto, graph=train_model.graph)
if enable_debug == True:
train_sess = tf_debug.LocalCLIDebugWrapperSession(train_sess)
train_summary_writer = SummaryWriter(train_model.graph, os.path.join(summary_output_dir, "train"))
init_model(train_sess, train_model)
train_logger = TrainLogger(hyperparams.data_log_output_dir)
if enable_eval == True:
logger.log_print("##### create infer model #####")
infer_model = create_infer_model(logger, hyperparams)
infer_sess = tf.Session(config=config_proto, graph=infer_model.graph)
if enable_debug == True:
infer_sess = tf_debug.LocalCLIDebugWrapperSession(infer_sess)
infer_summary_writer = SummaryWriter(infer_model.graph, os.path.join(summary_output_dir, "infer"))
init_model(infer_sess, infer_model)
eval_logger = EvalLogger(hyperparams.data_log_output_dir)
logger.log_print("##### start training #####")
global_step = 0
for epoch in range(hyperparams.train_num_epoch):
feed_dict, data_dict = generate_feed_dict(train_model, len(train_model.input_answer), hyperparams.train_batch_size)
train_sess.run(train_model.data_pipeline.initializer, feed_dict=feed_dict)
step_in_epoch = 0
while True:
try:
start_time = time.time()
train_result = train_model.model.train(train_sess, train_model.word_embedding)
end_time = time.time()
global_step = train_result.global_step
step_in_epoch += 1
train_logger.update(train_result, epoch, step_in_epoch, end_time-start_time)
if step_in_epoch % hyperparams.train_step_per_stat == 0:
train_logger.check()
train_summary_writer.add_summary(train_result.summary, global_step)
if step_in_epoch % hyperparams.train_step_per_ckpt == 0:
train_model.model.save(train_sess, global_step, "debug")
if step_in_epoch % hyperparams.train_step_per_eval == 0 and enable_eval == True:
ckpt_file = infer_model.model.get_latest_ckpt("debug")
sample_result = sample_predict(infer_sess, infer_model, hyperparams.train_eval_batch_size, ckpt_file, "debug")
extrinsic_eval(eval_logger, infer_summary_writer, sample_result,
hyperparams.train_eval_metric, hyperparams.train_eval_detail_type, global_step, epoch)
decoding_eval(eval_logger, sample_result, hyperparams.train_decoding_sample_size,
hyperparams.train_random_seed + global_step, global_step, epoch)
except tf.errors.OutOfRangeError:
train_logger.check()
train_summary_writer.add_summary(train_result.summary, global_step)
train_model.model.save(train_sess, global_step, "epoch")
if enable_eval == True:
ckpt_file = infer_model.model.get_latest_ckpt("epoch")
sample_result = sample_predict(infer_sess, infer_model, hyperparams.train_eval_batch_size, ckpt_file, "epoch")
extrinsic_eval(eval_logger, infer_summary_writer, sample_result,
hyperparams.train_eval_metric, hyperparams.train_eval_detail_type, global_step, epoch)
decoding_eval(eval_logger, sample_result, hyperparams.train_decoding_sample_size,
hyperparams.train_random_seed + global_step, global_step, epoch)
break
train_summary_writer.close_writer()
if enable_eval == True:
infer_summary_writer.close_writer()
logger.log_print("##### finish training #####")
def evaluate(logger,
hyperparams,
enable_debug=False):
config_proto = get_config_proto(hyperparams.device_log_device_placement,
hyperparams.device_allow_soft_placement, hyperparams.device_allow_growth,
hyperparams.device_per_process_gpu_memory_fraction)
summary_output_dir = hyperparams.train_summary_output_dir
if not tf.gfile.Exists(summary_output_dir):
tf.gfile.MakeDirs(summary_output_dir)
logger.log_print("##### create infer model #####")
infer_model = create_infer_model(logger, hyperparams)
infer_sess = tf.Session(config=config_proto, graph=infer_model.graph)
if enable_debug == True:
infer_sess = tf_debug.LocalCLIDebugWrapperSession(infer_sess)
infer_summary_writer = SummaryWriter(infer_model.graph, os.path.join(summary_output_dir, "infer"))
init_model(infer_sess, infer_model)
eval_logger = EvalLogger(hyperparams.data_log_output_dir)
logger.log_print("##### start evaluation #####")
global_step = 0
eval_mode = "debug" if enable_debug == True else "epoch"
ckpt_file_list = infer_model.model.get_ckpt_list(eval_mode)
for i, ckpt_file in enumerate(ckpt_file_list):
sample_result = sample_predict(infer_sess, infer_model, hyperparams.train_eval_batch_size, ckpt_file, eval_mode)
extrinsic_eval(eval_logger, infer_summary_writer, sample_result,
hyperparams.train_eval_metric, hyperparams.train_eval_detail_type, global_step, i)
decoding_eval(eval_logger, sample_result,
hyperparams.train_decoding_sample_size, hyperparams.train_random_seed, global_step, i)
infer_summary_writer.close_writer()
logger.log_print("##### finish evaluation #####")
def main(args):
hyperparams = load_hyperparams(args.config)
logger = DebugLogger(hyperparams.data_log_output_dir)
tf_version = check_tensorflow_version()
logger.log_print("# tensorflow verison is {0}".format(tf_version))
if (args.mode == 'train'):
train(logger, hyperparams, enable_eval=False, enable_debug=False)
elif (args.mode == 'train_eval'):
train(logger, hyperparams, enable_eval=True, enable_debug=False)
elif (args.mode == 'train_debug'):
train(logger, hyperparams, enable_eval=False, enable_debug=True)
elif (args.mode == 'eval'):
evaluate(logger, hyperparams, enable_debug=False)
elif (args.mode == 'eval_debug'):
evaluate(logger, hyperparams, enable_debug=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
add_arguments(parser)
args = parser.parse_args()
main(args)
| true
| true
|
7905d8dd0669aab54f98840fa2a5ea2f478d2399
| 9,566
|
py
|
Python
|
povary/apps/cakegallery/migrations/0014_auto__add_field_cakegallery_created__add_field_cakegallery_updated.py
|
TorinAsakura/cooking
|
cf0c78f613fa9ce0fcd4ec7a397ab880d9dd631a
|
[
"BSD-3-Clause"
] | null | null | null |
povary/apps/cakegallery/migrations/0014_auto__add_field_cakegallery_created__add_field_cakegallery_updated.py
|
TorinAsakura/cooking
|
cf0c78f613fa9ce0fcd4ec7a397ab880d9dd631a
|
[
"BSD-3-Clause"
] | null | null | null |
povary/apps/cakegallery/migrations/0014_auto__add_field_cakegallery_created__add_field_cakegallery_updated.py
|
TorinAsakura/cooking
|
cf0c78f613fa9ce0fcd4ec7a397ab880d9dd631a
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CakeGallery.created'
db.add_column(u'cakegallery_cakegallery', 'created',
self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=datetime.datetime(2013, 11, 18, 0, 0), blank=True),
keep_default=False)
# Adding field 'CakeGallery.updated'
db.add_column(u'cakegallery_cakegallery', 'updated',
self.gf('django.db.models.fields.DateTimeField')(auto_now=True, default=datetime.datetime(2013, 11, 18, 0, 0), blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'CakeGallery.created'
db.delete_column(u'cakegallery_cakegallery', 'created')
# Deleting field 'CakeGallery.updated'
db.delete_column(u'cakegallery_cakegallery', 'updated')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'cakegallery.cakecategory': {
'Meta': {'object_name': 'CakeCategory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'cakegallery.cakegallery': {
'Meta': {'object_name': 'CakeGallery'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'category'", 'to': u"orm['cakegallery.CakeCategory']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'}),
'subcategory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subcategory'", 'to': u"orm['cakegallery.CakeSubCategory']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'visits_num': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'cakegallery.cakeimage': {
'Meta': {'object_name': 'CakeImage'},
'add_watermark': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'blank': "'True'", 'related_name': "'images'", 'null': 'True', 'to': u"orm['cakegallery.CakeCategory']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'for_registered': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'gallery': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'gallery'", 'null': 'True', 'to': u"orm['cakegallery.CakeGallery']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100'}),
'image_alt': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'image_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'ip_addr': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'subcategory': ('django.db.models.fields.related.ForeignKey', [], {'blank': "'True'", 'related_name': "'images'", 'null': 'True', 'to': u"orm['cakegallery.CakeSubCategory']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'visits_num': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'cakegallery.cakesubcategory': {
'Meta': {'object_name': 'CakeSubCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subcategories'", 'to': u"orm['cakegallery.CakeCategory']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['cakegallery']
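# A usage sketch, assuming a standard South setup: manage.py drives the
# forwards()/backwards() methods defined above.
#
#   python manage.py migrate cakegallery 0014
#   python manage.py migrate cakegallery 0013   # roll back this migration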
| 75.920635
| 188
| 0.57234
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.add_column(u'cakegallery_cakegallery', 'created',
self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=datetime.datetime(2013, 11, 18, 0, 0), blank=True),
keep_default=False)
db.add_column(u'cakegallery_cakegallery', 'updated',
self.gf('django.db.models.fields.DateTimeField')(auto_now=True, default=datetime.datetime(2013, 11, 18, 0, 0), blank=True),
keep_default=False)
def backwards(self, orm):
db.delete_column(u'cakegallery_cakegallery', 'created')
db.delete_column(u'cakegallery_cakegallery', 'updated')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'cakegallery.cakecategory': {
'Meta': {'object_name': 'CakeCategory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'cakegallery.cakegallery': {
'Meta': {'object_name': 'CakeGallery'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'category'", 'to': u"orm['cakegallery.CakeCategory']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'}),
'subcategory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subcategory'", 'to': u"orm['cakegallery.CakeSubCategory']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'visits_num': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'cakegallery.cakeimage': {
'Meta': {'object_name': 'CakeImage'},
'add_watermark': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'blank': "'True'", 'related_name': "'images'", 'null': 'True', 'to': u"orm['cakegallery.CakeCategory']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'for_registered': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'gallery': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'gallery'", 'null': 'True', 'to': u"orm['cakegallery.CakeGallery']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100'}),
'image_alt': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'image_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'ip_addr': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'subcategory': ('django.db.models.fields.related.ForeignKey', [], {'blank': "'True'", 'related_name': "'images'", 'null': 'True', 'to': u"orm['cakegallery.CakeSubCategory']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'visits_num': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'cakegallery.cakesubcategory': {
'Meta': {'object_name': 'CakeSubCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subcategories'", 'to': u"orm['cakegallery.CakeCategory']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['cakegallery']
| true
| true
|
7905d8eee1cbd95cf50bad5ab70f5594720e29e3
| 3,889
|
py
|
Python
|
performance.py
|
olejak/cpme2
|
d4b6361bae24a1f5f47875850379cabe9755d654
|
[
"Apache-2.0"
] | null | null | null |
performance.py
|
olejak/cpme2
|
d4b6361bae24a1f5f47875850379cabe9755d654
|
[
"Apache-2.0"
] | null | null | null |
performance.py
|
olejak/cpme2
|
d4b6361bae24a1f5f47875850379cabe9755d654
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2020 by 0x7c2, Simon Brecht.
# All rights reserved.
# This file is part of the Report/Analytic Tool - CPme,
# and is released under the "Apache License 2.0". Please see the LICENSE
# file that should have been included as part of this package.
#
from templates import check
import func
class check_performance_ispredundancy(check):
page = "Health.Firewall"
category = "Information"
title = "ISP Redundancy"
isFirewall = True
isManagement = False
minVersion = 8020
command = "cpstat fw | grep -A5 'ISP link table' | grep '|'"
isCommand = True
def run_check(self):
for line in self.commandOut:
fields = line.split('|')
ispname = fields[1]
ispstatus = fields[2]
isprole = fields[3]
if ispname != "Name":
ipstatus = "WARN"
if ispstatus == "OK":
state = "PASS"
self.add_result(self.title + " (Name: " + ispname + ")", state, "Role: " + isprole)
else:
self.add_result(self.title, "PASS", "disabled")
class check_performance_securexl_sum(check):
page = "Health.SecureXL"
category = "Information"
title = "SecureXL"
isFirewall = True
isManagement = False
minVersion = 8020
command = "fwaccel stat | grep -v Template"
isCommand = True
def run_check(self):
for line in self.commandOut:
state = "FAIL"
data = line.strip('\n').split('|')
if len(data) < 4 or data[1].replace(" ","") == "" or data[1].replace(" ","") == "Id":
continue
id = data[1].replace(" ", "")
type = data[2].replace(" ", "")
status = data[3].replace(" ", "")
if status != "enabled":
state = "WARN"
else:
state = "PASS"
feature = True
self.add_result(self.title + " (Instance: " + id + ", Name: " + type + ", Status: " + status + ")", state, "")
class check_performance_securexl_templates(check):
page = "Health.SecureXL"
category = "Templates"
title = "SecureXL"
isFirewall = True
isManagement = False
minVersion = 8020
command = "fwaccel stat| grep Templates | sed s/\ \ */\/g| sed s/Templates//g"
isCommand = True
def run_check(self):
for line in self.commandOut:
state = "FAIL"
data = line.strip('\n').split(":")
if len(data) < 2:
continue
if "disabled" in data[1]:
state = "WARN"
if "enabled" in data[1]:
state = "PASS"
self.add_result(self.title + " (" + data[0] + " Templates)", state, data[1])
class check_performance_securexl_statistics(check):
page = "Health.SecureXL"
category = "Statistics"
title = "SecureXL"
isFirewall = True
isManagement = False
minVersion = 8020
command = "fwaccel stats -s | sed 's/ */ /g' | sed 's/\t/ /g'"
isCommand = True
def run_check(self):
for line in self.commandOut:
state = "PASS"
data = line.strip('\n').split(":")
if len(data) < 2:
continue
field = data[0].strip(' ')
valraw = data[1].strip(' ').split(" ")
valnum = valraw[0]
valper = int(str(valraw[1]).replace('(','').replace(')','').replace('%',''))
if "Accelerated conns" in field and valper < 30:
state = "WARN"
if "Accelerated pkts" in field and valper < 50:
state = "WARN"
if "F2Fed" in field and valper > 40:
state = "FAIL"
self.add_result(self.title + " (" + field + ")", state, valnum + "(" + str(valper) + "%)")
class check_performance_vpn_accel(check):
page = "Health.SecureXL"
category = "Information"
title = "SecureXL VPN Acceleration"
isFirewall = True
isManagement = False
minVersion = 8020
command = "vpn accel stat"
isCommand = True
def run_check(self):
found = False
for line in self.commandErr:
if "acceleration is enabled" in line:
self.add_result(self.title, 'PASS', line.strip())
found = True
if not found:
self.add_result(self.title, 'FAIL', str(self.commandOut) + str(self.commandErr))
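# A hypothetical sketch of how a new check plugs into this pattern; the
# class name, command and match string are illustrative, not part of CPme:
class check_performance_example(check):
	page = "Health.Firewall"
	category = "Information"
	title = "Example Check"
	isFirewall = True
	isManagement = False
	minVersion = 8020
	command = "echo 'example: ok'"
	isCommand = True
	def run_check(self):
		for line in self.commandOut:
			state = "PASS" if "ok" in line else "WARN"
			self.add_result(self.title, state, line.strip())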
| 28.386861
| 113
| 0.61404
|
from templates import check
import func
class check_performance_ispredundancy(check):
page = "Health.Firewall"
category = "Information"
title = "ISP Redundancy"
isFirewall = True
isManagement = False
minVersion = 8020
command = "cpstat fw | grep -A5 'ISP link table' | grep '|'"
isCommand = True
def run_check(self):
for line in self.commandOut:
fields = line.split('|')
ispname = fields[1]
ispstatus = fields[2]
isprole = fields[3]
if ispname != "Name":
                state = "WARN"
if ispstatus == "OK":
state = "PASS"
self.add_result(self.title + " (Name: " + ispname + ")", state, "Role: " + isprole)
else:
self.add_result(self.title, "PASS", "disabled")
class check_performance_securexl_sum(check):
page = "Health.SecureXL"
category = "Information"
title = "SecureXL"
isFirewall = True
isManagement = False
minVersion = 8020
command = "fwaccel stat | grep -v Template"
isCommand = True
def run_check(self):
for line in self.commandOut:
state = "FAIL"
data = line.strip('\n').split('|')
if len(data) < 4 or data[1].replace(" ","") == "" or data[1].replace(" ","") == "Id":
continue
id = data[1].replace(" ", "")
type = data[2].replace(" ", "")
status = data[3].replace(" ", "")
if status != "enabled":
state = "WARN"
else:
state = "PASS"
feature = True
self.add_result(self.title + " (Instance: " + id + ", Name: " + type + ", Status: " + status + ")", state, "")
class check_performance_securexl_templates(check):
page = "Health.SecureXL"
category = "Templates"
title = "SecureXL"
isFirewall = True
isManagement = False
minVersion = 8020
command = "fwaccel stat| grep Templates | sed s/\ \ */\/g| sed s/Templates//g"
isCommand = True
def run_check(self):
for line in self.commandOut:
state = "FAIL"
data = line.strip('\n').split(":")
if len(data) < 2:
continue
if "disabled" in data[1]:
state = "WARN"
if "enabled" in data[1]:
state = "PASS"
self.add_result(self.title + " (" + data[0] + " Templates)", state, data[1])
class check_performance_securexl_statistics(check):
page = "Health.SecureXL"
category = "Statistics"
title = "SecureXL"
isFirewall = True
isManagement = False
minVersion = 8020
command = "fwaccel stats -s | sed 's/ */ /g' | sed 's/\t/ /g'"
isCommand = True
def run_check(self):
for line in self.commandOut:
state = "PASS"
data = line.strip('\n').split(":")
if len(data) < 2:
continue
field = data[0].strip(' ')
valraw = data[1].strip(' ').split(" ")
valnum = valraw[0]
valper = int(str(valraw[1]).replace('(','').replace(')','').replace('%',''))
if "Accelerated conns" in field and valper < 30:
state = "WARN"
if "Accelerated pkts" in field and valper < 50:
state = "WARN"
if "F2Fed" in field and valper > 40:
state = "FAIL"
self.add_result(self.title + " (" + field + ")", state, valnum + "(" + str(valper) + "%)")
class check_performance_vpn_accel(check):
page = "Health.SecureXL"
category = "Information"
title = "SecureXL VPN Acceleration"
isFirewall = True
isManagement = False
minVersion = 8020
command = "vpn accel stat"
isCommand = True
def run_check(self):
found = False
for line in self.commandErr:
if "acceleration is enabled" in line:
self.add_result(self.title, 'PASS', line.strip())
found = True
if not found:
self.add_result(self.title, 'FAIL', str(self.commandOut) + str(self.commandErr))
is_comment_constant_removed: true
is_sharp_comment_removed: true

hexsha: 7905da66c1e619153c75d7e05cad748710d63849
size: 2,090
ext: py
lang: Python
max_stars_repo_path: tensorflow/python/keras/_impl/keras/datasets/cifar10.py
max_stars_repo_name: harunpehlivan/tensorflow
max_stars_repo_head_hexsha: 376e2cfdab31f4da251ea2e50992a9bf97fd171b
max_stars_repo_licenses: ["Apache-2.0"]
max_stars_count: 22
max_stars_repo_stars_event_min_datetime: 2018-01-13T14:52:47.000Z
max_stars_repo_stars_event_max_datetime: 2018-07-05T01:00:28.000Z
max_issues_repo_path: tensorflow/python/keras/_impl/keras/datasets/cifar10.py
max_issues_repo_name: hamzabekkouri/tensorflow
max_issues_repo_head_hexsha: d87a9fbbc5f49ec5ae8eb52c62628f0b1a0bf67f
max_issues_repo_licenses: ["Apache-2.0"]
max_issues_count: 3
max_issues_repo_issues_event_min_datetime: 2018-05-09T11:31:58.000Z
max_issues_repo_issues_event_max_datetime: 2021-01-27T12:26:21.000Z
max_forks_repo_path: tensorflow/python/keras/_impl/keras/datasets/cifar10.py
max_forks_repo_name: hamzabekkouri/tensorflow
max_forks_repo_head_hexsha: d87a9fbbc5f49ec5ae8eb52c62628f0b1a0bf67f
max_forks_repo_licenses: ["Apache-2.0"]
max_forks_count: 13
max_forks_repo_forks_event_min_datetime: 2018-02-22T21:04:13.000Z
max_forks_repo_forks_event_max_datetime: 2020-11-17T11:38:36.000Z
content:
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CIFAR10 small image classification dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras.datasets.cifar import load_batch
from tensorflow.python.keras._impl.keras.utils.data_utils import get_file
def load_data():
"""Loads CIFAR10 dataset.
Returns:
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
"""
dirname = 'cifar-10-batches-py'
origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
path = get_file(dirname, origin=origin, untar=True)
num_train_samples = 50000
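  # CIFAR-10 ships its training set as five pickled batches of 10,000
  # images each; fill one (50000, 3, 32, 32) uint8 array from them.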
x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')
y_train = np.empty((num_train_samples,), dtype='uint8')
for i in range(1, 6):
fpath = os.path.join(path, 'data_batch_' + str(i))
(x_train[(i - 1) * 10000:i * 10000, :, :, :],
y_train[(i - 1) * 10000:i * 10000]) = load_batch(fpath)
fpath = os.path.join(path, 'test_batch')
x_test, y_test = load_batch(fpath)
y_train = np.reshape(y_train, (len(y_train), 1))
y_test = np.reshape(y_test, (len(y_test), 1))
if K.image_data_format() == 'channels_last':
x_train = x_train.transpose(0, 2, 3, 1)
x_test = x_test.transpose(0, 2, 3, 1)
return (x_train, y_train), (x_test, y_test)
avg_line_length: 34.262295
max_line_length: 80
alphanum_fraction: 0.691388
content_no_comment:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras.datasets.cifar import load_batch
from tensorflow.python.keras._impl.keras.utils.data_utils import get_file
def load_data():
dirname = 'cifar-10-batches-py'
origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
path = get_file(dirname, origin=origin, untar=True)
num_train_samples = 50000
x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')
y_train = np.empty((num_train_samples,), dtype='uint8')
for i in range(1, 6):
fpath = os.path.join(path, 'data_batch_' + str(i))
(x_train[(i - 1) * 10000:i * 10000, :, :, :],
y_train[(i - 1) * 10000:i * 10000]) = load_batch(fpath)
fpath = os.path.join(path, 'test_batch')
x_test, y_test = load_batch(fpath)
y_train = np.reshape(y_train, (len(y_train), 1))
y_test = np.reshape(y_test, (len(y_test), 1))
if K.image_data_format() == 'channels_last':
x_train = x_train.transpose(0, 2, 3, 1)
x_test = x_test.transpose(0, 2, 3, 1)
return (x_train, y_train), (x_test, y_test)
is_comment_constant_removed: true
is_sharp_comment_removed: true

hexsha: 7905dc4bc915252ba607c44bd188b2cc732b9fb9
size: 15,732
ext: py
lang: Python
max_stars_repo_path: test/sql/test_insert_exec.py
max_stars_repo_name: lxl0928/timi_sqlalchemy
max_stars_repo_head_hexsha: ebd3abc1e7bc23f211ef11ed05ef821233d066cc
max_stars_repo_licenses: ["MIT"]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2021-09-04T18:25:05.000Z
max_stars_repo_stars_event_max_datetime: 2021-09-04T18:25:05.000Z
max_issues_repo_path: test/sql/test_insert_exec.py
max_issues_repo_name: lxl0928/timi_sqlalchemy
max_issues_repo_head_hexsha: ebd3abc1e7bc23f211ef11ed05ef821233d066cc
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: test/sql/test_insert_exec.py
max_forks_repo_name: lxl0928/timi_sqlalchemy
max_forks_repo_head_hexsha: ebd3abc1e7bc23f211ef11ed05ef821233d066cc
max_forks_repo_licenses: ["MIT"]
max_forks_count: 21
max_forks_repo_forks_event_min_datetime: 2017-11-13T13:23:27.000Z
max_forks_repo_forks_event_max_datetime: 2019-10-07T02:00:52.000Z
content:
from sqlalchemy import and_
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import INT
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Sequence
from sqlalchemy import sql
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import VARCHAR
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
class InsertExecTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column(
"user_id", INT, primary_key=True, test_needs_autoincrement=True
),
Column("user_name", VARCHAR(20)),
test_needs_acid=True,
)
@testing.requires.multivalues_inserts
def test_multivalues_insert(self):
users = self.tables.users
users.insert(
values=[
{"user_id": 7, "user_name": "jack"},
{"user_id": 8, "user_name": "ed"},
]
).execute()
rows = users.select().order_by(users.c.user_id).execute().fetchall()
eq_(rows[0], (7, "jack"))
eq_(rows[1], (8, "ed"))
users.insert(values=[(9, "jack"), (10, "ed")]).execute()
rows = users.select().order_by(users.c.user_id).execute().fetchall()
eq_(rows[2], (9, "jack"))
eq_(rows[3], (10, "ed"))
def test_insert_heterogeneous_params(self):
"""test that executemany parameters are asserted to match the
parameter set of the first."""
users = self.tables.users
assert_raises_message(
exc.StatementError,
r"\(sqlalchemy.exc.InvalidRequestError\) A value is required for "
"bind parameter 'user_name', in "
"parameter group 2\n"
r"\[SQL: u?INSERT INTO users",
users.insert().execute,
{"user_id": 7, "user_name": "jack"},
{"user_id": 8, "user_name": "ed"},
{"user_id": 9},
)
# this succeeds however. We aren't yet doing
# a length check on all subsequent parameters.
users.insert().execute(
{"user_id": 7}, {"user_id": 8, "user_name": "ed"}, {"user_id": 9}
)
def _test_lastrow_accessor(self, table_, values, assertvalues):
"""Tests the inserted_primary_key and lastrow_has_id() functions."""
def insert_values(engine, table_, values):
"""
Inserts a row into a table, returns the full list of values
INSERTed including defaults that fired off on the DB side and
detects rows that had defaults and post-fetches.
"""
# verify implicit_returning is working
if engine.dialect.implicit_returning:
ins = table_.insert()
comp = ins.compile(engine, column_keys=list(values))
if not set(values).issuperset(
c.key for c in table_.primary_key
):
is_(bool(comp.returning), True)
result = engine.execute(table_.insert(), **values)
ret = values.copy()
for col, id_ in zip(
table_.primary_key, result.inserted_primary_key
):
ret[col.key] = id_
if result.lastrow_has_defaults():
criterion = and_(
*[
col == id_
for col, id_ in zip(
table_.primary_key, result.inserted_primary_key
)
]
)
row = engine.execute(table_.select(criterion)).first()
for c in table_.c:
ret[c.key] = row[c]
return ret
if testing.against("firebird", "postgresql", "oracle", "mssql"):
assert testing.db.dialect.implicit_returning
if testing.db.dialect.implicit_returning:
test_engines = [
engines.testing_engine(options={"implicit_returning": False}),
engines.testing_engine(options={"implicit_returning": True}),
]
else:
test_engines = [testing.db]
for engine in test_engines:
try:
table_.create(bind=engine, checkfirst=True)
i = insert_values(engine, table_, values)
eq_(i, assertvalues)
finally:
table_.drop(bind=engine)
@testing.skip_if("sqlite")
def test_lastrow_accessor_one(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t1",
metadata,
Column(
"id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("foo", String(30), primary_key=True),
),
{"foo": "hi"},
{"id": 1, "foo": "hi"},
)
@testing.skip_if("sqlite")
def test_lastrow_accessor_two(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t2",
metadata,
Column(
"id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("foo", String(30), primary_key=True),
Column("bar", String(30), server_default="hi"),
),
{"foo": "hi"},
{"id": 1, "foo": "hi", "bar": "hi"},
)
def test_lastrow_accessor_three(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t3",
metadata,
Column("id", String(40), primary_key=True),
Column("foo", String(30), primary_key=True),
Column("bar", String(30)),
),
{"id": "hi", "foo": "thisisfoo", "bar": "thisisbar"},
{"id": "hi", "foo": "thisisfoo", "bar": "thisisbar"},
)
@testing.requires.sequences
def test_lastrow_accessor_four(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t4",
metadata,
Column(
"id",
Integer,
Sequence("t4_id_seq", optional=True),
primary_key=True,
),
Column("foo", String(30), primary_key=True),
Column("bar", String(30), server_default="hi"),
),
{"foo": "hi", "id": 1},
{"id": 1, "foo": "hi", "bar": "hi"},
)
def test_lastrow_accessor_five(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t5",
metadata,
Column("id", String(10), primary_key=True),
Column("bar", String(30), server_default="hi"),
),
{"id": "id1"},
{"id": "id1", "bar": "hi"},
)
@testing.skip_if("sqlite")
def test_lastrow_accessor_six(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t6",
metadata,
Column(
"id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("bar", Integer, primary_key=True),
),
{"bar": 0},
{"id": 1, "bar": 0},
)
# TODO: why not in the sqlite suite?
@testing.only_on("sqlite+pysqlite")
@testing.provide_metadata
def test_lastrowid_zero(self):
from sqlalchemy.dialects import sqlite
eng = engines.testing_engine()
class ExcCtx(sqlite.base.SQLiteExecutionContext):
def get_lastrowid(self):
return 0
eng.dialect.execution_ctx_cls = ExcCtx
t = Table(
"t",
self.metadata,
Column("x", Integer, primary_key=True),
Column("y", Integer),
)
t.create(eng)
r = eng.execute(t.insert().values(y=5))
eq_(r.inserted_primary_key, [0])
@testing.fails_on(
"sqlite", "sqlite autoincremnt doesn't work with composite pks"
)
@testing.provide_metadata
def test_misordered_lastrow(self):
metadata = self.metadata
related = Table(
"related",
metadata,
Column("id", Integer, primary_key=True),
mysql_engine="MyISAM",
)
t6 = Table(
"t6",
metadata,
Column(
"manual_id",
Integer,
ForeignKey("related.id"),
primary_key=True,
),
Column(
"auto_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
mysql_engine="MyISAM",
)
metadata.create_all()
r = related.insert().values(id=12).execute()
id_ = r.inserted_primary_key[0]
eq_(id_, 12)
r = t6.insert().values(manual_id=id_).execute()
eq_(r.inserted_primary_key, [12, 1])
def test_implicit_id_insert_select_columns(self):
users = self.tables.users
stmt = users.insert().from_select(
(users.c.user_id, users.c.user_name),
users.select().where(users.c.user_id == 20),
)
testing.db.execute(stmt)
def test_implicit_id_insert_select_keys(self):
users = self.tables.users
stmt = users.insert().from_select(
["user_id", "user_name"],
users.select().where(users.c.user_id == 20),
)
testing.db.execute(stmt)
@testing.requires.empty_inserts
@testing.requires.returning
def test_no_inserted_pk_on_returning(self):
users = self.tables.users
result = testing.db.execute(
users.insert().returning(users.c.user_id, users.c.user_name)
)
assert_raises_message(
exc.InvalidRequestError,
r"Can't call inserted_primary_key when returning\(\) is used.",
getattr,
result,
"inserted_primary_key",
)
class TableInsertTest(fixtures.TablesTest):
"""test for consistent insert behavior across dialects
regarding the inline=True flag, lower-case 't' tables.
"""
run_create_tables = "each"
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"foo",
metadata,
Column("id", Integer, Sequence("t_id_seq"), primary_key=True),
Column("data", String(50)),
Column("x", Integer),
)
def _fixture(self, types=True):
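        # Build a lower-case sql.table() construct, with or without column
        # types, mirroring the "foo" Table defined in define_tables().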
if types:
t = sql.table(
"foo",
sql.column("id", Integer),
sql.column("data", String),
sql.column("x", Integer),
)
else:
t = sql.table(
"foo", sql.column("id"), sql.column("data"), sql.column("x")
)
return t
def _test(self, stmt, row, returning=None, inserted_primary_key=False):
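        # Execute the statement, assert either the RETURNING row or the
        # inserted primary key, then verify the row actually persisted.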
r = testing.db.execute(stmt)
if returning:
returned = r.first()
eq_(returned, returning)
elif inserted_primary_key is not False:
eq_(r.inserted_primary_key, inserted_primary_key)
eq_(testing.db.execute(self.tables.foo.select()).first(), row)
def _test_multi(self, stmt, rows, data):
testing.db.execute(stmt, rows)
eq_(
testing.db.execute(
self.tables.foo.select().order_by(self.tables.foo.c.id)
).fetchall(),
data,
)
@testing.requires.sequences
def test_explicit_sequence(self):
t = self._fixture()
self._test(
t.insert().values(
id=func.next_value(Sequence("t_id_seq")), data="data", x=5
),
(1, "data", 5),
)
def test_uppercase(self):
t = self.tables.foo
self._test(
t.insert().values(id=1, data="data", x=5),
(1, "data", 5),
inserted_primary_key=[1],
)
def test_uppercase_inline(self):
t = self.tables.foo
self._test(
t.insert(inline=True).values(id=1, data="data", x=5),
(1, "data", 5),
inserted_primary_key=[1],
)
@testing.crashes(
"mssql+pyodbc",
"Pyodbc + SQL Server + Py3K, some decimal handling issue",
)
def test_uppercase_inline_implicit(self):
t = self.tables.foo
self._test(
t.insert(inline=True).values(data="data", x=5),
(1, "data", 5),
inserted_primary_key=[None],
)
def test_uppercase_implicit(self):
t = self.tables.foo
self._test(
t.insert().values(data="data", x=5),
(1, "data", 5),
inserted_primary_key=[1],
)
def test_uppercase_direct_params(self):
t = self.tables.foo
self._test(
t.insert().values(id=1, data="data", x=5),
(1, "data", 5),
inserted_primary_key=[1],
)
@testing.requires.returning
def test_uppercase_direct_params_returning(self):
t = self.tables.foo
self._test(
t.insert().values(id=1, data="data", x=5).returning(t.c.id, t.c.x),
(1, "data", 5),
returning=(1, 5),
)
@testing.fails_on(
"mssql", "lowercase table doesn't support identity insert disable"
)
def test_direct_params(self):
t = self._fixture()
self._test(
t.insert().values(id=1, data="data", x=5),
(1, "data", 5),
inserted_primary_key=[],
)
@testing.fails_on(
"mssql", "lowercase table doesn't support identity insert disable"
)
@testing.requires.returning
def test_direct_params_returning(self):
t = self._fixture()
self._test(
t.insert().values(id=1, data="data", x=5).returning(t.c.id, t.c.x),
(1, "data", 5),
returning=(1, 5),
)
@testing.requires.emulated_lastrowid
def test_implicit_pk(self):
t = self._fixture()
self._test(
t.insert().values(data="data", x=5),
(1, "data", 5),
inserted_primary_key=[],
)
@testing.requires.emulated_lastrowid
def test_implicit_pk_multi_rows(self):
t = self._fixture()
self._test_multi(
t.insert(),
[
{"data": "d1", "x": 5},
{"data": "d2", "x": 6},
{"data": "d3", "x": 7},
],
[(1, "d1", 5), (2, "d2", 6), (3, "d3", 7)],
)
@testing.requires.emulated_lastrowid
def test_implicit_pk_inline(self):
t = self._fixture()
self._test(
t.insert(inline=True).values(data="data", x=5),
(1, "data", 5),
inserted_primary_key=[],
)
avg_line_length: 30.968504
max_line_length: 79
alphanum_fraction: 0.515446
content_no_comment:
from sqlalchemy import and_
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import INT
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Sequence
from sqlalchemy import sql
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import VARCHAR
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
class InsertExecTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column(
"user_id", INT, primary_key=True, test_needs_autoincrement=True
),
Column("user_name", VARCHAR(20)),
test_needs_acid=True,
)
@testing.requires.multivalues_inserts
def test_multivalues_insert(self):
users = self.tables.users
users.insert(
values=[
{"user_id": 7, "user_name": "jack"},
{"user_id": 8, "user_name": "ed"},
]
).execute()
rows = users.select().order_by(users.c.user_id).execute().fetchall()
eq_(rows[0], (7, "jack"))
eq_(rows[1], (8, "ed"))
users.insert(values=[(9, "jack"), (10, "ed")]).execute()
rows = users.select().order_by(users.c.user_id).execute().fetchall()
eq_(rows[2], (9, "jack"))
eq_(rows[3], (10, "ed"))
def test_insert_heterogeneous_params(self):
users = self.tables.users
assert_raises_message(
exc.StatementError,
r"\(sqlalchemy.exc.InvalidRequestError\) A value is required for "
"bind parameter 'user_name', in "
"parameter group 2\n"
r"\[SQL: u?INSERT INTO users",
users.insert().execute,
{"user_id": 7, "user_name": "jack"},
{"user_id": 8, "user_name": "ed"},
{"user_id": 9},
)
# a length check on all subsequent parameters.
users.insert().execute(
{"user_id": 7}, {"user_id": 8, "user_name": "ed"}, {"user_id": 9}
)
def _test_lastrow_accessor(self, table_, values, assertvalues):
def insert_values(engine, table_, values):
# verify implicit_returning is working
if engine.dialect.implicit_returning:
ins = table_.insert()
comp = ins.compile(engine, column_keys=list(values))
if not set(values).issuperset(
c.key for c in table_.primary_key
):
is_(bool(comp.returning), True)
result = engine.execute(table_.insert(), **values)
ret = values.copy()
for col, id_ in zip(
table_.primary_key, result.inserted_primary_key
):
ret[col.key] = id_
if result.lastrow_has_defaults():
criterion = and_(
*[
col == id_
for col, id_ in zip(
table_.primary_key, result.inserted_primary_key
)
]
)
row = engine.execute(table_.select(criterion)).first()
for c in table_.c:
ret[c.key] = row[c]
return ret
if testing.against("firebird", "postgresql", "oracle", "mssql"):
assert testing.db.dialect.implicit_returning
if testing.db.dialect.implicit_returning:
test_engines = [
engines.testing_engine(options={"implicit_returning": False}),
engines.testing_engine(options={"implicit_returning": True}),
]
else:
test_engines = [testing.db]
for engine in test_engines:
try:
table_.create(bind=engine, checkfirst=True)
i = insert_values(engine, table_, values)
eq_(i, assertvalues)
finally:
table_.drop(bind=engine)
@testing.skip_if("sqlite")
def test_lastrow_accessor_one(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t1",
metadata,
Column(
"id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("foo", String(30), primary_key=True),
),
{"foo": "hi"},
{"id": 1, "foo": "hi"},
)
@testing.skip_if("sqlite")
def test_lastrow_accessor_two(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t2",
metadata,
Column(
"id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("foo", String(30), primary_key=True),
Column("bar", String(30), server_default="hi"),
),
{"foo": "hi"},
{"id": 1, "foo": "hi", "bar": "hi"},
)
def test_lastrow_accessor_three(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t3",
metadata,
Column("id", String(40), primary_key=True),
Column("foo", String(30), primary_key=True),
Column("bar", String(30)),
),
{"id": "hi", "foo": "thisisfoo", "bar": "thisisbar"},
{"id": "hi", "foo": "thisisfoo", "bar": "thisisbar"},
)
@testing.requires.sequences
def test_lastrow_accessor_four(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t4",
metadata,
Column(
"id",
Integer,
Sequence("t4_id_seq", optional=True),
primary_key=True,
),
Column("foo", String(30), primary_key=True),
Column("bar", String(30), server_default="hi"),
),
{"foo": "hi", "id": 1},
{"id": 1, "foo": "hi", "bar": "hi"},
)
def test_lastrow_accessor_five(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t5",
metadata,
Column("id", String(10), primary_key=True),
Column("bar", String(30), server_default="hi"),
),
{"id": "id1"},
{"id": "id1", "bar": "hi"},
)
@testing.skip_if("sqlite")
def test_lastrow_accessor_six(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t6",
metadata,
Column(
"id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("bar", Integer, primary_key=True),
),
{"bar": 0},
{"id": 1, "bar": 0},
)
# TODO: why not in the sqlite suite?
@testing.only_on("sqlite+pysqlite")
@testing.provide_metadata
def test_lastrowid_zero(self):
from sqlalchemy.dialects import sqlite
eng = engines.testing_engine()
class ExcCtx(sqlite.base.SQLiteExecutionContext):
def get_lastrowid(self):
return 0
eng.dialect.execution_ctx_cls = ExcCtx
t = Table(
"t",
self.metadata,
Column("x", Integer, primary_key=True),
Column("y", Integer),
)
t.create(eng)
r = eng.execute(t.insert().values(y=5))
eq_(r.inserted_primary_key, [0])
@testing.fails_on(
"sqlite", "sqlite autoincremnt doesn't work with composite pks"
)
@testing.provide_metadata
def test_misordered_lastrow(self):
metadata = self.metadata
related = Table(
"related",
metadata,
Column("id", Integer, primary_key=True),
mysql_engine="MyISAM",
)
t6 = Table(
"t6",
metadata,
Column(
"manual_id",
Integer,
ForeignKey("related.id"),
primary_key=True,
),
Column(
"auto_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
mysql_engine="MyISAM",
)
metadata.create_all()
r = related.insert().values(id=12).execute()
id_ = r.inserted_primary_key[0]
eq_(id_, 12)
r = t6.insert().values(manual_id=id_).execute()
eq_(r.inserted_primary_key, [12, 1])
def test_implicit_id_insert_select_columns(self):
users = self.tables.users
stmt = users.insert().from_select(
(users.c.user_id, users.c.user_name),
users.select().where(users.c.user_id == 20),
)
testing.db.execute(stmt)
def test_implicit_id_insert_select_keys(self):
users = self.tables.users
stmt = users.insert().from_select(
["user_id", "user_name"],
users.select().where(users.c.user_id == 20),
)
testing.db.execute(stmt)
@testing.requires.empty_inserts
@testing.requires.returning
def test_no_inserted_pk_on_returning(self):
users = self.tables.users
result = testing.db.execute(
users.insert().returning(users.c.user_id, users.c.user_name)
)
assert_raises_message(
exc.InvalidRequestError,
r"Can't call inserted_primary_key when returning\(\) is used.",
getattr,
result,
"inserted_primary_key",
)
class TableInsertTest(fixtures.TablesTest):
run_create_tables = "each"
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"foo",
metadata,
Column("id", Integer, Sequence("t_id_seq"), primary_key=True),
Column("data", String(50)),
Column("x", Integer),
)
def _fixture(self, types=True):
if types:
t = sql.table(
"foo",
sql.column("id", Integer),
sql.column("data", String),
sql.column("x", Integer),
)
else:
t = sql.table(
"foo", sql.column("id"), sql.column("data"), sql.column("x")
)
return t
def _test(self, stmt, row, returning=None, inserted_primary_key=False):
r = testing.db.execute(stmt)
if returning:
returned = r.first()
eq_(returned, returning)
elif inserted_primary_key is not False:
eq_(r.inserted_primary_key, inserted_primary_key)
eq_(testing.db.execute(self.tables.foo.select()).first(), row)
def _test_multi(self, stmt, rows, data):
testing.db.execute(stmt, rows)
eq_(
testing.db.execute(
self.tables.foo.select().order_by(self.tables.foo.c.id)
).fetchall(),
data,
)
@testing.requires.sequences
def test_explicit_sequence(self):
t = self._fixture()
self._test(
t.insert().values(
id=func.next_value(Sequence("t_id_seq")), data="data", x=5
),
(1, "data", 5),
)
def test_uppercase(self):
t = self.tables.foo
self._test(
t.insert().values(id=1, data="data", x=5),
(1, "data", 5),
inserted_primary_key=[1],
)
def test_uppercase_inline(self):
t = self.tables.foo
self._test(
t.insert(inline=True).values(id=1, data="data", x=5),
(1, "data", 5),
inserted_primary_key=[1],
)
@testing.crashes(
"mssql+pyodbc",
"Pyodbc + SQL Server + Py3K, some decimal handling issue",
)
def test_uppercase_inline_implicit(self):
t = self.tables.foo
self._test(
t.insert(inline=True).values(data="data", x=5),
(1, "data", 5),
inserted_primary_key=[None],
)
def test_uppercase_implicit(self):
t = self.tables.foo
self._test(
t.insert().values(data="data", x=5),
(1, "data", 5),
inserted_primary_key=[1],
)
def test_uppercase_direct_params(self):
t = self.tables.foo
self._test(
t.insert().values(id=1, data="data", x=5),
(1, "data", 5),
inserted_primary_key=[1],
)
@testing.requires.returning
def test_uppercase_direct_params_returning(self):
t = self.tables.foo
self._test(
t.insert().values(id=1, data="data", x=5).returning(t.c.id, t.c.x),
(1, "data", 5),
returning=(1, 5),
)
@testing.fails_on(
"mssql", "lowercase table doesn't support identity insert disable"
)
def test_direct_params(self):
t = self._fixture()
self._test(
t.insert().values(id=1, data="data", x=5),
(1, "data", 5),
inserted_primary_key=[],
)
@testing.fails_on(
"mssql", "lowercase table doesn't support identity insert disable"
)
@testing.requires.returning
def test_direct_params_returning(self):
t = self._fixture()
self._test(
t.insert().values(id=1, data="data", x=5).returning(t.c.id, t.c.x),
(1, "data", 5),
returning=(1, 5),
)
@testing.requires.emulated_lastrowid
def test_implicit_pk(self):
t = self._fixture()
self._test(
t.insert().values(data="data", x=5),
(1, "data", 5),
inserted_primary_key=[],
)
@testing.requires.emulated_lastrowid
def test_implicit_pk_multi_rows(self):
t = self._fixture()
self._test_multi(
t.insert(),
[
{"data": "d1", "x": 5},
{"data": "d2", "x": 6},
{"data": "d3", "x": 7},
],
[(1, "d1", 5), (2, "d2", 6), (3, "d3", 7)],
)
@testing.requires.emulated_lastrowid
def test_implicit_pk_inline(self):
t = self._fixture()
self._test(
t.insert(inline=True).values(data="data", x=5),
(1, "data", 5),
inserted_primary_key=[],
)
is_comment_constant_removed: true
is_sharp_comment_removed: true

hexsha: 7905dc86e6b2268b3729640356d766f5c8951bc3
size: 712
ext: py
lang: Python
max_stars_repo_path: problem0021.py
max_stars_repo_name: kmarcini/Project-Euler-Python
max_stars_repo_head_hexsha: d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3
max_stars_repo_licenses: ["BSD-3-Clause"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: problem0021.py
max_issues_repo_name: kmarcini/Project-Euler-Python
max_issues_repo_head_hexsha: d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3
max_issues_repo_licenses: ["BSD-3-Clause"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: problem0021.py
max_forks_repo_name: kmarcini/Project-Euler-Python
max_forks_repo_head_hexsha: d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3
max_forks_repo_licenses: ["BSD-3-Clause"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
###########################
#
# #21 Amicable numbers - Project Euler
# https://projecteuler.net/problem=21
#
# Code by Kevin Marciniak
#
###########################
def sumproperdivisors(num):
    # Avoid shadowing the built-in sum(); proper divisors run up to num // 2.
    total = 0
    for x in range(1, num // 2 + 1):
        if num % x == 0:
            total += x
    return total
amicableList = []
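# Two numbers form an amicable pair when each equals the sum of the other's
# proper divisors, e.g. d(220) = 284 and d(284) = 220.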
for x in range(0, 10000):
    temp = sumproperdivisors(x)
    # temp is d(x) by construction, so only the reverse direction needs a
    # check; temp != x excludes perfect numbers.
    if sumproperdivisors(temp) == x and temp != x:
if x not in amicableList and temp not in amicableList:
amicableList.append(x)
amicableList.append(temp)
totalSum = 0
for y in range(0, len(amicableList)):
totalSum += amicableList[y]
print(totalSum)
avg_line_length: 20.941176
max_line_length: 83
alphanum_fraction: 0.573034
content_no_comment:
is_comment_constant_removed: true
is_sharp_comment_removed: true
hexsha: 7905de86b183dae3ebbca47c228963971c53ce7c
size: 921
ext: py
lang: Python
max_stars_repo_path: var/spack/repos/builtin/packages/r-ica/package.py
max_stars_repo_name: xiki-tempula/spack
max_stars_repo_head_hexsha: 9d66c05e93ab8a933fc59915040c0e0c86a4aac4
max_stars_repo_licenses: ["ECL-2.0", "Apache-2.0", "MIT"]
max_stars_count: 9
max_stars_repo_stars_event_min_datetime: 2018-04-18T07:51:40.000Z
max_stars_repo_stars_event_max_datetime: 2021-09-10T03:56:57.000Z
max_issues_repo_path: var/spack/repos/builtin/packages/r-ica/package.py
max_issues_repo_name: xiki-tempula/spack
max_issues_repo_head_hexsha: 9d66c05e93ab8a933fc59915040c0e0c86a4aac4
max_issues_repo_licenses: ["ECL-2.0", "Apache-2.0", "MIT"]
max_issues_count: 907
max_issues_repo_issues_event_min_datetime: 2018-04-18T11:17:57.000Z
max_issues_repo_issues_event_max_datetime: 2022-03-31T13:20:25.000Z
max_forks_repo_path: var/spack/repos/builtin/packages/r-ica/package.py
max_forks_repo_name: xiki-tempula/spack
max_forks_repo_head_hexsha: 9d66c05e93ab8a933fc59915040c0e0c86a4aac4
max_forks_repo_licenses: ["ECL-2.0", "Apache-2.0", "MIT"]
max_forks_count: 29
max_forks_repo_forks_event_min_datetime: 2018-11-05T16:14:23.000Z
max_forks_repo_forks_event_max_datetime: 2022-02-03T16:07:09.000Z
content:
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RIca(RPackage):
"""Independent Component Analysis (ICA) using various algorithms: FastICA,
Information-Maximization (Infomax), and Joint Approximate Diagonalization
of Eigenmatrices (JADE)."""
homepage = "https://cloud.r-project.org/package=ica"
url = "https://cloud.r-project.org/src/contrib/ica_1.0-1.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/ica"
version('1.0-2', sha256='e721596fc6175d3270a60d5e0b5b98be103a8fd0dd93ef16680af21fe0b54179')
version('1.0-1', sha256='98559a8bb12dd134a40ce8fd133803e2a38456b45d0e2a507d66022a8e2274ae')
version('1.0-0', sha256='9ff4ec7f4525bdce9d7859b22a1a170a1f6f9f7fb9f3d0b537dcaec77cd83d01')
avg_line_length: 43.857143
max_line_length: 95
alphanum_fraction: 0.763301
content_no_comment:
from spack import *
class RIca(RPackage):
homepage = "https://cloud.r-project.org/package=ica"
url = "https://cloud.r-project.org/src/contrib/ica_1.0-1.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/ica"
version('1.0-2', sha256='e721596fc6175d3270a60d5e0b5b98be103a8fd0dd93ef16680af21fe0b54179')
version('1.0-1', sha256='98559a8bb12dd134a40ce8fd133803e2a38456b45d0e2a507d66022a8e2274ae')
version('1.0-0', sha256='9ff4ec7f4525bdce9d7859b22a1a170a1f6f9f7fb9f3d0b537dcaec77cd83d01')
is_comment_constant_removed: true
is_sharp_comment_removed: true