hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7f5eda99e3950bc0e18d183778fcbcc24d6fee5 | 515 | py | Python | 1977.py | dayaelee/baekjoon | cf0e2b8b29dcd759c90f4736f6c26dd1982c72a3 | [
"MIT"
] | null | null | null | 1977.py | dayaelee/baekjoon | cf0e2b8b29dcd759c90f4736f6c26dd1982c72a3 | [
"MIT"
] | null | null | null | 1977.py | dayaelee/baekjoon | cf0e2b8b29dcd759c90f4736f6c26dd1982c72a3 | [
"MIT"
] | null | null | null | short=int(input())
max=int(input())
start = 1
sum=0
bb=[]
while(1):
n_start = start * start
if(n_start<short):
start=start+1
continue
elif(n_start>=short and n_start<=max):
sum=n_start+sum
bb.append(int(sum))
start=1+start
a=n_start
continue
elif(n_start>max):
if(sum==0):
print('-1')
break
else:
print(sum)
print(bb[0])
break
break
else:
break
| 17.166667 | 42 | 0.475728 | short=int(input())
max=int(input())
start = 1
sum=0
bb=[]
while(1):
n_start = start * start
if(n_start<short):
start=start+1
continue
elif(n_start>=short and n_start<=max):
sum=n_start+sum
bb.append(int(sum))
start=1+start
a=n_start
continue
elif(n_start>max):
if(sum==0):
print('-1')
break
else:
print(sum)
print(bb[0])
break
break
else:
break
| true | true |
f7f5ee4e0e6ca15330849bf2d391b8ca5cf2a707 | 7,384 | py | Python | p2pfs/ui/terminal.py | yxwangcs/p2pfs | 57e90d8f911de36da70f5977822cde609d1c3561 | [
"MIT"
] | 2 | 2020-07-02T12:09:19.000Z | 2020-08-26T15:48:15.000Z | p2pfs/ui/terminal.py | RyanWangGit/p2pfs | adcbf999010289e46c041aecc9af5c734c6de25e | [
"MIT"
] | null | null | null | p2pfs/ui/terminal.py | RyanWangGit/p2pfs | adcbf999010289e46c041aecc9af5c734c6de25e | [
"MIT"
] | 2 | 2020-07-19T04:15:53.000Z | 2021-01-16T20:31:48.000Z | import os
from asyncio import IncompleteReadError
from beautifultable import BeautifulTable
from p2pfs.core.tracker import Tracker
from p2pfs.core.peer import Peer
from p2pfs.core.exceptions import *
import p2pfs.ui.aiocmd as aiocmd
from aioconsole.stream import get_standard_streams
import logging
class TrackerTerminal(aiocmd.Cmd):
INTRO = 'Welcome to \033[1mTracker\033[0m terminal. Type help or ? to list commands.\n'
PROMPT = '\033[1mTracker>\033[0m '
def __init__(self, tracker):
assert isinstance(tracker, Tracker)
self._tracker = tracker
super().__init__()
async def do_start(self, arg):
arg = arg.split(' ')
if len(arg) < 2:
print('Not enough argument, start <host> <port>')
else:
try:
await self._tracker.start((arg[0], int(arg[1])))
except ServerRunningError:
print('Tracker is already running.')
except OSError as e:
if e.errno == 48:
print('Cannot bind on address {}:{}.'.format(arg[0], arg[1]))
else:
raise
print('Tracker started listening on {}'.format(self._tracker.address()))
async def do_list_files(self, arg):
file_list_dict = self._tracker.file_list()
table = BeautifulTable()
table.rows.separator = ''
for filename, fileinfo in file_list_dict.items():
if len(table.columns) == 0:
table.columns.header = ['Filename'] + list(map(lambda x: x.capitalize(), tuple(fileinfo.keys())))
table.rows.append((filename, ) + tuple(fileinfo.values()))
_, std_writer = await get_standard_streams()
std_writer.write(str(table).encode('utf-8'))
std_writer.write('\n'.encode('utf-8'))
await std_writer.drain()
async def do_list_peers(self, arg):
table = BeautifulTable()
table.rows.separator = ''
table.columns.header = ['Peer Address']
for peer in self._tracker.peers():
table.rows.append([peer])
_, std_writer = await get_standard_streams()
std_writer.write(str(table).encode('utf-8'))
std_writer.write('\n'.encode('utf-8'))
await std_writer.drain()
async def do_list_chunkinfo(self, arg):
# TODO: pretty print chunk info
_, std_writer = await get_standard_streams()
std_writer.write(str(self._tracker.chunkinfo()).encode('utf-8'))
std_writer.write('\n'.encode('utf-8'))
await std_writer.drain()
async def do_exit(self, arg):
await self._tracker.stop()
return True
class PeerTerminal(aiocmd.Cmd):
INTRO = 'Welcome to \033[1mPeer\033[0m terminal. Type help or ? to list commands.\n'
PROMPT = '\033[1mPeer>\033[0m '
def __init__(self, peer):
assert isinstance(peer, Peer)
self._peer = peer
super().__init__()
async def do_publish(self, arg):
arg = arg.split(' ')[0]
try:
await self._peer.publish(arg)
except FileNotFoundError:
print('File {} doesn\'t exist.'.format(arg))
except FileExistsError:
print('File {} already registered on tracker, use \'list_files\' to see.'.format(arg))
except TrackerNotConnectedError:
print('Tracker is not connected. Use \'connect <tracker_ip> <tracker_port> to connect.\' ')
except (ConnectionError, RuntimeError, IncompleteReadError):
print('Error occurred during communications with tracker, try to re-connect.')
except InProgressError:
print('Publish file {} already in progress.'.format(arg))
else:
print('File {} successfully published on tracker.'.format(arg))
async def do_set_delay(self, arg):
arg = arg.split(' ')[0]
if arg == '':
print('Usage: set_delay <delay>, <delay> is required.')
else:
self._peer.set_delay(float(arg))
print('Delay {} successfully set.'.format(arg))
async def do_connect(self, arg):
arg = arg.split(' ')
if len(arg) < 2:
print('More arguments required! Usage: connect <address> <port>')
try:
await self._peer.connect((arg[0], int(arg[1])))
except AlreadyConnectedError as e:
print('Peer already connected to {}.'.format(e.address))
except ConnectionRefusedError:
print('Cannot connect to tracker.')
except (ConnectionError, RuntimeError, IncompleteReadError, AssertionError):
print('Error occurred during communications with tracker.')
else:
print('Successfully connected!')
async def do_list_files(self, arg):
try:
file_list_dict = await self._peer.list_file()
except TrackerNotConnectedError:
print('Tracker is not connected, try \'connect <tracker_ip> <tracker_port>\' to connect.')
except (ConnectionError, RuntimeError, IncompleteReadError):
print('Error occured during communications with tracker, '
'try \'connect <tracker_ip> <tracker_port>\' to re-connect.')
else:
table = BeautifulTable()
table.rows.separator = ''
for filename, fileinfo in file_list_dict.items():
if len(table.columns) == 0:
table.columns.header = ['Filename'] + list(map(lambda x: x.capitalize(), tuple(fileinfo.keys())))
table.rows.append((filename,) + tuple(fileinfo.values()))
print(table)
async def do_download(self, arg):
filename, destination, *_ = arg.split(' ')
from tqdm import tqdm
def tqdm_hook_wrapper(t):
last_chunk = [0]
def update_to(chunknum=1, chunksize=1, tsize=None):
if tsize is not None:
t.total = tsize
t.update((chunknum - last_chunk[0]) * chunksize)
last_chunk[0] = chunknum
return update_to
try:
with tqdm(unit='B', unit_scale=True, unit_divisor=1024, miniters=1, desc='Downloading ...') as t:
# no report hook if we need debug logging (too many logs will cause trouble to tqdm)
hook = tqdm_hook_wrapper(t) if logging.getLogger().getEffectiveLevel() != logging.DEBUG else None
await self._peer.download(filename, destination, reporthook=hook)
except TrackerNotConnectedError:
print('Tracker not connected, cannot pull initial chunk information.')
except FileNotFoundError:
print('File {} doesn\'t exist, please check filename and try again.'.format(filename))
except (IncompleteReadError, ConnectionError, RuntimeError):
print('Error occurred during transmission.')
except DownloadIncompleteError as e:
print('File chunk # {} doesn\'t exist on any peers, download isn\'t completed.'.format(e.chunknum))
# try to remove incomplete file
try:
os.remove(destination)
except FileNotFoundError:
pass
else:
print('File {} successfully downloaded to {}.'.format(filename, destination))
async def do_exit(self, arg):
await self._peer.stop()
return True
| 41.022222 | 117 | 0.605228 | import os
from asyncio import IncompleteReadError
from beautifultable import BeautifulTable
from p2pfs.core.tracker import Tracker
from p2pfs.core.peer import Peer
from p2pfs.core.exceptions import *
import p2pfs.ui.aiocmd as aiocmd
from aioconsole.stream import get_standard_streams
import logging
class TrackerTerminal(aiocmd.Cmd):
INTRO = 'Welcome to \033[1mTracker\033[0m terminal. Type help or ? to list commands.\n'
PROMPT = '\033[1mTracker>\033[0m '
def __init__(self, tracker):
assert isinstance(tracker, Tracker)
self._tracker = tracker
super().__init__()
async def do_start(self, arg):
arg = arg.split(' ')
if len(arg) < 2:
print('Not enough argument, start <host> <port>')
else:
try:
await self._tracker.start((arg[0], int(arg[1])))
except ServerRunningError:
print('Tracker is already running.')
except OSError as e:
if e.errno == 48:
print('Cannot bind on address {}:{}.'.format(arg[0], arg[1]))
else:
raise
print('Tracker started listening on {}'.format(self._tracker.address()))
async def do_list_files(self, arg):
file_list_dict = self._tracker.file_list()
table = BeautifulTable()
table.rows.separator = ''
for filename, fileinfo in file_list_dict.items():
if len(table.columns) == 0:
table.columns.header = ['Filename'] + list(map(lambda x: x.capitalize(), tuple(fileinfo.keys())))
table.rows.append((filename, ) + tuple(fileinfo.values()))
_, std_writer = await get_standard_streams()
std_writer.write(str(table).encode('utf-8'))
std_writer.write('\n'.encode('utf-8'))
await std_writer.drain()
async def do_list_peers(self, arg):
table = BeautifulTable()
table.rows.separator = ''
table.columns.header = ['Peer Address']
for peer in self._tracker.peers():
table.rows.append([peer])
_, std_writer = await get_standard_streams()
std_writer.write(str(table).encode('utf-8'))
std_writer.write('\n'.encode('utf-8'))
await std_writer.drain()
async def do_list_chunkinfo(self, arg):
_, std_writer = await get_standard_streams()
std_writer.write(str(self._tracker.chunkinfo()).encode('utf-8'))
std_writer.write('\n'.encode('utf-8'))
await std_writer.drain()
async def do_exit(self, arg):
await self._tracker.stop()
return True
class PeerTerminal(aiocmd.Cmd):
INTRO = 'Welcome to \033[1mPeer\033[0m terminal. Type help or ? to list commands.\n'
PROMPT = '\033[1mPeer>\033[0m '
def __init__(self, peer):
assert isinstance(peer, Peer)
self._peer = peer
super().__init__()
async def do_publish(self, arg):
arg = arg.split(' ')[0]
try:
await self._peer.publish(arg)
except FileNotFoundError:
print('File {} doesn\'t exist.'.format(arg))
except FileExistsError:
print('File {} already registered on tracker, use \'list_files\' to see.'.format(arg))
except TrackerNotConnectedError:
print('Tracker is not connected. Use \'connect <tracker_ip> <tracker_port> to connect.\' ')
except (ConnectionError, RuntimeError, IncompleteReadError):
print('Error occurred during communications with tracker, try to re-connect.')
except InProgressError:
print('Publish file {} already in progress.'.format(arg))
else:
print('File {} successfully published on tracker.'.format(arg))
async def do_set_delay(self, arg):
arg = arg.split(' ')[0]
if arg == '':
print('Usage: set_delay <delay>, <delay> is required.')
else:
self._peer.set_delay(float(arg))
print('Delay {} successfully set.'.format(arg))
async def do_connect(self, arg):
arg = arg.split(' ')
if len(arg) < 2:
print('More arguments required! Usage: connect <address> <port>')
try:
await self._peer.connect((arg[0], int(arg[1])))
except AlreadyConnectedError as e:
print('Peer already connected to {}.'.format(e.address))
except ConnectionRefusedError:
print('Cannot connect to tracker.')
except (ConnectionError, RuntimeError, IncompleteReadError, AssertionError):
print('Error occurred during communications with tracker.')
else:
print('Successfully connected!')
async def do_list_files(self, arg):
try:
file_list_dict = await self._peer.list_file()
except TrackerNotConnectedError:
print('Tracker is not connected, try \'connect <tracker_ip> <tracker_port>\' to connect.')
except (ConnectionError, RuntimeError, IncompleteReadError):
print('Error occured during communications with tracker, '
'try \'connect <tracker_ip> <tracker_port>\' to re-connect.')
else:
table = BeautifulTable()
table.rows.separator = ''
for filename, fileinfo in file_list_dict.items():
if len(table.columns) == 0:
table.columns.header = ['Filename'] + list(map(lambda x: x.capitalize(), tuple(fileinfo.keys())))
table.rows.append((filename,) + tuple(fileinfo.values()))
print(table)
async def do_download(self, arg):
filename, destination, *_ = arg.split(' ')
from tqdm import tqdm
def tqdm_hook_wrapper(t):
last_chunk = [0]
def update_to(chunknum=1, chunksize=1, tsize=None):
if tsize is not None:
t.total = tsize
t.update((chunknum - last_chunk[0]) * chunksize)
last_chunk[0] = chunknum
return update_to
try:
with tqdm(unit='B', unit_scale=True, unit_divisor=1024, miniters=1, desc='Downloading ...') as t:
# no report hook if we need debug logging (too many logs will cause trouble to tqdm)
hook = tqdm_hook_wrapper(t) if logging.getLogger().getEffectiveLevel() != logging.DEBUG else None
await self._peer.download(filename, destination, reporthook=hook)
except TrackerNotConnectedError:
print('Tracker not connected, cannot pull initial chunk information.')
except FileNotFoundError:
print('File {} doesn\'t exist, please check filename and try again.'.format(filename))
except (IncompleteReadError, ConnectionError, RuntimeError):
print('Error occurred during transmission.')
except DownloadIncompleteError as e:
print('File chunk # {} doesn\'t exist on any peers, download isn\'t completed.'.format(e.chunknum))
try:
os.remove(destination)
except FileNotFoundError:
pass
else:
print('File {} successfully downloaded to {}.'.format(filename, destination))
async def do_exit(self, arg):
await self._peer.stop()
return True
| true | true |
f7f5ee92b7d313e61274a3c6c4b2cede1c3cf934 | 1,244 | py | Python | usocketio/protocol.py | eliclement/uwebsockets | 440edabe31f85fced35d652c18d9ac26509f8bb6 | [
"MIT"
] | 119 | 2016-09-07T00:11:49.000Z | 2022-03-26T08:19:12.000Z | nodemcu/usocketio/protocol.py | juergs/bme680_nodemcu_socketio | 1d2264b593bf70c5248c3db8fbbaa34d8f15f50e | [
"MIT"
] | 17 | 2016-09-23T20:53:01.000Z | 2022-03-06T02:42:48.000Z | nodemcu/usocketio/protocol.py | juergs/bme680_nodemcu_socketio | 1d2264b593bf70c5248c3db8fbbaa34d8f15f50e | [
"MIT"
] | 37 | 2016-12-20T09:35:55.000Z | 2022-01-16T20:38:27.000Z | """
Socket.io/Engine.io protocol constructs
"""
PAYLOAD_STRING = const(0)
PAYLOAD_BINARY = const(1)
PACKET_OPEN = const(0)
PACKET_CLOSE = const(1)
PACKET_PING = const(2)
PACKET_PONG = const(3)
PACKET_MESSAGE = const(4)
PACKET_UPGRADE = const(5)
PACKET_NOOP = const(6)
MESSAGE_CONNECT = const(0)
MESSAGE_DISCONNECT = const(1)
MESSAGE_EVENT = const(2)
MESSAGE_ACK = const(3)
MESSAGE_ERROR = const(4)
MESSAGE_BINARY_EVENT = const(5)
MESSAGE_BINARY_ACK = const(6)
def decode_packet(buf):
if isinstance(buf, str) and buf[0] == 'b':
# FIXME: implement base64 protocol
raise NotImplementedError()
return int(buf[0]), buf[1:]
def decode_payload(buf):
buf = memoryview(buf)
while buf:
type_ = buf[0]
buf = buf[1:]
length = 0
while True:
c = buf[0]
buf = buf[1:]
if c == 0xff:
break
length *= 10
length += c
packet = bytes(buf[:length])
if type_ == PAYLOAD_STRING:
packet = packet.decode('utf-8')
elif type_ == PAYLOAD_BINARY:
pass
else:
raise NotImplementedError()
yield decode_packet(packet)
buf = buf[length:]
| 19.4375 | 46 | 0.589228 |
PAYLOAD_STRING = const(0)
PAYLOAD_BINARY = const(1)
PACKET_OPEN = const(0)
PACKET_CLOSE = const(1)
PACKET_PING = const(2)
PACKET_PONG = const(3)
PACKET_MESSAGE = const(4)
PACKET_UPGRADE = const(5)
PACKET_NOOP = const(6)
MESSAGE_CONNECT = const(0)
MESSAGE_DISCONNECT = const(1)
MESSAGE_EVENT = const(2)
MESSAGE_ACK = const(3)
MESSAGE_ERROR = const(4)
MESSAGE_BINARY_EVENT = const(5)
MESSAGE_BINARY_ACK = const(6)
def decode_packet(buf):
if isinstance(buf, str) and buf[0] == 'b':
raise NotImplementedError()
return int(buf[0]), buf[1:]
def decode_payload(buf):
buf = memoryview(buf)
while buf:
type_ = buf[0]
buf = buf[1:]
length = 0
while True:
c = buf[0]
buf = buf[1:]
if c == 0xff:
break
length *= 10
length += c
packet = bytes(buf[:length])
if type_ == PAYLOAD_STRING:
packet = packet.decode('utf-8')
elif type_ == PAYLOAD_BINARY:
pass
else:
raise NotImplementedError()
yield decode_packet(packet)
buf = buf[length:]
| true | true |
f7f5eea2333115caf27e5a8df0623525ea435756 | 5,751 | py | Python | oeml-sdk/python/openapi_client/api/balances_api.py | oskaralfons/coinapi-sdk | 2c79b6d91d0f702040dd865e79f0774a4bba9bb3 | [
"MIT"
] | 1 | 2020-07-23T05:47:52.000Z | 2020-07-23T05:47:52.000Z | oeml-sdk/python/openapi_client/api/balances_api.py | oskaralfons/coinapi-sdk | 2c79b6d91d0f702040dd865e79f0774a4bba9bb3 | [
"MIT"
] | null | null | null | oeml-sdk/python/openapi_client/api/balances_api.py | oskaralfons/coinapi-sdk | 2c79b6d91d0f702040dd865e79f0774a4bba9bb3 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
OEML - REST API
This section will provide necessary information about the `CoinAPI OEML REST API` protocol. This API is also available in the Postman application: <a href=\"https://postman.coinapi.io/\" target=\"_blank\">https://postman.coinapi.io/</a> # noqa: E501
The version of the OpenAPI document: v1
Contact: support@coinapi.io
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from openapi_client.api_client import ApiClient
from openapi_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class BalancesApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def v1_balances_get(self, **kwargs): # noqa: E501
"""Get balances # noqa: E501
Get current currency balance from all or single exchange. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v1_balances_get(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str exchange_id: Filter the balances to the specific exchange.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: list[Balance]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.v1_balances_get_with_http_info(**kwargs) # noqa: E501
def v1_balances_get_with_http_info(self, **kwargs): # noqa: E501
"""Get balances # noqa: E501
Get current currency balance from all or single exchange. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v1_balances_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str exchange_id: Filter the balances to the specific exchange.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(list[Balance], status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'exchange_id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method v1_balances_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'exchange_id' in local_var_params and local_var_params['exchange_id'] is not None: # noqa: E501
query_params.append(('exchange_id', local_var_params['exchange_id'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'appliction/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v1/balances', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Balance]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
| 38.597315 | 261 | 0.604069 |
from __future__ import absolute_import
import re
import six
from openapi_client.api_client import ApiClient
from openapi_client.exceptions import (
ApiTypeError,
ApiValueError
)
class BalancesApi(object):
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def v1_balances_get(self, **kwargs):
kwargs['_return_http_data_only'] = True
return self.v1_balances_get_with_http_info(**kwargs)
def v1_balances_get_with_http_info(self, **kwargs):
local_var_params = locals()
all_params = [
'exchange_id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method v1_balances_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'exchange_id' in local_var_params and local_var_params['exchange_id'] is not None:
query_params.append(('exchange_id', local_var_params['exchange_id']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'appliction/json'])
auth_settings = []
return self.api_client.call_api(
'/v1/balances', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Balance]',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
| true | true |
f7f5eee62e5e147c0608594bf082fab2159288f4 | 17 | py | Python | py3status/version.py | saengowp/py3status | 65a609261095f7502fc2814e9a6feb478845a69c | [
"BSD-3-Clause"
] | null | null | null | py3status/version.py | saengowp/py3status | 65a609261095f7502fc2814e9a6feb478845a69c | [
"BSD-3-Clause"
] | null | null | null | py3status/version.py | saengowp/py3status | 65a609261095f7502fc2814e9a6feb478845a69c | [
"BSD-3-Clause"
] | null | null | null | version = "3.18"
| 8.5 | 16 | 0.588235 | version = "3.18"
| true | true |
f7f5eef9f8c971b1407b13153918a5f69b8d2b84 | 1,100 | py | Python | awards/urls.py | francismuk/Awards | 8b37b1cffb8ab28275fbdf7f648ec30c0c1d24b6 | [
"MIT"
] | null | null | null | awards/urls.py | francismuk/Awards | 8b37b1cffb8ab28275fbdf7f648ec30c0c1d24b6 | [
"MIT"
] | 6 | 2020-02-12T03:02:55.000Z | 2021-09-08T01:11:26.000Z | awards/urls.py | francismuk/Awards | 8b37b1cffb8ab28275fbdf7f648ec30c0c1d24b6 | [
"MIT"
] | null | null | null | """awards URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.auth import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'',include('award.urls')),
url(r'^accounts/', include('registration.backends.simple.urls')),
url(r'^logout/$', views.logout, {"next_page": '/'}),
url(r'^tinymce/', include('tinymce.urls')),
]
| 35.483871 | 79 | 0.698182 | from django.conf.urls import url,include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.auth import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'',include('award.urls')),
url(r'^accounts/', include('registration.backends.simple.urls')),
url(r'^logout/$', views.logout, {"next_page": '/'}),
url(r'^tinymce/', include('tinymce.urls')),
]
| true | true |
f7f5f08a25874160189facb7f6341772ad2dd45f | 95 | py | Python | lowercase.py | voussoir/cmd | 9ecfc43751c42d4cdd288b8a1b28ba3a7fa6c650 | [
"BSD-3-Clause"
] | 6 | 2020-01-30T13:36:53.000Z | 2022-02-05T08:14:56.000Z | lowercase.py | voussoir/cmd | 9ecfc43751c42d4cdd288b8a1b28ba3a7fa6c650 | [
"BSD-3-Clause"
] | null | null | null | lowercase.py | voussoir/cmd | 9ecfc43751c42d4cdd288b8a1b28ba3a7fa6c650 | [
"BSD-3-Clause"
] | 1 | 2020-01-30T13:36:33.000Z | 2020-01-30T13:36:33.000Z | from voussoirkit import pipeable
for line in pipeable.go():
pipeable.stdout(line.lower())
| 19 | 33 | 0.747368 | from voussoirkit import pipeable
for line in pipeable.go():
pipeable.stdout(line.lower())
| true | true |
f7f5f10fa1d85b4d2c5c69b518ffb9e2f23ce246 | 391 | py | Python | python_fishc/3.1.py | iisdd/Courses | a47d202e0d7e1ba85a38c6fe3dd9619eceb1045c | [
"MIT"
] | 1 | 2020-11-29T14:42:01.000Z | 2020-11-29T14:42:01.000Z | python_fishc/3.1.py | iisdd/Courses | a47d202e0d7e1ba85a38c6fe3dd9619eceb1045c | [
"MIT"
] | null | null | null | python_fishc/3.1.py | iisdd/Courses | a47d202e0d7e1ba85a38c6fe3dd9619eceb1045c | [
"MIT"
] | null | null | null | '''
1. 关于最后提到的长字符串(三重引号字符串)
其实在 Python3 还可以这么写,不妨试试,然后比较下哪种更方便?
>>> string = (
"我爱鱼C,\n"
"正如我爱小甲鱼,\n"
"他那呱唧呱唧的声音,\n"
"总缠绕于我的脑海,\n"
"久久不肯散去……\n")
'''
str1 = '''又是一个深夜凌晨我不想睡
多久才能够买房还要赚多少出场费
玩说唱怎么养活自己也许你说的对
可谁想当个没有出息的窝囊废
'''
str2 = (
'我想过很多种的死法但我现在依然活着\n'
'没人相信我能做音乐我自己就是伯乐\n'
'不想朝九晚五你就说我顽固走弯路\n'
'你只在乎我的收入不会在乎我的专注\n')
print (str1 , end = '')
print (str2)
| 17 | 36 | 0.629156 | str1 = '''又是一个深夜凌晨我不想睡
多久才能够买房还要赚多少出场费
玩说唱怎么养活自己也许你说的对
可谁想当个没有出息的窝囊废
'''
str2 = (
'我想过很多种的死法但我现在依然活着\n'
'没人相信我能做音乐我自己就是伯乐\n'
'不想朝九晚五你就说我顽固走弯路\n'
'你只在乎我的收入不会在乎我的专注\n')
print (str1 , end = '')
print (str2)
| true | true |
f7f5f1c2e5e28de4c28b3200d0e7b1411916fbe5 | 2,840 | py | Python | realsense_face_detection.py | kylelscott/Face-Recognition-with-Intel-D435i | c48530a1bdcd6de23f1b6c02d5a2f60f290b23d8 | [
"Apache-2.0"
] | 3 | 2021-02-04T14:06:26.000Z | 2022-02-02T02:57:17.000Z | realsense_face_detection.py | kylelscott/Face-Recognition-with-Intel-D435i | c48530a1bdcd6de23f1b6c02d5a2f60f290b23d8 | [
"Apache-2.0"
] | null | null | null | realsense_face_detection.py | kylelscott/Face-Recognition-with-Intel-D435i | c48530a1bdcd6de23f1b6c02d5a2f60f290b23d8 | [
"Apache-2.0"
] | 1 | 2021-02-12T22:47:05.000Z | 2021-02-12T22:47:05.000Z | ## License: Apache 2.0. See LICENSE file in root directory.
## Copyright(c) 2015-2017 Intel Corporation. All Rights Reserved.
###############################################
## Open CV and Numpy integration ##
###############################################
import pyrealsense2 as rs
import numpy as np
import cv2
import time
# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()
#depth stream not needed
#config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
# Start streaming
pipeline.start(config)
print("Loading model")
net = cv2.dnn.readNetFromCaffe("deploy.prototxt.txt", "res10_300x300_ssd_iter_140000.caffemodel")
try:
while True:
# Wait for a coherent pair of frames: depth and color
frames = pipeline.wait_for_frames()
color_frame = frames.get_color_frame()
if not color_frame:
continue
#begin timer to show frames per second
start = time.time()
# Convert images to numpy arrays
color_image = np.asanyarray(color_frame.get_data())
# Create alignment primitive with color as its target stream:
align = rs.align(rs.stream.color)
frames = align.process(frames)
# Begin the detection portion
(h,w) = color_image.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(color_image,(300,300)),1.0,(300,300),(104.0, 177.0, 123.0))
net.setInput(blob, "data")
detections = net.forward("detection_out")
# loop over the detections
for i in range(0, detections.shape[2]):
# extract confidence and prediction
confidence = detections[0, 0, i, 2]
# filter detections by confidence greater than minimum value
print(confidence)
if confidence > 0.5:
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# draw the bounding box and write confidence
text = "{:.2f}%".format(confidence * 100)
y = startY - 10 if startY - 10 > 10 else startY + 10
cv2.rectangle(color_image, (startX, startY), (endX, endY),(255, 255, 255), 2)
cv2.putText(color_image, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (255, 255, 255), 2)
# show the output image
cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
cv2.imshow("Realsense", color_image)
cv2.waitKey(1)
#end timer to show frames per second
end = time.time()
sec = end-start
print("time:{0}".format(sec))
#show frames per second
fps = 1/sec
print("fps:{0}".format(fps))
finally:
# Stop streaming
pipeline.stop()
| 34.634146 | 111 | 0.601408 | false | true | |
f7f5f1d3ef99d5b8072e48178de1228ee5b15b3f | 653 | py | Python | financial/views/user.py | dritux/financial-tools | 0e55e84b3590e86e11d52cd3db1d1a1a83adc236 | [
"MIT"
] | null | null | null | financial/views/user.py | dritux/financial-tools | 0e55e84b3590e86e11d52cd3db1d1a1a83adc236 | [
"MIT"
] | null | null | null | financial/views/user.py | dritux/financial-tools | 0e55e84b3590e86e11d52cd3db1d1a1a83adc236 | [
"MIT"
] | null | null | null | from flask import Blueprint, jsonify, request
from financial.models import User, users_schema
from financial import db
user = Blueprint('user', __name__, url_prefix='')
@user.route("/user", methods=['GET'])
def get():
    """Return every stored user, serialized under the ``user`` key."""
    all_users = User.query.all()
    serialized = users_schema.dump(all_users)
    return jsonify(user=serialized.data)
@user.route('/user', methods=['POST'])
def create():
    """Create a user from the JSON request body.

    Expects a JSON object with 'name', 'username' and 'password' keys
    (a missing key raises KeyError, as before).  Returns the new user's
    public fields as JSON.
    """
    name = request.json['name']
    username = request.json['username']
    password = request.json['password']
    # Renamed from `user`, which shadowed the module-level blueprint of
    # the same name and made the handler harder to read.
    new_user = User(
        name,
        username,
        password
    )
    db.session.add(new_user)
    db.session.commit()
    # Bug fix: the original did `return user_schema.jsonify(user)`, but
    # `user_schema` is never imported in this module, so every successful
    # POST crashed with a NameError after committing.  Serialize the
    # public fields directly (deliberately excluding the password).
    return jsonify(name=name, username=username)
| 21.064516 | 49 | 0.655436 | from flask import Blueprint, jsonify, request
from financial.models import User, users_schema
from financial import db
user = Blueprint('user', __name__, url_prefix='')
@user.route("/user", methods=['GET'])
def get():
users = User.query.all()
result = users_schema.dump(users)
return jsonify(user=result.data)
@user.route('/user', methods=['POST'])
def create():
name = request.json['name']
username = request.json['username']
password = request.json['password']
user = User(
name,
username,
password
)
db.session.add(user)
db.session.commit()
return user_schema.jsonify(user)
| true | true |
f7f5f2f6e66691b739c6240d21fe317ff8fb95ce | 510 | py | Python | spacq/devices/rohde_schwarz/mock/tests/test_mock_smf100a.py | bleutooth65/SpanishAcquisition3 | 50d1445c57f7ecf3bbf03a2cb28befedba1bd57a | [
"BSD-2-Clause"
] | 1 | 2020-09-30T15:52:48.000Z | 2020-09-30T15:52:48.000Z | spacq/devices/rohde_schwarz/mock/tests/test_mock_smf100a.py | bleutooth65/SpanishAcquisition3 | 50d1445c57f7ecf3bbf03a2cb28befedba1bd57a | [
"BSD-2-Clause"
] | null | null | null | spacq/devices/rohde_schwarz/mock/tests/test_mock_smf100a.py | bleutooth65/SpanishAcquisition3 | 50d1445c57f7ecf3bbf03a2cb28befedba1bd57a | [
"BSD-2-Clause"
] | 3 | 2019-06-13T20:59:13.000Z | 2021-02-07T03:23:11.000Z | from unittest import main
from ... import smf100a
from .. import mock_smf100a
from ...tests.server.test_smf100a import SMF100ATest
# Don't lose the real device: stash the genuine driver class and the current
# mock flag at import time so teardown() can restore both afterwards.
real_SMF100A = smf100a.SMF100A
is_mock = SMF100ATest.mock
def setup():
    """Install the mock SMF100A driver so the suite runs without hardware."""
    SMF100ATest.mock = True
    smf100a.SMF100A = mock_smf100a.MockSMF100A
def teardown():
    """Put back the real SMF100A driver and the original mock flag."""
    SMF100ATest.mock = is_mock
    smf100a.SMF100A = real_SMF100A
# Allow running this test module directly via unittest's CLI entry point.
if __name__ == '__main__':
    main()
| 18.888889 | 52 | 0.754902 | from unittest import main
from ... import smf100a
from .. import mock_smf100a
from ...tests.server.test_smf100a import SMF100ATest
real_SMF100A = smf100a.SMF100A
is_mock = SMF100ATest.mock
def setup():
# Run the tests with a fake device.
smf100a.SMF100A = mock_smf100a.MockSMF100A
SMF100ATest.mock = True
def teardown():
# Restore the real device for any remaining tests.
smf100a.SMF100A = real_SMF100A
SMF100ATest.mock = is_mock
if __name__ == '__main__':
main()
| true | true |
f7f5f32b1f82bfd1da20a8353dc12f669657358a | 749 | py | Python | benchmarks/time_render.py | bdice/conda-build | f12b838b827b2581925ca65f9a856e82dc467e0d | [
"BSD-3-Clause"
] | 1 | 2021-09-13T20:18:39.000Z | 2021-09-13T20:18:39.000Z | benchmarks/time_render.py | bdice/conda-build | f12b838b827b2581925ca65f9a856e82dc467e0d | [
"BSD-3-Clause"
] | 6 | 2016-07-05T19:08:39.000Z | 2017-10-23T10:59:14.000Z | benchmarks/time_render.py | mingwandroid/conda-build | 3543ae8220167a600b0a6816f529e15edf7e19b0 | [
"BSD-3-Clause"
] | 2 | 2020-01-07T08:34:25.000Z | 2020-01-07T09:10:29.000Z | import os
from conda_build import api
# god-awful hack to get data from the test recipes:
# push the parent of this file's directory onto sys.path so that the
# repository's `tests.utils` module (and its recipe fixtures) resolve here.
import sys
_thisdir = os.path.dirname(__file__)
sys.path.append(os.path.dirname(_thisdir))
from tests.utils import metadata_dir
# Directory holding the recipe-variant fixtures used by the benchmarks below.
variant_dir = os.path.join(metadata_dir, '..', 'variants')
def time_simple_render():
    """Benchmark a plain render of the 'python_run' test recipe."""
    recipe = os.path.join(metadata_dir, 'python_run')
    api.render(recipe, finalize=False, bypass_env_check=True)
def time_top_level_variant_render():
    """Benchmark rendering a recipe with a top-level python-version variant."""
    recipe = os.path.join(variant_dir, '02_python_version')
    api.render(recipe, finalize=False, bypass_env_check=True)
def time_single_top_level_multi_output():
    """Benchmark rendering a multi-output recipe that loops over python."""
    recipe = os.path.join(variant_dir, 'test_python_as_subpackage_loop')
    api.render(recipe, finalize=False, bypass_env_check=True)
| 26.75 | 78 | 0.738318 | import os
from conda_build import api
import sys
_thisdir = os.path.dirname(__file__)
sys.path.append(os.path.dirname(_thisdir))
from tests.utils import metadata_dir
variant_dir = os.path.join(metadata_dir, '..', 'variants')
def time_simple_render():
api.render(os.path.join(metadata_dir, 'python_run'), finalize=False,
bypass_env_check=True)
def time_top_level_variant_render():
api.render(os.path.join(variant_dir, '02_python_version'), finalize=False,
bypass_env_check=True)
def time_single_top_level_multi_output():
api.render(os.path.join(variant_dir, 'test_python_as_subpackage_loop'),
finalize=False, bypass_env_check=True)
| true | true |
f7f5f3d8eaef67d7372d8c0b39b63c81881e5971 | 9,115 | py | Python | vsts/vsts/release/v4_0/models/release_environment.py | kenkuo/azure-devops-python-api | 9e920bd25e938fa89ff7f60153e5b9e113ca839d | [
"MIT"
] | null | null | null | vsts/vsts/release/v4_0/models/release_environment.py | kenkuo/azure-devops-python-api | 9e920bd25e938fa89ff7f60153e5b9e113ca839d | [
"MIT"
] | null | null | null | vsts/vsts/release/v4_0/models/release_environment.py | kenkuo/azure-devops-python-api | 9e920bd25e938fa89ff7f60153e5b9e113ca839d | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class ReleaseEnvironment(Model):
    """ReleaseEnvironment.

    :param conditions: Gets list of conditions.
    :type conditions: list of :class:`ReleaseCondition <release.v4_0.models.ReleaseCondition>`
    :param created_on: Gets date on which it got created.
    :type created_on: datetime
    :param definition_environment_id: Gets definition environment id.
    :type definition_environment_id: int
    :param demands: Gets demands.
    :type demands: list of :class:`object <release.v4_0.models.object>`
    :param deploy_phases_snapshot: Gets list of deploy phases snapshot.
    :type deploy_phases_snapshot: list of :class:`object <release.v4_0.models.object>`
    :param deploy_steps: Gets deploy steps.
    :type deploy_steps: list of :class:`DeploymentAttempt <release.v4_0.models.DeploymentAttempt>`
    :param environment_options: Gets environment options.
    :type environment_options: :class:`EnvironmentOptions <release.v4_0.models.EnvironmentOptions>`
    :param id: Gets the unique identifier of this field.
    :type id: int
    :param modified_on: Gets date on which it got modified.
    :type modified_on: datetime
    :param name: Gets name.
    :type name: str
    :param next_scheduled_utc_time: Gets next scheduled UTC time.
    :type next_scheduled_utc_time: datetime
    :param owner: Gets the identity who is owner for release environment.
    :type owner: :class:`IdentityRef <release.v4_0.models.IdentityRef>`
    :param post_approvals_snapshot: Gets list of post deploy approvals snapshot.
    :type post_approvals_snapshot: :class:`ReleaseDefinitionApprovals <release.v4_0.models.ReleaseDefinitionApprovals>`
    :param post_deploy_approvals: Gets list of post deploy approvals.
    :type post_deploy_approvals: list of :class:`ReleaseApproval <release.v4_0.models.ReleaseApproval>`
    :param pre_approvals_snapshot: Gets list of pre deploy approvals snapshot.
    :type pre_approvals_snapshot: :class:`ReleaseDefinitionApprovals <release.v4_0.models.ReleaseDefinitionApprovals>`
    :param pre_deploy_approvals: Gets list of pre deploy approvals.
    :type pre_deploy_approvals: list of :class:`ReleaseApproval <release.v4_0.models.ReleaseApproval>`
    :param process_parameters: Gets process parameters.
    :type process_parameters: :class:`ProcessParameters <release.v4_0.models.ProcessParameters>`
    :param queue_id: Gets queue id.
    :type queue_id: int
    :param rank: Gets rank.
    :type rank: int
    :param release: Gets release reference which specifies the reference of the release to which this release environment is associated.
    :type release: :class:`ReleaseShallowReference <release.v4_0.models.ReleaseShallowReference>`
    :param release_created_by: Gets the identity who created release.
    :type release_created_by: :class:`IdentityRef <release.v4_0.models.IdentityRef>`
    :param release_definition: Gets releaseDefinitionReference which specifies the reference of the release definition to which this release environment is associated.
    :type release_definition: :class:`ReleaseDefinitionShallowReference <release.v4_0.models.ReleaseDefinitionShallowReference>`
    :param release_description: Gets release description.
    :type release_description: str
    :param release_id: Gets release id.
    :type release_id: int
    :param scheduled_deployment_time: Gets schedule deployment time of release environment.
    :type scheduled_deployment_time: datetime
    :param schedules: Gets list of schedules.
    :type schedules: list of :class:`ReleaseSchedule <release.v4_0.models.ReleaseSchedule>`
    :param status: Gets environment status.
    :type status: object
    :param time_to_deploy: Gets time to deploy.
    :type time_to_deploy: float
    :param trigger_reason: Gets trigger reason.
    :type trigger_reason: str
    :param variables: Gets the dictionary of variables.
    :type variables: dict
    :param workflow_tasks: Gets list of workflow tasks.
    :type workflow_tasks: list of :class:`WorkflowTask <release.v4_0.models.WorkflowTask>`
    """

    # NOTE: generated file (see the "DO NOT EDIT" header above) -- change the
    # generator, not this class, or edits will be lost on regeneration.
    # Maps each Python attribute to its REST wire name ('key') and the msrest
    # (de)serialization type used for that field.
    _attribute_map = {
        'conditions': {'key': 'conditions', 'type': '[ReleaseCondition]'},
        'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
        'definition_environment_id': {'key': 'definitionEnvironmentId', 'type': 'int'},
        'demands': {'key': 'demands', 'type': '[object]'},
        'deploy_phases_snapshot': {'key': 'deployPhasesSnapshot', 'type': '[object]'},
        'deploy_steps': {'key': 'deploySteps', 'type': '[DeploymentAttempt]'},
        'environment_options': {'key': 'environmentOptions', 'type': 'EnvironmentOptions'},
        'id': {'key': 'id', 'type': 'int'},
        'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
        'name': {'key': 'name', 'type': 'str'},
        'next_scheduled_utc_time': {'key': 'nextScheduledUtcTime', 'type': 'iso-8601'},
        'owner': {'key': 'owner', 'type': 'IdentityRef'},
        'post_approvals_snapshot': {'key': 'postApprovalsSnapshot', 'type': 'ReleaseDefinitionApprovals'},
        'post_deploy_approvals': {'key': 'postDeployApprovals', 'type': '[ReleaseApproval]'},
        'pre_approvals_snapshot': {'key': 'preApprovalsSnapshot', 'type': 'ReleaseDefinitionApprovals'},
        'pre_deploy_approvals': {'key': 'preDeployApprovals', 'type': '[ReleaseApproval]'},
        'process_parameters': {'key': 'processParameters', 'type': 'ProcessParameters'},
        'queue_id': {'key': 'queueId', 'type': 'int'},
        'rank': {'key': 'rank', 'type': 'int'},
        'release': {'key': 'release', 'type': 'ReleaseShallowReference'},
        'release_created_by': {'key': 'releaseCreatedBy', 'type': 'IdentityRef'},
        'release_definition': {'key': 'releaseDefinition', 'type': 'ReleaseDefinitionShallowReference'},
        'release_description': {'key': 'releaseDescription', 'type': 'str'},
        'release_id': {'key': 'releaseId', 'type': 'int'},
        'scheduled_deployment_time': {'key': 'scheduledDeploymentTime', 'type': 'iso-8601'},
        'schedules': {'key': 'schedules', 'type': '[ReleaseSchedule]'},
        'status': {'key': 'status', 'type': 'object'},
        'time_to_deploy': {'key': 'timeToDeploy', 'type': 'float'},
        'trigger_reason': {'key': 'triggerReason', 'type': 'str'},
        'variables': {'key': 'variables', 'type': '{ConfigurationVariableValue}'},
        'workflow_tasks': {'key': 'workflowTasks', 'type': '[WorkflowTask]'}
    }

    # All fields default to None; msrest populates them during deserialization.
    def __init__(self, conditions=None, created_on=None, definition_environment_id=None, demands=None, deploy_phases_snapshot=None, deploy_steps=None, environment_options=None, id=None, modified_on=None, name=None, next_scheduled_utc_time=None, owner=None, post_approvals_snapshot=None, post_deploy_approvals=None, pre_approvals_snapshot=None, pre_deploy_approvals=None, process_parameters=None, queue_id=None, rank=None, release=None, release_created_by=None, release_definition=None, release_description=None, release_id=None, scheduled_deployment_time=None, schedules=None, status=None, time_to_deploy=None, trigger_reason=None, variables=None, workflow_tasks=None):
        super(ReleaseEnvironment, self).__init__()
        self.conditions = conditions
        self.created_on = created_on
        self.definition_environment_id = definition_environment_id
        self.demands = demands
        self.deploy_phases_snapshot = deploy_phases_snapshot
        self.deploy_steps = deploy_steps
        self.environment_options = environment_options
        self.id = id
        self.modified_on = modified_on
        self.name = name
        self.next_scheduled_utc_time = next_scheduled_utc_time
        self.owner = owner
        self.post_approvals_snapshot = post_approvals_snapshot
        self.post_deploy_approvals = post_deploy_approvals
        self.pre_approvals_snapshot = pre_approvals_snapshot
        self.pre_deploy_approvals = pre_deploy_approvals
        self.process_parameters = process_parameters
        self.queue_id = queue_id
        self.rank = rank
        self.release = release
        self.release_created_by = release_created_by
        self.release_definition = release_definition
        self.release_description = release_description
        self.release_id = release_id
        self.scheduled_deployment_time = scheduled_deployment_time
        self.schedules = schedules
        self.status = status
        self.time_to_deploy = time_to_deploy
        self.trigger_reason = trigger_reason
        self.variables = variables
        self.workflow_tasks = workflow_tasks
| 62.431507 | 669 | 0.696544 |
from msrest.serialization import Model
class ReleaseEnvironment(Model):
_attribute_map = {
'conditions': {'key': 'conditions', 'type': '[ReleaseCondition]'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'definition_environment_id': {'key': 'definitionEnvironmentId', 'type': 'int'},
'demands': {'key': 'demands', 'type': '[object]'},
'deploy_phases_snapshot': {'key': 'deployPhasesSnapshot', 'type': '[object]'},
'deploy_steps': {'key': 'deploySteps', 'type': '[DeploymentAttempt]'},
'environment_options': {'key': 'environmentOptions', 'type': 'EnvironmentOptions'},
'id': {'key': 'id', 'type': 'int'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'name': {'key': 'name', 'type': 'str'},
'next_scheduled_utc_time': {'key': 'nextScheduledUtcTime', 'type': 'iso-8601'},
'owner': {'key': 'owner', 'type': 'IdentityRef'},
'post_approvals_snapshot': {'key': 'postApprovalsSnapshot', 'type': 'ReleaseDefinitionApprovals'},
'post_deploy_approvals': {'key': 'postDeployApprovals', 'type': '[ReleaseApproval]'},
'pre_approvals_snapshot': {'key': 'preApprovalsSnapshot', 'type': 'ReleaseDefinitionApprovals'},
'pre_deploy_approvals': {'key': 'preDeployApprovals', 'type': '[ReleaseApproval]'},
'process_parameters': {'key': 'processParameters', 'type': 'ProcessParameters'},
'queue_id': {'key': 'queueId', 'type': 'int'},
'rank': {'key': 'rank', 'type': 'int'},
'release': {'key': 'release', 'type': 'ReleaseShallowReference'},
'release_created_by': {'key': 'releaseCreatedBy', 'type': 'IdentityRef'},
'release_definition': {'key': 'releaseDefinition', 'type': 'ReleaseDefinitionShallowReference'},
'release_description': {'key': 'releaseDescription', 'type': 'str'},
'release_id': {'key': 'releaseId', 'type': 'int'},
'scheduled_deployment_time': {'key': 'scheduledDeploymentTime', 'type': 'iso-8601'},
'schedules': {'key': 'schedules', 'type': '[ReleaseSchedule]'},
'status': {'key': 'status', 'type': 'object'},
'time_to_deploy': {'key': 'timeToDeploy', 'type': 'float'},
'trigger_reason': {'key': 'triggerReason', 'type': 'str'},
'variables': {'key': 'variables', 'type': '{ConfigurationVariableValue}'},
'workflow_tasks': {'key': 'workflowTasks', 'type': '[WorkflowTask]'}
}
def __init__(self, conditions=None, created_on=None, definition_environment_id=None, demands=None, deploy_phases_snapshot=None, deploy_steps=None, environment_options=None, id=None, modified_on=None, name=None, next_scheduled_utc_time=None, owner=None, post_approvals_snapshot=None, post_deploy_approvals=None, pre_approvals_snapshot=None, pre_deploy_approvals=None, process_parameters=None, queue_id=None, rank=None, release=None, release_created_by=None, release_definition=None, release_description=None, release_id=None, scheduled_deployment_time=None, schedules=None, status=None, time_to_deploy=None, trigger_reason=None, variables=None, workflow_tasks=None):
super(ReleaseEnvironment, self).__init__()
self.conditions = conditions
self.created_on = created_on
self.definition_environment_id = definition_environment_id
self.demands = demands
self.deploy_phases_snapshot = deploy_phases_snapshot
self.deploy_steps = deploy_steps
self.environment_options = environment_options
self.id = id
self.modified_on = modified_on
self.name = name
self.next_scheduled_utc_time = next_scheduled_utc_time
self.owner = owner
self.post_approvals_snapshot = post_approvals_snapshot
self.post_deploy_approvals = post_deploy_approvals
self.pre_approvals_snapshot = pre_approvals_snapshot
self.pre_deploy_approvals = pre_deploy_approvals
self.process_parameters = process_parameters
self.queue_id = queue_id
self.rank = rank
self.release = release
self.release_created_by = release_created_by
self.release_definition = release_definition
self.release_description = release_description
self.release_id = release_id
self.scheduled_deployment_time = scheduled_deployment_time
self.schedules = schedules
self.status = status
self.time_to_deploy = time_to_deploy
self.trigger_reason = trigger_reason
self.variables = variables
self.workflow_tasks = workflow_tasks
| true | true |
f7f5f3e176e46aa576fbd0625f24ac72a0e7c51a | 15,503 | py | Python | tests/python/contrib/test_hexagon/test_benchmark_elemwise_add.py | gayatripk1/tvm | 8bf6cd5800daaf42935fd69cbd63180c97bef262 | [
"Apache-2.0"
] | 4,640 | 2017-08-17T19:22:15.000Z | 2019-11-04T15:29:46.000Z | tests/python/contrib/test_hexagon/test_benchmark_elemwise_add.py | gayatripk1/tvm | 8bf6cd5800daaf42935fd69cbd63180c97bef262 | [
"Apache-2.0"
] | 2,863 | 2017-08-17T19:55:50.000Z | 2019-11-04T17:18:41.000Z | tests/python/contrib/test_hexagon/test_benchmark_elemwise_add.py | gayatripk1/tvm | 8bf6cd5800daaf42935fd69cbd63180c97bef262 | [
"Apache-2.0"
] | 1,352 | 2017-08-17T19:30:38.000Z | 2019-11-04T16:09:29.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import os.path
import sys
import pytest
import numpy as np
import logging
import tempfile
import tvm.testing
import tvm.script
from tvm.script import tir as T
from tvm import te
from tvm.contrib.hexagon.build import HexagonLauncherRPC
from . import benchmark_util as bu
# Whether/why to skip is decided once by benchmark_util and consumed by the
# skipif marker on the test below.  (sic: "bencharks" is the helper's actual
# spelling in benchmark_util.)
_SHOULD_SKIP_BENCHMARKS, _SKIP_BENCHMARKS_REASON = bu.skip_bencharks_flag_and_reason()

# This is a fixed detail of the v68 architecture.
HVX_VECTOR_BYTES = 128

_HEXAGON_TARGET = tvm.target.hexagon("v69", link_params=True)

_SUPER_TARGET = tvm.target.Target(_HEXAGON_TARGET, host=_HEXAGON_TARGET)

# NOTE on server ports:
# These tests use different port numbers for the RPC server (7070 + ...).
# The reason is that an RPC session cannot be gracefully closed without
# triggering TIME_WAIT state on the server socket. This prevents another
# server to bind to the same port until the wait time elapses.

_BT = bu.BenchmarksTable()

_CSV_COLUMN_ORDER = [
    # Identifies which TE-compute / TIRScript is used as the basis for the
    # benchmarked primfunc. Only needs to be meaningful to humans.
    "basic_kernel",
    # The tensors' element type
    "dtype",
    # When applicable, indicates the particular variation of schedules
    # apply by the Python code. Decoding this may require looking at this
    # script's source code.
    "sched_type",
    # The memory location of the tensors used during the execution of
    # the primfunc. We currently assume just one location.
    # This will likely need to be generalized as we add more sophisticated
    # primfuncs.
    "mem_scope",
    # For primfuncs that treat tensor buffers as collections of 1D vectors,
    # this is the number of vectors in each tensor.
    # This will likely need to be generalized as we add more sophisticated
    # primfuncs.
    "num_vectors_per_tensor",
    # Reserved columns defined by the BenchmarksTable class.
    "row_status",
    "timings_min_usecs",
    "timings_max_usecs",
    "timings_median_usecs",
    "timings_mean_usecs",
    "timings_stddev_usecs",
    # For benchmarks that produce files on the host file system, this indicates
    # their location. Useful for post-mortem investigation of benchmark results.
    "host_files_dir_path",
    # Miscellaneous comments about the benchmark.
    "comments",
]

_HOST_OUTPUT_DIR = tempfile.mkdtemp()

_PRIMFUNC_NAME = "elemwise_add"

print("-" * 80)
print("OUTPUT DIRECTORY: {}".format(_HOST_OUTPUT_DIR))
print("-" * 80)
print()

# NOTE(review): `Tuple` appears unused in this file -- confirm before removing.
from typing import Tuple
def _get_irmod_elemwise_add(
    _PRIMFUNC_NAME: str, shape: list, dtype: str, mem_scope: str
) -> tvm.ir.module.IRModule:
    """
    Return an IRModule containing a single primfunc, expressed as NS-TIR.

    The primfunc implements elementwise-add. Its signature is (A,B,C), where
    A and B are the input tensors, and C is the output tensor.
    All three tensors have the specified shape, dtype, and mem_scope.

    If the specified primfunc is known to be unsupported, raise an
    UnsupportedException.
    """
    # NOTE: the `_PRIMFUNC_NAME` parameter is currently unused (the generated
    # primfunc is always named "main") and it shadows the module-level constant
    # of the same name.  It is kept only to preserve the existing call signature.
    assert len(shape) == 2

    # TVMScript can reference simple Python variables, but it doesn't
    # currently support more complex Python expressions...
    (
        dim0_size,
        dim1_size,
    ) = shape

    if mem_scope == "global.vtcm":
        raise bu.UnsupportedException("This benchmark kernel does not yet support VTCM buffers.")

    # This check is currently elided by the one above, but it should become relevant as soon
    # as we add VTCM support to this kernel generator.
    #
    # Also: The VTCM budget is a very rough estimate, based only on experience.
    # Assuming that it's even reasonable to use a hard-coded estimate AT ALL, this number
    # may need tweaking.
    estimated_vtcm_budget_bytes = HVX_VECTOR_BYTES * 1024

    dtype_bits = tvm._ffi.runtime_ctypes.DataType(dtype).bits
    assert dtype_bits % 8 == 0
    dtype_bytes = dtype_bits // 8

    num_vtcm_tensors = 3
    estimated_vtcm_needed_bytes = shape[0] * shape[1] * dtype_bytes * num_vtcm_tensors

    if estimated_vtcm_needed_bytes > estimated_vtcm_budget_bytes:
        raise bu.UnsupportedException("Expect to exceed VTCM budget.")

    @tvm.script.ir_module
    class BenchmarkModule:
        @T.prim_func
        def main(a: T.handle, b: T.handle, c: T.handle):
            # We exchange data between functions by handles, which are similar to pointers.
            T.func_attr({"global_symbol": "main", "tir.noalias": True})
            A = T.match_buffer(a, shape, dtype=dtype)
            B = T.match_buffer(b, shape, dtype=dtype)
            C = T.match_buffer(c, shape, dtype=dtype)

            for i in range(dim0_size):
                for j in range(dim1_size):
                    C[i, j] = A[i, j] + B[i, j]

    return BenchmarkModule
def _benchmark_hexagon_elementwise_add_kernel(
    hexagon_launcher: HexagonLauncherRPC, shape: list, dtype: str, mem_scope: str
):
    """
    Generate and benchmark a single elementwise-add kernel for Hexagon.

    Parameters: `hexagon_launcher` provides upload/session services for the
    target device or simulator; `shape`, `dtype` and `mem_scope` describe the
    three tensors (A, B, C) used by the kernel.

    Produce these outputs:
    - Printed status updates / results to stdout and/or stderr.
    - Create a new subdirectory under _HOST_OUTPUT_DIR, and populate it with
      various logs and intermediate files.
    - Add to _BT a row describing this benchmark run (success, numerical
      failure, or skip for unsupported configurations).
    """
    # Represent the benchmark details in a form required by the benchmark table
    # and for other logging...
    keys_dict = {
        "basic_kernel": "ewise-add",
        "dtype": dtype,
        "shape": shape,
        "mem_scope": mem_scope,
    }
    desc = bu.get_benchmark_decription(keys_dict)
    # Create the host-side directory for this benchmark run's files / logs...
    host_files_dir_name = bu.get_benchmark_id(keys_dict)
    host_files_dir_path = os.path.join(_HOST_OUTPUT_DIR, host_files_dir_name)
    os.mkdir(host_files_dir_path)
    keys_dict["host_files_dir_path"] = host_files_dir_path
    # All per-run diagnostics are mirrored into this log file for post-mortem use.
    log_file_path = os.path.join(host_files_dir_path, "out.txt")
    with open(log_file_path, "w") as log_file:
        print(f"CONFIGURATION: {desc}")
        log_file.write(f"CONFIGURATION: {desc}\n")
        try:
            ns_tir_module = _get_irmod_elemwise_add(_PRIMFUNC_NAME, shape, dtype, mem_scope)
            # Dump the primfunc NS-TIR (as text) to the log file...
            lowered_mod = tvm.lower(ns_tir_module, _PRIMFUNC_NAME)
            log_file.write("LOWERED IR MODULE:\n")
            log_file.write(str(lowered_mod))
            log_file.write("\n")
            # Lower the primfunc's IRModule to Hexagon object code...
            A = tvm.te.placeholder(shape, dtype=dtype)
            B = tvm.te.placeholder(shape, dtype=dtype)
            C = tvm.te.placeholder(shape, dtype=dtype)
            built_module: tvm.driver.build_module.OperatorModule = tvm.build(
                ns_tir_module,
                [
                    A,
                    B,
                    C,
                ],
                _SUPER_TARGET,
                name=_PRIMFUNC_NAME,
            )
            # Create an actual Hexagon-native shared object file, initially stored on the
            # host's file system...
            host_dso_binary_path = os.path.join(host_files_dir_path, "test_binary.so")
            built_module.save(host_dso_binary_path)
            print(f"SAVED BINARY TO HOST PATH: {host_dso_binary_path}")
            # Upload the .so to the Android device's file system (or wherever is appropriate
            # when using the Hexagon simulator)...
            target_dso_binary_filename = "test_binary.so"
            target_dso_binary_pathname = hexagon_launcher.upload(
                host_dso_binary_path, target_dso_binary_filename
            )
            # Generate our testing / validation data...
            (
                host_numpy_A_data,
                host_numpy_B_data,
                host_numpy_C_data_expected,
            ) = _get_elemwise_add_reference_value_tensors(shape, dtype)
            with hexagon_launcher.start_session() as sess:
                # On the target device / simulator, make our Hexagon-native shared object
                # available for use...
                loaded_hexagon_module: tvm.runtime.module.Module = hexagon_launcher.load_module(
                    target_dso_binary_pathname, sess
                )
                # Create the target-side tensors to hold the primfunc's inputs and outputs...
                A_data = tvm.nd.empty(shape, dtype, sess.device, mem_scope)
                B_data = tvm.nd.empty(shape, dtype, sess.device, mem_scope)
                C_data = tvm.nd.empty(shape, dtype, sess.device, mem_scope)
                # Populate the primfunc's input tensors...
                A_data.copyfrom(host_numpy_A_data)
                B_data.copyfrom(host_numpy_B_data)
                # Actually benchmark the primfunc...
                # (number=10, repeat=1: each reported measurement averages 10 invocations.)
                timer = loaded_hexagon_module.time_evaluator(
                    "main", sess.device, number=10, repeat=1
                )
                timing_result = timer(A_data, B_data, C_data)
                print(f"TIMING RESULT: {timing_result}")
                log_file.write(f"TIMING RESULT: {timing_result}\n")
                # Verify that the computation actually happened, and produced the correct result.
                result = C_data.numpy()
                if dtype == "float16":
                    # These are the closest tolerance we currently expect / require for these
                    # kernels. They may be changed in the future.
                    rel_tolerance = 0.005
                    abs_tolerance = 2.0
                elif dtype == "int8":
                    rel_tolerance = 0
                    abs_tolerance = 0
                else:
                    raise Exception(f"Unexpected dtype: {dtype}")
                # TODO: We're assuming that *any* assertion thrown by 'assert_allclose' is because
                # the numerical differences were too large. But ideally this code would
                # differentiate between (a) numerical difference errors, which should simply be
                # recorded as a failed benchmark run, vs. (b) more serious errors that should
                # kill the overall script.
                try:
                    tvm.testing.assert_allclose(
                        result, host_numpy_C_data_expected, rel_tolerance, abs_tolerance
                    )
                except AssertionError as e:
                    raise bu.NumericalAccuracyException(str(e))
            # `timing_result` is bound inside the session block above; the row is
            # recorded only after the session closed without raising.
            _BT.record_success(timing_result, **keys_dict)
        except bu.NumericalAccuracyException as e:
            print()
            print(f"FAIL: Numerical accuracy error. See log file.")
            log_file.write("\n")
            log_file.write(f"FAIL: {e}\n")
            _BT.record_fail(**keys_dict, comments=f"Numerical accuracy error. See log file.")
        except bu.UnsupportedException as e:
            print()
            print(f"SKIP: {e}")
            log_file.write("\n")
            log_file.write(f"SKIP: {e}\n")
            _BT.record_skip(**keys_dict, comments=f"Unsupported configuration: {e}")
def _get_elemwise_add_reference_value_tensors(shape: list, dtype: str):
"""
Return [A:np.array, B:np.array, C:np.array]
`A`, `B`, and `C` are reference data used to exercise and validate
an elementwise-add kernel: C = A+B.
NOTE: These data are primarily meant for performance testing.
The values may be helpful in detecting correctness issues, but that's
a secondary consideration here.
"""
assert len(shape) == 2
A = np.ndarray(shape, dtype=dtype)
B = np.ndarray(shape, dtype=dtype)
np_dtype = A.dtype
if np_dtype.kind in ["i", "u"]:
# We allow overflow for integer types because it tends to be well-behaved
# and well-understood...
min_value = np.iinfo(np_dtype).min
max_value = np.iinfo(np_dtype).max
next_value = min_value
for i in range(shape[0]):
for j in range(shape[1]):
A[i, j] = next_value
B[i, j] = next_value * 2
next_value += 1
elif np_dtype.kind == "f":
# NOTE: For simplicity, we avoid test data that that require
# well-defined behavior on floating-point overflow.
# But it may be reasonable to test that in the future.
min_value = np.finfo(np_dtype).min
max_value = np.finfo(np_dtype).max
min_input_value = min_value / 2.0 + 1
max_input_value = max_value / 2.0 - 2
delta = (max_input_value - min_input_value) / (shape[0] * shape[1])
next_value = min_input_value
for i in range(shape[0]):
for j in range(shape[1]):
A[i, j] = next_value
B[i, j] = next_value + 1
next_value += delta
else:
assert False, f"Unexpected data type: {np_dtype}"
C = A + B
return [
A,
B,
C,
]
@pytest.mark.skipif(_SHOULD_SKIP_BENCHMARKS, reason=_SKIP_BENCHMARKS_REASON)
@tvm.testing.requires_hexagon
def test_elemwise_add(hexagon_launcher: HexagonLauncherRPC):
    """Sweep dtype x mem_scope x tensor-size, benchmarking each combination.

    Each configuration records a success / fail / skip row in _BT; after the
    sweep the table is written as CSV (file + stdout), and the pytest case
    fails only if at least one configuration recorded a failure.
    """
    for dtype in [
        "int8",
        "float16",
    ]:
        for mem_scope in [
            "global",
            "global.vtcm",
        ]:
            # These numbers are fairly arbitrary, but they're meant to stress memory/caches to
            # various extents.
            for num_vectors_per_tensor in [
                1,
                16,
                64,
                512,
                2048,
            ]:
                dtype_bits = tvm._ffi.runtime_ctypes.DataType(dtype).bits
                assert dtype_bits % 8 == 0
                dtype_bytes = dtype_bits // 8
                # Each tensor row is exactly one HVX vector wide.
                elem_per_hvx_vector = HVX_VECTOR_BYTES // dtype_bytes
                shape = [
                    num_vectors_per_tensor,
                    elem_per_hvx_vector,
                ]
                print()
                _benchmark_hexagon_elementwise_add_kernel(hexagon_launcher, shape, dtype, mem_scope)
    print("-" * 80)
    print(f"OUTPUT DIRECTORY: {_HOST_OUTPUT_DIR}")
    print("-" * 80)
    print()
    tabular_output_filename = os.path.join(_HOST_OUTPUT_DIR, "benchmark-results.csv")
    with open(tabular_output_filename, "w") as csv_file:
        _BT.print_csv(csv_file, _CSV_COLUMN_ORDER)
    print(f"BENCHMARK RESULTS FILE: {tabular_output_filename}")
    _BT.print_csv(sys.stdout, _CSV_COLUMN_ORDER)
    if _BT.has_fail() > 0:
        pytest.fail("At least one benchmark configuration failed", pytrace=False)
# Allow running this benchmark file directly, outside of pytest collection.
if __name__ == "__main__":
    tvm.testing.main()
| 36.137529 | 100 | 0.632329 |
import os
import os.path
import sys
import pytest
import numpy as np
import logging
import tempfile
import tvm.testing
import tvm.script
from tvm.script import tir as T
from tvm import te
from tvm.contrib.hexagon.build import HexagonLauncherRPC
from . import benchmark_util as bu
_SHOULD_SKIP_BENCHMARKS, _SKIP_BENCHMARKS_REASON = bu.skip_bencharks_flag_and_reason()
HVX_VECTOR_BYTES = 128
_HEXAGON_TARGET = tvm.target.hexagon("v69", link_params=True)
_SUPER_TARGET = tvm.target.Target(_HEXAGON_TARGET, host=_HEXAGON_TARGET)
_BT = bu.BenchmarksTable()
_CSV_COLUMN_ORDER = [
"basic_kernel",
"dtype",
# When applicable, indicates the particular variation of schedules
# apply by the Python code. Decoding this may require looking at this
# script's source code.
"sched_type",
"mem_scope",
"num_vectors_per_tensor",
"row_status",
"timings_min_usecs",
"timings_max_usecs",
"timings_median_usecs",
"timings_mean_usecs",
"timings_stddev_usecs",
"host_files_dir_path",
"comments",
]
_HOST_OUTPUT_DIR = tempfile.mkdtemp()
_PRIMFUNC_NAME = "elemwise_add"
print("-" * 80)
print("OUTPUT DIRECTORY: {}".format(_HOST_OUTPUT_DIR))
print("-" * 80)
print()
from typing import Tuple
def _get_irmod_elemwise_add(
    _PRIMFUNC_NAME: str, shape: list, dtype: str, mem_scope: str
) -> tvm.ir.module.IRModule:
    """Build the TVMScript IRModule for a 2-D element-wise-add primfunc.

    Args:
        _PRIMFUNC_NAME: NOTE(review): this parameter shadows the module-level
            constant of the same name and is not referenced in the body (the
            primfunc below is literally named ``main``) — confirm whether it
            was meant to set the exported symbol name.
        shape: [dim0, dim1]; the shape shared by both inputs and the output.
        dtype: element dtype string, e.g. "int8" or "float16".
        mem_scope: buffer memory scope; "global.vtcm" is rejected below.

    Raises:
        bu.UnsupportedException: for configurations this kernel generator
            cannot handle yet (VTCM scope, or tensors estimated to exceed
            the VTCM budget).
    """
    assert len(shape) == 2
    # Bind the two sizes to plain names: TVMScript bodies cannot
    # currently evaluate more complex Python expressions...
    (
        dim0_size,
        dim1_size,
    ) = shape
    # NOTE(review): dtype_str is assigned but never used in this function.
    dtype_str = str(dtype)
    if mem_scope == "global.vtcm":
        raise bu.UnsupportedException("This benchmark kernel does not yet support VTCM buffers.")
    # This check is currently elided by the one above, but it should become relevant as soon
    # as we add VTCM support to this kernel generator.
    #
    # Also: the VTCM budget is a very rough estimate, based only on experience.
    # Assuming that it's even reasonable to use a hard-coded estimate AT ALL, this number
    # may need revisiting.
    estimated_vtcm_budget_bytes = HVX_VECTOR_BYTES * 1024
    dtype_bits = tvm._ffi.runtime_ctypes.DataType(dtype).bits
    assert dtype_bits % 8 == 0
    dtype_bytes = dtype_bits // 8
    # Three same-shaped tensors (A, B, C) would live in VTCM simultaneously.
    num_vtcm_tensors = 3
    estimated_vtcm_needed_bytes = shape[0] * shape[1] * dtype_bytes * num_vtcm_tensors
    if estimated_vtcm_needed_bytes > estimated_vtcm_budget_bytes:
        raise bu.UnsupportedException("Expect to exceed VTCM budget.")
    # Naive row-major elementwise add: C[i, j] = A[i, j] + B[i, j].
    @tvm.script.ir_module
    class BenchmarkModule:
        @T.prim_func
        def main(a: T.handle, b: T.handle, c: T.handle):
            T.func_attr({"global_symbol": "main", "tir.noalias": True})
            A = T.match_buffer(a, shape, dtype=dtype)
            B = T.match_buffer(b, shape, dtype=dtype)
            C = T.match_buffer(c, shape, dtype=dtype)
            for i in range(dim0_size):
                for j in range(dim1_size):
                    C[i, j] = A[i, j] + B[i, j]
    return BenchmarkModule
def _benchmark_hexagon_elementwise_add_kernel(
    hexagon_launcher: HexagonLauncherRPC, shape: list, dtype: str, mem_scope: str
):
    """Build, upload, run and time one elementwise-add configuration.

    Lowers the kernel for the given (shape, dtype, mem_scope), pushes the
    shared object to the Hexagon device via `hexagon_launcher`, times 10
    invocations, verifies numerical accuracy against a host-side numpy
    reference, and records a success / fail / skip row in the module-level
    _BT table. Per-configuration artifacts (log, .so) are written under
    _HOST_OUTPUT_DIR.
    """
    keys_dict = {
        "basic_kernel": "ewise-add",
        "dtype": dtype,
        "shape": shape,
        "mem_scope": mem_scope,
    }
    desc = bu.get_benchmark_decription(keys_dict)
    # One host subdirectory per configuration, named after the benchmark id.
    host_files_dir_name = bu.get_benchmark_id(keys_dict)
    host_files_dir_path = os.path.join(_HOST_OUTPUT_DIR, host_files_dir_name)
    os.mkdir(host_files_dir_path)
    keys_dict["host_files_dir_path"] = host_files_dir_path
    log_file_path = os.path.join(host_files_dir_path, "out.txt")
    with open(log_file_path, "w") as log_file:
        print(f"CONFIGURATION: {desc}")
        log_file.write(f"CONFIGURATION: {desc}\n")
        try:
            ns_tir_module = _get_irmod_elemwise_add(_PRIMFUNC_NAME, shape, dtype, mem_scope)
            # Dump the primfunc NS-TIR (as text) to the log file...
            lowered_mod = tvm.lower(ns_tir_module, _PRIMFUNC_NAME)
            log_file.write("LOWERED IR MODULE:\n")
            log_file.write(str(lowered_mod))
            log_file.write("\n")
            # Lower the primfunc's IRModule to Hexagon object code...
            A = tvm.te.placeholder(shape, dtype=dtype)
            B = tvm.te.placeholder(shape, dtype=dtype)
            C = tvm.te.placeholder(shape, dtype=dtype)
            built_module: tvm.driver.build_module.OperatorModule = tvm.build(
                ns_tir_module,
                [
                    A,
                    B,
                    C,
                ],
                _SUPER_TARGET,
                name=_PRIMFUNC_NAME,
            )
            # Save the binary on the host so it survives for post-mortem inspection.
            host_dso_binary_path = os.path.join(host_files_dir_path, "test_binary.so")
            built_module.save(host_dso_binary_path)
            print(f"SAVED BINARY TO HOST PATH: {host_dso_binary_path}")
            # Upload the .so to the Android device's file system (or wherever is appropriate
            # for the launcher in use).
            target_dso_binary_filename = "test_binary.so"
            target_dso_binary_pathname = hexagon_launcher.upload(
                host_dso_binary_path, target_dso_binary_filename
            )
            # Host-side reference inputs and the expected output (C = A + B).
            (
                host_numpy_A_data,
                host_numpy_B_data,
                host_numpy_C_data_expected,
            ) = _get_elemwise_add_reference_value_tensors(shape, dtype)
            with hexagon_launcher.start_session() as sess:
                loaded_hexagon_module: tvm.runtime.module.Module = hexagon_launcher.load_module(
                    target_dso_binary_pathname, sess
                )
                A_data = tvm.nd.empty(shape, dtype, sess.device, mem_scope)
                B_data = tvm.nd.empty(shape, dtype, sess.device, mem_scope)
                C_data = tvm.nd.empty(shape, dtype, sess.device, mem_scope)
                # Populate the primfunc's input tensors...
                A_data.copyfrom(host_numpy_A_data)
                B_data.copyfrom(host_numpy_B_data)
                # Time 10 back-to-back invocations, reported as a single repeat.
                timer = loaded_hexagon_module.time_evaluator(
                    "main", sess.device, number=10, repeat=1
                )
                timing_result = timer(A_data, B_data, C_data)
                print(f"TIMING RESULT: {timing_result}")
                log_file.write(f"TIMING RESULT: {timing_result}\n")
                result = C_data.numpy()
                # Tolerances: float16 addition on-device may round differently from
                # the float64-accumulated host reference; int8 must match exactly.
                if dtype == "float16":
                    rel_tolerance = 0.005
                    abs_tolerance = 2.0
                elif dtype == "int8":
                    rel_tolerance = 0
                    abs_tolerance = 0
                else:
                    raise Exception(f"Unexpected dtype: {dtype}")
                # The wrapping below reclassifies an accuracy failure so that
                # the numerical differences are simply recorded as a failed
                # benchmark run, rather than more serious errors that should
                # kill the overall script.
                # NOTE(review): consider `raise ... from e` to keep the chained traceback.
                try:
                    tvm.testing.assert_allclose(
                        result, host_numpy_C_data_expected, rel_tolerance, abs_tolerance
                    )
                except AssertionError as e:
                    raise bu.NumericalAccuracyException(str(e))
            _BT.record_success(timing_result, **keys_dict)
        except bu.NumericalAccuracyException as e:
            print()
            print(f"FAIL: Numerical accuracy error. See log file.")
            log_file.write("\n")
            log_file.write(f"FAIL: {e}\n")
            _BT.record_fail(**keys_dict, comments=f"Numerical accuracy error. See log file.")
        except bu.UnsupportedException as e:
            print()
            print(f"SKIP: {e}")
            log_file.write("\n")
            log_file.write(f"SKIP: {e}\n")
            _BT.record_skip(**keys_dict, comments=f"Unsupported configuration: {e}")
def _get_elemwise_add_reference_value_tensors(shape: list, dtype: str):
assert len(shape) == 2
A = np.ndarray(shape, dtype=dtype)
B = np.ndarray(shape, dtype=dtype)
np_dtype = A.dtype
if np_dtype.kind in ["i", "u"]:
# We allow overflow for integer types because it tends to be well-behaved
# and well-understood...
min_value = np.iinfo(np_dtype).min
max_value = np.iinfo(np_dtype).max
next_value = min_value
for i in range(shape[0]):
for j in range(shape[1]):
A[i, j] = next_value
B[i, j] = next_value * 2
next_value += 1
elif np_dtype.kind == "f":
# NOTE: For simplicity, we avoid test data that that require
# well-defined behavior on floating-point overflow.
# But it may be reasonable to test that in the future.
min_value = np.finfo(np_dtype).min
max_value = np.finfo(np_dtype).max
min_input_value = min_value / 2.0 + 1
max_input_value = max_value / 2.0 - 2
delta = (max_input_value - min_input_value) / (shape[0] * shape[1])
next_value = min_input_value
for i in range(shape[0]):
for j in range(shape[1]):
A[i, j] = next_value
B[i, j] = next_value + 1
next_value += delta
else:
assert False, f"Unexpected data type: {np_dtype}"
C = A + B
return [
A,
B,
C,
]
@pytest.mark.skipif(_SHOULD_SKIP_BENCHMARKS, reason=_SKIP_BENCHMARKS_REASON)
@tvm.testing.requires_hexagon
def test_elemwise_add(hexagon_launcher: HexagonLauncherRPC):
    """Sweep dtype x mem-scope x tensor-size and benchmark each combination.

    Each configuration runs a [num_vectors, elems_per_HVX_vector]
    element-wise add on the Hexagon device supplied by the
    ``hexagon_launcher`` fixture; results accumulate in the module-level
    _BT table.
    """
    for dtype in [
        "int8",
        "float16",
    ]:
        for mem_scope in [
            "global",
            "global.vtcm",
        ]:
            # These numbers are fairly arbitrary, but they're meant to stress memory/caches to
            # varying degrees.
            for num_vectors_per_tensor in [
                1,
                16,
                64,
                512,
                2048,
            ]:
                # Size dim1 so that each row is exactly one HVX vector of `dtype`.
                dtype_bits = tvm._ffi.runtime_ctypes.DataType(dtype).bits
                assert dtype_bits % 8 == 0
                dtype_bytes = dtype_bits // 8
                elem_per_hvx_vector = HVX_VECTOR_BYTES // dtype_bytes
                shape = [
                    num_vectors_per_tensor,
                    elem_per_hvx_vector,
                ]
                print()
                _benchmark_hexagon_elementwise_add_kernel(hexagon_launcher, shape, dtype, mem_scope)
print("-" * 80)
print(f"OUTPUT DIRECTORY: {_HOST_OUTPUT_DIR}")
print("-" * 80)
print()
tabular_output_filename = os.path.join(_HOST_OUTPUT_DIR, "benchmark-results.csv")
with open(tabular_output_filename, "w") as csv_file:
_BT.print_csv(csv_file, _CSV_COLUMN_ORDER)
print(f"BENCHMARK RESULTS FILE: {tabular_output_filename}")
_BT.print_csv(sys.stdout, _CSV_COLUMN_ORDER)
if _BT.has_fail() > 0:
pytest.fail("At least one benchmark configuration failed", pytrace=False)
if __name__ == "__main__":
tvm.testing.main()
| true | true |
f7f5f4c038040284d3c8fafbf7385c60829b0f9a | 2,333 | py | Python | appliances/main.py | Aleksey-Voko/Avito_autoload | c94851f2f5d0f0903321c6a2ad4c8a3d94421791 | [
"MIT"
] | 2 | 2022-01-12T12:29:57.000Z | 2022-01-13T05:17:35.000Z | appliances/main.py | Aleksey-Voko/Avito_autoload | c94851f2f5d0f0903321c6a2ad4c8a3d94421791 | [
"MIT"
] | null | null | null | appliances/main.py | Aleksey-Voko/Avito_autoload | c94851f2f5d0f0903321c6a2ad4c8a3d94421791 | [
"MIT"
] | 1 | 2022-01-12T12:29:58.000Z | 2022-01-12T12:29:58.000Z | from datetime import datetime, timedelta
from pathlib import Path
from pytils.translit import slugify
from appliances.data import (COMPANY_NAME, MANAGER_NAME, CONTACT_PHONE,
ADDRESS, CATEGORY, GOODS_TYPE, AD_TYPE,
get_description, CONDITION, PHOTO_STORAGE,
START_TIME)
from appliances.root_xml import save_root_xml
from utils import (get_list_of_dicts_from_csv_file, get_datetime,
get_repr_world_time)
def main():
    """Build and save the Avito autoload XML feed from 'Товары.csv'.

    Reads product rows, derives an ad title from the product-name length,
    schedules publication times 45 minutes apart within a daily window
    ending at 20:00, and writes the assembled ads to
    out_xml/<company>.<dd-mm-yyyy>.xml via save_root_xml.
    """
    ad_dict_list = []
    products = get_list_of_dicts_from_csv_file('Товары.csv')
    current_date = get_datetime(START_TIME)
    # FIX: the loop variable used to be `for products in products:`, shadowing
    # the list being iterated; renamed to `product`.
    for product in products:
        # Choose the longest prefix that fits — presumably to respect Avito's
        # maximum title length (thresholds 20/26 match the prefix lengths); TODO confirm.
        if len(product['Заголовок']) < 20:
            title = f'Уплотнитель двери холодильника {product["Заголовок"]}'
        elif len(product['Заголовок']) < 26:
            title = f'Уплотнитель холодильника {product["Заголовок"]}'
        else:
            title = f'Уплотнитель для {product["Заголовок"]}'
        images = [''.join([PHOTO_STORAGE, x])
                  for x in product['Ссылки на картинки'].split(', ')]
        ad_dict_list.append(
            {
                'Id': f'{datetime.now().strftime("%Y-%m")}-{slugify(title)}',
                'DateBegin': get_repr_world_time(current_date),
                'ListingFee': 'Package',
                'AdStatus': 'Free',
                'ManagerName': MANAGER_NAME,
                'ContactPhone': CONTACT_PHONE,
                'Address': ADDRESS,
                'Category': CATEGORY,
                'GoodsType': GOODS_TYPE,
                'AdType': AD_TYPE,
                'Title': title,
                'Description': get_description(product['Заголовок']),
                'Price': product['Цена'],
                'Condition': CONDITION,
                'Images': images,
            }
        )
        current_date += timedelta(minutes=45)
        # Past 20:00, continue the schedule tomorrow at 08:00.
        # FIX: advance with timedelta instead of replace(day=day + 1), which
        # raised ValueError on the last day of a month (and at year end).
        if current_date.hour >= 20 and current_date.minute > 0:
            current_date = (current_date + timedelta(days=1)).replace(hour=8, minute=0)
    now = datetime.now().strftime('%d-%m-%Y')
    file_name = ''.join([slugify(COMPANY_NAME), '.', now, '.xml'])
    file_path = Path('out_xml') / file_name
    save_root_xml(file_path, ad_dict_list)
if __name__ == '__main__':
main()
| 35.892308 | 77 | 0.572225 | from datetime import datetime, timedelta
from pathlib import Path
from pytils.translit import slugify
from appliances.data import (COMPANY_NAME, MANAGER_NAME, CONTACT_PHONE,
ADDRESS, CATEGORY, GOODS_TYPE, AD_TYPE,
get_description, CONDITION, PHOTO_STORAGE,
START_TIME)
from appliances.root_xml import save_root_xml
from utils import (get_list_of_dicts_from_csv_file, get_datetime,
get_repr_world_time)
def main():
ad_dict_list = []
products = get_list_of_dicts_from_csv_file('Товары.csv')
current_date = get_datetime(START_TIME)
for products in products:
if len(products['Заголовок']) < 20:
title = f'Уплотнитель двери холодильника {products["Заголовок"]}'
elif len(products['Заголовок']) < 26:
title = f'Уплотнитель холодильника {products["Заголовок"]}'
else:
title = f'Уплотнитель для {products["Заголовок"]}'
images = [''.join([PHOTO_STORAGE, x])
for x in products['Ссылки на картинки'].split(', ')]
ad_dict_list.append(
{
'Id': f'{datetime.now().strftime("%Y-%m")}-{slugify(title)}',
'DateBegin': get_repr_world_time(current_date),
'ListingFee': 'Package',
'AdStatus': 'Free',
'ManagerName': MANAGER_NAME,
'ContactPhone': CONTACT_PHONE,
'Address': ADDRESS,
'Category': CATEGORY,
'GoodsType': GOODS_TYPE,
'AdType': AD_TYPE,
'Title': title,
'Description': get_description(products['Заголовок']),
'Price': products['Цена'],
'Condition': CONDITION,
'Images': images,
}
)
current_date += timedelta(minutes=45)
if current_date.hour >= 20 and current_date.minute > 0:
day = current_date.day + 1
current_date = current_date.replace(day=day, hour=8, minute=0)
now = datetime.now().strftime('%d-%m-%Y')
file_name = ''.join([slugify(COMPANY_NAME), '.', now, '.xml'])
file_path = Path('out_xml') / file_name
save_root_xml(file_path, ad_dict_list)
if __name__ == '__main__':
main()
| true | true |
f7f5f56d0ed5ebf08fd9e820536d724b22f301ad | 3,979 | py | Python | mypy/patterns.py | cibinmathew/mypy | 49825a9057d8c52603e91f6b99e4de94ca3d8a66 | [
"PSF-2.0"
] | 4 | 2022-01-21T08:35:40.000Z | 2022-02-18T07:19:38.000Z | mypy/patterns.py | cibinmathew/mypy | 49825a9057d8c52603e91f6b99e4de94ca3d8a66 | [
"PSF-2.0"
] | 183 | 2021-10-21T23:30:15.000Z | 2022-03-29T00:05:26.000Z | mypy/patterns.py | cibinmathew/mypy | 49825a9057d8c52603e91f6b99e4de94ca3d8a66 | [
"PSF-2.0"
] | 3 | 2021-11-11T00:35:34.000Z | 2022-01-18T07:33:23.000Z | """Classes for representing match statement patterns."""
from typing import TypeVar, List, Optional, Union
from mypy_extensions import trait
from mypy.nodes import Node, RefExpr, NameExpr, Expression
from mypy.visitor import PatternVisitor
T = TypeVar('T')
@trait
class Pattern(Node):
    """A pattern node.

    Abstract base for all match-statement pattern AST nodes; each concrete
    subclass overrides accept() to dispatch to its PatternVisitor method.
    """
    __slots__ = ()
    def accept(self, visitor: PatternVisitor[T]) -> T:
        # Subclasses must override; the abstract base is never visited directly.
        raise RuntimeError('Not implemented')
class AsPattern(Pattern):
    """The pattern <pattern> as <name>"""
    # The Python ast, and therefore also our ast, merges capture, wildcard and as patterns
    # into one node for easier handling.
    # If pattern is None this is a capture pattern. If name and pattern are both None this
    # is a wildcard pattern.
    # Only name being None should not happen, but also won't break anything.
    pattern: Optional[Pattern]
    name: Optional[NameExpr]
    def __init__(self, pattern: Optional[Pattern], name: Optional[NameExpr]) -> None:
        super().__init__()
        self.pattern = pattern
        self.name = name
    def accept(self, visitor: PatternVisitor[T]) -> T:
        return visitor.visit_as_pattern(self)
class OrPattern(Pattern):
    """The pattern <pattern> | <pattern> | ..."""
    # Alternatives, in source order; at least two in well-formed code.
    patterns: List[Pattern]
    def __init__(self, patterns: List[Pattern]) -> None:
        super().__init__()
        self.patterns = patterns
    def accept(self, visitor: PatternVisitor[T]) -> T:
        return visitor.visit_or_pattern(self)
class ValuePattern(Pattern):
    """The pattern x.y (or x.y.z, ...)"""
    # The dotted value expression to compare against.
    expr: Expression
    def __init__(self, expr: Expression) -> None:
        super().__init__()
        self.expr = expr
    def accept(self, visitor: PatternVisitor[T]) -> T:
        return visitor.visit_value_pattern(self)
class SingletonPattern(Pattern):
    """The pattern True, False or None."""
    # This can be exactly True, False or None
    value: Union[bool, None]
    def __init__(self, value: Union[bool, None]) -> None:
        super().__init__()
        self.value = value
    def accept(self, visitor: PatternVisitor[T]) -> T:
        return visitor.visit_singleton_pattern(self)
class SequencePattern(Pattern):
    """The pattern [<pattern>, ...]"""
    # Element sub-patterns, in source order (may include a StarredPattern).
    patterns: List[Pattern]
    def __init__(self, patterns: List[Pattern]) -> None:
        super().__init__()
        self.patterns = patterns
    def accept(self, visitor: PatternVisitor[T]) -> T:
        return visitor.visit_sequence_pattern(self)
class StarredPattern(Pattern):
    """The pattern *<name> (or *_) inside a sequence pattern."""
    # None corresponds to *_ in a list pattern. It will match multiple items but won't bind
    # them to a name.
    capture: Optional[NameExpr]
    def __init__(self, capture: Optional[NameExpr]) -> None:
        super().__init__()
        self.capture = capture
    def accept(self, visitor: PatternVisitor[T]) -> T:
        return visitor.visit_starred_pattern(self)
class MappingPattern(Pattern):
    """The pattern {<expr>: <pattern>, ..., **<rest>}"""
    # Parallel lists: keys[i] is the value expression matched against values[i].
    keys: List[Expression]
    values: List[Pattern]
    # Captures the remaining mapping items (**rest); None if absent.
    rest: Optional[NameExpr]
    def __init__(self, keys: List[Expression], values: List[Pattern],
                 rest: Optional[NameExpr]) -> None:
        super().__init__()
        assert len(keys) == len(values)
        self.keys = keys
        self.values = values
        self.rest = rest
    def accept(self, visitor: PatternVisitor[T]) -> T:
        return visitor.visit_mapping_pattern(self)
class ClassPattern(Pattern):
    """The pattern Cls(...)"""
    class_ref: RefExpr
    # Positional sub-patterns, then parallel keyword name/pattern lists.
    positionals: List[Pattern]
    keyword_keys: List[str]
    keyword_values: List[Pattern]
    def __init__(self, class_ref: RefExpr, positionals: List[Pattern], keyword_keys: List[str],
                 keyword_values: List[Pattern]) -> None:
        super().__init__()
        assert len(keyword_keys) == len(keyword_values)
        self.class_ref = class_ref
        self.positionals = positionals
        self.keyword_keys = keyword_keys
        self.keyword_values = keyword_values
    def accept(self, visitor: PatternVisitor[T]) -> T:
        return visitor.visit_class_pattern(self)
| 28.833333 | 99 | 0.660216 | from typing import TypeVar, List, Optional, Union
from mypy_extensions import trait
from mypy.nodes import Node, RefExpr, NameExpr, Expression
from mypy.visitor import PatternVisitor
T = TypeVar('T')
@trait
class Pattern(Node):
__slots__ = ()
def accept(self, visitor: PatternVisitor[T]) -> T:
raise RuntimeError('Not implemented')
class AsPattern(Pattern):
pattern: Optional[Pattern]
name: Optional[NameExpr]
def __init__(self, pattern: Optional[Pattern], name: Optional[NameExpr]) -> None:
super().__init__()
self.pattern = pattern
self.name = name
def accept(self, visitor: PatternVisitor[T]) -> T:
return visitor.visit_as_pattern(self)
class OrPattern(Pattern):
patterns: List[Pattern]
def __init__(self, patterns: List[Pattern]) -> None:
super().__init__()
self.patterns = patterns
def accept(self, visitor: PatternVisitor[T]) -> T:
return visitor.visit_or_pattern(self)
class ValuePattern(Pattern):
expr: Expression
def __init__(self, expr: Expression):
super().__init__()
self.expr = expr
def accept(self, visitor: PatternVisitor[T]) -> T:
return visitor.visit_value_pattern(self)
class SingletonPattern(Pattern):
# This can be exactly True, False or None
value: Union[bool, None]
def __init__(self, value: Union[bool, None]):
super().__init__()
self.value = value
def accept(self, visitor: PatternVisitor[T]) -> T:
return visitor.visit_singleton_pattern(self)
class SequencePattern(Pattern):
patterns: List[Pattern]
def __init__(self, patterns: List[Pattern]):
super().__init__()
self.patterns = patterns
def accept(self, visitor: PatternVisitor[T]) -> T:
return visitor.visit_sequence_pattern(self)
class StarredPattern(Pattern):
# None corresponds to *_ in a list pattern. It will match multiple items but won't bind them to
capture: Optional[NameExpr]
def __init__(self, capture: Optional[NameExpr]):
super().__init__()
self.capture = capture
def accept(self, visitor: PatternVisitor[T]) -> T:
return visitor.visit_starred_pattern(self)
class MappingPattern(Pattern):
keys: List[Expression]
values: List[Pattern]
rest: Optional[NameExpr]
def __init__(self, keys: List[Expression], values: List[Pattern],
rest: Optional[NameExpr]):
super().__init__()
assert len(keys) == len(values)
self.keys = keys
self.values = values
self.rest = rest
def accept(self, visitor: PatternVisitor[T]) -> T:
return visitor.visit_mapping_pattern(self)
class ClassPattern(Pattern):
class_ref: RefExpr
positionals: List[Pattern]
keyword_keys: List[str]
keyword_values: List[Pattern]
def __init__(self, class_ref: RefExpr, positionals: List[Pattern], keyword_keys: List[str],
keyword_values: List[Pattern]):
super().__init__()
assert len(keyword_keys) == len(keyword_values)
self.class_ref = class_ref
self.positionals = positionals
self.keyword_keys = keyword_keys
self.keyword_values = keyword_values
def accept(self, visitor: PatternVisitor[T]) -> T:
return visitor.visit_class_pattern(self)
| true | true |
f7f5f57afe4d606a3ea72be29d94d51dd8a1a03a | 12,204 | py | Python | .history/src/Simulador_20200710105647.py | eduardodut/Trabalho_final_estatistica_cd | fbedbbea6bdd7a79e1d62030cde0fab4e93fc338 | [
"MIT"
] | null | null | null | .history/src/Simulador_20200710105647.py | eduardodut/Trabalho_final_estatistica_cd | fbedbbea6bdd7a79e1d62030cde0fab4e93fc338 | [
"MIT"
] | null | null | null | .history/src/Simulador_20200710105647.py | eduardodut/Trabalho_final_estatistica_cd | fbedbbea6bdd7a79e1d62030cde0fab4e93fc338 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
from Matriz_esferica import Matriz_esferica
from Individuo import Individuo, Fabrica_individuo
import random
from itertools import permutations
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from scipy.sparse import csr_matrix
class Simulador():
    """Agent-based epidemic simulator on a wrap-around ("spherical") grid.

    Tracks healthy, type-1 infected, type-2 infected (contagious), cured and
    dead individuals across discrete updates, recording per-update counts in
    a DataFrame and a history of sparse status matrices.

    NOTE(review): this looks like work-in-progress (autosaved from .history):
    ``self.matriz_individuos`` is read by verificar_* and mover_infectante but
    is never assigned anywhere in the class, so any update with infected
    individuals will raise AttributeError.
    """
    def __init__(
        self,
        tamanho_matriz,  # number of rows/columns of the spherical matrix
        percentual_inicial_tipo1,  # initial share of the population infected as type 1
        percentual_inicial_tipo2,  # initial share of the population infected as type 2
        chance_infeccao,  # chance a type-2 infected has of infecting a healthy individual
        chance_infeccao_tipo2,  # chance an infected individual becomes contagious
        chance_morte,  # chance a type-2 individual dies at the end of an update
        atualizacoes_cura):  # number of updates needed to cure a type-1 or type-2 individual
        self.num_atualizacoes = 0
        self.individuos_infectados_tipo_2 = []
        self.individuos_infectados_tipo_1 = []
        self.individuos_curados = []
        self.individuos_mortos = []
        self.lista_matrizes_posicionamento = []
        # Sparse int8 status grid; 0 (implicit) is the healthy state.
        self.matriz_status = csr_matrix((tamanho_matriz,tamanho_matriz), dtype=np.int8)
        self.fabrica_individuo = Fabrica_individuo(
            chance_infeccao,
            chance_infeccao_tipo2,
            chance_morte,
            atualizacoes_cura)
        # object responsible for validating movement on the n x n grid
        self.matriz_esferica = Matriz_esferica(tamanho_matriz)
        self.populacao_inicial = int(tamanho_matriz**2)
        self.num_inicial_tipo2 = int(self.populacao_inicial * percentual_inicial_tipo2)
        self.num_inicial_tipo1 = int(self.populacao_inicial * percentual_inicial_tipo1)
        self.num_inicial_sadios = self.populacao_inicial - (self.num_inicial_tipo2 + self.num_inicial_tipo1)
        self.popular(tamanho_matriz)
        # NOTE(review): this dict of initial counts is built but never stored
        # (not passed to the DataFrame below); it also shadows builtin `dict`.
        dict = {
            'num_sadios':self.num_inicial_sadios,
            'num_infect_t1':self.num_inicial_tipo1,
            'num_infect_t2':self.num_inicial_tipo2,
            'num_curados':0,
            'num_mortos':0}
        # DataFrame that will hold the results of each update
        self.dataframe = pd.DataFrame(index = [0])
        self.salvar_posicionamento()
    def salvar_posicionamento(self):
        """Append the current status matrix to the positioning history.

        NOTE(review): the same csr_matrix object is appended each time; since
        it is mutated in place, all history entries alias the latest state.
        """
        self.lista_matrizes_posicionamento.append(self.matriz_status)
    def verificar_infeccao(self, lista_infectantes):
        """Let each infectant try to infect its healthy neighbors.

        Returns (new_type1_indices, new_type2_indices) as lists of (x, y).
        """
        lista_novos_infectados_tipo1 = []
        lista_novos_infectados_tipo2 = []
        # iterate over the infecting individuals; each performs its infection activity
        for indice in lista_infectantes:
            # fetch the neighbors of the current infectant
            lista_vizinhos = self.matriz_esferica.get_vizinhos(indice[0], indice[1])
            # for each neighbor, if it is healthy, a random draw decides infection
            for vizinho in lista_vizinhos:
                x = vizinho[0]
                y = vizinho[1]
                # only HEALTHY cells can be infected
                if self.matriz_status[x,y] == Individuo.SADIO:
                    # determine the new status
                    # NOTE(review): self.matriz_individuos is never initialized — see class docstring.
                    novo_status = self.matriz_individuos.loc[indice[0], indice[1]].infectar()
                    # newly infected as type 1
                    if novo_status == Individuo.INFECTADO_TIPO_1:
                        # add to the list of new type-1 infections
                        lista_novos_infectados_tipo1.append((x,y))
                        # update the status of the newly infected object
                        self.matriz_individuos.loc[x,y].status = Individuo.INFECTADO_TIPO_1
                        # update the status matrix
                        self.matriz_status[x,y] = Individuo.INFECTADO_TIPO_1
                    if novo_status == Individuo.INFECTADO_TIPO_2:
                        # add to the list of new type-2 infections
                        lista_novos_infectados_tipo2.append((x,y))
                        # update the status of the newly infected object
                        self.matriz_individuos.loc[x,y].status = Individuo.INFECTADO_TIPO_2
                        # update the status matrix
                        self.matriz_status[x,y] = Individuo.INFECTADO_TIPO_2
        return lista_novos_infectados_tipo1, lista_novos_infectados_tipo2
    def verificar_morte(self, lista_infectantes_tipo2):
        """Run the death check for each type-2 infected; return dead indices."""
        lista_mortos = []
        for indice in lista_infectantes_tipo2:
            novo_status = self.matriz_individuos.loc[indice[0], indice[1]].checagem_morte()
            if novo_status == Individuo.MORTO:
                lista_mortos.append(indice)
                self.matriz_status[indice[0], indice[1]] = Individuo.MORTO
        return lista_mortos
    def verificar_cura(self, lista_infectantes):
        """Run the cure check for each infected individual; return cured indices."""
        lista_curados = []
        for indice in lista_infectantes:
            novo_status = self.matriz_individuos.loc[indice[0], indice[1]].checagem_cura()
            if novo_status == Individuo.CURADO:
                lista_curados.append(indice)
                self.matriz_status[indice[0], indice[1]] = Individuo.CURADO
        return lista_curados
    def iterar(self):
        """Advance the simulation one step: infect, kill, cure, move, record."""
        # Determine new infections caused by the current infectants
        lista_novos_infectados_tipo1_1, lista_novos_infectados_tipo2_1 = self.verificar_infeccao(self.individuos_infectados_tipo_1)
        lista_novos_infectados_tipo1_2, lista_novos_infectados_tipo2_2 = self.verificar_infeccao(self.individuos_infectados_tipo_2)
        # Death check for type-2 individuals
        lista_mortos = self.verificar_morte(self.individuos_infectados_tipo_2)
        # remove the newly dead from the type-2 infected list
        self.individuos_infectados_tipo_2 = [i for i in self.individuos_infectados_tipo_2 if i not in lista_mortos]
        # add the new deaths to the overall dead list
        self.individuos_mortos = self.individuos_mortos + lista_mortos
        # Cure check
        lista_curados_tipo1 = self.verificar_cura(self.individuos_infectados_tipo_1)
        lista_curados_tipo2 = self.verificar_cura(self.individuos_infectados_tipo_2)
        # remove the cured from the type-1 and type-2 infected lists
        self.individuos_infectados_tipo_2 = [i for i in self.individuos_infectados_tipo_2 if i not in lista_curados_tipo2]
        self.individuos_infectados_tipo_1 = [i for i in self.individuos_infectados_tipo_1 if i not in lista_curados_tipo1]
        # add the newly cured to the overall cured list
        self.individuos_curados = self.individuos_curados + lista_curados_tipo1 + lista_curados_tipo2
        # move the infectants:
        for i in range(len(self.individuos_infectados_tipo_1)):
            self.individuos_infectados_tipo_1[i] = self.mover_infectante(self.individuos_infectados_tipo_1[i])
        for i in range(len(self.individuos_infectados_tipo_2)):
            self.individuos_infectados_tipo_2[i] = self.mover_infectante(self.individuos_infectados_tipo_2[i])
        # append the new type-1 and type-2 infections to their lists
        self.individuos_infectados_tipo_2 = self.individuos_infectados_tipo_2 + lista_novos_infectados_tipo2_1 + lista_novos_infectados_tipo2_2
        self.individuos_infectados_tipo_1 = self.individuos_infectados_tipo_1 + lista_novos_infectados_tipo1_1 + lista_novos_infectados_tipo1_2
        # record this update's results in the dataframe:
        num_mortos = len(self.individuos_mortos)
        num_curados = len(self.individuos_curados)
        num_tipo_1 = len(self.individuos_infectados_tipo_1)
        num_tipo_2 = len(self.individuos_infectados_tipo_2)
        dict = {
            'num_sadios':self.populacao_inicial - num_mortos - num_curados - num_tipo_1 - num_tipo_2 ,
            'num_infect_t1':num_tipo_1,
            'num_infect_t2':num_tipo_2,
            'num_curados':num_curados,
            'num_mortos':num_mortos}
        # NOTE(review): DataFrame.append was removed in pandas 2.0 — use pd.concat.
        self.dataframe = self.dataframe.append(dict, ignore_index=True)
        # save the new status matrix
        self.salvar_posicionamento()
        # one more update performed on the matrix
        self.num_atualizacoes +=1
    def popular(self, tamanho_matriz):
        """Place the initial type-1 and type-2 infected on random cells.

        NOTE(review): permutations(range(n), 2) excludes diagonal cells (i, i),
        so only n*(n-1) positions are candidates, not n**2.
        """
        # list of possible index combinations for the data matrix
        permutacoes = permutations(list(range(tamanho_matriz)),2)
        # conversion to a list of (x, y) tuples
        lista_indices = list(permutacoes)
        # shuffle the indices
        random.shuffle(lista_indices)
        # create the first type-1 individual:
        # NOTE(review): this appends a bare (x, y) tuple while the loops below
        # append Individuo objects — and it runs even when num_inicial_tipo1 == 0.
        indice = lista_indices.pop()
        ind_x = indice[0]
        ind_y = indice[1]
        self.individuos_infectados_tipo_1.append((ind_x,ind_y))
        self.matriz_status[ind_x,ind_y] = Individuo.INFECTADO_TIPO_1
        # create the rest of the type-1 individuals
        for i in range(1,self.num_inicial_tipo1):
            indice = lista_indices.pop()
            ind_x = indice[0]
            ind_y = indice[1]
            self.individuos_infectados_tipo_1.append(self.fabrica_individuo.criar_individuo(Individuo.INFECTADO_TIPO_1,(ind_x,ind_y)))
            self.matriz_status[ind_x,ind_y] = Individuo.INFECTADO_TIPO_1
        # create the type-2 individuals:
        for indice in range(self.num_inicial_tipo2):
            indice = lista_indices.pop()
            ind_x = indice[0]
            ind_y = indice[1]
            self.individuos_infectados_tipo_2.append(self.fabrica_individuo.criar_individuo(Individuo.INFECTADO_TIPO_2,(ind_x,ind_y)))
            self.matriz_status[ind_x,ind_y] = Individuo.INFECTADO_TIPO_2
    def mover_infectante(self, indice):
        """Move one infectant one step up/down/left/right (uniformly at random).

        Returns the validated new (x, y) on the wrap-around grid.
        NOTE(review): the swaps below use the mutated, unvalidated pos_x/pos_y
        as the source cell instead of the original indice — confirm intent.
        """
        pos_x, pos_y = indice[0], indice[1]
        rng_posicao = random.random()
        if rng_posicao <=0.25:
            # move up
            pos_x -= 1
        elif rng_posicao <=0.5:
            # move down
            pos_x += 1
        elif rng_posicao <=0.75:
            # move left
            pos_y -= 1
        else:
            # move right
            pos_y += 1
        novo_x, novo_y = self.matriz_esferica.valida_ponto_matriz(pos_x, pos_y)
        # swap the values in the dataframe
        aux = self.matriz_individuos.loc[novo_x, novo_y]
        self.matriz_individuos.loc[novo_x, novo_y] = self.matriz_individuos.loc[pos_x, pos_y]
        self.matriz_individuos.loc[pos_x, pos_y] = aux
        # swap the values in the status matrix
        aux = self.matriz_status[novo_x, novo_y]
        self.matriz_status[novo_x, novo_y] = self.matriz_status[pos_x, pos_y]
        self.matriz_status[pos_x, pos_y] = aux
        return (novo_x, novo_y)
# --- Smoke-test driver: build a 10x10 simulator with no initial infected
# (both percentages are 0.0) and plot the first four status snapshots. ---
chance_infeccao = 0.3
chance_infeccao_tipo2 = 0.2
chance_morte = 0.2
atualizacoes_cura = 10
percentual_inicial_tipo1 = 0.
percentual_inicial_tipo2 = 0.
sim = Simulador(
    10,
    percentual_inicial_tipo1,
    percentual_inicial_tipo2,
    chance_infeccao,
    chance_infeccao_tipo2,
    chance_morte,atualizacoes_cura)
#print(sim.lista_matrizes_posicionamento[0])
#print(sim.individuos_infectados_tipo_2)
#print(sim.individuos_infectados_tipo_1)
#cmap = ListedColormap(['w', 'y', 'yellow', 'red'])
plt.matshow(sim.lista_matrizes_posicionamento[0])#, cmap = cmap)
sim.iterar()
plt.matshow(sim.lista_matrizes_posicionamento[1])#, cmap = cmap)
sim.iterar()
plt.matshow(sim.lista_matrizes_posicionamento[2])#, cmap = cmap)
sim.iterar()
plt.matshow(sim.lista_matrizes_posicionamento[3])#, cmap = cmap)
#sim.iterar()
plt.show();
| 41.369492 | 145 | 0.649705 | import pandas as pd
import numpy as np
from Matriz_esferica import Matriz_esferica
from Individuo import Individuo, Fabrica_individuo
import random
from itertools import permutations
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from scipy.sparse import csr_matrix
class Simulador():
def __init__(
self,
tamanho_matriz,
percentual_inicial_tipo1,
percentual_inicial_tipo2,
chance_infeccao,
chance_infeccao_tipo2,
chance_morte,
atualizacoes_cura):
self.num_atualizacoes = 0
self.individuos_infectados_tipo_2 = []
self.individuos_infectados_tipo_1 = []
self.individuos_curados = []
self.individuos_mortos = []
self.lista_matrizes_posicionamento = []
self.matriz_status = csr_matrix((tamanho_matriz,tamanho_matriz), dtype=np.int8)
self.fabrica_individuo = Fabrica_individuo(
chance_infeccao,
chance_infeccao_tipo2,
chance_morte,
atualizacoes_cura)
self.matriz_esferica = Matriz_esferica(tamanho_matriz)
self.populacao_inicial = int(tamanho_matriz**2)
self.num_inicial_tipo2 = int(self.populacao_inicial * percentual_inicial_tipo2)
self.num_inicial_tipo1 = int(self.populacao_inicial * percentual_inicial_tipo1)
self.num_inicial_sadios = self.populacao_inicial - (self.num_inicial_tipo2 + self.num_inicial_tipo1)
self.popular(tamanho_matriz)
dict = {
'num_sadios':self.num_inicial_sadios,
'num_infect_t1':self.num_inicial_tipo1,
'num_infect_t2':self.num_inicial_tipo2,
'num_curados':0,
'num_mortos':0}
self.dataframe = pd.DataFrame(index = [0])
self.salvar_posicionamento()
def salvar_posicionamento(self):
self.lista_matrizes_posicionamento.append(self.matriz_status)
def verificar_infeccao(self, lista_infectantes):
lista_novos_infectados_tipo1 = []
lista_novos_infectados_tipo2 = []
for indice in lista_infectantes:
lista_vizinhos = self.matriz_esferica.get_vizinhos(indice[0], indice[1])
for vizinho in lista_vizinhos:
x = vizinho[0]
y = vizinho[1]
if self.matriz_status[x,y] == Individuo.SADIO:
novo_status = self.matriz_individuos.loc[indice[0], indice[1]].infectar()
if novo_status == Individuo.INFECTADO_TIPO_1:
lista_novos_infectados_tipo1.append((x,y))
self.matriz_individuos.loc[x,y].status = Individuo.INFECTADO_TIPO_1
self.matriz_status[x,y] = Individuo.INFECTADO_TIPO_1
if novo_status == Individuo.INFECTADO_TIPO_2:
lista_novos_infectados_tipo2.append((x,y))
self.matriz_individuos.loc[x,y].status = Individuo.INFECTADO_TIPO_2
self.matriz_status[x,y] = Individuo.INFECTADO_TIPO_2
return lista_novos_infectados_tipo1, lista_novos_infectados_tipo2
def verificar_morte(self, lista_infectantes_tipo2):
lista_mortos = []
for indice in lista_infectantes_tipo2:
novo_status = self.matriz_individuos.loc[indice[0], indice[1]].checagem_morte()
if novo_status == Individuo.MORTO:
lista_mortos.append(indice)
self.matriz_status[indice[0], indice[1]] = Individuo.MORTO
return lista_mortos
def verificar_cura(self, lista_infectantes):
lista_curados = []
for indice in lista_infectantes:
novo_status = self.matriz_individuos.loc[indice[0], indice[1]].checagem_cura()
if novo_status == Individuo.CURADO:
lista_curados.append(indice)
self.matriz_status[indice[0], indice[1]] = Individuo.CURADO
return lista_curados
def iterar(self):
    """Executa um passo completo da simulacao.

    Ordem do passo: (1) infectantes tentam infectar vizinhos,
    (2) infectados tipo 2 podem morrer, (3) infectados podem se curar,
    (4) infectantes remanescentes se movem, (5) novos infectados entram
    nas listas, (6) os contadores sao gravados no dataframe e um snapshot
    do posicionamento e salvo.
    """
    # (1) Propagacao a partir dos dois grupos de infectantes.
    lista_novos_infectados_tipo1_1, lista_novos_infectados_tipo2_1 = self.verificar_infeccao(self.individuos_infectados_tipo_1)
    lista_novos_infectados_tipo1_2, lista_novos_infectados_tipo2_2 = self.verificar_infeccao(self.individuos_infectados_tipo_2)
    # (2) Somente infectados tipo 2 estao sujeitos a morte.
    lista_mortos = self.verificar_morte(self.individuos_infectados_tipo_2)
    self.individuos_infectados_tipo_2 = [i for i in self.individuos_infectados_tipo_2 if i not in lista_mortos]
    self.individuos_mortos = self.individuos_mortos + lista_mortos
    # (3) Cura em ambos os grupos.
    lista_curados_tipo1 = self.verificar_cura(self.individuos_infectados_tipo_1)
    lista_curados_tipo2 = self.verificar_cura(self.individuos_infectados_tipo_2)
    self.individuos_infectados_tipo_2 = [i for i in self.individuos_infectados_tipo_2 if i not in lista_curados_tipo2]
    self.individuos_infectados_tipo_1 = [i for i in self.individuos_infectados_tipo_1 if i not in lista_curados_tipo1]
    self.individuos_curados = self.individuos_curados + lista_curados_tipo1 + lista_curados_tipo2
    # (4) Movimento dos infectantes remanescentes.
    # BUGFIX: a linha original estava truncada ("len(...)):", sem o
    # "for i in range("), o que quebrava o arquivo; laco restaurado.
    for i in range(len(self.individuos_infectados_tipo_1)):
        self.individuos_infectados_tipo_1[i] = self.mover_infectante(self.individuos_infectados_tipo_1[i])
    for i in range(len(self.individuos_infectados_tipo_2)):
        self.individuos_infectados_tipo_2[i] = self.mover_infectante(self.individuos_infectados_tipo_2[i])
    # (5) Novos infectados so entram depois do movimento (nao se movem
    # nem infectam no passo em que foram contaminados).
    self.individuos_infectados_tipo_2 = self.individuos_infectados_tipo_2 + lista_novos_infectados_tipo2_1 + lista_novos_infectados_tipo2_2
    self.individuos_infectados_tipo_1 = self.individuos_infectados_tipo_1 + lista_novos_infectados_tipo1_1 + lista_novos_infectados_tipo1_2
    # (6) Registro dos contadores do passo.
    num_mortos = len(self.individuos_mortos)
    num_curados = len(self.individuos_curados)
    num_tipo_1 = len(self.individuos_infectados_tipo_1)
    num_tipo_2 = len(self.individuos_infectados_tipo_2)
    # "registro" em vez de "dict" para nao sombrear o builtin.
    registro = {
        'num_sadios': self.populacao_inicial - num_mortos - num_curados - num_tipo_1 - num_tipo_2,
        'num_infect_t1': num_tipo_1,
        'num_infect_t2': num_tipo_2,
        'num_curados': num_curados,
        'num_mortos': num_mortos}
    # NOTE(review): DataFrame.append foi removido no pandas 2.0; migrar
    # para pd.concat quando o projeto atualizar o pandas.
    self.dataframe = self.dataframe.append(registro, ignore_index=True)
    self.salvar_posicionamento()
    self.num_atualizacoes += 1
def popular(self, tamanho_matriz):
    """Sorteia posicoes iniciais para os infectados tipo 1 e tipo 2.

    Embaralha pares de indices da matriz e consome um par por infectado,
    marcando a matriz de status e registrando a coordenada nas listas de
    infectantes.
    NOTE(review): permutations(range(n), 2) gera apenas pares com
    x != y, ou seja, a diagonal da matriz nunca recebe um infectado
    inicial — confirmar se isso e intencional.
    """
    permutacoes = permutations(list(range(tamanho_matriz)), 2)
    lista_indices = list(permutacoes)
    random.shuffle(lista_indices)
    # Primeiro infectado tipo 1 (sempre semeado).
    ind_x, ind_y = lista_indices.pop()
    self.individuos_infectados_tipo_1.append((ind_x, ind_y))
    self.matriz_status[ind_x, ind_y] = Individuo.INFECTADO_TIPO_1
    # BUGFIX: as listas de infectados guardam coordenadas (x, y) — e assim
    # que verificar_infeccao/mover_infectante as consomem — mas os lacos
    # originais adicionavam objetos retornados por criar_individuo,
    # misturando tipos na mesma lista. Agora todas as entradas sao tuplas.
    for _ in range(1, self.num_inicial_tipo1):
        ind_x, ind_y = lista_indices.pop()
        self.individuos_infectados_tipo_1.append((ind_x, ind_y))
        self.matriz_status[ind_x, ind_y] = Individuo.INFECTADO_TIPO_1
    for _ in range(self.num_inicial_tipo2):
        ind_x, ind_y = lista_indices.pop()
        self.individuos_infectados_tipo_2.append((ind_x, ind_y))
        self.matriz_status[ind_x, ind_y] = Individuo.INFECTADO_TIPO_2
def mover_infectante(self, indice):
    """Move um infectante um passo em direcao aleatoria e devolve (x, y).

    Cada direcao (esquerda, direita, cima, baixo) tem probabilidade 0.25.
    A posicao alvo e normalizada pela matriz esferica e os conteudos das
    duas celulas sao trocados tanto na matriz de individuos quanto na de
    status.
    """
    pos_x, pos_y = indice[0], indice[1]
    sorteio = random.random()
    if sorteio <= 0.25:
        pos_x -= 1
    elif sorteio <= 0.5:
        pos_x += 1
    elif sorteio <= 0.75:
        pos_y -= 1
    else:
        pos_y += 1
    # Normaliza a posicao alvo (a matriz "enrola" nas bordas).
    novo_x, novo_y = self.matriz_esferica.valida_ponto_matriz(pos_x, pos_y)
    # Troca os conteudos entre a celula alvo validada e a nao validada,
    # exatamente como o swap original com variavel auxiliar.
    self.matriz_individuos.loc[novo_x, novo_y], self.matriz_individuos.loc[pos_x, pos_y] = (
        self.matriz_individuos.loc[pos_x, pos_y],
        self.matriz_individuos.loc[novo_x, novo_y],
    )
    self.matriz_status[novo_x, novo_y], self.matriz_status[pos_x, pos_y] = (
        self.matriz_status[pos_x, pos_y],
        self.matriz_status[novo_x, novo_y],
    )
    return (novo_x, novo_y)
# Parametros da simulacao.
chance_infeccao = 0.3
chance_infeccao_tipo2 = 0.2
chance_morte = 0.2
atualizacoes_cura = 10
# Percentuais iniciais de infectados ("0." == 0.0).
# NOTE(review): com 0% os unicos infectados iniciais sao os semeados
# incondicionalmente por popular() — confirmar se era a intencao.
percentual_inicial_tipo1 = 0.
percentual_inicial_tipo2 = 0.
# Matriz 10x10; demais argumentos na ordem esperada pelo construtor.
sim = Simulador(
    10,
    percentual_inicial_tipo1,
    percentual_inicial_tipo2,
    chance_infeccao,
    chance_infeccao_tipo2,
    chance_morte,atualizacoes_cura)
# Plota o estado inicial e o estado apos cada uma de tres iteracoes.
plt.matshow(sim.lista_matrizes_posicionamento[0])
sim.iterar()
plt.matshow(sim.lista_matrizes_posicionamento[1])
sim.iterar()
plt.matshow(sim.lista_matrizes_posicionamento[2])
sim.iterar()
plt.matshow(sim.lista_matrizes_posicionamento[3])
plt.show();
| true | true |
f7f5f5981972a23bade957895f383213a96a34b1 | 8,253 | py | Python | ray/rllib/test/test_nested_spaces.py | hyyh28/tesp | 8109b39011e05545453950c918b14da07e70fad3 | [
"MIT"
] | 29 | 2019-05-18T12:18:34.000Z | 2022-03-30T01:46:48.000Z | ray/rllib/test/test_nested_spaces.py | kivo360/tesp | a77d9c228a6891b304e789ba2758a4cbfdb75ec0 | [
"MIT"
] | 8 | 2019-08-15T05:42:10.000Z | 2021-05-21T09:41:15.000Z | ray/rllib/test/test_nested_spaces.py | kivo360/tesp | a77d9c228a6891b304e789ba2758a4cbfdb75ec0 | [
"MIT"
] | 8 | 2019-07-15T22:36:20.000Z | 2020-08-09T07:03:26.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pickle
from gym import spaces
from gym.envs.registration import EnvSpec
import gym
import tensorflow.contrib.slim as slim
import tensorflow as tf
import unittest
import ray
from ray.rllib.agents.pg import PGAgent
from ray.rllib.env.async_vector_env import AsyncVectorEnv
from ray.rllib.env.vector_env import VectorEnv
from ray.rllib.models import ModelCatalog
from ray.rllib.models.model import Model
from ray.rllib.test.test_serving_env import SimpleServing
from ray.tune.registry import register_env
# Nested observation space used by the Dict-space tests: two levels of
# Dict nesting plus a Tuple of image Boxes inside "sensors".
DICT_SPACE = spaces.Dict({
    "sensors": spaces.Dict({
        "position": spaces.Box(low=-100, high=100, shape=(3, )),
        "velocity": spaces.Box(low=-1, high=1, shape=(3, )),
        "front_cam": spaces.Tuple(
            (spaces.Box(low=0, high=1, shape=(10, 10, 3)),
             spaces.Box(low=0, high=1, shape=(10, 10, 3)))),
        "rear_cam": spaces.Box(low=0, high=1, shape=(10, 10, 3)),
    }),
    "inner_state": spaces.Dict({
        "charge": spaces.Discrete(100),
        "job_status": spaces.Dict({
            "task": spaces.Discrete(5),
            "progress": spaces.Box(low=0, high=100, shape=()),
        })
    })
})
# Fixed pool of observations; the envs below replay these so the tests can
# compare what the model receives against known values.
DICT_SAMPLES = [DICT_SPACE.sample() for _ in range(10)]
# Tuple-space counterpart of DICT_SPACE (Box, Tuple of Boxes, Discrete).
TUPLE_SPACE = spaces.Tuple([
    spaces.Box(low=-100, high=100, shape=(3, )),
    spaces.Tuple((spaces.Box(low=0, high=1, shape=(10, 10, 3)),
                  spaces.Box(low=0, high=1, shape=(10, 10, 3)))),
    spaces.Discrete(5),
])
TUPLE_SAMPLES = [TUPLE_SPACE.sample() for _ in range(10)]
def one_hot(i, n):
    """Return a length-``n`` list of floats with 1.0 at index ``i``."""
    encoding = [0.0 for _ in range(n)]
    encoding[i] = 1.0
    return encoding
class NestedDictEnv(gym.Env):
    """Toy env emitting the fixed DICT_SAMPLES observations.

    Episodes last exactly 5 steps; reward is always 1. Observation k of an
    episode is DICT_SAMPLES[k], which lets the suite check that models see
    correctly reconstructed nested observations.
    """

    def __init__(self):
        self.action_space = spaces.Discrete(2)
        self.observation_space = DICT_SPACE
        self._spec = EnvSpec("NestedDictEnv-v0")
        self.steps = 0

    def reset(self):
        self.steps = 0
        return DICT_SAMPLES[0]

    def step(self, action):
        self.steps += 1
        done = self.steps >= 5
        return DICT_SAMPLES[self.steps], 1, done, {}
class NestedTupleEnv(gym.Env):
    """Toy env emitting the fixed TUPLE_SAMPLES observations.

    Same contract as NestedDictEnv but with the Tuple observation space:
    5-step episodes, constant reward 1, observation k is TUPLE_SAMPLES[k].
    """

    def __init__(self):
        self.action_space = spaces.Discrete(2)
        self.observation_space = TUPLE_SPACE
        self._spec = EnvSpec("NestedTupleEnv-v0")
        self.steps = 0

    def reset(self):
        self.steps = 0
        return TUPLE_SAMPLES[0]

    def step(self, action):
        self.steps += 1
        done = self.steps >= 5
        return TUPLE_SAMPLES[self.steps], 1, done, {}
class InvalidModel(Model):
    """Deliberately broken custom model.

    _build_layers_v2 must return TF tensors; returning strings lets
    testInvalidModel assert that RLlib rejects invalid models with a
    ValueError. Do not "fix" this class.
    """

    def _build_layers_v2(self, input_dict, num_outputs, options):
        # Intentionally invalid return values (not tensors).
        return "not", "valid"
class DictSpyModel(Model):
    """Model that records the nested Dict observations it receives.

    Each forward pass pickles selected observation fields into Ray's
    internal KV store under "d_spy_in_<k>" so the test suite can later
    verify that RLlib reconstructed the nested observation correctly.
    """

    # Monotonic counter shared by all instances; keys the KV entries.
    capture_index = 0

    def _build_layers_v2(self, input_dict, num_outputs, options):
        def spy(pos, front_cam, task):
            # TF runs this function in an isolated context, so we have to use
            # redis to communicate back to our suite
            ray.experimental.internal_kv._internal_kv_put(
                "d_spy_in_{}".format(DictSpyModel.capture_index),
                pickle.dumps((pos, front_cam, task)))
            DictSpyModel.capture_index += 1
            return 0

        # stateful=True: the py_func has side effects and must not be
        # deduplicated/pruned by TF.
        spy_fn = tf.py_func(
            spy, [
                input_dict["obs"]["sensors"]["position"],
                input_dict["obs"]["sensors"]["front_cam"][0],
                input_dict["obs"]["inner_state"]["job_status"]["task"]
            ],
            tf.int64,
            stateful=True)

        # control_dependencies forces the spy to run on every forward pass.
        with tf.control_dependencies([spy_fn]):
            output = slim.fully_connected(
                input_dict["obs"]["sensors"]["position"], num_outputs)
        return output, output
class TupleSpyModel(Model):
    """Model that records the nested Tuple observations it receives.

    Tuple-space counterpart of DictSpyModel: pickles selected observation
    components into Ray's internal KV store under "t_spy_in_<k>" for the
    test suite to verify.
    """

    # Monotonic counter shared by all instances; keys the KV entries.
    capture_index = 0

    def _build_layers_v2(self, input_dict, num_outputs, options):
        def spy(pos, cam, task):
            # TF runs this function in an isolated context, so we have to use
            # redis to communicate back to our suite
            ray.experimental.internal_kv._internal_kv_put(
                "t_spy_in_{}".format(TupleSpyModel.capture_index),
                pickle.dumps((pos, cam, task)))
            TupleSpyModel.capture_index += 1
            return 0

        # stateful=True keeps the side-effecting py_func from being pruned.
        spy_fn = tf.py_func(
            spy, [
                input_dict["obs"][0],
                input_dict["obs"][1][0],
                input_dict["obs"][2],
            ],
            tf.int64,
            stateful=True)

        # Forces the spy to execute before the model output is produced.
        with tf.control_dependencies([spy_fn]):
            output = slim.fully_connected(input_dict["obs"][0], num_outputs)
        return output, output
class NestedSpacesTest(unittest.TestCase):
    """End-to-end checks that RLlib preserves nested Dict/Tuple obs spaces
    across the gym, VectorEnv, ServingEnv and AsyncVectorEnv wrappers."""

    def testInvalidModel(self):
        # A custom model returning non-tensors must be rejected at agent
        # construction time.
        ModelCatalog.register_custom_model("invalid", InvalidModel)
        self.assertRaises(ValueError, lambda: PGAgent(
            env="CartPole-v0", config={
                "model": {
                    "custom_model": "invalid",
                },
            }))

    def doTestNestedDict(self, make_env):
        # Train one PG iteration on the given env factory, then compare the
        # observations captured by DictSpyModel against DICT_SAMPLES.
        ModelCatalog.register_custom_model("composite", DictSpyModel)
        register_env("nested", make_env)
        pg = PGAgent(
            env="nested",
            config={
                "num_workers": 0,
                "sample_batch_size": 5,
                "model": {
                    "custom_model": "composite",
                },
            })
        pg.train()
        # Check that the model sees the correct reconstructed observations
        for i in range(4):
            seen = pickle.loads(
                ray.experimental.internal_kv._internal_kv_get(
                    "d_spy_in_{}".format(i)))
            pos_i = DICT_SAMPLES[i]["sensors"]["position"].tolist()
            cam_i = DICT_SAMPLES[i]["sensors"]["front_cam"][0].tolist()
            # Discrete fields arrive one-hot encoded (5 task classes).
            task_i = one_hot(
                DICT_SAMPLES[i]["inner_state"]["job_status"]["task"], 5)
            self.assertEqual(seen[0][0].tolist(), pos_i)
            self.assertEqual(seen[1][0].tolist(), cam_i)
            self.assertEqual(seen[2][0].tolist(), task_i)

    def doTestNestedTuple(self, make_env):
        # Tuple-space counterpart of doTestNestedDict, using TupleSpyModel.
        ModelCatalog.register_custom_model("composite2", TupleSpyModel)
        register_env("nested2", make_env)
        pg = PGAgent(
            env="nested2",
            config={
                "num_workers": 0,
                "sample_batch_size": 5,
                "model": {
                    "custom_model": "composite2",
                },
            })
        pg.train()
        # Check that the model sees the correct reconstructed observations
        for i in range(4):
            seen = pickle.loads(
                ray.experimental.internal_kv._internal_kv_get(
                    "t_spy_in_{}".format(i)))
            pos_i = TUPLE_SAMPLES[i][0].tolist()
            cam_i = TUPLE_SAMPLES[i][1][0].tolist()
            task_i = one_hot(TUPLE_SAMPLES[i][2], 5)
            self.assertEqual(seen[0][0].tolist(), pos_i)
            self.assertEqual(seen[1][0].tolist(), cam_i)
            self.assertEqual(seen[2][0].tolist(), task_i)

    def testNestedDictGym(self):
        self.doTestNestedDict(lambda _: NestedDictEnv())

    def testNestedDictVector(self):
        self.doTestNestedDict(
            lambda _: VectorEnv.wrap(lambda i: NestedDictEnv()))

    def testNestedDictServing(self):
        self.doTestNestedDict(lambda _: SimpleServing(NestedDictEnv()))

    def testNestedDictAsync(self):
        # Raw (unwrapped) Dict spaces are not supported through the async
        # interface and must raise.
        self.assertRaisesRegexp(
            ValueError, "Found raw Dict space.*",
            lambda: self.doTestNestedDict(
                lambda _: AsyncVectorEnv.wrap_async(NestedDictEnv())))

    def testNestedTupleGym(self):
        self.doTestNestedTuple(lambda _: NestedTupleEnv())

    def testNestedTupleVector(self):
        self.doTestNestedTuple(
            lambda _: VectorEnv.wrap(lambda i: NestedTupleEnv()))

    def testNestedTupleServing(self):
        self.doTestNestedTuple(lambda _: SimpleServing(NestedTupleEnv()))

    def testNestedTupleAsync(self):
        # Raw (unwrapped) Tuple spaces are likewise rejected.
        self.assertRaisesRegexp(
            ValueError, "Found raw Tuple space.*",
            lambda: self.doTestNestedTuple(
                lambda _: AsyncVectorEnv.wrap_async(NestedTupleEnv())))
if __name__ == "__main__":
    # Local Ray cluster for the tests; 5 CPUs covers the PG agent workers.
    ray.init(num_cpus=5)
    unittest.main(verbosity=2)
| 33.012 | 77 | 0.592996 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pickle
from gym import spaces
from gym.envs.registration import EnvSpec
import gym
import tensorflow.contrib.slim as slim
import tensorflow as tf
import unittest
import ray
from ray.rllib.agents.pg import PGAgent
from ray.rllib.env.async_vector_env import AsyncVectorEnv
from ray.rllib.env.vector_env import VectorEnv
from ray.rllib.models import ModelCatalog
from ray.rllib.models.model import Model
from ray.rllib.test.test_serving_env import SimpleServing
from ray.tune.registry import register_env
DICT_SPACE = spaces.Dict({
"sensors": spaces.Dict({
"position": spaces.Box(low=-100, high=100, shape=(3, )),
"velocity": spaces.Box(low=-1, high=1, shape=(3, )),
"front_cam": spaces.Tuple(
(spaces.Box(low=0, high=1, shape=(10, 10, 3)),
spaces.Box(low=0, high=1, shape=(10, 10, 3)))),
"rear_cam": spaces.Box(low=0, high=1, shape=(10, 10, 3)),
}),
"inner_state": spaces.Dict({
"charge": spaces.Discrete(100),
"job_status": spaces.Dict({
"task": spaces.Discrete(5),
"progress": spaces.Box(low=0, high=100, shape=()),
})
})
})
DICT_SAMPLES = [DICT_SPACE.sample() for _ in range(10)]
TUPLE_SPACE = spaces.Tuple([
spaces.Box(low=-100, high=100, shape=(3, )),
spaces.Tuple((spaces.Box(low=0, high=1, shape=(10, 10, 3)),
spaces.Box(low=0, high=1, shape=(10, 10, 3)))),
spaces.Discrete(5),
])
TUPLE_SAMPLES = [TUPLE_SPACE.sample() for _ in range(10)]
def one_hot(i, n):
out = [0.0] * n
out[i] = 1.0
return out
class NestedDictEnv(gym.Env):
def __init__(self):
self.action_space = spaces.Discrete(2)
self.observation_space = DICT_SPACE
self._spec = EnvSpec("NestedDictEnv-v0")
self.steps = 0
def reset(self):
self.steps = 0
return DICT_SAMPLES[0]
def step(self, action):
self.steps += 1
return DICT_SAMPLES[self.steps], 1, self.steps >= 5, {}
class NestedTupleEnv(gym.Env):
def __init__(self):
self.action_space = spaces.Discrete(2)
self.observation_space = TUPLE_SPACE
self._spec = EnvSpec("NestedTupleEnv-v0")
self.steps = 0
def reset(self):
self.steps = 0
return TUPLE_SAMPLES[0]
def step(self, action):
self.steps += 1
return TUPLE_SAMPLES[self.steps], 1, self.steps >= 5, {}
class InvalidModel(Model):
def _build_layers_v2(self, input_dict, num_outputs, options):
return "not", "valid"
class DictSpyModel(Model):
capture_index = 0
def _build_layers_v2(self, input_dict, num_outputs, options):
def spy(pos, front_cam, task):
ray.experimental.internal_kv._internal_kv_put(
"d_spy_in_{}".format(DictSpyModel.capture_index),
pickle.dumps((pos, front_cam, task)))
DictSpyModel.capture_index += 1
return 0
spy_fn = tf.py_func(
spy, [
input_dict["obs"]["sensors"]["position"],
input_dict["obs"]["sensors"]["front_cam"][0],
input_dict["obs"]["inner_state"]["job_status"]["task"]
],
tf.int64,
stateful=True)
with tf.control_dependencies([spy_fn]):
output = slim.fully_connected(
input_dict["obs"]["sensors"]["position"], num_outputs)
return output, output
class TupleSpyModel(Model):
capture_index = 0
def _build_layers_v2(self, input_dict, num_outputs, options):
def spy(pos, cam, task):
ray.experimental.internal_kv._internal_kv_put(
"t_spy_in_{}".format(TupleSpyModel.capture_index),
pickle.dumps((pos, cam, task)))
TupleSpyModel.capture_index += 1
return 0
spy_fn = tf.py_func(
spy, [
input_dict["obs"][0],
input_dict["obs"][1][0],
input_dict["obs"][2],
],
tf.int64,
stateful=True)
with tf.control_dependencies([spy_fn]):
output = slim.fully_connected(input_dict["obs"][0], num_outputs)
return output, output
class NestedSpacesTest(unittest.TestCase):
def testInvalidModel(self):
ModelCatalog.register_custom_model("invalid", InvalidModel)
self.assertRaises(ValueError, lambda: PGAgent(
env="CartPole-v0", config={
"model": {
"custom_model": "invalid",
},
}))
def doTestNestedDict(self, make_env):
ModelCatalog.register_custom_model("composite", DictSpyModel)
register_env("nested", make_env)
pg = PGAgent(
env="nested",
config={
"num_workers": 0,
"sample_batch_size": 5,
"model": {
"custom_model": "composite",
},
})
pg.train()
for i in range(4):
seen = pickle.loads(
ray.experimental.internal_kv._internal_kv_get(
"d_spy_in_{}".format(i)))
pos_i = DICT_SAMPLES[i]["sensors"]["position"].tolist()
cam_i = DICT_SAMPLES[i]["sensors"]["front_cam"][0].tolist()
task_i = one_hot(
DICT_SAMPLES[i]["inner_state"]["job_status"]["task"], 5)
self.assertEqual(seen[0][0].tolist(), pos_i)
self.assertEqual(seen[1][0].tolist(), cam_i)
self.assertEqual(seen[2][0].tolist(), task_i)
def doTestNestedTuple(self, make_env):
ModelCatalog.register_custom_model("composite2", TupleSpyModel)
register_env("nested2", make_env)
pg = PGAgent(
env="nested2",
config={
"num_workers": 0,
"sample_batch_size": 5,
"model": {
"custom_model": "composite2",
},
})
pg.train()
for i in range(4):
seen = pickle.loads(
ray.experimental.internal_kv._internal_kv_get(
"t_spy_in_{}".format(i)))
pos_i = TUPLE_SAMPLES[i][0].tolist()
cam_i = TUPLE_SAMPLES[i][1][0].tolist()
task_i = one_hot(TUPLE_SAMPLES[i][2], 5)
self.assertEqual(seen[0][0].tolist(), pos_i)
self.assertEqual(seen[1][0].tolist(), cam_i)
self.assertEqual(seen[2][0].tolist(), task_i)
def testNestedDictGym(self):
self.doTestNestedDict(lambda _: NestedDictEnv())
def testNestedDictVector(self):
self.doTestNestedDict(
lambda _: VectorEnv.wrap(lambda i: NestedDictEnv()))
def testNestedDictServing(self):
self.doTestNestedDict(lambda _: SimpleServing(NestedDictEnv()))
def testNestedDictAsync(self):
self.assertRaisesRegexp(
ValueError, "Found raw Dict space.*",
lambda: self.doTestNestedDict(
lambda _: AsyncVectorEnv.wrap_async(NestedDictEnv())))
def testNestedTupleGym(self):
self.doTestNestedTuple(lambda _: NestedTupleEnv())
def testNestedTupleVector(self):
self.doTestNestedTuple(
lambda _: VectorEnv.wrap(lambda i: NestedTupleEnv()))
def testNestedTupleServing(self):
self.doTestNestedTuple(lambda _: SimpleServing(NestedTupleEnv()))
def testNestedTupleAsync(self):
self.assertRaisesRegexp(
ValueError, "Found raw Tuple space.*",
lambda: self.doTestNestedTuple(
lambda _: AsyncVectorEnv.wrap_async(NestedTupleEnv())))
if __name__ == "__main__":
ray.init(num_cpus=5)
unittest.main(verbosity=2)
| true | true |
f7f5f60ff05545d42c50ea3f9b168149ed27046b | 849 | py | Python | plugins/hypervisors/baremetal/resources/security_group_agent/security_group_agent/__init__.py | redbridge/cloudstack | 2218053fb11d501950e4beb80e9bee4ae472b5b4 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2015-05-19T05:04:30.000Z | 2016-09-07T00:33:17.000Z | plugins/hypervisors/baremetal/resources/security_group_agent/security_group_agent/__init__.py | redbridge/cloudstack | 2218053fb11d501950e4beb80e9bee4ae472b5b4 | [
"ECL-2.0",
"Apache-2.0"
] | 6 | 2020-11-16T20:46:14.000Z | 2022-02-01T01:06:16.000Z | plugins/hypervisors/baremetal/resources/security_group_agent/security_group_agent/__init__.py | redbridge/cloudstack | 2218053fb11d501950e4beb80e9bee4ae472b5b4 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2017-07-07T14:49:03.000Z | 2018-07-31T06:38:42.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Automatically generated by addcopyright.py at 01/29/2013
| 44.684211 | 62 | 0.771496 | true | true | |
f7f5f68d0463bf2c16e7e6eb15df13ada49da6f4 | 17,283 | py | Python | plugins/modules/oci_dns_steering_policy_attachment.py | sagar2938/oci-ansible-collection | 5b8ce583a0d5d0aabf14494d61aea4649e18d1e6 | [
"Apache-2.0"
] | null | null | null | plugins/modules/oci_dns_steering_policy_attachment.py | sagar2938/oci-ansible-collection | 5b8ce583a0d5d0aabf14494d61aea4649e18d1e6 | [
"Apache-2.0"
] | null | null | null | plugins/modules/oci_dns_steering_policy_attachment.py | sagar2938/oci-ansible-collection | 5b8ce583a0d5d0aabf14494d61aea4649e18d1e6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata consumed by ansible-doc / the collection
# build; "preview" marks the module interface as not yet stable.
ANSIBLE_METADATA = {
    "metadata_version": "1.1",
    "status": ["preview"],
    "supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_dns_steering_policy_attachment
short_description: Manage a SteeringPolicyAttachment resource in Oracle Cloud Infrastructure
description:
- This module allows the user to create, update and delete a SteeringPolicyAttachment resource in Oracle Cloud Infrastructure
- For I(state=present), creates a new attachment between a steering policy and a domain, giving the
policy permission to answer queries for the specified domain. A steering policy must
be attached to a domain for the policy to answer DNS queries for that domain.
- For the purposes of access control, the attachment is automatically placed
into the same compartment as the domain's zone.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
steering_policy_id:
description:
- The OCID of the attached steering policy.
- Required for create using I(state=present).
type: str
zone_id:
description:
- The OCID of the attached zone.
- Required for create using I(state=present).
type: str
domain_name:
description:
- The attached domain within the attached zone.
- Required for create using I(state=present).
type: str
display_name:
description:
- A user-friendly name for the steering policy attachment.
Does not have to be unique and can be changed.
Avoid entering confidential information.
- Required for create, update, delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- This parameter is updatable when C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["name"]
scope:
description:
- Specifies to operate only on resources that have a matching DNS scope.
- This parameter is updatable.
type: str
choices:
- "GLOBAL"
- "PRIVATE"
steering_policy_attachment_id:
description:
- The OCID of the target steering policy attachment.
- Required for update using I(state=present) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
- Required for delete using I(state=absent) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["id"]
if_unmodified_since:
description:
- The `If-Unmodified-Since` header field makes the request method
conditional on the selected representation's last modification date being
earlier than or equal to the date provided in the field-value. This
field accomplishes the same purpose as If-Match for cases where the user
agent does not have an entity-tag for the representation.
- This parameter is updatable.
type: str
compartment_id:
description:
- The OCID of the compartment the resource belongs to.
- Required for create using I(state=present).
- Required for update when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- Required for delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
type: str
state:
description:
- The state of the SteeringPolicyAttachment.
- Use I(state=present) to create or update a SteeringPolicyAttachment.
- Use I(state=absent) to delete a SteeringPolicyAttachment.
type: str
required: false
default: 'present'
choices: ["present", "absent"]
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Create steering_policy_attachment
oci_dns_steering_policy_attachment:
# required
steering_policy_id: "ocid1.dnspolicy.oc1.."
zone_id: "ocid1.dns-zone.oc1.."
domain_name: example.com
# optional
display_name: attached to example
scope: GLOBAL
- name: Update steering_policy_attachment
oci_dns_steering_policy_attachment:
# required
steering_policy_attachment_id: "ocid1.steeringpolicyattachment.oc1..xxxxxxEXAMPLExxxxxx"
# optional
display_name: attached to example
scope: GLOBAL
if_unmodified_since: if_unmodified_since_example
- name: Update steering_policy_attachment using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_dns_steering_policy_attachment:
# required
display_name: attached to example
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
# optional
scope: GLOBAL
if_unmodified_since: if_unmodified_since_example
- name: Delete steering_policy_attachment
oci_dns_steering_policy_attachment:
# required
steering_policy_attachment_id: "ocid1.steeringpolicyattachment.oc1..xxxxxxEXAMPLExxxxxx"
state: absent
# optional
scope: GLOBAL
if_unmodified_since: if_unmodified_since_example
- name: Delete steering_policy_attachment using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_dns_steering_policy_attachment:
# required
display_name: attached to example
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
state: absent
"""
RETURN = """
steering_policy_attachment:
description:
- Details of the SteeringPolicyAttachment resource acted upon by the current operation
returned: on success
type: complex
contains:
steering_policy_id:
description:
- The OCID of the attached steering policy.
returned: on success
type: str
sample: "ocid1.steeringpolicy.oc1..xxxxxxEXAMPLExxxxxx"
zone_id:
description:
- The OCID of the attached zone.
returned: on success
type: str
sample: "ocid1.zone.oc1..xxxxxxEXAMPLExxxxxx"
domain_name:
description:
- The attached domain within the attached zone.
returned: on success
type: str
sample: domain_name_example
display_name:
description:
- A user-friendly name for the steering policy attachment.
Does not have to be unique and can be changed.
Avoid entering confidential information.
returned: on success
type: str
sample: display_name_example
rtypes:
description:
- The record types covered by the attachment at the domain. The set of record types is
determined by aggregating the record types from the answers defined in the steering
policy.
returned: on success
type: list
sample: []
compartment_id:
description:
- The OCID of the compartment containing the steering policy attachment.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
_self:
description:
- The canonical absolute URL of the resource.
returned: on success
type: str
sample: _self_example
id:
description:
- The OCID of the resource.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
time_created:
description:
- The date and time the resource was created, expressed in RFC 3339 timestamp format.
- "**Example:** `2016-07-22T17:23:59:60Z`"
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
lifecycle_state:
description:
- The current state of the resource.
returned: on success
type: str
sample: CREATING
sample: {
"steering_policy_id": "ocid1.steeringpolicy.oc1..xxxxxxEXAMPLExxxxxx",
"zone_id": "ocid1.zone.oc1..xxxxxxEXAMPLExxxxxx",
"domain_name": "domain_name_example",
"display_name": "display_name_example",
"rtypes": [],
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"_self": "_self_example",
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"time_created": "2013-10-20T19:20:30+01:00",
"lifecycle_state": "CREATING"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceHelperBase,
get_custom_class,
)
try:
from oci.dns import DnsClient
from oci.dns.models import CreateSteeringPolicyAttachmentDetails
from oci.dns.models import UpdateSteeringPolicyAttachmentDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class SteeringPolicyAttachmentHelperGen(OCIResourceHelperBase):
    """Supported operations: create, update, get, list and delete"""

    def get_module_resource_id_param(self):
        # Name of the module parameter carrying the attachment OCID.
        return "steering_policy_attachment_id"

    def get_module_resource_id(self):
        # OCID of the targeted attachment, or None when creating by name.
        return self.module.params.get("steering_policy_attachment_id")

    def get_get_fn(self):
        # SDK function used by the base class to fetch a single resource.
        return self.client.get_steering_policy_attachment

    def get_resource(self):
        # Fetch the attachment; the optional DNS "scope" (GLOBAL/PRIVATE)
        # is only forwarded when the user actually set it.
        optional_params = [
            "scope",
        ]
        optional_kwargs = dict(
            (param, self.module.params[param])
            for param in optional_params
            if self.module.params.get(param) is not None
        )
        return oci_common_utils.call_with_backoff(
            self.client.get_steering_policy_attachment,
            steering_policy_attachment_id=self.module.params.get(
                "steering_policy_attachment_id"
            ),
            **optional_kwargs
        )

    def get_required_kwargs_for_list(self):
        # Listing attachments always requires the compartment OCID.
        required_list_method_params = [
            "compartment_id",
        ]
        return dict(
            (param, self.module.params[param]) for param in required_list_method_params
        )

    def get_optional_kwargs_for_list(self):
        # Optional list filters; when matching by name they are always
        # applied, otherwise only the ones named in "key_by" are used.
        optional_list_method_params = [
            "display_name",
            "steering_policy_id",
            "zone_id",
            "scope",
        ]
        return dict(
            (param, self.module.params[param])
            for param in optional_list_method_params
            if self.module.params.get(param) is not None
            and (
                self._use_name_as_identifier()
                or (
                    not self.module.params.get("key_by")
                    or param in self.module.params.get("key_by")
                )
            )
        )

    def list_resources(self):
        # Page through all attachments matching the required + optional
        # filters.
        required_kwargs = self.get_required_kwargs_for_list()
        optional_kwargs = self.get_optional_kwargs_for_list()
        kwargs = oci_common_utils.merge_dicts(required_kwargs, optional_kwargs)
        return oci_common_utils.list_all_resources(
            self.client.list_steering_policy_attachments, **kwargs
        )

    def get_create_model_class(self):
        # SDK model built from module params for the create call.
        return CreateSteeringPolicyAttachmentDetails

    def create_resource(self):
        create_details = self.get_create_model()
        # "scope" is a query parameter, not part of the create model.
        optional_enum_params = [
            "scope",
        ]
        optional_enum_kwargs = dict(
            (param, self.module.params[param])
            for param in optional_enum_params
            if self.module.params.get(param) is not None
        )
        # Issue the create and wait on the resource lifecycle state.
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.create_steering_policy_attachment,
            call_fn_args=(),
            call_fn_kwargs=dict(
                create_steering_policy_attachment_details=create_details,
                **optional_enum_kwargs
            ),
            waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
            operation=oci_common_utils.CREATE_OPERATION_KEY,
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=self.get_wait_for_states_for_operation(
                oci_common_utils.CREATE_OPERATION_KEY,
            ),
        )

    def get_update_model_class(self):
        # SDK model built from module params for the update call.
        return UpdateSteeringPolicyAttachmentDetails

    def update_resource(self):
        update_details = self.get_update_model()
        optional_enum_params = [
            "scope",
        ]
        optional_enum_kwargs = dict(
            (param, self.module.params[param])
            for param in optional_enum_params
            if self.module.params.get(param) is not None
        )
        # if_unmodified_since makes the update conditional on the resource
        # not having changed since the given timestamp.
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.update_steering_policy_attachment,
            call_fn_args=(),
            call_fn_kwargs=dict(
                steering_policy_attachment_id=self.module.params.get(
                    "steering_policy_attachment_id"
                ),
                update_steering_policy_attachment_details=update_details,
                if_unmodified_since=self.module.params.get("if_unmodified_since"),
                **optional_enum_kwargs
            ),
            waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
            operation=oci_common_utils.UPDATE_OPERATION_KEY,
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=self.get_wait_for_states_for_operation(
                oci_common_utils.UPDATE_OPERATION_KEY,
            ),
        )

    def delete_resource(self):
        optional_enum_params = [
            "scope",
        ]
        optional_enum_kwargs = dict(
            (param, self.module.params[param])
            for param in optional_enum_params
            if self.module.params.get(param) is not None
        )
        # Conditional delete (if_unmodified_since) with lifecycle-state wait.
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.delete_steering_policy_attachment,
            call_fn_args=(),
            call_fn_kwargs=dict(
                steering_policy_attachment_id=self.module.params.get(
                    "steering_policy_attachment_id"
                ),
                if_unmodified_since=self.module.params.get("if_unmodified_since"),
                **optional_enum_kwargs
            ),
            waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
            operation=oci_common_utils.DELETE_OPERATION_KEY,
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=self.get_wait_for_states_for_operation(
                oci_common_utils.DELETE_OPERATION_KEY,
            ),
        )
SteeringPolicyAttachmentHelperCustom = get_custom_class(
"SteeringPolicyAttachmentHelperCustom"
)
class ResourceHelper(
    SteeringPolicyAttachmentHelperCustom, SteeringPolicyAttachmentHelperGen
):
    """Concrete helper: custom overrides (listed first) take MRO precedence
    over the generated base implementation."""
    pass
def main():
    """Module entry point: build the argument spec, then dispatch the
    requested state change (create/update/delete) and exit with its result."""
    module_args = oci_common_utils.get_common_arg_spec(
        supports_create=True, supports_wait=True
    )
    module_args.update(
        dict(
            steering_policy_id=dict(type="str"),
            zone_id=dict(type="str"),
            domain_name=dict(type="str"),
            display_name=dict(aliases=["name"], type="str"),
            scope=dict(type="str", choices=["GLOBAL", "PRIVATE"]),
            steering_policy_attachment_id=dict(aliases=["id"], type="str"),
            if_unmodified_since=dict(type="str"),
            compartment_id=dict(type="str"),
            state=dict(type="str", default="present", choices=["present", "absent"]),
        )
    )
    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")
    helper = ResourceHelper(
        module=module,
        resource_type="steering_policy_attachment",
        service_client_class=DnsClient,
        namespace="dns",
    )
    # Name-based variants are checked before id-based ones; create is last.
    result = dict(changed=False)
    if helper.is_delete_using_name():
        result = helper.delete_using_name()
    elif helper.is_delete():
        result = helper.delete()
    elif helper.is_update_using_name():
        result = helper.update_using_name()
    elif helper.is_update():
        result = helper.update()
    elif helper.is_create():
        result = helper.create()
    module.exit_json(**result)
# Run only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| 36.308824 | 129 | 0.648556 |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Metadata consumed by Ansible tooling (not read by the module at runtime).
ANSIBLE_METADATA = {
    "metadata_version": "1.1",
    "status": ["preview"],
    "supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_dns_steering_policy_attachment
short_description: Manage a SteeringPolicyAttachment resource in Oracle Cloud Infrastructure
description:
- This module allows the user to create, update and delete a SteeringPolicyAttachment resource in Oracle Cloud Infrastructure
- For I(state=present), creates a new attachment between a steering policy and a domain, giving the
policy permission to answer queries for the specified domain. A steering policy must
be attached to a domain for the policy to answer DNS queries for that domain.
- For the purposes of access control, the attachment is automatically placed
into the same compartment as the domain's zone.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
steering_policy_id:
description:
- The OCID of the attached steering policy.
- Required for create using I(state=present).
type: str
zone_id:
description:
- The OCID of the attached zone.
- Required for create using I(state=present).
type: str
domain_name:
description:
- The attached domain within the attached zone.
- Required for create using I(state=present).
type: str
display_name:
description:
- A user-friendly name for the steering policy attachment.
Does not have to be unique and can be changed.
Avoid entering confidential information.
- Required for create, update, delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- This parameter is updatable when C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["name"]
scope:
description:
- Specifies to operate only on resources that have a matching DNS scope.
- This parameter is updatable.
type: str
choices:
- "GLOBAL"
- "PRIVATE"
steering_policy_attachment_id:
description:
- The OCID of the target steering policy attachment.
- Required for update using I(state=present) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
- Required for delete using I(state=absent) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["id"]
if_unmodified_since:
description:
- The `If-Unmodified-Since` header field makes the request method
conditional on the selected representation's last modification date being
earlier than or equal to the date provided in the field-value. This
field accomplishes the same purpose as If-Match for cases where the user
agent does not have an entity-tag for the representation.
- This parameter is updatable.
type: str
compartment_id:
description:
- The OCID of the compartment the resource belongs to.
- Required for create using I(state=present).
- Required for update when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- Required for delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
type: str
state:
description:
- The state of the SteeringPolicyAttachment.
- Use I(state=present) to create or update a SteeringPolicyAttachment.
- Use I(state=absent) to delete a SteeringPolicyAttachment.
type: str
required: false
default: 'present'
choices: ["present", "absent"]
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Create steering_policy_attachment
oci_dns_steering_policy_attachment:
# required
steering_policy_id: "ocid1.dnspolicy.oc1.."
zone_id: "ocid1.dns-zone.oc1.."
domain_name: example.com
# optional
display_name: attached to example
scope: GLOBAL
- name: Update steering_policy_attachment
oci_dns_steering_policy_attachment:
# required
steering_policy_attachment_id: "ocid1.steeringpolicyattachment.oc1..xxxxxxEXAMPLExxxxxx"
# optional
display_name: attached to example
scope: GLOBAL
if_unmodified_since: if_unmodified_since_example
- name: Update steering_policy_attachment using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_dns_steering_policy_attachment:
# required
display_name: attached to example
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
# optional
scope: GLOBAL
if_unmodified_since: if_unmodified_since_example
- name: Delete steering_policy_attachment
oci_dns_steering_policy_attachment:
# required
steering_policy_attachment_id: "ocid1.steeringpolicyattachment.oc1..xxxxxxEXAMPLExxxxxx"
state: absent
# optional
scope: GLOBAL
if_unmodified_since: if_unmodified_since_example
- name: Delete steering_policy_attachment using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_dns_steering_policy_attachment:
# required
display_name: attached to example
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
state: absent
"""
RETURN = """
steering_policy_attachment:
description:
- Details of the SteeringPolicyAttachment resource acted upon by the current operation
returned: on success
type: complex
contains:
steering_policy_id:
description:
- The OCID of the attached steering policy.
returned: on success
type: str
sample: "ocid1.steeringpolicy.oc1..xxxxxxEXAMPLExxxxxx"
zone_id:
description:
- The OCID of the attached zone.
returned: on success
type: str
sample: "ocid1.zone.oc1..xxxxxxEXAMPLExxxxxx"
domain_name:
description:
- The attached domain within the attached zone.
returned: on success
type: str
sample: domain_name_example
display_name:
description:
- A user-friendly name for the steering policy attachment.
Does not have to be unique and can be changed.
Avoid entering confidential information.
returned: on success
type: str
sample: display_name_example
rtypes:
description:
- The record types covered by the attachment at the domain. The set of record types is
determined by aggregating the record types from the answers defined in the steering
policy.
returned: on success
type: list
sample: []
compartment_id:
description:
- The OCID of the compartment containing the steering policy attachment.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
_self:
description:
- The canonical absolute URL of the resource.
returned: on success
type: str
sample: _self_example
id:
description:
- The OCID of the resource.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
time_created:
description:
- The date and time the resource was created, expressed in RFC 3339 timestamp format.
- "**Example:** `2016-07-22T17:23:59:60Z`"
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
lifecycle_state:
description:
- The current state of the resource.
returned: on success
type: str
sample: CREATING
sample: {
"steering_policy_id": "ocid1.steeringpolicy.oc1..xxxxxxEXAMPLExxxxxx",
"zone_id": "ocid1.zone.oc1..xxxxxxEXAMPLExxxxxx",
"domain_name": "domain_name_example",
"display_name": "display_name_example",
"rtypes": [],
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"_self": "_self_example",
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"time_created": "2013-10-20T19:20:30+01:00",
"lifecycle_state": "CREATING"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceHelperBase,
get_custom_class,
)
# The OCI python SDK is an optional dependency: record its availability so
# main() can fail with a clear message instead of an ImportError at load time.
try:
    from oci.dns import DnsClient
    from oci.dns.models import CreateSteeringPolicyAttachmentDetails
    from oci.dns.models import UpdateSteeringPolicyAttachmentDetails
    HAS_OCI_PY_SDK = True
except ImportError:
    HAS_OCI_PY_SDK = False
class SteeringPolicyAttachmentHelperGen(OCIResourceHelperBase):
    """Generated helper supplying get/list/create/update/delete plumbing for
    the DNS steering policy attachment resource."""
    def get_module_resource_id_param(self):
        """Name of the module parameter carrying the resource OCID."""
        return "steering_policy_attachment_id"
    def get_module_resource_id(self):
        """The resource OCID supplied by the user (or None)."""
        return self.module.params.get("steering_policy_attachment_id")
    def get_get_fn(self):
        """Client function used to fetch a single resource."""
        return self.client.get_steering_policy_attachment
    def get_resource(self):
        """Fetch the attachment by OCID, retrying with backoff."""
        optional_params = [
            "scope",
        ]
        # Only forward optional parameters the user actually provided.
        optional_kwargs = dict(
            (param, self.module.params[param])
            for param in optional_params
            if self.module.params.get(param) is not None
        )
        return oci_common_utils.call_with_backoff(
            self.client.get_steering_policy_attachment,
            steering_policy_attachment_id=self.module.params.get(
                "steering_policy_attachment_id"
            ),
            **optional_kwargs
        )
    def get_required_kwargs_for_list(self):
        """Kwargs that must always be passed to the list call."""
        required_list_method_params = [
            "compartment_id",
        ]
        return dict(
            (param, self.module.params[param]) for param in required_list_method_params
        )
    def get_optional_kwargs_for_list(self):
        """Optional list filters; an explicit ``key_by`` selection restricts
        which filters apply unless matching resources by display name."""
        optional_list_method_params = [
            "display_name",
            "steering_policy_id",
            "zone_id",
            "scope",
        ]
        return dict(
            (param, self.module.params[param])
            for param in optional_list_method_params
            if self.module.params.get(param) is not None
            and (
                self._use_name_as_identifier()
                or (
                    not self.module.params.get("key_by")
                    or param in self.module.params.get("key_by")
                )
            )
        )
    def list_resources(self):
        """List attachments in the compartment, following pagination."""
        required_kwargs = self.get_required_kwargs_for_list()
        optional_kwargs = self.get_optional_kwargs_for_list()
        kwargs = oci_common_utils.merge_dicts(required_kwargs, optional_kwargs)
        return oci_common_utils.list_all_resources(
            self.client.list_steering_policy_attachments, **kwargs
        )
    def get_create_model_class(self):
        """SDK model class used to build the create payload."""
        return CreateSteeringPolicyAttachmentDetails
    def create_resource(self):
        """Create the attachment and wait for a terminal lifecycle state."""
        create_details = self.get_create_model()
        optional_enum_params = [
            "scope",
        ]
        optional_enum_kwargs = dict(
            (param, self.module.params[param])
            for param in optional_enum_params
            if self.module.params.get(param) is not None
        )
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.create_steering_policy_attachment,
            call_fn_args=(),
            call_fn_kwargs=dict(
                create_steering_policy_attachment_details=create_details,
                **optional_enum_kwargs
            ),
            waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
            operation=oci_common_utils.CREATE_OPERATION_KEY,
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=self.get_wait_for_states_for_operation(
                oci_common_utils.CREATE_OPERATION_KEY,
            ),
        )
    def get_update_model_class(self):
        """SDK model class used to build the update payload."""
        return UpdateSteeringPolicyAttachmentDetails
    def update_resource(self):
        """Update the attachment and wait for a terminal lifecycle state."""
        update_details = self.get_update_model()
        optional_enum_params = [
            "scope",
        ]
        optional_enum_kwargs = dict(
            (param, self.module.params[param])
            for param in optional_enum_params
            if self.module.params.get(param) is not None
        )
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.update_steering_policy_attachment,
            call_fn_args=(),
            call_fn_kwargs=dict(
                steering_policy_attachment_id=self.module.params.get(
                    "steering_policy_attachment_id"
                ),
                update_steering_policy_attachment_details=update_details,
                if_unmodified_since=self.module.params.get("if_unmodified_since"),
                **optional_enum_kwargs
            ),
            waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
            operation=oci_common_utils.UPDATE_OPERATION_KEY,
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=self.get_wait_for_states_for_operation(
                oci_common_utils.UPDATE_OPERATION_KEY,
            ),
        )
    def delete_resource(self):
        """Delete the attachment and wait for a terminal lifecycle state."""
        optional_enum_params = [
            "scope",
        ]
        optional_enum_kwargs = dict(
            (param, self.module.params[param])
            for param in optional_enum_params
            if self.module.params.get(param) is not None
        )
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.delete_steering_policy_attachment,
            call_fn_args=(),
            call_fn_kwargs=dict(
                steering_policy_attachment_id=self.module.params.get(
                    "steering_policy_attachment_id"
                ),
                if_unmodified_since=self.module.params.get("if_unmodified_since"),
                **optional_enum_kwargs
            ),
            waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
            operation=oci_common_utils.DELETE_OPERATION_KEY,
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=self.get_wait_for_states_for_operation(
                oci_common_utils.DELETE_OPERATION_KEY,
            ),
        )
# NOTE(review): presumably returns a user-registered customization class, or a
# no-op base when none exists — confirm in oci_resource_utils.get_custom_class.
SteeringPolicyAttachmentHelperCustom = get_custom_class(
    "SteeringPolicyAttachmentHelperCustom"
)
class ResourceHelper(
    SteeringPolicyAttachmentHelperCustom, SteeringPolicyAttachmentHelperGen
):
    """Concrete helper: custom overrides (listed first) take MRO precedence
    over the generated base implementation."""
    pass
def main():
    """Module entry point: build the argument spec, then dispatch the
    requested state change (create/update/delete) and exit with its result."""
    module_args = oci_common_utils.get_common_arg_spec(
        supports_create=True, supports_wait=True
    )
    module_args.update(
        dict(
            steering_policy_id=dict(type="str"),
            zone_id=dict(type="str"),
            domain_name=dict(type="str"),
            display_name=dict(aliases=["name"], type="str"),
            scope=dict(type="str", choices=["GLOBAL", "PRIVATE"]),
            steering_policy_attachment_id=dict(aliases=["id"], type="str"),
            if_unmodified_since=dict(type="str"),
            compartment_id=dict(type="str"),
            state=dict(type="str", default="present", choices=["present", "absent"]),
        )
    )
    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")
    helper = ResourceHelper(
        module=module,
        resource_type="steering_policy_attachment",
        service_client_class=DnsClient,
        namespace="dns",
    )
    # Name-based variants are checked before id-based ones; create is last.
    result = dict(changed=False)
    if helper.is_delete_using_name():
        result = helper.delete_using_name()
    elif helper.is_delete():
        result = helper.delete()
    elif helper.is_update_using_name():
        result = helper.update_using_name()
    elif helper.is_update():
        result = helper.update()
    elif helper.is_create():
        result = helper.create()
    module.exit_json(**result)
# Run only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| true | true |
f7f5f6eaae516b2a98bd07ca9376aebaf2aff1c1 | 492 | py | Python | src/logger.py | PJUllrich/bunq-pinsparen | 6f22b1d5c1c66533007f3823e628f9014ee88cdc | [
"MIT"
] | 3 | 2018-05-15T05:39:27.000Z | 2018-07-07T17:45:38.000Z | src/logger.py | PJUllrich/bunq-pinsparen | 6f22b1d5c1c66533007f3823e628f9014ee88cdc | [
"MIT"
] | 1 | 2021-06-01T22:21:05.000Z | 2021-06-01T22:21:05.000Z | src/logger.py | PJUllrich/bunq-pinsparen | 6f22b1d5c1c66533007f3823e628f9014ee88cdc | [
"MIT"
] | null | null | null | import logging
def setup_logger(filename):
    """Configure the root logger with a DEBUG-level file handler and an
    INFO-level console handler.

    Args:
        filename: path of the log file that receives DEBUG (and higher)
            records.

    Returns:
        The configured root logger.

    Note: calling this more than once adds duplicate handlers.
    """
    logger = logging.getLogger()
    # Bug fix: the logger itself must be at DEBUG, otherwise records below
    # INFO are dropped before they ever reach the DEBUG-level file handler
    # (the previous INFO level made fh.setLevel(DEBUG) ineffective).
    logger.setLevel(logging.DEBUG)
    fh = logging.FileHandler(filename)
    fh.setLevel(logging.DEBUG)
    sh = logging.StreamHandler()
    sh.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    sh.setFormatter(formatter)
    logger.addHandler(fh)
    logger.addHandler(sh)
    logger.info('Logger is created.')
    return logger
| 22.363636 | 89 | 0.689024 | import logging
def setup_logger(filename):
    """Configure the root logger with a DEBUG-level file handler and an
    INFO-level console handler.

    Args:
        filename: path of the log file that receives DEBUG (and higher)
            records.

    Returns:
        The configured root logger.

    Note: calling this more than once adds duplicate handlers.
    """
    logger = logging.getLogger()
    # Bug fix: the logger itself must be at DEBUG, otherwise records below
    # INFO are dropped before they ever reach the DEBUG-level file handler
    # (the previous INFO level made fh.setLevel(DEBUG) ineffective).
    logger.setLevel(logging.DEBUG)
    fh = logging.FileHandler(filename)
    fh.setLevel(logging.DEBUG)
    sh = logging.StreamHandler()
    sh.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    sh.setFormatter(formatter)
    logger.addHandler(fh)
    logger.addHandler(sh)
    logger.info('Logger is created.')
    return logger
| true | true |
f7f5f70b80620a4ed543fe9dcb18197caced5576 | 11,274 | py | Python | trac/versioncontrol/tests/diff.py | haskell-infra/trac | 5e086948718893ede9965ea4ce3625da32676730 | [
"BSD-3-Clause"
] | null | null | null | trac/versioncontrol/tests/diff.py | haskell-infra/trac | 5e086948718893ede9965ea4ce3625da32676730 | [
"BSD-3-Clause"
] | 1 | 2019-03-20T19:13:53.000Z | 2019-08-14T20:15:09.000Z | trac/versioncontrol/tests/diff.py | haskell-infra/trac | 5e086948718893ede9965ea4ce3625da32676730 | [
"BSD-3-Clause"
] | 2 | 2019-03-20T01:23:30.000Z | 2019-12-06T16:13:07.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2013 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from trac.versioncontrol import diff
import unittest
def get_opcodes(*args, **kwargs):
    """Yield every opcode from the filtered hunks as one flat stream."""
    hunks = diff.get_filtered_hunks(*args, **kwargs)
    for hunk in hunks:
        for op in hunk:
            yield op
class DiffTestCase(unittest.TestCase):
    """Unit tests for trac.versioncontrol.diff: change extents, hunk
    filtering/grouping options and intra-line change markup."""
    def testget_change_extent(self):
        self.assertEqual((3, 0), diff.get_change_extent('xxx', 'xxx'))
        self.assertEqual((0, 0), diff.get_change_extent('', 'xxx'))
        self.assertEqual((0, 0), diff.get_change_extent('xxx', ''))
        self.assertEqual((0, 0), diff.get_change_extent('xxx', 'yyy'))
        self.assertEqual((1, -1), diff.get_change_extent('xxx', 'xyx'))
        self.assertEqual((1, -1), diff.get_change_extent('xxx', 'xyyyx'))
        self.assertEqual((1, 0), diff.get_change_extent('xy', 'xzz'))
        self.assertEqual((1, -1), diff.get_change_extent('xyx', 'xzzx'))
        self.assertEqual((1, -1), diff.get_change_extent('xzzx', 'xyx'))
    def test_insert_blank_line(self):
        opcodes = get_opcodes(['A', 'B'], ['A', 'B', ''], ignore_blank_lines=0)
        self.assertEqual(('equal', 0, 2, 0, 2), opcodes.next())
        self.assertEqual(('insert', 2, 2, 2, 3), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
        opcodes = get_opcodes(['A', 'B'], ['A', 'B', ''], ignore_blank_lines=1)
        self.assertEqual(('equal', 0, 2, 0, 3), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
        opcodes = get_opcodes(['A'], ['A', 'B', ''], ignore_blank_lines=0)
        self.assertEqual(('equal', 0, 1, 0, 1), opcodes.next())
        self.assertEqual(('insert', 1, 1, 1, 3), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
        opcodes = get_opcodes(['A'], ['A', 'B', ''], ignore_blank_lines=1)
        self.assertEqual(('equal', 0, 1, 0, 1), opcodes.next())
        self.assertEqual(('insert', 1, 1, 1, 3), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
    def test_delete_blank_line(self):
        opcodes = get_opcodes(['A', 'B', ''], ['A', 'B'], ignore_blank_lines=0)
        self.assertEqual(('equal', 0, 2, 0, 2), opcodes.next())
        self.assertEqual(('delete', 2, 3, 2, 2), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
        opcodes = get_opcodes(['A', 'B', ''], ['A', 'B'], ignore_blank_lines=1)
        self.assertEqual(('equal', 0, 3, 0, 2), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
        opcodes = get_opcodes(['A', 'B', ''], ['A'], ignore_blank_lines=0)
        self.assertEqual(('equal', 0, 1, 0, 1), opcodes.next())
        self.assertEqual(('delete', 1, 3, 1, 1), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
        opcodes = get_opcodes(['A', 'B', ''], ['A'], ignore_blank_lines=1)
        self.assertEqual(('equal', 0, 1, 0, 1), opcodes.next())
        self.assertEqual(('delete', 1, 3, 1, 1), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
    def test_space_changes(self):
        opcodes = get_opcodes(['A', 'B b'], ['A', 'B  b'],
                              ignore_space_changes=0)
        self.assertEqual(('equal', 0, 1, 0, 1), opcodes.next())
        self.assertEqual(('replace', 1, 2, 1, 2), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
        opcodes = get_opcodes(['A', 'B b'], ['A', 'B  b'],
                              ignore_space_changes=1)
        self.assertEqual(('equal', 0, 2, 0, 2), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
    def test_space_changes_2(self):
        left = """\
try:
    try:
        func()
        commit()
    except:
        rollback()
finally:
    cleanup()
"""
        left = left.splitlines()
        right = """\
try:
    func()
    commit()
except:
    rollback()
finally:
    cleanup()
"""
        right = right.splitlines()
        opcodes = get_opcodes(left, right, ignore_space_changes=0)
        self.assertEqual(('equal', 0, 1, 0, 1), opcodes.next())
        self.assertEqual(('replace', 1, 6, 1, 5), opcodes.next())
        self.assertEqual(('equal', 6, 8, 5, 7), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
        opcodes = get_opcodes(left, right, ignore_space_changes=1)
        self.assertEqual(('equal', 0, 1, 0, 1), opcodes.next())
        self.assertEqual(('delete', 1, 2, 1, 1), opcodes.next())
        self.assertEqual(('equal', 2, 4, 1, 3), opcodes.next())
        self.assertEqual(('replace', 4, 5, 3, 4), opcodes.next())
        self.assertEqual(('equal', 5, 8, 4, 7), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
    def test_case_changes(self):
        opcodes = get_opcodes(['A', 'B b'], ['A', 'B B'], ignore_case=0)
        self.assertEqual(('equal', 0, 1, 0, 1), opcodes.next())
        self.assertEqual(('replace', 1, 2, 1, 2), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
        opcodes = get_opcodes(['A', 'B b'], ['A', 'B B'], ignore_case=1)
        self.assertEqual(('equal', 0, 2, 0, 2), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
    def test_space_and_case_changes(self):
        opcodes = get_opcodes(['A', 'B b'], ['A', 'B  B'],
                              ignore_case=0, ignore_space_changes=0)
        self.assertEqual(('equal', 0, 1, 0, 1), opcodes.next())
        self.assertEqual(('replace', 1, 2, 1, 2), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
        opcodes = get_opcodes(['A', 'B b'], ['A', 'B  B'],
                              ignore_case=1, ignore_space_changes=1)
        self.assertEqual(('equal', 0, 2, 0, 2), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
    def test_grouped_opcodes_context1(self):
        groups = diff.get_filtered_hunks(
            ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'],
            ['A', 'B', 'C', 'd', 'e', 'f', 'G', 'H'], context=1)
        group = groups.next()
        self.assertRaises(StopIteration, groups.next)
        self.assertEqual(('equal', 2, 3, 2, 3), group[0])
        self.assertEqual(('replace', 3, 6, 3, 6), group[1])
        self.assertEqual(('equal', 6, 7, 6, 7), group[2])
    def test_grouped_opcodes_context1_ignorecase(self):
        old = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
        new = ['X', 'B', 'C', 'd', 'e', 'f', 'G', 'Y']
        groups = diff.get_filtered_hunks(old, new, context=1, ignore_case=1)
        group = groups.next()
        self.assertEqual([('replace', 0, 1, 0, 1), ('equal', 1, 2, 1, 2)],
                         group)
        group = groups.next()
        self.assertRaises(StopIteration, groups.next)
        self.assertEqual([('equal', 6, 7, 6, 7), ('replace', 7, 8, 7, 8)],
                         group)
    def test_grouped_opcodes_full_context(self):
        old = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
        new = ['X', 'B', 'C', 'd', 'e', 'f', 'G', 'Y']
        groups = diff.get_filtered_hunks(old, new, context=None)
        group = groups.next()
        self.assertRaises(StopIteration, groups.next)
        self.assertEqual([
            ('replace', 0, 1, 0, 1),
            ('equal', 1, 3, 1, 3),
            ('replace', 3, 6, 3, 6),
            ('equal', 6, 7, 6, 7),
            ('replace', 7, 8, 7, 8),
        ], group)
        groups = diff.get_filtered_hunks(old, new, context=None, ignore_case=1)
        group = groups.next()
        self.assertRaises(StopIteration, groups.next)
        self.assertEqual([
            ('replace', 0, 1, 0, 1),
            ('equal', 1, 7, 1, 7),
            ('replace', 7, 8, 7, 8),
        ], group)
    def test_grouped_opcodes_insert_blank_line_at_top(self):
        """
        Regression test for #2090. Make sure that the equal block following an
        insert at the top of a file is correct.
        """
        groups = diff.get_filtered_hunks(['B', 'C', 'D', 'E', 'F', 'G'],
                                         ['A', 'B', 'C', 'D', 'E', 'F', 'G'],
                                         context=3)
        self.assertEqual([('insert', 0, 0, 0, 1), ('equal', 0, 3, 1, 4)],
                         groups.next())
        self.assertRaises(StopIteration, groups.next)
    def test_unified_diff_no_context(self):
        diff_lines = list(diff.unified_diff(['a'], ['b']))
        self.assertEqual(['@@ -1,1 +1,1 @@', '-a', '+b'], diff_lines)
    def test_quotes_not_marked_up(self):
        """Make sure that the escape calls leave quotes alone, we don't need
        to escape them."""
        changes = diff.diff_blocks(['ab'], ['a"b'])
        self.assertEqual(len(changes), 1)
        blocks = changes[0]
        self.assertEqual(len(blocks), 1)
        block = blocks[0]
        self.assertEqual(block['type'], 'mod')
        self.assertEqual(str(block['base']['lines'][0]), 'a<del></del>b')
        self.assertEqual(str(block['changed']['lines'][0]), 'a<ins>"</ins>b')
    def test_whitespace_marked_up1(self):
        """Regression test for #5795"""
        changes = diff.diff_blocks(['*a'], [' *a'])
        block = changes[0][0]
        self.assertEqual(block['type'], 'mod')
        self.assertEqual(str(block['base']['lines'][0]), '<del></del>*a')
        self.assertEqual(str(block['changed']['lines'][0]),
                         '<ins> </ins>*a')
    def test_whitespace_marked_up2(self):
        """Related to #5795"""
        changes = diff.diff_blocks([' a'], [' b'])
        block = changes[0][0]
        self.assertEqual(block['type'], 'mod')
        self.assertEqual(str(block['base']['lines'][0]),
                         ' <del>a</del>')
        self.assertEqual(str(block['changed']['lines'][0]),
                         ' <ins>b</ins>')
    def test_whitespace_marked_up3(self):
        """Related to #5795"""
        changes = diff.diff_blocks(['a '], ['b '])
        block = changes[0][0]
        self.assertEqual(block['type'], 'mod')
        self.assertEqual(str(block['base']['lines'][0]),
                         '<del>a</del> ')
        self.assertEqual(str(block['changed']['lines'][0]),
                         '<ins>b</ins> ')
    def test_expandtabs_works_right(self):
        """Regression test for #4557"""
        changes = diff.diff_blocks(['aa\tb'], ['aaxb'])
        block = changes[0][0]
        self.assertEqual(block['type'], 'mod')
        self.assertEqual(str(block['base']['lines'][0]),
                         'aa<del>      </del>b')
        self.assertEqual(str(block['changed']['lines'][0]),
                         'aa<ins>x</ins>b')
def test_suite():
    """Collect every DiffTestCase test into a suite (same behavior as the
    deprecated unittest.makeSuite)."""
    return unittest.TestLoader().loadTestsFromTestCase(DiffTestCase)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| 42.86692 | 79 | 0.553131 |
from trac.versioncontrol import diff
import unittest
def get_opcodes(*args, **kwargs):
    """Yield every opcode from the filtered hunks as one flat stream."""
    hunks = diff.get_filtered_hunks(*args, **kwargs)
    for hunk in hunks:
        for op in hunk:
            yield op
class DiffTestCase(unittest.TestCase):
    """Unit tests for trac.versioncontrol.diff: change extents, hunk
    filtering/grouping options and intra-line change markup."""
    def testget_change_extent(self):
        self.assertEqual((3, 0), diff.get_change_extent('xxx', 'xxx'))
        self.assertEqual((0, 0), diff.get_change_extent('', 'xxx'))
        self.assertEqual((0, 0), diff.get_change_extent('xxx', ''))
        self.assertEqual((0, 0), diff.get_change_extent('xxx', 'yyy'))
        self.assertEqual((1, -1), diff.get_change_extent('xxx', 'xyx'))
        self.assertEqual((1, -1), diff.get_change_extent('xxx', 'xyyyx'))
        self.assertEqual((1, 0), diff.get_change_extent('xy', 'xzz'))
        self.assertEqual((1, -1), diff.get_change_extent('xyx', 'xzzx'))
        self.assertEqual((1, -1), diff.get_change_extent('xzzx', 'xyx'))
    def test_insert_blank_line(self):
        opcodes = get_opcodes(['A', 'B'], ['A', 'B', ''], ignore_blank_lines=0)
        self.assertEqual(('equal', 0, 2, 0, 2), opcodes.next())
        self.assertEqual(('insert', 2, 2, 2, 3), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
        opcodes = get_opcodes(['A', 'B'], ['A', 'B', ''], ignore_blank_lines=1)
        self.assertEqual(('equal', 0, 2, 0, 3), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
        opcodes = get_opcodes(['A'], ['A', 'B', ''], ignore_blank_lines=0)
        self.assertEqual(('equal', 0, 1, 0, 1), opcodes.next())
        self.assertEqual(('insert', 1, 1, 1, 3), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
        opcodes = get_opcodes(['A'], ['A', 'B', ''], ignore_blank_lines=1)
        self.assertEqual(('equal', 0, 1, 0, 1), opcodes.next())
        self.assertEqual(('insert', 1, 1, 1, 3), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
    def test_delete_blank_line(self):
        opcodes = get_opcodes(['A', 'B', ''], ['A', 'B'], ignore_blank_lines=0)
        self.assertEqual(('equal', 0, 2, 0, 2), opcodes.next())
        self.assertEqual(('delete', 2, 3, 2, 2), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
        opcodes = get_opcodes(['A', 'B', ''], ['A', 'B'], ignore_blank_lines=1)
        self.assertEqual(('equal', 0, 3, 0, 2), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
        opcodes = get_opcodes(['A', 'B', ''], ['A'], ignore_blank_lines=0)
        self.assertEqual(('equal', 0, 1, 0, 1), opcodes.next())
        self.assertEqual(('delete', 1, 3, 1, 1), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
        opcodes = get_opcodes(['A', 'B', ''], ['A'], ignore_blank_lines=1)
        self.assertEqual(('equal', 0, 1, 0, 1), opcodes.next())
        self.assertEqual(('delete', 1, 3, 1, 1), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
    def test_space_changes(self):
        opcodes = get_opcodes(['A', 'B b'], ['A', 'B  b'],
                              ignore_space_changes=0)
        self.assertEqual(('equal', 0, 1, 0, 1), opcodes.next())
        self.assertEqual(('replace', 1, 2, 1, 2), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
        opcodes = get_opcodes(['A', 'B b'], ['A', 'B  b'],
                              ignore_space_changes=1)
        self.assertEqual(('equal', 0, 2, 0, 2), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
    def test_space_changes_2(self):
        left = """\
try:
    try:
        func()
        commit()
    except:
        rollback()
finally:
    cleanup()
"""
        left = left.splitlines()
        right = """\
try:
    func()
    commit()
except:
    rollback()
finally:
    cleanup()
"""
        right = right.splitlines()
        opcodes = get_opcodes(left, right, ignore_space_changes=0)
        self.assertEqual(('equal', 0, 1, 0, 1), opcodes.next())
        self.assertEqual(('replace', 1, 6, 1, 5), opcodes.next())
        self.assertEqual(('equal', 6, 8, 5, 7), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
        opcodes = get_opcodes(left, right, ignore_space_changes=1)
        self.assertEqual(('equal', 0, 1, 0, 1), opcodes.next())
        self.assertEqual(('delete', 1, 2, 1, 1), opcodes.next())
        self.assertEqual(('equal', 2, 4, 1, 3), opcodes.next())
        self.assertEqual(('replace', 4, 5, 3, 4), opcodes.next())
        self.assertEqual(('equal', 5, 8, 4, 7), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
    def test_case_changes(self):
        opcodes = get_opcodes(['A', 'B b'], ['A', 'B B'], ignore_case=0)
        self.assertEqual(('equal', 0, 1, 0, 1), opcodes.next())
        self.assertEqual(('replace', 1, 2, 1, 2), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
        opcodes = get_opcodes(['A', 'B b'], ['A', 'B B'], ignore_case=1)
        self.assertEqual(('equal', 0, 2, 0, 2), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
    def test_space_and_case_changes(self):
        opcodes = get_opcodes(['A', 'B b'], ['A', 'B  B'],
                              ignore_case=0, ignore_space_changes=0)
        self.assertEqual(('equal', 0, 1, 0, 1), opcodes.next())
        self.assertEqual(('replace', 1, 2, 1, 2), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
        opcodes = get_opcodes(['A', 'B b'], ['A', 'B  B'],
                              ignore_case=1, ignore_space_changes=1)
        self.assertEqual(('equal', 0, 2, 0, 2), opcodes.next())
        self.assertRaises(StopIteration, opcodes.next)
    def test_grouped_opcodes_context1(self):
        groups = diff.get_filtered_hunks(
            ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'],
            ['A', 'B', 'C', 'd', 'e', 'f', 'G', 'H'], context=1)
        group = groups.next()
        self.assertRaises(StopIteration, groups.next)
        self.assertEqual(('equal', 2, 3, 2, 3), group[0])
        self.assertEqual(('replace', 3, 6, 3, 6), group[1])
        self.assertEqual(('equal', 6, 7, 6, 7), group[2])
    def test_grouped_opcodes_context1_ignorecase(self):
        old = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
        new = ['X', 'B', 'C', 'd', 'e', 'f', 'G', 'Y']
        groups = diff.get_filtered_hunks(old, new, context=1, ignore_case=1)
        group = groups.next()
        self.assertEqual([('replace', 0, 1, 0, 1), ('equal', 1, 2, 1, 2)],
                         group)
        group = groups.next()
        self.assertRaises(StopIteration, groups.next)
        self.assertEqual([('equal', 6, 7, 6, 7), ('replace', 7, 8, 7, 8)],
                         group)
    def test_grouped_opcodes_full_context(self):
        old = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
        new = ['X', 'B', 'C', 'd', 'e', 'f', 'G', 'Y']
        groups = diff.get_filtered_hunks(old, new, context=None)
        group = groups.next()
        self.assertRaises(StopIteration, groups.next)
        self.assertEqual([
            ('replace', 0, 1, 0, 1),
            ('equal', 1, 3, 1, 3),
            ('replace', 3, 6, 3, 6),
            ('equal', 6, 7, 6, 7),
            ('replace', 7, 8, 7, 8),
        ], group)
        groups = diff.get_filtered_hunks(old, new, context=None, ignore_case=1)
        group = groups.next()
        self.assertRaises(StopIteration, groups.next)
        self.assertEqual([
            ('replace', 0, 1, 0, 1),
            ('equal', 1, 7, 1, 7),
            ('replace', 7, 8, 7, 8),
        ], group)
    def test_grouped_opcodes_insert_blank_line_at_top(self):
        """Regression test for #2090: the equal block following an insert at
        the top of a file must be correct."""
        groups = diff.get_filtered_hunks(['B', 'C', 'D', 'E', 'F', 'G'],
                                         ['A', 'B', 'C', 'D', 'E', 'F', 'G'],
                                         context=3)
        self.assertEqual([('insert', 0, 0, 0, 1), ('equal', 0, 3, 1, 4)],
                         groups.next())
        self.assertRaises(StopIteration, groups.next)
    def test_unified_diff_no_context(self):
        diff_lines = list(diff.unified_diff(['a'], ['b']))
        self.assertEqual(['@@ -1,1 +1,1 @@', '-a', '+b'], diff_lines)
    def test_quotes_not_marked_up(self):
        """The escape calls must leave quotes alone; no escaping needed."""
        changes = diff.diff_blocks(['ab'], ['a"b'])
        self.assertEqual(len(changes), 1)
        blocks = changes[0]
        self.assertEqual(len(blocks), 1)
        block = blocks[0]
        self.assertEqual(block['type'], 'mod')
        self.assertEqual(str(block['base']['lines'][0]), 'a<del></del>b')
        self.assertEqual(str(block['changed']['lines'][0]), 'a<ins>"</ins>b')
    def test_whitespace_marked_up1(self):
        """Regression test for #5795."""
        changes = diff.diff_blocks(['*a'], [' *a'])
        block = changes[0][0]
        self.assertEqual(block['type'], 'mod')
        self.assertEqual(str(block['base']['lines'][0]), '<del></del>*a')
        self.assertEqual(str(block['changed']['lines'][0]),
                         '<ins> </ins>*a')
    def test_whitespace_marked_up2(self):
        """Related to #5795."""
        changes = diff.diff_blocks([' a'], [' b'])
        block = changes[0][0]
        self.assertEqual(block['type'], 'mod')
        self.assertEqual(str(block['base']['lines'][0]),
                         ' <del>a</del>')
        self.assertEqual(str(block['changed']['lines'][0]),
                         ' <ins>b</ins>')
    def test_whitespace_marked_up3(self):
        """Related to #5795."""
        changes = diff.diff_blocks(['a '], ['b '])
        block = changes[0][0]
        self.assertEqual(block['type'], 'mod')
        self.assertEqual(str(block['base']['lines'][0]),
                         '<del>a</del> ')
        self.assertEqual(str(block['changed']['lines'][0]),
                         '<ins>b</ins> ')
    def test_expandtabs_works_right(self):
        """Regression test for #4557."""
        changes = diff.diff_blocks(['aa\tb'], ['aaxb'])
        block = changes[0][0]
        self.assertEqual(block['type'], 'mod')
        self.assertEqual(str(block['base']['lines'][0]),
                         'aa<del>      </del>b')
        self.assertEqual(str(block['changed']['lines'][0]),
                         'aa<ins>x</ins>b')
def test_suite():
return unittest.makeSuite(DiffTestCase)
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| true | true |
f7f5f799ce97fc0833ffb356c643821b217b8978 | 473 | py | Python | tests/Pyro4-4.17/tests/run_suite.py | nelmiux/CS347-Data_Management | 1e9d87097b5a373f9312b0d6b413198e495fd6c0 | [
"CNRI-Jython"
] | null | null | null | tests/Pyro4-4.17/tests/run_suite.py | nelmiux/CS347-Data_Management | 1e9d87097b5a373f9312b0d6b413198e495fd6c0 | [
"CNRI-Jython"
] | null | null | null | tests/Pyro4-4.17/tests/run_suite.py | nelmiux/CS347-Data_Management | 1e9d87097b5a373f9312b0d6b413198e495fd6c0 | [
"CNRI-Jython"
] | null | null | null | """
Run the complete test suite.
This requires nose and coverage to be installed.
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
import sys
import os
import nose
dirname = os.path.dirname(__file__)
if dirname:
print("chdir to "+dirname)
os.chdir(dirname)
sys.path.insert(0,"../src") # add Pyro source directory
nose.main(argv=["noserunner", "--cover-erase","--with-coverage","--cover-package=Pyro4", "--with-xunit"])
| 21.5 | 105 | 0.699789 |
import sys
import os
import nose
dirname = os.path.dirname(__file__)
if dirname:
print("chdir to "+dirname)
os.chdir(dirname)
sys.path.insert(0,"../src")
nose.main(argv=["noserunner", "--cover-erase","--with-coverage","--cover-package=Pyro4", "--with-xunit"])
| true | true |
f7f5f7eca844e35718ef9a9eae00cd58cee25d96 | 300 | py | Python | edgeql_queries/typing.py | refi64/edgeql-queries | f4734545a250f6e8718a9f1da08d22dcd04605c5 | [
"BSD-2-Clause-FreeBSD"
] | 8 | 2020-04-23T14:24:02.000Z | 2022-03-24T22:57:32.000Z | edgeql_queries/typing.py | refi64/edgeql-queries | f4734545a250f6e8718a9f1da08d22dcd04605c5 | [
"BSD-2-Clause-FreeBSD"
] | 160 | 2020-04-11T00:43:55.000Z | 2022-03-20T12:20:02.000Z | edgeql_queries/typing.py | refi64/edgeql-queries | f4734545a250f6e8718a9f1da08d22dcd04605c5 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2022-03-19T16:49:43.000Z | 2022-03-19T16:49:43.000Z | """Definition for aliases for complex types from typing."""
from typing import Dict, Union
from edgeql_queries.models import Query
# mypy has limitation on understanding cyclic types
# https://github.com/python/mypy/issues/7069
QueriesTree = Dict[str, Union[Query, "QueriesTree"]] # type: ignore
| 30 | 68 | 0.77 |
from typing import Dict, Union
from edgeql_queries.models import Query
QueriesTree = Dict[str, Union[Query, "QueriesTree"]]
| true | true |
f7f5f80a953a80ba62f329d6e30bba461774ca45 | 947 | py | Python | model/prediction.py | Ajithsj96/RealTimeFRuseDeep | 50dd67856d178e7f2540922e8328241b9c79dbc0 | [
"MIT"
] | 275 | 2017-01-02T16:20:14.000Z | 2022-03-22T20:55:52.000Z | model/prediction.py | jamalmahfuz/Face-Recognition | a5ebfc1137bc3e8530ffa4423842d3b2ba13954a | [
"MIT"
] | 14 | 2017-05-29T09:33:53.000Z | 2019-03-09T19:01:56.000Z | model/prediction.py | jamalmahfuz/Face-Recognition | a5ebfc1137bc3e8530ffa4423842d3b2ba13954a | [
"MIT"
] | 125 | 2017-01-08T11:24:10.000Z | 2021-12-08T13:32:43.000Z | import argparse
import feature_utility as fu
import myVGG
import cv2
import numpy as np
parser = argparse.ArgumentParser(description=("Testing Prediction"))
parser.add_argument('--image', help=('Input an image to test model prediction'))
parser.add_argument('--dataset', help=('Input a directory to test model prediction'))
args = parser.parse_args()
def main():
model = myVGG.VGG_16('my_model_weights.h5')
if args.image is not None:
print ('Image Prediction Mode')
img = fu.preprocessing(cv2.imread(args.image))
X = np.expand_dims(img, axis=0)
X = np.expand_dims(X, axis=0)
result = model.predict(X)
print (result)
return
elif args.dataset is not None:
print ("Directory Prediction Mode")
X, y = fu.extract_features(args.dataset)
scores = model.evaluate(X, y, verbose=0)
print (scores)
return
if __name__ == "__main__":
main()
| 27.852941 | 85 | 0.659979 | import argparse
import feature_utility as fu
import myVGG
import cv2
import numpy as np
parser = argparse.ArgumentParser(description=("Testing Prediction"))
parser.add_argument('--image', help=('Input an image to test model prediction'))
parser.add_argument('--dataset', help=('Input a directory to test model prediction'))
args = parser.parse_args()
def main():
model = myVGG.VGG_16('my_model_weights.h5')
if args.image is not None:
print ('Image Prediction Mode')
img = fu.preprocessing(cv2.imread(args.image))
X = np.expand_dims(img, axis=0)
X = np.expand_dims(X, axis=0)
result = model.predict(X)
print (result)
return
elif args.dataset is not None:
print ("Directory Prediction Mode")
X, y = fu.extract_features(args.dataset)
scores = model.evaluate(X, y, verbose=0)
print (scores)
return
if __name__ == "__main__":
main()
| true | true |
f7f5f92d30e182bcda0915196f2c853e00370e73 | 5,597 | py | Python | frl/data.py | ThirstyScholar/trading-bitcoin-with-reinforcement-learning | e2163e954b1f5f656c49fbfb560ddd4635548a91 | [
"MIT"
] | 99 | 2018-05-16T11:51:26.000Z | 2022-01-23T16:37:25.000Z | frl/data.py | ThirstyScholar/BTCwithRL | e2163e954b1f5f656c49fbfb560ddd4635548a91 | [
"MIT"
] | 7 | 2018-07-31T17:40:16.000Z | 2022-03-04T15:36:52.000Z | frl/data.py | ThirstyScholar/BTCwithRL | e2163e954b1f5f656c49fbfb560ddd4635548a91 | [
"MIT"
] | 33 | 2018-07-17T18:42:58.000Z | 2022-03-21T17:46:13.000Z | import numpy as np
import pandas as pd
class Data(object):
def __init__(self, csv_path):
self.csv_path = csv_path
self.data = pd.read_csv(csv_path) # read CSV into DataFrame
self.feat = None
def __len__(self):
return len(self.data)
def remove_outlier(self):
idx = pd.datetime(2017, 4, 15, 23)
self.data.drop(index=idx, inplace=True)
self.feat.drop(index=idx, inplace=True)
def preprocess(self):
"""
Step 1. Create datetime index and select datatime range
Step 2. Drop columns 'Timestamp', 'Volume_(Currency)' and 'Weighted_Price'
Step 3. Rename 'Volume_(BTC)' as 'Volume'
Step 4. Resample to 15-minute bars and drop NaN values
:return: None
"""
# Step 1
self.data.index = pd.to_datetime(self.data['Timestamp'], unit='s')
self.data = self.data.loc[self.data.index < pd.datetime(2017, 7, 1)]
# Step 2
self.data.drop(['Timestamp', 'Volume_(Currency)', 'Weighted_Price'], axis=1, inplace=True)
# Step 3
self.data.rename(columns={'Volume_(BTC)': 'Volume'}, inplace=True)
# Step 4
self.data = self.data.groupby(pd.Grouper(freq='15Min')).aggregate({
'Open': 'first',
'High': 'max',
'Low': 'min',
'Close': 'last',
'Volume': 'sum'
})
self.data.dropna(inplace=True)
def extract_feature(self):
"""
Step 1. Create an empty feature DataFrame
Step 2. Calculate features
Step 3. Drop rows with NaN values
Step 4. Remove outlier
:return: None
"""
# Step 1
self.feat = pd.DataFrame(index=self.data.index)
# Step 2
cls = self.data['Close']
vol = self.data['Volume']
np_cls = np.log(cls)
self.feat['r'] = np_cls.diff()
self.feat['r_1'] = self.feat['r'].shift(1)
self.feat['r_2'] = self.feat['r'].shift(2)
r = self.feat['r']
self.feat['rZ12'] = Data.zscore(r, 12)
self.feat['rZ96'] = Data.zscore(r, 96)
self.feat['pma12'] = Data.zscore(Data.ser2ma_ret(cls, 12), 96)
self.feat['pma96'] = Data.zscore(Data.ser2ma_ret(cls, 96), 96)
self.feat['pma672'] = Data.zscore(Data.ser2ma_ret(cls, 672), 96)
self.feat['ma4/36'] = Data.zscore(Data.ma2ma_ret(cls, 4, 36), 96)
self.feat['ma12/96'] = Data.zscore(Data.ma2ma_ret(cls, 12, 96), 96)
self.feat['ac12/12'] = Data.zscore(Data.acceleration(cls, 12, 12), 96)
self.feat['ac96/96'] = Data.zscore(Data.acceleration(cls, 96, 12), 96)
self.feat['vZ12'] = Data.zscore(vol, 12)
self.feat['vZ96'] = Data.zscore(vol, 96)
self.feat['vZ672'] = Data.zscore(vol, 672)
self.feat['vma12'] = Data.zscore(Data.ser2ma_ret(vol, 12), 96)
self.feat['vma96'] = Data.zscore(Data.ser2ma_ret(vol, 96), 96)
self.feat['vma672'] = Data.zscore(Data.ser2ma_ret(vol, 672), 96)
vola_12 = Data.roll_std(r, 12) # 12-period volatility
vola_96 = Data.roll_std(r, 96)
vola_672 = Data.roll_std(r, 672)
self.feat['vol12'] = Data.zscore(vola_12, 96)
self.feat['vol96'] = Data.zscore(vola_96, 96)
self.feat['vol672'] = Data.zscore(vola_672, 96)
self.feat['dv12/96'] = Data.zscore(Data.ser2ma_ret(vola_12, 96), 96)
self.feat['dv96/672'] = Data.zscore(Data.ser2ma_ret(vola_96, 672), 96)
# Step 3
self.feat.dropna(inplace=True)
self.data = self.data.loc[self.feat.index] # select data where feat are available
# Step 4
self.remove_outlier()
@staticmethod
def roll_mean(s, window):
"""
:param s: Pandas Series
:param window: int
:return: Pandas Series
"""
return s.rolling(window).mean()
@staticmethod
def roll_std(s, window):
"""
:param s: Pandas Series
:param window: int
:return: Pandas Series
"""
return s.rolling(window).std()
@staticmethod
def zscore(s, window):
"""
:param s: Pandas Series
:param window: int
:return: Pandas Series
"""
roll_mean = s.rolling(window).mean()
roll_std = s.rolling(window).std()
return (s - roll_mean) / (roll_std + 1e-6)
@staticmethod
def ser2ma_ret(s, window):
"""
Series-to-Moving Average return.
:param s: Pandas Series
:param window: int
:return: Pandas Series
"""
roll_mean = s.rolling(window).mean()
return (s - roll_mean) - 1
@staticmethod
def ma2ma_ret(s, window_1, window_2):
"""
Series-to-series return.
:param s: Pandas Series
:param window_1: int
:param window_2: int
:return: Pandas Series
"""
return s.rolling(window_1).mean() / s.rolling(window_2).mean() - 1
@staticmethod
def acceleration(s, window_1, window_2):
"""
See the definition from the original post "https://launchpad.ai/blog/trading-bitcoin"
:param s: Pandas Series
:param window_1: int
:param window_2: int
:return: Pandas Series
"""
tmp = s / s.rolling(window_1).mean()
return tmp / tmp.rolling(window_2).mean()
def test_data():
data_path = './bitcoin-historical-data/coinbaseUSD_1-min_data.csv'
data = Data(data_path)
data.preprocess()
data.extract_feature()
if __name__ == '__main__':
test_data()
| 30.418478 | 98 | 0.571556 | import numpy as np
import pandas as pd
class Data(object):
def __init__(self, csv_path):
self.csv_path = csv_path
self.data = pd.read_csv(csv_path)
self.feat = None
def __len__(self):
return len(self.data)
def remove_outlier(self):
idx = pd.datetime(2017, 4, 15, 23)
self.data.drop(index=idx, inplace=True)
self.feat.drop(index=idx, inplace=True)
def preprocess(self):
self.data.index = pd.to_datetime(self.data['Timestamp'], unit='s')
self.data = self.data.loc[self.data.index < pd.datetime(2017, 7, 1)]
self.data.drop(['Timestamp', 'Volume_(Currency)', 'Weighted_Price'], axis=1, inplace=True)
self.data.rename(columns={'Volume_(BTC)': 'Volume'}, inplace=True)
self.data = self.data.groupby(pd.Grouper(freq='15Min')).aggregate({
'Open': 'first',
'High': 'max',
'Low': 'min',
'Close': 'last',
'Volume': 'sum'
})
self.data.dropna(inplace=True)
def extract_feature(self):
self.feat = pd.DataFrame(index=self.data.index)
cls = self.data['Close']
vol = self.data['Volume']
np_cls = np.log(cls)
self.feat['r'] = np_cls.diff()
self.feat['r_1'] = self.feat['r'].shift(1)
self.feat['r_2'] = self.feat['r'].shift(2)
r = self.feat['r']
self.feat['rZ12'] = Data.zscore(r, 12)
self.feat['rZ96'] = Data.zscore(r, 96)
self.feat['pma12'] = Data.zscore(Data.ser2ma_ret(cls, 12), 96)
self.feat['pma96'] = Data.zscore(Data.ser2ma_ret(cls, 96), 96)
self.feat['pma672'] = Data.zscore(Data.ser2ma_ret(cls, 672), 96)
self.feat['ma4/36'] = Data.zscore(Data.ma2ma_ret(cls, 4, 36), 96)
self.feat['ma12/96'] = Data.zscore(Data.ma2ma_ret(cls, 12, 96), 96)
self.feat['ac12/12'] = Data.zscore(Data.acceleration(cls, 12, 12), 96)
self.feat['ac96/96'] = Data.zscore(Data.acceleration(cls, 96, 12), 96)
self.feat['vZ12'] = Data.zscore(vol, 12)
self.feat['vZ96'] = Data.zscore(vol, 96)
self.feat['vZ672'] = Data.zscore(vol, 672)
self.feat['vma12'] = Data.zscore(Data.ser2ma_ret(vol, 12), 96)
self.feat['vma96'] = Data.zscore(Data.ser2ma_ret(vol, 96), 96)
self.feat['vma672'] = Data.zscore(Data.ser2ma_ret(vol, 672), 96)
vola_12 = Data.roll_std(r, 12)
vola_96 = Data.roll_std(r, 96)
vola_672 = Data.roll_std(r, 672)
self.feat['vol12'] = Data.zscore(vola_12, 96)
self.feat['vol96'] = Data.zscore(vola_96, 96)
self.feat['vol672'] = Data.zscore(vola_672, 96)
self.feat['dv12/96'] = Data.zscore(Data.ser2ma_ret(vola_12, 96), 96)
self.feat['dv96/672'] = Data.zscore(Data.ser2ma_ret(vola_96, 672), 96)
self.feat.dropna(inplace=True)
self.data = self.data.loc[self.feat.index]
self.remove_outlier()
@staticmethod
def roll_mean(s, window):
return s.rolling(window).mean()
@staticmethod
def roll_std(s, window):
return s.rolling(window).std()
@staticmethod
def zscore(s, window):
roll_mean = s.rolling(window).mean()
roll_std = s.rolling(window).std()
return (s - roll_mean) / (roll_std + 1e-6)
@staticmethod
def ser2ma_ret(s, window):
roll_mean = s.rolling(window).mean()
return (s - roll_mean) - 1
@staticmethod
def ma2ma_ret(s, window_1, window_2):
return s.rolling(window_1).mean() / s.rolling(window_2).mean() - 1
@staticmethod
def acceleration(s, window_1, window_2):
tmp = s / s.rolling(window_1).mean()
return tmp / tmp.rolling(window_2).mean()
def test_data():
data_path = './bitcoin-historical-data/coinbaseUSD_1-min_data.csv'
data = Data(data_path)
data.preprocess()
data.extract_feature()
if __name__ == '__main__':
test_data()
| true | true |
f7f5f94b8026b5e9c70f1d9432e2c1ee8576e32d | 3,321 | py | Python | python/8.web/2.Django/base_demo/base_demo/settings.py | lotapp/BaseCode | 0255f498e1fe67ed2b3f66c84c96e44ef1f7d320 | [
"Apache-2.0"
] | 25 | 2018-06-13T08:13:44.000Z | 2020-11-19T14:02:11.000Z | python/8.web/2.Django/base_demo/base_demo/settings.py | lotapp/BaseCode | 0255f498e1fe67ed2b3f66c84c96e44ef1f7d320 | [
"Apache-2.0"
] | null | null | null | python/8.web/2.Django/base_demo/base_demo/settings.py | lotapp/BaseCode | 0255f498e1fe67ed2b3f66c84c96e44ef1f7d320 | [
"Apache-2.0"
] | 13 | 2018-06-13T08:13:38.000Z | 2022-01-06T06:45:07.000Z | """
Django settings for base_demo project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(=nl3ovw%-1t4^8%x9ue(r41%k@6wju0nu&#o3n%27%@)#hc1j'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# 注册自己创建的应用
'users',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'base_demo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
# 'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'base_demo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME':
'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
# LANGUAGE_CODE = 'en-us'
# 使用中文(zh-hans可以这么记==>zh-汉'字')
LANGUAGE_CODE = 'zh-hans'
# TIME_ZONE = 'UTC'
# 设置中国时间
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| 26.357143 | 87 | 0.681421 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '(=nl3ovw%-1t4^8%x9ue(r41%k@6wju0nu&#o3n%27%@)#hc1j'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# 注册自己创建的应用
'users',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'base_demo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
# 'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'base_demo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME':
'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
# LANGUAGE_CODE = 'en-us'
# 使用中文(zh-hans可以这么记==>zh-汉'字')
LANGUAGE_CODE = 'zh-hans'
# TIME_ZONE = 'UTC'
# 设置中国时间
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| true | true |
f7f5f957f34a770e5dd6487886b6a08f91233a30 | 1,427 | py | Python | tinyms/app/__init__.py | xinwenh/tinyms | a7db052d6820ba1001b9a2be00b1fd108decd853 | [
"Apache-2.0"
] | null | null | null | tinyms/app/__init__.py | xinwenh/tinyms | a7db052d6820ba1001b9a2be00b1fd108decd853 | [
"Apache-2.0"
] | null | null | null | tinyms/app/__init__.py | xinwenh/tinyms | a7db052d6820ba1001b9a2be00b1fd108decd853 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
This module is to support vision visualization with opencv, which can help
developers use pre-trained models to predict and show the reasoning image fast.
Current it only supports object detection model.
"""
from . import object_detection
from .object_detection.object_detector import object_detection_predict, ObjectDetector
from .object_detection.utils.view_util import visualize_boxes_on_image, draw_boxes_on_image, save_image
from .object_detection.utils.config_util import load_and_parse_config
object_detection_utils = ['visualize_boxes_on_image', 'draw_boxes_on_image', 'save_image', 'load_and_parse_config']
__all__ = ['ObjectDetector', 'object_detection_predict']
__all__.extend(object_detection_utils)
__all__.extend(object_detection.__all__)
| 46.032258 | 115 | 0.766643 |
from . import object_detection
from .object_detection.object_detector import object_detection_predict, ObjectDetector
from .object_detection.utils.view_util import visualize_boxes_on_image, draw_boxes_on_image, save_image
from .object_detection.utils.config_util import load_and_parse_config
object_detection_utils = ['visualize_boxes_on_image', 'draw_boxes_on_image', 'save_image', 'load_and_parse_config']
__all__ = ['ObjectDetector', 'object_detection_predict']
__all__.extend(object_detection_utils)
__all__.extend(object_detection.__all__)
| true | true |
f7f5fa3abae8d535ea8b1869b40c521aa1ee888c | 352 | py | Python | Download/copy_bin.py | ShareCat/zigbee_offline_programmer | fe46200c5d74f88f6831524cc030ae36877c0d38 | [
"MIT"
] | 4 | 2022-01-26T05:57:14.000Z | 2022-03-03T08:11:14.000Z | Download/copy_bin.py | ShareCat/zigbee_offline_programmer | fe46200c5d74f88f6831524cc030ae36877c0d38 | [
"MIT"
] | null | null | null | Download/copy_bin.py | ShareCat/zigbee_offline_programmer | fe46200c5d74f88f6831524cc030ae36877c0d38 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# import shutil & os
import shutil,os
print("coping bin file...");
# copy the .bin file
shutil.copy(os.path.abspath(os.path.dirname(os.path.dirname(__file__)))+"\\Project\\RVMDK\\Output\\LHD8006_application.bin",os.path.abspath(os.path.dirname(__file__)))
print("copy done!");
# add "python ./../../../copy_bin.py" to MDK_keil
| 25.142857 | 167 | 0.698864 |
import shutil,os
print("coping bin file...");
shutil.copy(os.path.abspath(os.path.dirname(os.path.dirname(__file__)))+"\\Project\\RVMDK\\Output\\LHD8006_application.bin",os.path.abspath(os.path.dirname(__file__)))
print("copy done!");
| true | true |
f7f5faa049aa7d515608e843d764b36a7479491d | 568 | py | Python | front/migrations/0003_auto_20161024_1533.py | ahemery/ezreports | 9ff0f472ff0f0efd06c8315b99084bd723b0e2f9 | [
"Apache-2.0"
] | null | null | null | front/migrations/0003_auto_20161024_1533.py | ahemery/ezreports | 9ff0f472ff0f0efd06c8315b99084bd723b0e2f9 | [
"Apache-2.0"
] | null | null | null | front/migrations/0003_auto_20161024_1533.py | ahemery/ezreports | 9ff0f472ff0f0efd06c8315b99084bd723b0e2f9 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-24 13:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('front', '0002_lien_disabled'),
]
operations = [
migrations.AlterField(
model_name='lien',
name='ressource',
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='front.Ressource'),
),
]
| 25.818182 | 140 | 0.653169 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('front', '0002_lien_disabled'),
]
operations = [
migrations.AlterField(
model_name='lien',
name='ressource',
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='front.Ressource'),
),
]
| true | true |
f7f5fb1477e0d06a0238672c9d24e3e6d8693c3e | 6,251 | py | Python | test/implementation/implementation_autograd.py | jabader97/backpack | 089daafa0d611e13901fd7ecf8a0d708ce7a5928 | [
"MIT"
] | 395 | 2019-10-04T09:37:52.000Z | 2022-03-29T18:00:56.000Z | test/implementation/implementation_autograd.py | jabader97/backpack | 089daafa0d611e13901fd7ecf8a0d708ce7a5928 | [
"MIT"
] | 78 | 2019-10-11T18:56:43.000Z | 2022-03-23T01:49:54.000Z | test/implementation/implementation_autograd.py | jabader97/backpack | 089daafa0d611e13901fd7ecf8a0d708ce7a5928 | [
"MIT"
] | 50 | 2019-10-03T16:31:10.000Z | 2022-03-15T19:36:14.000Z | import torch
from backpack.hessianfree.ggnvp import ggn_vector_product_from_plist
from backpack.hessianfree.hvp import hessian_vector_product
from backpack.hessianfree.rop import R_op
from backpack.utils.convert_parameters import vector_to_parameter_list
from .implementation import Implementation
class AutogradImpl(Implementation):
def gradient(self):
return list(torch.autograd.grad(self.loss(), self.model.parameters()))
def batch_gradients(self):
batch_grads = [
torch.zeros(self.N, *p.size()).to(self.device)
for p in self.model.parameters()
]
for b in range(self.N):
gradients = torch.autograd.grad(self.loss(b), self.model.parameters())
for idx, g in enumerate(gradients):
batch_grads[idx][b, :] = g.detach() / self.N
return batch_grads
def batch_l2(self):
batch_grad = self.batch_gradients()
batch_l2 = [(g ** 2).sum(list(range(1, len(g.shape)))) for g in batch_grad]
return batch_l2
def variance(self):
batch_grad = self.batch_gradients()
variances = [torch.var(g, dim=0, unbiased=False) for g in batch_grad]
return variances
def sgs(self):
sgs = self.plist_like(self.model.parameters())
for b in range(self.N):
gradients = torch.autograd.grad(self.loss(b), self.model.parameters())
for idx, g in enumerate(gradients):
sgs[idx] += (g.detach() / self.N) ** 2
return sgs
def diag_ggn(self):
outputs = self.model(self.problem.X)
loss = self.problem.lossfunc(outputs, self.problem.Y)
def extract_ith_element_of_diag_ggn(i, p):
v = torch.zeros(p.numel()).to(self.device)
v[i] = 1.0
vs = vector_to_parameter_list(v, [p])
GGN_vs = ggn_vector_product_from_plist(loss, outputs, [p], vs)
GGN_v = torch.cat([g.detach().view(-1) for g in GGN_vs])
return GGN_v[i]
diag_ggns = []
for p in list(self.model.parameters()):
diag_ggn_p = torch.zeros_like(p).view(-1)
for parameter_index in range(p.numel()):
diag_value = extract_ith_element_of_diag_ggn(parameter_index, p)
diag_ggn_p[parameter_index] = diag_value
diag_ggns.append(diag_ggn_p.view(p.size()))
return diag_ggns
def diag_h(self):
loss = self.problem.lossfunc(self.model(self.problem.X), self.problem.Y)
def hvp(df_dx, x, v):
Hv = R_op(df_dx, x, v)
return [j.detach() for j in Hv]
def extract_ith_element_of_diag_h(i, p, df_dx):
v = torch.zeros(p.numel()).to(self.device)
v[i] = 1.0
vs = vector_to_parameter_list(v, [p])
Hvs = hvp(df_dx, [p], vs)
Hv = torch.cat([g.detach().view(-1) for g in Hvs])
return Hv[i]
diag_hs = []
for p in list(self.model.parameters()):
diag_h_p = torch.zeros_like(p).view(-1)
df_dx = torch.autograd.grad(loss, [p], create_graph=True, retain_graph=True)
for parameter_index in range(p.numel()):
diag_value = extract_ith_element_of_diag_h(parameter_index, p, df_dx)
diag_h_p[parameter_index] = diag_value
diag_hs.append(diag_h_p.view(p.size()))
return diag_hs
def h_blocks(self):
mat_list = []
for p in self.model.parameters():
mat_list.append(
torch.eye(p.numel(), device=p.device).reshape(p.numel(), *p.shape)
)
# return self.hmp(mat_list)
hmp_list = self.hmp(mat_list)
return [
mat.reshape(p.numel(), p.numel())
for mat, p in zip(hmp_list, self.model.parameters())
]
def hvp(self, vec_list):
mat_list = [vec.unsqueeze(0) for vec in vec_list]
results = self.hmp(mat_list)
results_vec = [mat.squeeze(0) for mat in results]
return results_vec
def hmp(self, mat_list):
assert len(mat_list) == len(list(self.model.parameters()))
loss = self.problem.lossfunc(self.model(self.problem.X), self.problem.Y)
results = []
for p, mat in zip(self.model.parameters(), mat_list):
results.append(self.hvp_applied_columnwise(loss, p, mat))
return results
def hvp_applied_columnwise(self, f, p, mat):
h_cols = []
for i in range(mat.size(0)):
hvp_col_i = hessian_vector_product(f, [p], mat[i, :])[0]
h_cols.append(hvp_col_i.unsqueeze(0))
return torch.cat(h_cols, dim=0)
def ggn_blocks(self):
mat_list = []
for p in self.model.parameters():
mat_list.append(
torch.eye(p.numel(), device=p.device).reshape(p.numel(), *p.shape)
)
ggn_mp_list = self.ggn_mp(mat_list)
return [
mat.reshape(p.numel(), p.numel())
for mat, p in zip(ggn_mp_list, self.model.parameters())
]
# return ggn_mp_list
def ggn_vp(self, vec_list):
mat_list = [vec.unsqueeze(0) for vec in vec_list]
results = self.ggn_mp(mat_list)
results_vec = [mat.squeeze(0) for mat in results]
return results_vec
def ggn_mp(self, mat_list):
assert len(mat_list) == len(list(self.model.parameters()))
outputs = self.model(self.problem.X)
loss = self.problem.lossfunc(outputs, self.problem.Y)
results = []
for p, mat in zip(self.model.parameters(), mat_list):
results.append(self.ggn_vp_applied_columnwise(loss, outputs, p, mat))
return results
def ggn_vp_applied_columnwise(self, loss, out, p, mat):
ggn_cols = []
for i in range(mat.size(0)):
col_i = mat[i, :]
GGN_col_i = ggn_vector_product_from_plist(loss, out, [p], col_i)[0]
ggn_cols.append(GGN_col_i.unsqueeze(0))
return torch.cat(ggn_cols, dim=0)
def plist_like(self, plist):
return [torch.zeros(*p.size()).to(self.device) for p in plist]
def parameter_numels(self):
return [p.numel() for p in self.model.parameters()]
| 33.789189 | 88 | 0.595265 | import torch
from backpack.hessianfree.ggnvp import ggn_vector_product_from_plist
from backpack.hessianfree.hvp import hessian_vector_product
from backpack.hessianfree.rop import R_op
from backpack.utils.convert_parameters import vector_to_parameter_list
from .implementation import Implementation
class AutogradImpl(Implementation):
def gradient(self):
return list(torch.autograd.grad(self.loss(), self.model.parameters()))
def batch_gradients(self):
batch_grads = [
torch.zeros(self.N, *p.size()).to(self.device)
for p in self.model.parameters()
]
for b in range(self.N):
gradients = torch.autograd.grad(self.loss(b), self.model.parameters())
for idx, g in enumerate(gradients):
batch_grads[idx][b, :] = g.detach() / self.N
return batch_grads
def batch_l2(self):
batch_grad = self.batch_gradients()
batch_l2 = [(g ** 2).sum(list(range(1, len(g.shape)))) for g in batch_grad]
return batch_l2
def variance(self):
batch_grad = self.batch_gradients()
variances = [torch.var(g, dim=0, unbiased=False) for g in batch_grad]
return variances
def sgs(self):
sgs = self.plist_like(self.model.parameters())
for b in range(self.N):
gradients = torch.autograd.grad(self.loss(b), self.model.parameters())
for idx, g in enumerate(gradients):
sgs[idx] += (g.detach() / self.N) ** 2
return sgs
def diag_ggn(self):
outputs = self.model(self.problem.X)
loss = self.problem.lossfunc(outputs, self.problem.Y)
def extract_ith_element_of_diag_ggn(i, p):
v = torch.zeros(p.numel()).to(self.device)
v[i] = 1.0
vs = vector_to_parameter_list(v, [p])
GGN_vs = ggn_vector_product_from_plist(loss, outputs, [p], vs)
GGN_v = torch.cat([g.detach().view(-1) for g in GGN_vs])
return GGN_v[i]
diag_ggns = []
for p in list(self.model.parameters()):
diag_ggn_p = torch.zeros_like(p).view(-1)
for parameter_index in range(p.numel()):
diag_value = extract_ith_element_of_diag_ggn(parameter_index, p)
diag_ggn_p[parameter_index] = diag_value
diag_ggns.append(diag_ggn_p.view(p.size()))
return diag_ggns
def diag_h(self):
loss = self.problem.lossfunc(self.model(self.problem.X), self.problem.Y)
def hvp(df_dx, x, v):
Hv = R_op(df_dx, x, v)
return [j.detach() for j in Hv]
def extract_ith_element_of_diag_h(i, p, df_dx):
v = torch.zeros(p.numel()).to(self.device)
v[i] = 1.0
vs = vector_to_parameter_list(v, [p])
Hvs = hvp(df_dx, [p], vs)
Hv = torch.cat([g.detach().view(-1) for g in Hvs])
return Hv[i]
diag_hs = []
for p in list(self.model.parameters()):
diag_h_p = torch.zeros_like(p).view(-1)
df_dx = torch.autograd.grad(loss, [p], create_graph=True, retain_graph=True)
for parameter_index in range(p.numel()):
diag_value = extract_ith_element_of_diag_h(parameter_index, p, df_dx)
diag_h_p[parameter_index] = diag_value
diag_hs.append(diag_h_p.view(p.size()))
return diag_hs
def h_blocks(self):
mat_list = []
for p in self.model.parameters():
mat_list.append(
torch.eye(p.numel(), device=p.device).reshape(p.numel(), *p.shape)
)
hmp_list = self.hmp(mat_list)
return [
mat.reshape(p.numel(), p.numel())
for mat, p in zip(hmp_list, self.model.parameters())
]
def hvp(self, vec_list):
mat_list = [vec.unsqueeze(0) for vec in vec_list]
results = self.hmp(mat_list)
results_vec = [mat.squeeze(0) for mat in results]
return results_vec
def hmp(self, mat_list):
assert len(mat_list) == len(list(self.model.parameters()))
loss = self.problem.lossfunc(self.model(self.problem.X), self.problem.Y)
results = []
for p, mat in zip(self.model.parameters(), mat_list):
results.append(self.hvp_applied_columnwise(loss, p, mat))
return results
def hvp_applied_columnwise(self, f, p, mat):
h_cols = []
for i in range(mat.size(0)):
hvp_col_i = hessian_vector_product(f, [p], mat[i, :])[0]
h_cols.append(hvp_col_i.unsqueeze(0))
return torch.cat(h_cols, dim=0)
def ggn_blocks(self):
mat_list = []
for p in self.model.parameters():
mat_list.append(
torch.eye(p.numel(), device=p.device).reshape(p.numel(), *p.shape)
)
ggn_mp_list = self.ggn_mp(mat_list)
return [
mat.reshape(p.numel(), p.numel())
for mat, p in zip(ggn_mp_list, self.model.parameters())
]
def ggn_vp(self, vec_list):
mat_list = [vec.unsqueeze(0) for vec in vec_list]
results = self.ggn_mp(mat_list)
results_vec = [mat.squeeze(0) for mat in results]
return results_vec
def ggn_mp(self, mat_list):
assert len(mat_list) == len(list(self.model.parameters()))
outputs = self.model(self.problem.X)
loss = self.problem.lossfunc(outputs, self.problem.Y)
results = []
for p, mat in zip(self.model.parameters(), mat_list):
results.append(self.ggn_vp_applied_columnwise(loss, outputs, p, mat))
return results
def ggn_vp_applied_columnwise(self, loss, out, p, mat):
ggn_cols = []
for i in range(mat.size(0)):
col_i = mat[i, :]
GGN_col_i = ggn_vector_product_from_plist(loss, out, [p], col_i)[0]
ggn_cols.append(GGN_col_i.unsqueeze(0))
return torch.cat(ggn_cols, dim=0)
def plist_like(self, plist):
return [torch.zeros(*p.size()).to(self.device) for p in plist]
def parameter_numels(self):
return [p.numel() for p in self.model.parameters()]
| true | true |
f7f5fb4f1fbf92e0e3cdde451db21747259e0df8 | 122 | py | Python | account/admin.py | HarshilShrivastava/TrueCaller-Backend | 8303fbd1ac6ddc79a50803da6b682deb5b9dddfe | [
"MIT"
] | null | null | null | account/admin.py | HarshilShrivastava/TrueCaller-Backend | 8303fbd1ac6ddc79a50803da6b682deb5b9dddfe | [
"MIT"
] | null | null | null | account/admin.py | HarshilShrivastava/TrueCaller-Backend | 8303fbd1ac6ddc79a50803da6b682deb5b9dddfe | [
"MIT"
] | null | null | null | from django.contrib import admin
from account.models import User
admin.site.register(User)
# Register your models here.
| 24.4 | 33 | 0.803279 | from django.contrib import admin
from account.models import User
admin.site.register(User)
| true | true |
f7f5fd92a7d3810e1cd7f038ab2a83ce83eadd73 | 387 | py | Python | students/K33422/Smolenskaya Tatyana/Lr4/lab_3/asgi.py | agentofknowledge/ITMO_ICT_WebDevelopment_2020-2021 | 7d5eab0d68af378083f21473cbbd5e5def6aa60a | [
"MIT"
] | null | null | null | students/K33422/Smolenskaya Tatyana/Lr4/lab_3/asgi.py | agentofknowledge/ITMO_ICT_WebDevelopment_2020-2021 | 7d5eab0d68af378083f21473cbbd5e5def6aa60a | [
"MIT"
] | null | null | null | students/K33422/Smolenskaya Tatyana/Lr4/lab_3/asgi.py | agentofknowledge/ITMO_ICT_WebDevelopment_2020-2021 | 7d5eab0d68af378083f21473cbbd5e5def6aa60a | [
"MIT"
] | null | null | null | """
ASGI config for lab_3 project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings unless the environment already set it.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lab_3.settings')
# Module-level ASGI callable that ASGI servers look up by name.
application = get_asgi_application()
| 22.764706 | 78 | 0.782946 |
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lab_3.settings')
application = get_asgi_application()
| true | true |
f7f5feac18d8be5171c374b058a7c66d5ff83842 | 1,788 | py | Python | tests/modules/jmeter/__init__.py | 3dgiordano/taurus | 77cb31b6f0e5c27545094f600ac2b595fa76d992 | [
"Apache-2.0"
] | 1 | 2018-02-17T16:00:34.000Z | 2018-02-17T16:00:34.000Z | tests/modules/jmeter/__init__.py | 3dgiordano/taurus | 77cb31b6f0e5c27545094f600ac2b595fa76d992 | [
"Apache-2.0"
] | 5 | 2018-03-10T20:50:24.000Z | 2021-08-20T15:07:32.000Z | tests/modules/jmeter/__init__.py | 3dgiordano/taurus | 77cb31b6f0e5c27545094f600ac2b595fa76d992 | [
"Apache-2.0"
] | 1 | 2018-05-04T23:06:15.000Z | 2018-05-04T23:06:15.000Z | import logging
from tests.mocks import EngineEmul
from bzt.modules.jmeter import JMeter, JMeterExecutor
from bzt.utils import get_full_path
class MockJMeter(JMeter):
def __init__(self, has_ctg=None, reaction=None):
jmeter_version = JMeterExecutor.JMETER_VER
jmeter_path = "~/.bzt/jmeter-taurus/{version}/"
jmeter_path = get_full_path(jmeter_path)
super(MockJMeter, self).__init__(tool_path=jmeter_path, parent_logger=logging.getLogger(''),
jmeter_version=jmeter_version, jmeter_download_link=None, plugins=[], proxy={})
self.has_ctg = has_ctg
self.reaction = reaction if reaction else []
def ctg_plugin_installed(self):
return self.has_ctg
def _pmgr_call(self, params):
# replaces real pmgr call
reaction = self.reaction.pop(0)
if 'raise' in reaction:
raise reaction['raise']
return reaction['output']
class MockJMeterExecutor(JMeterExecutor):
    """JMeterExecutor wired to an emulated engine and a mock JMeter tool."""

    def __init__(self, load=None, settings=None, has_ctg=None):
        super(MockJMeterExecutor, self).__init__()
        self.mock_install = True
        self.version = None
        # Fall back to permissive defaults when the caller passes nothing.
        load = {} if load is None else load
        settings = {} if settings is None else settings
        has_ctg = True if has_ctg is None else has_ctg
        self.engine = EngineEmul()
        self.env = self.engine.env
        self.execution.merge(load)
        self.settings.merge({"detect-plugins": False})
        self.settings.merge(settings)
        self.tool = MockJMeter(has_ctg)

    def install_required_tools(self):
        # Guard clause: delegate to the real installer when mocking is off.
        if not self.mock_install:
            super(MockJMeterExecutor, self).install_required_tools()
            return
        self.version = self.settings.get('version')
        self.tool = MockJMeter()
| 33.111111 | 120 | 0.64877 | import logging
from tests.mocks import EngineEmul
from bzt.modules.jmeter import JMeter, JMeterExecutor
from bzt.utils import get_full_path
class MockJMeter(JMeter):
def __init__(self, has_ctg=None, reaction=None):
jmeter_version = JMeterExecutor.JMETER_VER
jmeter_path = "~/.bzt/jmeter-taurus/{version}/"
jmeter_path = get_full_path(jmeter_path)
super(MockJMeter, self).__init__(tool_path=jmeter_path, parent_logger=logging.getLogger(''),
jmeter_version=jmeter_version, jmeter_download_link=None, plugins=[], proxy={})
self.has_ctg = has_ctg
self.reaction = reaction if reaction else []
def ctg_plugin_installed(self):
return self.has_ctg
def _pmgr_call(self, params):
reaction = self.reaction.pop(0)
if 'raise' in reaction:
raise reaction['raise']
return reaction['output']
class MockJMeterExecutor(JMeterExecutor):
def __init__(self, load=None, settings=None, has_ctg=None):
super(MockJMeterExecutor, self).__init__()
self.mock_install = True
self.version = None
if load is None: load = {}
if settings is None: settings = {}
if has_ctg is None: has_ctg = True
self.engine = EngineEmul()
self.env = self.engine.env
self.execution.merge(load)
self.settings.merge({"detect-plugins": False})
self.settings.merge(settings)
self.tool = MockJMeter(has_ctg)
def install_required_tools(self):
if self.mock_install:
self.version = self.settings.get('version')
self.tool = MockJMeter()
else:
super(MockJMeterExecutor, self).install_required_tools()
| true | true |
f7f5ff252bbdfd7f2dfe8e3d880cf0c0cf3dc444 | 866 | py | Python | Lectures/tex/codes/lecture9.py | josh-gree/NumericalMethods | 03cb91114b3f5eb1b56916920ad180d371fe5283 | [
"CC-BY-3.0"
] | 76 | 2015-02-12T19:51:52.000Z | 2022-03-26T15:34:11.000Z | Lectures/tex/codes/lecture9.py | josh-gree/NumericalMethods | 03cb91114b3f5eb1b56916920ad180d371fe5283 | [
"CC-BY-3.0"
] | 2 | 2017-05-24T19:49:52.000Z | 2018-01-23T21:40:42.000Z | Lectures/tex/codes/lecture9.py | josh-gree/NumericalMethods | 03cb91114b3f5eb1b56916920ad180d371fe5283 | [
"CC-BY-3.0"
] | 41 | 2015-01-05T13:30:47.000Z | 2022-02-15T09:59:39.000Z | import numpy
def functional_iteration(f, x0, max_steps=100, tol=1e-10):
    """Solve f(x) = 0 by fixed-point iteration of g(x) = x - f(x).

    Starts from x0 and stops once |f(x)| <= tol or max_steps iterations
    have been taken. Returns the array of iterates produced so far
    (including x0).
    """
    iterates = numpy.zeros(max_steps + 1)
    iterates[0] = x0
    g = lambda v: v - f(v)
    k = 0
    while k < max_steps and abs(f(iterates[k])) > tol:
        iterates[k + 1] = g(iterates[k])
        k += 1
    return iterates[:k + 1]
if __name__=="__main__":
    # Potential whose stationary points we look for via dF = 0.
    def F(x):
        return 1/2*x**2 - 1/52*x**4 - 72/52*x
    # Forward-difference approximation of F' with step size dx.
    def dF(x, dx):
        return (F(x+dx) - F(x)) / dx
    # Run the fixed-point root finder with three finite-difference step
    # sizes to compare the converged root and iteration counts.
    f_1em6 = lambda x : dF(x, 1e-6)
    x_df_6 = functional_iteration(f_1em6, 1)
    print("Root: ", x_df_6[-1], "iterations", len(x_df_6))
    f_1em1 = lambda x : dF(x, 1e-1)
    x_df_1 = functional_iteration(f_1em1, 1)
    print("Root: ", x_df_1[-1], "iterations", len(x_df_1))
    f_5em1 = lambda x : dF(x, 5e-1)
    x_df_5 = functional_iteration(f_5em1, 1)
    print("Root: ", x_df_5[-1], "iterations", len(x_df_5))
def functional_iteration(f, x0, max_steps=100, tol=1e-10):
x = numpy.zeros(max_steps+1)
x[0] = x0
step = 0
g = lambda x : x - f(x)
while abs(f(x[step])) > tol and step < max_steps:
step = step + 1
x[step] = g(x[step-1])
return x[:step+1]
if __name__=="__main__":
def F(x):
return 1/2*x**2 - 1/52*x**4 - 72/52*x
def dF(x, dx):
return (F(x+dx) - F(x)) / dx
f_1em6 = lambda x : dF(x, 1e-6)
x_df_6 = functional_iteration(f_1em6, 1)
print("Root: ", x_df_6[-1], "iterations", len(x_df_6))
f_1em1 = lambda x : dF(x, 1e-1)
x_df_1 = functional_iteration(f_1em1, 1)
print("Root: ", x_df_1[-1], "iterations", len(x_df_1))
f_5em1 = lambda x : dF(x, 5e-1)
x_df_5 = functional_iteration(f_5em1, 1)
print("Root: ", x_df_5[-1], "iterations", len(x_df_5)) | true | true |
f7f60009cec16f6905e0c64c19b1b099693ea817 | 730 | py | Python | src/tele_muninn/message_processor.py | namuan/tele-muninn | 9252b035881dd7618de09530c8494192dc2c00a0 | [
"MIT"
] | null | null | null | src/tele_muninn/message_processor.py | namuan/tele-muninn | 9252b035881dd7618de09530c8494192dc2c00a0 | [
"MIT"
] | null | null | null | src/tele_muninn/message_processor.py | namuan/tele-muninn | 9252b035881dd7618de09530c8494192dc2c00a0 | [
"MIT"
] | null | null | null | import logging
from dataclasses import dataclass
async def download_web_page(url: str) -> None:
    """Placeholder: currently only logs the URL; no network request is made."""
    logging.info("Downloading web page: %s", url)
def _handle_web_page(web_page_url: str) -> str:
return f"✅ Processed web page: {web_page_url}"
def _process_message(update_message_text: str) -> str:
    """Dispatch on the message text: anything starting with "http" is a URL."""
    if not update_message_text.startswith("http"):
        return f'Unknown command "{update_message_text}"'
    return _handle_web_page(update_message_text)
@dataclass
class IncomingMessage:
    """Minimal envelope for a received chat message."""
    # Raw text of the message as received.
    text: str
def handle_cmd(incoming_message: IncomingMessage) -> str:
    """Log one incoming message and return the reply produced for it."""
    text = incoming_message.text
    logging.info("Received message: %s", text)
    return _process_message(text)
| 25.172414 | 61 | 0.756164 | import logging
from dataclasses import dataclass
async def download_web_page(url: str) -> None:
logging.info("Downloading web page: %s", url)
def _handle_web_page(web_page_url: str) -> str:
return f"✅ Processed web page: {web_page_url}"
def _process_message(update_message_text: str) -> str:
if update_message_text.startswith("http"):
return _handle_web_page(update_message_text)
return f'Unknown command "{update_message_text}"'
@dataclass
class IncomingMessage:
text: str
def handle_cmd(incoming_message: IncomingMessage) -> str:
update_message_text = incoming_message.text
logging.info("Received message: %s", update_message_text)
return _process_message(update_message_text)
| true | true |
f7f60080b35f974b2a1f446edb633a4b007370e8 | 2,556 | py | Python | test/programytest/parser/template/node_tests/test_first.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | 5 | 2018-08-21T00:13:45.000Z | 2018-09-01T20:00:55.000Z | test/programytest/parser/template/node_tests/test_first.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | 1 | 2018-09-12T18:30:17.000Z | 2018-09-12T18:30:17.000Z | test/programytest/parser/template/node_tests/test_first.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | 5 | 2018-08-21T00:08:36.000Z | 2018-09-23T06:11:04.000Z | import xml.etree.ElementTree as ET
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.first import TemplateFirstNode
from programy.parser.template.nodes.word import TemplateWordNode
from programytest.parser.base import ParserTestsBaseClass
class MockTemplateFirstNode(TemplateFirstNode):
    """First-node variant whose resolution always fails, for error-path tests."""

    def __init__(self):
        super(MockTemplateFirstNode, self).__init__()

    def resolve_to_string(self, context):
        raise Exception("This is an error")
class TemplateFirstNodeTests(ParserTestsBaseClass):
    """Unit tests for TemplateFirstNode resolution and XML serialisation."""

    def test_node(self):
        # A <first> node with several word children resolves to the first word.
        root = TemplateNode()
        self.assertIsNotNone(root)
        self.assertIsNotNone(root.children)
        self.assertEqual(len(root.children), 0)
        node = TemplateFirstNode()
        self.assertIsNotNone(node)
        root.append(node)
        word1 = TemplateWordNode("Word1")
        node.append(word1)
        word2 = TemplateWordNode("Word2")
        node.append(word2)
        word3 = TemplateWordNode("Word3")
        node.append(word3)
        self.assertEqual(root.resolve(self._client_context), "Word1")

    def test_to_xml(self):
        # XML rendering keeps all child words inside the <first> element.
        root = TemplateNode()
        node = TemplateFirstNode()
        root.append(node)
        word1 = TemplateWordNode("Word1")
        node.append(word1)
        word2 = TemplateWordNode("Word2")
        node.append(word2)
        xml = root.xml_tree(self._client_context)
        self.assertIsNotNone(xml)
        xml_str = ET.tostring(xml, "utf-8").decode("utf-8")
        self.assertEqual("<template><first>Word1 Word2</first></template>", xml_str)

    def test_node_no_words(self):
        # A <first> node with no children resolves to the fallback "NIL".
        root = TemplateNode()
        self.assertIsNotNone(root)
        self.assertIsNotNone(root.children)
        self.assertEqual(len(root.children), 0)
        node = TemplateFirstNode()
        self.assertIsNotNone(node)
        root.append(node)
        self.assertEqual(root.resolve(self._client_context), "NIL")

    def test_to_xml_no_words(self):
        # An empty node serialises as a self-closing <first /> element.
        root = TemplateNode()
        node = TemplateFirstNode()
        root.append(node)
        xml = root.xml_tree(self._client_context)
        self.assertIsNotNone(xml)
        xml_str = ET.tostring(xml, "utf-8").decode("utf-8")
        self.assertEqual("<template><first /></template>", xml_str)

    def test_node_exception_handling(self):
        # A node that raises during resolution yields an empty string
        # rather than propagating the exception.
        root = TemplateNode()
        node = MockTemplateFirstNode()
        root.append(node)
        result = root.resolve(self._client_context)
        self.assertIsNotNone(result)
        self.assertEqual("", result)
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.first import TemplateFirstNode
from programy.parser.template.nodes.word import TemplateWordNode
from programytest.parser.base import ParserTestsBaseClass
class MockTemplateFirstNode(TemplateFirstNode):
def __init__(self):
TemplateFirstNode.__init__(self)
def resolve_to_string(self, context):
raise Exception("This is an error")
class TemplateFirstNodeTests(ParserTestsBaseClass):
def test_node(self):
root = TemplateNode()
self.assertIsNotNone(root)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 0)
node = TemplateFirstNode()
self.assertIsNotNone(node)
root.append(node)
word1 = TemplateWordNode("Word1")
node.append(word1)
word2 = TemplateWordNode("Word2")
node.append(word2)
word3 = TemplateWordNode("Word3")
node.append(word3)
self.assertEqual(root.resolve(self._client_context), "Word1")
def test_to_xml(self):
root = TemplateNode()
node = TemplateFirstNode()
root.append(node)
word1 = TemplateWordNode("Word1")
node.append(word1)
word2 = TemplateWordNode("Word2")
node.append(word2)
xml = root.xml_tree(self._client_context)
self.assertIsNotNone(xml)
xml_str = ET.tostring(xml, "utf-8").decode("utf-8")
self.assertEqual("<template><first>Word1 Word2</first></template>", xml_str)
def test_node_no_words(self):
root = TemplateNode()
self.assertIsNotNone(root)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 0)
node = TemplateFirstNode()
self.assertIsNotNone(node)
root.append(node)
self.assertEqual(root.resolve(self._client_context), "NIL")
def test_to_xml_no_words(self):
root = TemplateNode()
node = TemplateFirstNode()
root.append(node)
xml = root.xml_tree(self._client_context)
self.assertIsNotNone(xml)
xml_str = ET.tostring(xml, "utf-8").decode("utf-8")
self.assertEqual("<template><first /></template>", xml_str)
def test_node_exception_handling(self):
root = TemplateNode()
node = MockTemplateFirstNode()
root.append(node)
result = root.resolve(self._client_context)
self.assertIsNotNone(result)
self.assertEqual("", result) | true | true |
f7f600a85b26969b7b63010c250c42a7dbeb92da | 9,869 | py | Python | pdftools/_cli.py | jrhawley/pdftools | febb6e7bf6894d5be15d850828e7cc03671ee6d2 | [
"MIT"
] | 59 | 2018-05-04T11:05:33.000Z | 2022-02-07T23:22:22.000Z | pdftools/_cli.py | jrhawley/pdftools | febb6e7bf6894d5be15d850828e7cc03671ee6d2 | [
"MIT"
] | 13 | 2018-04-10T16:38:32.000Z | 2022-01-17T21:31:18.000Z | pdftools/_cli.py | jrhawley/pdftools | febb6e7bf6894d5be15d850828e7cc03671ee6d2 | [
"MIT"
] | 17 | 2019-01-23T02:41:03.000Z | 2022-03-02T20:34:03.000Z | import os.path as path
import argparse
import re
from . import __version__
def main():
    """Entry point: build the pdftools argparse CLI and dispatch a sub-command.

    Sub-command implementations are imported lazily after argument parsing
    so that printing help or the version number stays fast.
    """
    PARSER = argparse.ArgumentParser(
        description="Python-based command line tool for manipulating PDFs. It is based on the PyPdf2 package.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    # global options
    PARSER.add_argument(
        "-V", "--version", action="store_true", help="Print version number and exit"
    )
    SUBPARSERS = PARSER.add_subparsers(
        title="Sub-commands", dest="command", metavar="<command>"
    )
    # Add
    # --------------------------------------------
    parser_add = SUBPARSERS.add_parser(
        "add",
        help="Add pages from a source file to an output PDF file",
        description="Add pages from a source file to an output PDF file",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser_add.add_argument("dest", type=str, help="Destination PDF file")
    parser_add.add_argument("src", type=str, default=None, help="PDF source file")
    parser_add.add_argument(
        "-p",
        "--pages",
        nargs="+",
        help="list of pages to add to the output. Examples: 5; 1-9; 1-; -9",
    )
    # output
    parser_add.add_argument(
        "-o",
        "--output",
        type=str,
        default=None,
        help="Name of the output file. If None, the `dest` file will be overwritten",
    )
    # Copy
    # --------------------------------------------
    parser_copy = SUBPARSERS.add_parser(
        "copy",
        help="Copy specific pages of a PDF file in a new file",
        description="Copy specific pages of a PDF file in a new file",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser_copy.add_argument(
        "src", type=str, default=None, help="Source PDF containing pages to copy"
    )
    parser_copy.add_argument(
        "-o",
        "--output",
        type=str,
        default=None,
        help="Name of the output file. If None, the `dest` file will be overwritten",
    )
    # NOTE(review): default=1 is a bare int while nargs="+" normally yields a
    # list of strings — confirm pdf_copy handles the int default downstream.
    parser_copy.add_argument(
        "-p",
        "--pages",
        dest="pages",
        type=str,
        nargs="+",
        default=1,
        help="list of pages to copy in the new file. "
        "Examples: \n"
        '"5 8 10": Pages 5, 8, 10; '
        '"1-9": Pages 1 to 9; '
        '"5-": Pages from 5 to last page; '
        '"-9": Pages from beginning to 9',
    )
    parser_copy.add_argument("-y", action="store_true", help="yes to all")
    # Insert
    # --------------------------------------------
    parser_insert = SUBPARSERS.add_parser(
        "insert",
        help="Insert pages of one file into another",
        description="Insert pages of one file into another",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser_insert.add_argument("dest", type=str, help="Destination PDF file")
    parser_insert.add_argument("src", type=str, help="Source PDF file")
    parser_insert.add_argument(
        "-o",
        "--output",
        type=str,
        default=None,
        help="Name of the output file. If None, the `dest` file will be overwritten",
    )
    parser_insert.add_argument(
        "-p",
        "--pages",
        nargs="+",
        help="List of page numbers (start with 1) which will be inserted. If None, all pages will be inserted (default). Examples: 5; 1-9; 1-; -9",
    )
    parser_insert.add_argument(
        "-i",
        "--index",
        type=int,
        default=None,
        help="Page number (1-indexed) of destination file where the pages will be inserted. If None they will be added at the end of the file",
    )
    # Merge
    # --------------------------------------------
    parser_merge = SUBPARSERS.add_parser(
        "merge",
        help="Merge the pages of multiple input files into one output file",
        description="Merge the pages of multiple input files into one output file",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser_merge.add_argument(
        "src", type=str, default=None, nargs="+", help="List of input source files"
    )
    parser_merge.add_argument(
        "-o", "--output", type=str, default="merged.pdf", help="Output filename",
    )
    parser_merge.add_argument(
        "-d", "--delete", action="store_true", help="Delete source files after merge",
    )
    # Remove
    # --------------------------------------------
    parser_remove = SUBPARSERS.add_parser(
        "remove",
        help="Remove pages from a PDF file",
        description="Remove pages from a PDF file",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser_remove.add_argument("src", type=str, default=None, help="PDF source file")
    parser_remove.add_argument(
        "pages",
        nargs="+",
        help="List of pages to remove from file. Examples: 5; 1-9; 1-; -9",
    )
    # output
    parser_remove.add_argument(
        "-o",
        "--output",
        type=str,
        default=None,
        help="Name of the output file. If None, the `src` file will be overwritten",
    )
    # Rotate
    # --------------------------------------------
    parser_rotate = SUBPARSERS.add_parser(
        "rotate",
        help="Rotate the pages of a PDF file by a set number of degrees",
        description="Rotate the pages of a PDF file by a set number of degrees",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser_rotate.add_argument("src", type=str, default=None, help="Source file")
    parser_rotate.add_argument(
        "-d",
        "--degrees",
        choices=(90, 180, 270),
        default=90,
        type=int,
        help="Specify degrees value to rotate page(s)"
    )
    parser_rotate.add_argument(
        "-c",
        "--counter-clockwise",
        action="store_true",
        dest="counter_clockwise",
        help="Rotate pages counter-clockwise instead of clockwise, by default",
    )
    parser_rotate.add_argument(
        "-p",
        "--pages",
        nargs="+",
        default=None,
        help="List of page numbers which will be rotated. If None, all pages will be rotated. Examples: 5; 1-9; 1-; -9",
    )
    parser_rotate.add_argument(
        "-o",
        "--output",
        type=str,
        default=None,
        help="Output filename. If None, the source file will be overwritten",
    )
    # Split
    # --------------------------------------------
    parser_split = SUBPARSERS.add_parser(
        "split",
        help="Split a PDF file into multiple documents",
        description="Split a PDF file into multiple documents",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser_split.add_argument(
        "src", type=str, default=None, help="Source file to be split",
    )
    parser_split.add_argument(
        "-o",
        "--output",
        type=str,
        default=None,
        help="Output filenames. If None, will append page numbers to the input file name.",
    )
    parser_split.add_argument(
        "-s",
        "--stepsize",
        dest="stepsize",
        type=int,
        default=1,
        help="How many pages are packed in each output file",
    )
    parser_split.add_argument(
        "-q",
        "--sequence",
        dest="sequence",
        nargs="+",
        help="Sequence of numbers describing how many pages to put in each outputfile",
    )
    # Zip
    # --------------------------------------------
    parser_zip = SUBPARSERS.add_parser(
        "zip",
        help="Python-like zipping (interleaving) the pages of two documents in one output file",
        description="Python-like zipping (interleaving) the pages of two documents in one output file",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser_zip.add_argument("src1", type=str, help="First source file")
    parser_zip.add_argument("src2", type=str, help="Second source file")
    parser_zip.add_argument(
        "output", type=str, help="Name of the output file",
    )
    # delete
    parser_zip.add_argument(
        "-d", "--delete", action="store_true", help="Delete source files after merge"
    )
    # revert
    parser_zip.add_argument(
        "-r",
        "--revert",
        action="store_true",
        help="Revert the pages of second input file",
    )
    # parse arguments from command line
    ARGS = PARSER.parse_args()
    # validate command line arguments for the given sub-command
    # import packages after parsing to speed up command line responsiveness
    if ARGS.version:
        print("pdftools v{}".format(__version__))
        return
    # NOTE(review): with no sub-command and no -V the function falls through
    # and returns silently — consider whether help should be printed instead.
    if ARGS.command == "add":
        from pdftools.pdftools import pdf_add
        pdf_add(ARGS.dest, ARGS.src, ARGS.pages, ARGS.output)
    elif ARGS.command == "copy":
        from pdftools.pdftools import pdf_copy
        pdf_copy(ARGS.src, ARGS.output, ARGS.pages, ARGS.y)
    elif ARGS.command == "insert":
        from pdftools.pdftools import pdf_insert
        pdf_insert(ARGS.dest, ARGS.src, ARGS.pages, ARGS.index, ARGS.output)
    elif ARGS.command == "merge":
        from pdftools.pdftools import pdf_merge
        pdf_merge(ARGS.src, ARGS.output, ARGS.delete)
    elif ARGS.command == "remove":
        from pdftools.pdftools import pdf_remove
        pdf_remove(ARGS.src, ARGS.pages, ARGS.output)
    elif ARGS.command == "rotate":
        from pdftools.pdftools import pdf_rotate
        pdf_rotate(ARGS.src, ARGS.degrees, ARGS.counter_clockwise, ARGS.pages, ARGS.output)
    elif ARGS.command == "split":
        from pdftools.pdftools import pdf_split
        pdf_split(ARGS.src, ARGS.output, ARGS.stepsize, ARGS.sequence)
    elif ARGS.command == "zip":
        from pdftools.pdftools import pdf_zip
        pdf_zip(ARGS.src1, ARGS.src2, ARGS.output, ARGS.delete, ARGS.revert)
| 33.914089 | 147 | 0.594082 | import os.path as path
import argparse
import re
from . import __version__
def main():
PARSER = argparse.ArgumentParser(
description="Python-based command line tool for manipulating PDFs. It is based on the PyPdf2 package.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
PARSER.add_argument(
"-V", "--version", action="store_true", help="Print version number and exit"
)
SUBPARSERS = PARSER.add_subparsers(
title="Sub-commands", dest="command", metavar="<command>"
)
parser_add = SUBPARSERS.add_parser(
"add",
help="Add pages from a source file to an output PDF file",
description="Add pages from a source file to an output PDF file",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser_add.add_argument("dest", type=str, help="Destination PDF file")
parser_add.add_argument("src", type=str, default=None, help="PDF source file")
parser_add.add_argument(
"-p",
"--pages",
nargs="+",
help="list of pages to add to the output. Examples: 5; 1-9; 1-; -9",
)
parser_add.add_argument(
"-o",
"--output",
type=str,
default=None,
help="Name of the output file. If None, the `dest` file will be overwritten",
)
parser_copy = SUBPARSERS.add_parser(
"copy",
help="Copy specific pages of a PDF file in a new file",
description="Copy specific pages of a PDF file in a new file",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser_copy.add_argument(
"src", type=str, default=None, help="Source PDF containing pages to copy"
)
parser_copy.add_argument(
"-o",
"--output",
type=str,
default=None,
help="Name of the output file. If None, the `dest` file will be overwritten",
)
parser_copy.add_argument(
"-p",
"--pages",
dest="pages",
type=str,
nargs="+",
default=1,
help="list of pages to copy in the new file. "
"Examples: \n"
'"5 8 10": Pages 5, 8, 10; '
'"1-9": Pages 1 to 9; '
'"5-": Pages from 5 to last page; '
'"-9": Pages from beginning to 9',
)
parser_copy.add_argument("-y", action="store_true", help="yes to all")
parser_insert = SUBPARSERS.add_parser(
"insert",
help="Insert pages of one file into another",
description="Insert pages of one file into another",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser_insert.add_argument("dest", type=str, help="Destination PDF file")
parser_insert.add_argument("src", type=str, help="Source PDF file")
parser_insert.add_argument(
"-o",
"--output",
type=str,
default=None,
help="Name of the output file. If None, the `dest` file will be overwritten",
)
parser_insert.add_argument(
"-p",
"--pages",
nargs="+",
help="List of page numbers (start with 1) which will be inserted. If None, all pages will be inserted (default). Examples: 5; 1-9; 1-; -9",
)
parser_insert.add_argument(
"-i",
"--index",
type=int,
default=None,
help="Page number (1-indexed) of destination file where the pages will be inserted. If None they will be added at the end of the file",
)
parser_merge = SUBPARSERS.add_parser(
"merge",
help="Merge the pages of multiple input files into one output file",
description="Merge the pages of multiple input files into one output file",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser_merge.add_argument(
"src", type=str, default=None, nargs="+", help="List of input source files"
)
parser_merge.add_argument(
"-o", "--output", type=str, default="merged.pdf", help="Output filename",
)
parser_merge.add_argument(
"-d", "--delete", action="store_true", help="Delete source files after merge",
)
parser_remove = SUBPARSERS.add_parser(
"remove",
help="Remove pages from a PDF file",
description="Remove pages from a PDF file",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser_remove.add_argument("src", type=str, default=None, help="PDF source file")
parser_remove.add_argument(
"pages",
nargs="+",
help="List of pages to remove from file. Examples: 5; 1-9; 1-; -9",
)
parser_remove.add_argument(
"-o",
"--output",
type=str,
default=None,
help="Name of the output file. If None, the `src` file will be overwritten",
)
parser_rotate = SUBPARSERS.add_parser(
"rotate",
help="Rotate the pages of a PDF file by a set number of degrees",
description="Rotate the pages of a PDF file by a set number of degrees",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser_rotate.add_argument("src", type=str, default=None, help="Source file")
parser_rotate.add_argument(
"-d",
"--degrees",
choices=(90, 180, 270),
default=90,
type=int,
help="Specify degrees value to rotate page(s)"
)
parser_rotate.add_argument(
"-c",
"--counter-clockwise",
action="store_true",
dest="counter_clockwise",
help="Rotate pages counter-clockwise instead of clockwise, by default",
)
parser_rotate.add_argument(
"-p",
"--pages",
nargs="+",
default=None,
help="List of page numbers which will be rotated. If None, all pages will be rotated. Examples: 5; 1-9; 1-; -9",
)
parser_rotate.add_argument(
"-o",
"--output",
type=str,
default=None,
help="Output filename. If None, the source file will be overwritten",
)
parser_split = SUBPARSERS.add_parser(
"split",
help="Split a PDF file into multiple documents",
description="Split a PDF file into multiple documents",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser_split.add_argument(
"src", type=str, default=None, help="Source file to be split",
)
parser_split.add_argument(
"-o",
"--output",
type=str,
default=None,
help="Output filenames. If None, will append page numbers to the input file name.",
)
parser_split.add_argument(
"-s",
"--stepsize",
dest="stepsize",
type=int,
default=1,
help="How many pages are packed in each output file",
)
parser_split.add_argument(
"-q",
"--sequence",
dest="sequence",
nargs="+",
help="Sequence of numbers describing how many pages to put in each outputfile",
)
parser_zip = SUBPARSERS.add_parser(
"zip",
help="Python-like zipping (interleaving) the pages of two documents in one output file",
description="Python-like zipping (interleaving) the pages of two documents in one output file",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser_zip.add_argument("src1", type=str, help="First source file")
parser_zip.add_argument("src2", type=str, help="Second source file")
parser_zip.add_argument(
"output", type=str, help="Name of the output file",
)
parser_zip.add_argument(
"-d", "--delete", action="store_true", help="Delete source files after merge"
)
parser_zip.add_argument(
"-r",
"--revert",
action="store_true",
help="Revert the pages of second input file",
)
ARGS = PARSER.parse_args()
if ARGS.version:
print("pdftools v{}".format(__version__))
return
if ARGS.command == "add":
from pdftools.pdftools import pdf_add
pdf_add(ARGS.dest, ARGS.src, ARGS.pages, ARGS.output)
elif ARGS.command == "copy":
from pdftools.pdftools import pdf_copy
pdf_copy(ARGS.src, ARGS.output, ARGS.pages, ARGS.y)
elif ARGS.command == "insert":
from pdftools.pdftools import pdf_insert
pdf_insert(ARGS.dest, ARGS.src, ARGS.pages, ARGS.index, ARGS.output)
elif ARGS.command == "merge":
from pdftools.pdftools import pdf_merge
pdf_merge(ARGS.src, ARGS.output, ARGS.delete)
elif ARGS.command == "remove":
from pdftools.pdftools import pdf_remove
pdf_remove(ARGS.src, ARGS.pages, ARGS.output)
elif ARGS.command == "rotate":
from pdftools.pdftools import pdf_rotate
pdf_rotate(ARGS.src, ARGS.degrees, ARGS.counter_clockwise, ARGS.pages, ARGS.output)
elif ARGS.command == "split":
from pdftools.pdftools import pdf_split
pdf_split(ARGS.src, ARGS.output, ARGS.stepsize, ARGS.sequence)
elif ARGS.command == "zip":
from pdftools.pdftools import pdf_zip
pdf_zip(ARGS.src1, ARGS.src2, ARGS.output, ARGS.delete, ARGS.revert)
| true | true |
f7f60181f40c19cf1253e09f332b7c856b23c7ee | 4,058 | py | Python | source/GUIscripts/plot2DEQ.py | plasmapotential/HEAT | 864c932fac616a59d4b2d07a9b45dfb137f7e6b8 | [
"MIT"
] | 29 | 2020-07-17T15:18:57.000Z | 2022-02-07T02:23:12.000Z | source/GUIscripts/plot2DEQ.py | HongLouyemeng/HEAT | bee84ceffbfc022cdc202ef67c87e469ff6b9e91 | [
"MIT"
] | 6 | 2020-10-02T12:24:43.000Z | 2022-03-29T14:25:51.000Z | source/GUIscripts/plot2DEQ.py | HongLouyemeng/HEAT | bee84ceffbfc022cdc202ef67c87e469ff6b9e91 | [
"MIT"
] | 5 | 2021-02-01T07:41:44.000Z | 2022-03-07T15:18:27.000Z | #plotEQ.py
#Description: Plots Equilibrium (2D) from gfile for pyqt5 application
#Engineer: T Looby
#Date: 20190916
import matplotlib.pyplot as plt
import numpy as np
import MDSplus
import EFIT.equilParams_class as EP
from scipy import interpolate
from scipy.interpolate import interp1d
def EQ2Dplot(ep, shot, t, MachFlag, height=None):
    """Draw a 2D poloidal-flux (psi_n) contour plot for one shot/time slice.

    Args:
        ep: equilibrium object (EFIT equilParams_class) exposing the .g dict
            with 'psiRZn', 'R', 'Z', 'wall' and 'lcfs' entries.
        shot: shot number (int), shown in the title.
        t: time in milliseconds (int), shown in the title.
        MachFlag: machine name; 'nstx' selects the hard-coded NSTX-U wall,
            anything else uses the wall stored in the gfile.
        height: optional figure height in pixels; width keeps a 5:8 ratio.

    Returns:
        The matplotlib.pyplot module with the figure drawn on it.
    """
    # Limiter/wall outline.
    if MachFlag == 'nstx':
        rlim, zlim = nstxu_wall(oldwall=False)  # hard-coded NSTX-U wall
    else:
        rlim = ep.g['wall'][:, 0]
        zlim = ep.g['wall'][:, 1]

    # Normalized poloidal flux on the (R, Z) grid.
    psi = ep.g['psiRZn']
    # Filled-contour levels: a handful of fixed levels inside the separatrix
    # plus 15 evenly spaced levels in the scrape-off layer (psi_n > 1).
    levels = sorted(np.append([0.0, 0.05, 0.1, 0.25, 0.5, 0.75, 1.0],
                              np.linspace(1.01, psi.max(), 15)))
    R, Z = np.meshgrid(ep.g['R'], ep.g['Z'])
    commonLev = np.linspace(1.0, psi.max(), 15)  # open (common-flux) surfaces
    lcfs = [1.0]  # separatrix contour level

    if height is None:
        plt.figure(figsize=(5, 8))
    else:
        # Convert the requested pixel height to inches, keep a 5:8 aspect.
        dpi = 80
        h = height / dpi
        w = 5.0 / 8.0 * h
        if w % 2 != 0:
            w = w + 1  # bump non-even widths (historical sizing tweak)
        plt.figure(figsize=(w, h), dpi=dpi)

    # Color contour plot of the flux, dotted white open field lines,
    # solid red separatrix.
    CS = plt.contourf(R, Z, psi, levels, cmap=plt.cm.cividis)
    plt.contour(CS, levels=commonLev, colors=('white',),
                linestyles='dotted', linewidths=(1,))
    plt.contour(CS, levels=lcfs, colors=('r',),
                linestyles=('-',), linewidths=(2,))
    # plt.axes() with no arguments is deprecated (and removed in modern
    # matplotlib); operate on the current axes instead.
    plt.gca().set_aspect('equal')
    plt.xlabel('R [m]', fontsize=22, color='w')
    plt.ylabel('Z [m]', fontsize=22, color='w')
    plt.tick_params(axis='both', colors='w')
    plt.title("{:06d} @ {:05d}ms".format(shot, t), fontsize=22, color='white')
    # Wall drawn as a dashed lime outline.
    plt.plot(rlim, zlim, '--', color='lime', lw=2)
    return plt
def nstxu_wall(oldwall=False):
    """Return (R, Z) coordinate arrays tracing a simplified NSTX/NSTX-U wall.

    Two different wall versions are available:

    Args:
        oldwall: if True, return the original NSTX wall outline;
            otherwise return the NSTX-U wall (the default).

    Returns:
        (R, Z): numpy arrays of major radius and height [m] describing a
        closed poloidal outline (first and last points coincide at Z=0).
    """
    if oldwall:
        # Original NSTX wall outline (31 points, up-down symmetric).
        R = np.array([0.1851, 0.1851, 0.2794, 0.2794, 0.2979, 0.5712,
                    1.0433, 1.3192, 1.3358,
                    1.4851, 1.4791, 1.5174, 1.5313, 1.5464, 1.5608,
                    1.567, 1.5657, 1.5543, 1.5341, 1.5181, 1.4818,
                    1.4851, 1.3358, 1.3192, 1.0433,
                    0.5712, 0.2979, 0.2794, 0.2794, 0.1851, 0.1851])
        Z = np.array([0.0, 1.0081, 1.1714, 1.578, 1.6034, 1.6034,
                    1.43, 1.0397, 0.9976,
                    0.545, 0.4995, 0.306, 0.2355, 0.1586, 0.0801,
                    0.0, -0.0177, -0.1123, -0.221, -0.3026, -0.486,
                    -0.545, -0.9976, -1.0397, -1.43,
                    -1.6034, -1.6034, -1.578, -1.1714, -1.0081, 0])
    else:
        # NSTX-U wall outline (35 points, up-down symmetric).
        R = np.array([ 0.3147568, 0.3147568, 0.4441952, 0.4441952, 0.443484 ,
        0.443484 , 0.6000496, 0.7672832, 0.8499856, 1.203452, 1.3192, 1.3358, 1.4851, 1.489 ,
        1.5638, 1.57 , 1.5737, 1.575 , 1.5737, 1.57 , 1.5638,
        1.489 , 1.4851, 1.3358, 1.3192, 1.203452 , 0.8499856, 0.7672832, 0.6000496, 0.443484 ,
        0.443484 , 0.4441952, 0.4441952, 0.3147568, 0.3147568 ])
        Z = np.array([ 0. , 1.0499344, 1.2899136, 1.5104872, 1.5104872,
        1.6028416, 1.6028416, 1.5367 , 1.5367 , 1.397508, 1.0397, 0.9976, 0.545 , 0.49 ,
        0.1141, 0.0764, 0.0383, 0. , -0.0383, -0.0764, -0.1141,
        -0.49 , -0.545 , -0.9976, -1.0397, -1.397508 , -1.5367 , -1.5367 , -1.6028416, -1.6028416,
        -1.5104872, -1.5104872, -1.2899136, -1.0499344, 0.])
    return R,Z
| 41.408163 | 107 | 0.542632 |
import matplotlib.pyplot as plt
import numpy as np
import MDSplus
import EFIT.equilParams_class as EP
from scipy import interpolate
from scipy.interpolate import interp1d
def EQ2Dplot(ep,shot,t,MachFlag,height=None):
rbdry = ep.g['lcfs'][:,0]
zbdry = ep.g['lcfs'][:,1]
if MachFlag == 'nstx':
rlim, zlim = nstxu_wall(oldwall=False)
else:
rlim = ep.g['wall'][:,0]
zlim = ep.g['wall'][:,1]
psi = ep.g['psiRZn']
levels = sorted(np.append([0.0,0.05,0.1,0.25,0.5,0.75,1.0], np.linspace(1.01,psi.max(),15)))
R, Z = np.meshgrid(ep.g['R'],ep.g['Z'])
psiMax = psi.max()
commonLev = np.linspace(1.0,psiMax,15)
lcfs = [1.0]
if height is None:
plt.figure(figsize=(5,8))
else:
dpi = 80
h = height / dpi
w = 5.0/8.0 * h
if w % 2 == 0:
pass
else:
w=w+1
plt.figure(figsize=(w,h), dpi=dpi)
CS = plt.contourf(R,Z,psi,levels,cmap=plt.cm.cividis)
plt.contour(CS, levels = commonLev, colors=('white',),linestyles='dotted',linewidths=(1,))
plt.contour(CS, levels = lcfs, colors=('r',),linestyles=('-',),linewidths=(2,))
plt.axes().set_aspect('equal')
plt.xlabel('R [m]', fontsize=22,color='w')
plt.ylabel('Z [m]', fontsize=22,color='w')
plt.tick_params(axis='both',colors='w')
plt.title("{:06d} @ {:05d}ms".format(shot,t), fontsize=22, color='white')
rlim_patched = np.append(rlim[2:], rlim[2])
zlim_patched = np.append(zlim[2:], zlim[2])
plt.plot(rlim, zlim, '--', color='lime', lw=2)
return plt
def nstxu_wall(oldwall=False):
if oldwall:
R = np.array([0.1851, 0.1851, 0.2794, 0.2794, 0.2979, 0.5712,
1.0433, 1.3192, 1.3358,
1.4851, 1.4791, 1.5174, 1.5313, 1.5464, 1.5608,
1.567, 1.5657, 1.5543, 1.5341, 1.5181, 1.4818,
1.4851, 1.3358, 1.3192, 1.0433,
0.5712, 0.2979, 0.2794, 0.2794, 0.1851, 0.1851])
Z = np.array([0.0, 1.0081, 1.1714, 1.578, 1.6034, 1.6034,
1.43, 1.0397, 0.9976,
0.545, 0.4995, 0.306, 0.2355, 0.1586, 0.0801,
0.0, -0.0177, -0.1123, -0.221, -0.3026, -0.486,
-0.545, -0.9976, -1.0397, -1.43,
-1.6034, -1.6034, -1.578, -1.1714, -1.0081, 0])
else:
R = np.array([ 0.3147568, 0.3147568, 0.4441952, 0.4441952, 0.443484 ,
0.443484 , 0.6000496, 0.7672832, 0.8499856, 1.203452, 1.3192, 1.3358, 1.4851, 1.489 ,
1.5638, 1.57 , 1.5737, 1.575 , 1.5737, 1.57 , 1.5638,
1.489 , 1.4851, 1.3358, 1.3192, 1.203452 , 0.8499856, 0.7672832, 0.6000496, 0.443484 ,
0.443484 , 0.4441952, 0.4441952, 0.3147568, 0.3147568 ])
Z = np.array([ 0. , 1.0499344, 1.2899136, 1.5104872, 1.5104872,
1.6028416, 1.6028416, 1.5367 , 1.5367 , 1.397508, 1.0397, 0.9976, 0.545 , 0.49 ,
0.1141, 0.0764, 0.0383, 0. , -0.0383, -0.0764, -0.1141,
-0.49 , -0.545 , -0.9976, -1.0397, -1.397508 , -1.5367 , -1.5367 , -1.6028416, -1.6028416,
-1.5104872, -1.5104872, -1.2899136, -1.0499344, 0.])
return R,Z
| true | true |
f7f601eacdf0d3968221b99c81a51e8970172226 | 5,870 | py | Python | models/setting.py | usmbc00/iosconfig | 3e67041ece413cc01f40a0b05ce75c187c0ab61a | [
"Apache-2.0"
] | 15 | 2015-01-14T21:13:05.000Z | 2021-10-12T01:19:14.000Z | models/setting.py | usmbc00/iosconfig | 3e67041ece413cc01f40a0b05ce75c187c0ab61a | [
"Apache-2.0"
] | null | null | null | models/setting.py | usmbc00/iosconfig | 3e67041ece413cc01f40a0b05ce75c187c0ab61a | [
"Apache-2.0"
] | 20 | 2015-01-29T02:52:26.000Z | 2022-02-25T00:07:11.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models individual setting values used to configure the application."""
import logging
from wtforms import validators
from google.appengine.ext import ndb
_LOG = logging.getLogger('google_password_generator.setting')
class Setting(ndb.Model):
  """Models the setting values used to configure the application.

  Each setting value lives in its own field.  There should be only one
  setting entity, with an id of current_settings.  This also means that the
  setting entity is a root entity, without any parent.
  """

  create_new_password_message = ndb.StringProperty(required=True)
  default_password_length = ndb.IntegerProperty(required=True)
  domain_admin_account = ndb.StringProperty(required=True)
  email_body_for_ios_profile_download_notification = (
      ndb.StringProperty(required=True))
  email_subject_for_ios_profile_download_notification = (
      ndb.StringProperty(required=True))
  enable_ios_profile_download = ndb.StringProperty()
  error_message = ndb.StringProperty(required=True)
  group_with_access_permission = ndb.StringProperty(required=True)
  ios_profile_template_filename = ndb.StringProperty(required=True)
  password_created_message = ndb.StringProperty(required=True)
  private_key_filename = ndb.StringProperty(required=True)
  remove_ambiguous_characters_in_password = ndb.StringProperty()
  service_account = ndb.StringProperty(required=True)
  thank_you_message = ndb.StringProperty(required=True)
  use_digits_in_password = ndb.StringProperty()
  use_punctuation_in_password = ndb.StringProperty()
  use_uppercase_in_password = ndb.StringProperty()

  @staticmethod
  def GetCurrentSettings():
    """Return the singleton current_settings entity (None if never saved)."""
    return Setting.get_by_id('current_settings')

  @staticmethod
  def _CheckboxValue(new_settings, key):
    """Return a checkbox field's submitted value, normalized to 'on'/'off'.

    Checkbox form elements are submitted with a value of 'on' and are left
    out of the post entirely when unchecked, so a missing (or otherwise
    falsy) value maps to 'off'.  Using .get() also avoids the KeyError that
    direct indexing raised when the key was absent from the post.
    See: http://www.w3.org/TR/html401/interact/forms.html#h-17.2.1

    Args:
      new_settings: dict of submitted form values.
      key: the checkbox field name.

    Returns:
      'on' when the box was checked, 'off' otherwise.
    """
    return new_settings.get(key) or 'off'

  @staticmethod
  def UpdateCurrentSettings(new_settings):
    """Update values for all setting entities.

    Args:
      new_settings: a dictionary of the new settings to be updated.

    Returns:
      boolean, true if update has completed successfully.
    """
    current_settings = Setting(id='current_settings')
    current_settings.create_new_password_message = new_settings[
        'create_new_password_message']
    current_settings.default_password_length = int(new_settings[
        'default_password_length'])
    current_settings.domain_admin_account = new_settings[
        'domain_admin_account']
    current_settings.enable_ios_profile_download = Setting._CheckboxValue(
        new_settings, 'enable_ios_profile_download')
    current_settings.email_subject_for_ios_profile_download_notification = (
        new_settings.get('email_subject_for_ios_profile_download_notification'))
    current_settings.email_body_for_ios_profile_download_notification = (
        new_settings.get('email_body_for_ios_profile_download_notification'))
    current_settings.error_message = new_settings['error_message']
    current_settings.group_with_access_permission = new_settings[
        'group_with_access_permission']
    current_settings.ios_profile_template_filename = new_settings[
        'ios_profile_template_filename']
    current_settings.password_created_message = new_settings[
        'password_created_message']
    current_settings.private_key_filename = new_settings[
        'private_key_filename']
    current_settings.remove_ambiguous_characters_in_password = (
        Setting._CheckboxValue(
            new_settings, 'remove_ambiguous_characters_in_password'))
    current_settings.service_account = new_settings['service_account']
    current_settings.thank_you_message = new_settings['thank_you_message']
    current_settings.use_digits_in_password = Setting._CheckboxValue(
        new_settings, 'use_digits_in_password')
    current_settings.use_punctuation_in_password = Setting._CheckboxValue(
        new_settings, 'use_punctuation_in_password')
    current_settings.use_uppercase_in_password = Setting._CheckboxValue(
        new_settings, 'use_uppercase_in_password')
    current_settings.put()
    return True

  @staticmethod
  def GetAdditionalValidators():
    """Get additional wtforms validators that we can use on the server-side."""
    return {
        'default_password_length': {
            'validators': [validators.NumberRange(min=8, max=20)]
        },
        'domain_admin_account': {
            'validators': [validators.Email()]
        },
        'group_with_access_permission': {
            'validators': [validators.Email()]
        },
        'service_account': {
            'validators': [validators.Email()]
        }
    }
| 40.763889 | 80 | 0.759114 |
import logging
from wtforms import validators
from google.appengine.ext import ndb
_LOG = logging.getLogger('google_password_generator.setting')
class Setting(ndb.Model):
create_new_password_message = ndb.StringProperty(required=True)
default_password_length = ndb.IntegerProperty(required=True)
domain_admin_account = ndb.StringProperty(required=True)
email_body_for_ios_profile_download_notification = (
ndb.StringProperty(required=True))
email_subject_for_ios_profile_download_notification = (
ndb.StringProperty(required=True))
enable_ios_profile_download = ndb.StringProperty()
error_message = ndb.StringProperty(required=True)
group_with_access_permission = ndb.StringProperty(required=True)
ios_profile_template_filename = ndb.StringProperty(required=True)
password_created_message = ndb.StringProperty(required=True)
private_key_filename = ndb.StringProperty(required=True)
remove_ambiguous_characters_in_password = ndb.StringProperty()
service_account = ndb.StringProperty(required=True)
thank_you_message = ndb.StringProperty(required=True)
use_digits_in_password = ndb.StringProperty()
use_punctuation_in_password = ndb.StringProperty()
use_uppercase_in_password = ndb.StringProperty()
@staticmethod
def GetCurrentSettings():
return Setting.get_by_id('current_settings')
@staticmethod
def UpdateCurrentSettings(new_settings):
current_settings = Setting(id='current_settings')
current_settings.create_new_password_message = new_settings[
'create_new_password_message']
current_settings.default_password_length = int(new_settings[
'default_password_length'])
current_settings.domain_admin_account = new_settings[
'domain_admin_account']
if not new_settings['enable_ios_profile_download']:
new_settings['enable_ios_profile_download'] = 'off'
current_settings.enable_ios_profile_download = new_settings.get(
'enable_ios_profile_download')
current_settings.email_subject_for_ios_profile_download_notification = (
new_settings.get('email_subject_for_ios_profile_download_notification'))
current_settings.email_body_for_ios_profile_download_notification = (
new_settings.get('email_body_for_ios_profile_download_notification'))
current_settings.error_message = new_settings['error_message']
current_settings.group_with_access_permission = new_settings[
'group_with_access_permission']
current_settings.ios_profile_template_filename = new_settings[
'ios_profile_template_filename']
current_settings.password_created_message = new_settings[
'password_created_message']
current_settings.private_key_filename = new_settings[
'private_key_filename']
if not new_settings['remove_ambiguous_characters_in_password']:
new_settings['remove_ambiguous_characters_in_password'] = 'off'
current_settings.remove_ambiguous_characters_in_password = new_settings.get(
'remove_ambiguous_characters_in_password')
current_settings.service_account = new_settings['service_account']
current_settings.thank_you_message = new_settings['thank_you_message']
if not new_settings['use_digits_in_password']:
new_settings['use_digits_in_password'] = 'off'
current_settings.use_digits_in_password = new_settings.get(
'use_digits_in_password')
if not new_settings['use_punctuation_in_password']:
new_settings['use_punctuation_in_password'] = 'off'
current_settings.use_punctuation_in_password = new_settings.get(
'use_punctuation_in_password')
if not new_settings['use_uppercase_in_password']:
new_settings['use_uppercase_in_password'] = 'off'
current_settings.use_uppercase_in_password = new_settings.get(
'use_uppercase_in_password')
current_settings.put()
return True
@staticmethod
def GetAdditionalValidators():
return {
'default_password_length': {
'validators': [validators.NumberRange(min=8, max=20)]
},
'domain_admin_account': {
'validators': [validators.Email()]
},
'group_with_access_permission': {
'validators': [validators.Email()]
},
'service_account': {
'validators': [validators.Email()]
}
}
| true | true |
f7f601f6566afc46b1aebb5186b9ee2fbfee1f0d | 2,614 | py | Python | gears/jobs.py | xordspar0/gearhead-caramel | 1dc64fc3324d81bcb905a1b6e49f78ea48cdc150 | [
"Apache-2.0"
] | null | null | null | gears/jobs.py | xordspar0/gearhead-caramel | 1dc64fc3324d81bcb905a1b6e49f78ea48cdc150 | [
"Apache-2.0"
] | null | null | null | gears/jobs.py | xordspar0/gearhead-caramel | 1dc64fc3324d81bcb905a1b6e49f78ea48cdc150 | [
"Apache-2.0"
] | null | null | null | import glob
import json
import pbge
from . import stats
import random
SINGLETON_TYPES = dict()
ALL_JOBS = dict()
class Job(object):
    """A character occupation: the skills and tags that define an NPC's role.

    Constructing a Job registers it in the module-level ALL_JOBS registry,
    keyed by name.
    """
    def __init__(self,name="Job",skills=(),tags=(),always_combatant=False,skill_modifiers=None,local_requirements=()):
        """Build a job from symbol-name strings.

        Args:
            name: display name; also the ALL_JOBS registry key.
            skills: symbol names, resolved through SINGLETON_TYPES.
            tags: symbol names, resolved through SINGLETON_TYPES.
            always_combatant: grant combat skills even to non-combatant pcs.
            skill_modifiers: {skill symbol name: bonus} applied in scale_skills.
            local_requirements: symbols a locale must provide to offer this job.
        """
        self.name = name
        # The three symbol collections share the same lookup-with-warning
        # logic, factored into _resolve_symbols.
        self.skills = self._resolve_symbols(skills)
        self.tags = self._resolve_symbols(tags)
        self.always_combatant = always_combatant
        self.skill_modifiers = dict()
        if skill_modifiers:
            # Unlike the sets above, an unknown modifier key is a hard error
            # (direct SINGLETON_TYPES indexing), as in the original data flow.
            for sk,mod in list(skill_modifiers.items()):
                self.skill_modifiers[SINGLETON_TYPES[sk]] = mod
        self.local_requirements = self._resolve_symbols(local_requirements)
        ALL_JOBS[name] = self
    def _resolve_symbols(self, symbols):
        """Map symbol-name strings to singleton objects, warning on unknowns."""
        found = set()
        for sym in symbols:
            if sym in SINGLETON_TYPES:
                found.add(SINGLETON_TYPES[sym])
            else:
                print("Unidentified symbol: {} in {}".format(sym,self.name))
        return found
    def scale_skills(self,pc,rank):
        """Assign this job's skills to pc, scaled by renown rank.

        Combatants (or always_combatant jobs) also get the fundamental combat
        skills at full strength and the extra combat skills at one third.
        """
        base_skill_rank = max((rank + 20) // 10, 1)
        if pc.combatant or self.always_combatant:
            for sk in stats.FUNDAMENTAL_COMBATANT_SKILLS:
                pc.statline[sk] = max(base_skill_rank + self.skill_modifiers.get(sk,0),1)
            for sk in stats.EXTRA_COMBAT_SKILLS:
                pc.statline[sk] = max(base_skill_rank//3 + self.skill_modifiers.get(sk, 0), 1)
        for sk in self.skills:
            pc.statline[sk] = max(base_skill_rank + self.skill_modifiers.get(sk, 0),1)
        pc.renown = rank
    def __str__(self):
        return self.name
def choose_random_job(needed_tags=(), local_tags=()):
    """Pick a random job whose tags cover needed_tags and whose local
    requirements are all satisfied by local_tags; any job as a fallback."""
    available = set(local_tags)
    matching = [
        j for j in ALL_JOBS.values()
        if j.tags.issuperset(needed_tags)
        and available.issuperset(j.local_requirements)
    ]
    if matching:
        return random.choice(matching)
    # Nothing fits: fall back to the whole registry.
    return random.choice(list(ALL_JOBS.values()))
def init_jobs():
    """Load every jobs_*.json data file and instantiate its Job entries."""
    raw_jobs = []
    for fname in glob.glob(pbge.util.data_dir("jobs_*.json")):
        with open(fname, 'rt') as fp:
            entries = json.load(fp)
        if entries:
            raw_jobs.extend(entries)
    # Each JSON object is a kwargs dict for the Job constructor; Job
    # registers itself in ALL_JOBS as a side effect.
    for params in raw_jobs:
        Job(**params)
| 36.816901 | 139 | 0.60329 | import glob
import json
import pbge
from . import stats
import random
SINGLETON_TYPES = dict()
ALL_JOBS = dict()
class Job(object):
def __init__(self,name="Job",skills=(),tags=(),always_combatant=False,skill_modifiers=None,local_requirements=()):
self.name = name
self.skills = set()
for sk in skills:
if sk in SINGLETON_TYPES:
self.skills.add(SINGLETON_TYPES[sk])
else:
print("Unidentified symbol: {} in {}".format(sk,self.name))
self.tags = set()
for t in tags:
if t in SINGLETON_TYPES:
self.tags.add(SINGLETON_TYPES[t])
else:
print("Unidentified symbol: {} in {}".format(t,self.name))
self.always_combatant = always_combatant
self.skill_modifiers = dict()
if skill_modifiers:
for sk,mod in list(skill_modifiers.items()):
self.skill_modifiers[SINGLETON_TYPES[sk]] = mod
self.local_requirements = set()
for t in local_requirements:
if t in SINGLETON_TYPES:
self.local_requirements.add(SINGLETON_TYPES[t])
else:
print("Unidentified symbol: {} in {}".format(t,self.name))
ALL_JOBS[name] = self
def scale_skills(self,pc,rank):
base_skill_rank = max((rank + 20) // 10, 1)
if pc.combatant or self.always_combatant:
for sk in stats.FUNDAMENTAL_COMBATANT_SKILLS:
pc.statline[sk] = max(base_skill_rank + self.skill_modifiers.get(sk,0),1)
for sk in stats.EXTRA_COMBAT_SKILLS:
pc.statline[sk] = max(base_skill_rank//3 + self.skill_modifiers.get(sk, 0), 1)
for sk in self.skills:
pc.statline[sk] = max(base_skill_rank + self.skill_modifiers.get(sk, 0),1)
pc.renown = rank
def __str__(self):
return self.name
def choose_random_job(needed_tags=(),local_tags=()):
lt_set = set(local_tags)
candidates = [job for job in list(ALL_JOBS.values()) if job.tags.issuperset(needed_tags) and lt_set.issuperset(job.local_requirements)]
if candidates:
return random.choice(candidates)
else:
return random.choice(list(ALL_JOBS.values()))
def init_jobs():
protojobs = list()
myfiles = glob.glob(pbge.util.data_dir( "jobs_*.json"))
for f in myfiles:
with open(f, 'rt') as fp:
mylist = json.load(fp)
if mylist:
protojobs += mylist
for j in protojobs:
Job(**j)
| true | true |
f7f60283610af628ee4fd8c21818314d6b973916 | 6,836 | py | Python | manu_dede/db/db.py | jcd717/manu-dede | 8d6418a4d9330efccb9b5a2afb3e4262cd6031ef | [
"Unlicense"
] | null | null | null | manu_dede/db/db.py | jcd717/manu-dede | 8d6418a4d9330efccb9b5a2afb3e4262cd6031ef | [
"Unlicense"
] | null | null | null | manu_dede/db/db.py | jcd717/manu-dede | 8d6418a4d9330efccb9b5a2afb3e4262cd6031ef | [
"Unlicense"
] | null | null | null | import os
import requests
import sqlite3,csv
import click
from flask import current_app
#from flask import cli (inutile ?)
from flask.cli import with_appcontext
'''
Mode BOURRIN
Je fais une connection globale pour l'application, contrairement au tuto qui fait une connexion dans g (donc si j'ai bien compris par request)
donc problèmes potentiels de concurrence -> lenteur et/ou écritures concurrentes
'''
def get_db():
    """Return the application-wide sqlite connection, creating it lazily.

    One global connection is shared for the whole app (stored in
    current_app.config['db']), not one per request.
    """
    if current_app.config.get('db') is None:
        conn = sqlite3.connect(
            current_app.DB_FILE,
            detect_types=sqlite3.PARSE_DECLTYPES,
            # Shared across threads: avoids sqlite3.ProgrammingError about
            # cross-thread use, but provides no concurrency protection.
            check_same_thread=False,
        )
        conn.row_factory = sqlite3.Row
        current_app.config['db'] = conn
    return current_app.config['db']
# def close_db(e=None):
# db = current_app.config.get('db')
# if db is not None:
# db.close()
# current_app.logger.debug("Fin d'appel à close_db()")
def init_db():
    """Delete the database file and every downloaded file, then rebuild the schema."""
    # Only swallow filesystem errors (file/dir missing, permissions): the
    # old bare `except:` also hid programming errors and KeyboardInterrupt.
    try:
        os.remove(current_app.DB_FILE)
    except OSError:
        pass
    try:
        for directory in (current_app.staticDownloadPath, current_app.downloadsPath):
            for entry in os.listdir(directory):
                os.remove(directory + '/' + entry)
    except OSError:
        pass
    db = get_db()
    with current_app.open_resource('db/schema.sql') as f:
        db.executescript(f.read().decode('utf8'))
def insertDB(url, fileName, save=True):
    """Insert a (url, fileName) row into the downloads table.

    When save is True, the CSV backup (local + external) is refreshed too.
    """
    conn = get_db()
    conn.execute(
        "insert into downloads(url,fileName) values(?,?) ",
        (url, fileName),
    )
    conn.commit()
    if save:
        saveDB()
def deleteDB(fileName, save=True):
    """Delete the downloads row matching fileName.

    When save is True, the CSV backup (local + external) is refreshed too.
    """
    conn = get_db()
    conn.execute(
        "delete from downloads where fileName=? ",
        (fileName,),
    )
    conn.commit()
    if save:
        saveDB()
colonnesSauvegardees=['url','fileName']
urlSauvegardeHook='/save-downloads.php'
def saveDB():
    """Serialize the downloads table to CSV, write it locally and mirror it externally.

    Columns are taken from colonnesSauvegardees; the file is written to the
    instance path and POSTed to the external backup endpoint.
    """
    db = get_db()
    lignes = db.execute(
        'SELECT * '
        'FROM downloads '
        'ORDER BY id'
    ).fetchall()
    # Build the CSV with the csv module so embedded quotes/commas in values
    # are escaped correctly -- the old hand-built '"{}"' formatting produced
    # a corrupt file for such values.
    buffer = io.StringIO()
    writer = csv.writer(buffer, quoting=csv.QUOTE_ALL, lineterminator='\n')
    writer.writerow(colonnesSauvegardees)
    for ligne in lignes:
        writer.writerow([ligne[colonne] for colonne in colonnesSauvegardees])
    txt = buffer.getvalue()
    fileSaveDownloads = current_app.instance_path + '/' + current_app.fileSaveDownloads
    with open(fileSaveDownloads, 'w') as f:
        f.write(txt)
    # Mirror the CSV content to the external backup endpoint.
    url = current_app.urlSaveDownloads + urlSauvegardeHook
    content = bytes(txt, 'utf8')
    datas = {'file': current_app.fileSaveDownloads, 'content': content}
    requests.post(url, datas)
def getFileDownlodsExternal():
    """Fetch the externally saved downloads CSV and return it as text.

    Reads the backup previously mirrored by saveDB() from the external
    urlSaveDownloads host.  (Function name typo kept: callers depend on it.)
    """
    url=current_app.urlSaveDownloads+'/'+current_app.fileSaveDownloads
    res=requests.get(url).content.decode('utf8')
    return res
def getCsvFileSaveDownloads(txt=None):
    """Parse the downloads backup CSV into a list of dict rows.

    Args:
        txt: CSV text (normally the external backup).  If None, the local
            backup file under the instance path is read instead.

    Returns:
        A list of dicts (one per CSV data row), or None when the local
        backup file cannot be read.
    """
    if txt is None:
        try:
            with open(current_app.instance_path + '/' + current_app.fileSaveDownloads) as f:
                return list(csv.DictReader(f))
        except OSError:
            # No local backup yet (or unreadable): nothing to restore.
            # Narrowed from a bare `except:` that hid real bugs.
            return None
    # Parse the externally fetched text instead.
    return list(csv.DictReader(txt.splitlines()))
@click.command('init-db')
@with_appcontext
def init_db_command():
    # Exposed as `flask init-db`.  The docstring below is the click CLI help
    # text (user-facing), so it is kept verbatim in French.
    """Supprime la base et les fichiers, puis crée la structure de la base"""
    init_db()
    click.echo('Remise à zéro des fichiers et de la base.')
# "register" des fonctions
def init_app(app):
    """Register this module's CLI commands on the Flask application."""
    # app.teardown_appcontext(close_db)
    app.cli.add_command(init_db_command)
    app.cli.add_command(showDB)
    app.cli.add_command(reloadDownloads)
def getTableContentString(resultSet, colonnes):
    """Render rows as an ASCII table (+---+ / | ... |) plus a row count.

    Args:
        resultSet: iterable of mapping-like rows (sqlite3.Row or dict).
        colonnes: ordered column names to display.

    Returns:
        The table as a single multi-line string (no trailing newline).
    """
    # Column width = widest cell in the column, header included.
    widths = [len(header) for header in colonnes]
    for row in resultSet:
        for idx, col in enumerate(colonnes):
            widths[idx] = max(widths[idx], len(str(row[col])))
    row_format = '|' + ''.join(' %-{}s |'.format(w) for w in widths)
    separator = '+' + ''.join('-' * (w + 2) + '+' for w in widths)
    lines = [separator, row_format % tuple(colonnes), separator]
    for row in resultSet:
        lines.append(row_format % tuple(row[col] for col in colonnes))
    lines.append(separator)
    count = len(resultSet)
    lines.append("{} ligne{}".format(count, '' if count <= 1 else 's'))
    return '\n'.join(lines)
@click.command('show-db')
@with_appcontext
def showDB():
    """Affiche le contenu de la base"""
    # (Docstring kept verbatim: it is the click CLI help text.)
    # Dump every row of the downloads table to stdout as an ASCII table.
    rows = get_db().execute(
        'SELECT * '
        'FROM downloads'
    ).fetchall()
    click.echo(getTableContentString(rows, ['url', 'fileName']))
from ..download import telecharger
@click.command('reload-downloads')
@with_appcontext
def reloadDownloads():
    '''
    Vérifie la cohérence des fichiers dans les "downloads", de la base
    et recharge ceux qui manquent en fonction de la sauvegarde externe
    '''
    # (Docstring kept verbatim: click shows it as the CLI help text.)
    # Reconciles three stores -- the static download dir, the instance
    # download dir and the sqlite table -- then re-downloads anything listed
    # in the external backup that is missing locally.
    filesStatic=os.listdir(current_app.staticDownloadPath)
    filesInstance=os.listdir(current_app.downloadsPath)
    sauvegardeExterne=getCsvFileSaveDownloads(getFileDownlodsExternal())
    db=get_db()
    # Drop static entries whose backing file no longer exists in instance.
    for f in filesStatic:
        if f not in filesInstance:
            current_app.logger.info(f'Supression de "{f}" dans static')
            os.remove(current_app.staticDownloadPath+'/'+f)
    # Drop instance files that are not exposed in static.
    for f in filesInstance:
        if f not in filesStatic:
            current_app.logger.info(f'Supression de "{f}" dans instance')
            os.remove(current_app.downloadsPath+'/'+f)
    # Remove database rows whose file is gone from static.
    filesInstance=os.listdir(current_app.downloadsPath)
    tout = db.execute(
        'SELECT * '
        'FROM downloads '
    ).fetchall()
    for ligne in tout:
        if ligne['fileName'] not in filesStatic:
            # save=False: the external backup is the reference during this
            # reconciliation, do not overwrite it row by row.
            deleteDB(ligne['fileName'],save=False)
    # Read the external backup and re-download whatever is not in the DB.
    for f in sauvegardeExterne:
        cherche = db.execute(
            'SELECT * '
            'FROM downloads '
            'WHERE fileName=?',(f['fileName'],)
        ).fetchall()
        if len(cherche)!=1:
            url=f['fileName']
            current_app.logger.info(f'Téléchargement de {url}')
            error,fileName= telecharger(f['url'],gererSession=False)
            if error is None:
                insertDB(f['url'],f['fileName'])
| 30.114537 | 190 | 0.633996 | import os
import requests
import sqlite3,csv
import click
from flask import current_app
from flask.cli import with_appcontext
def get_db():
if current_app.config.get('db') is None:
current_app.config['db'] = sqlite3.connect(
current_app.DB_FILE,
detect_types=sqlite3.PARSE_DECLTYPES,
check_same_thread=False # pour éviter l'erreur "sqlite3.ProgrammingError: SQLite objects created in a thread can only be used in that same thread" mais mystère sur la concurrence
)
current_app.config['db'].row_factory = sqlite3.Row
return current_app.config['db']
def init_db():
# suppression des fichiers
try:
os.remove(current_app.DB_FILE)
except: pass
try:
for dir in [current_app.staticDownloadPath,current_app.downloadsPath]:
files=os.listdir(dir)
for f in files:
os.remove(dir+'/'+f)
except: pass
db = get_db()
with current_app.open_resource('db/schema.sql') as f:
db.executescript(f.read().decode('utf8'))
def insertDB(url,fileName,save=True):
db=get_db()
db.execute(
"insert into downloads(url,fileName) "
"values(?,?) ",(url,fileName)
)
db.commit()
if save:
saveDB()
def deleteDB(fileName,save=True):
db=get_db()
db.execute(
"delete from downloads "
"where fileName=? ",(fileName,)
)
db.commit()
if save:
saveDB()
colonnesSauvegardees=['url','fileName']
urlSauvegardeHook='/save-downloads.php'
def saveDB():
db=get_db()
lignes = db.execute(
'SELECT * '
'FROM downloads '
'ORDER BY id'
).fetchall()
txt=''
for colonne in colonnesSauvegardees:
if colonne!=colonnesSauvegardees[0]:
txt+=','
txt+= f'"{colonne}"'
txt+='\n'
for ligne in lignes:
for colonne in colonnesSauvegardees:
if colonne!=colonnesSauvegardees[0]:
txt+=','
txt+= f'"{ligne[colonne]}"'
txt+='\n'
fileSaveDownloads=current_app.instance_path+'/'+current_app.fileSaveDownloads
with open(fileSaveDownloads,'w') as f:
f.write(txt)
# sauvegarde externe du fichier fileSaveDownloads
url=current_app.urlSaveDownloads+urlSauvegardeHook
content=bytes(txt,'utf8')
datas={'file':current_app.fileSaveDownloads,'content':content}
p=requests.post(url,datas)
def getFileDownlodsExternal():
url=current_app.urlSaveDownloads+'/'+current_app.fileSaveDownloads
res=requests.get(url).content.decode('utf8')
return res
def getCsvFileSaveDownloads(txt=None):
if txt is None: # sauvegarde texte (txt) dans instance
try:
with open(current_app.instance_path+'/'+current_app.fileSaveDownloads) as f:
return list(csv.DictReader(f))
except:
return None
else: # lire le txt issu normalement de la sauvegarde externe
return list(csv.DictReader(txt.splitlines()))
@click.command('init-db')
@with_appcontext
def init_db_command():
init_db()
click.echo('Remise à zéro des fichiers et de la base.')
# "register" des fonctions
def init_app(app):
# app.teardown_appcontext(close_db)
app.cli.add_command(init_db_command)
app.cli.add_command(showDB)
app.cli.add_command(reloadDownloads)
def getTableContentString(resultSet,colonnes):
widths = []
tavnit = '|'
separator = '+'
for entete in colonnes:
widths.append(len(entete))
for ligne in resultSet:
index=0
for colonne in colonnes:
widths[index]=max(widths[index],len(str(ligne[colonne])))
index+=1
for w in widths:
tavnit += " %-"+"%ss |" % (w,)
separator += '-'*w + '--+'
res=separator+'\n'
res+= tavnit % tuple(colonnes)+'\n'
res+= separator+'\n'
for ligne in resultSet:
tab=[]
for colonne in colonnes:
tab.append(ligne[colonne])
res+= tavnit % tuple(tab)+'\n'
res+= separator+'\n'
res+= f"{len(resultSet)} ligne{'' if len(resultSet)<=1 else 's'}"
return res
@click.command('show-db')
@with_appcontext
def showDB():
db=get_db()
tout = db.execute(
'SELECT * '
'FROM downloads'
).fetchall()
colonnes=['url','fileName']
click.echo(getTableContentString(tout,colonnes))
from ..download import telecharger
@click.command('reload-downloads')
@with_appcontext
def reloadDownloads():
    """CLI command `flask reload-downloads`: reconcile the static folder,
    the instance folder, the database and the external backup so they all
    describe the same set of downloaded files.
    """
    filesStatic=os.listdir(current_app.staticDownloadPath)
    filesInstance=os.listdir(current_app.downloadsPath)
    sauvegardeExterne=getCsvFileSaveDownloads(getFileDownlodsExternal())
    db=get_db()
    # Scan static and remove broken links (entries with no instance file).
    for f in filesStatic:
        if f not in filesInstance:
            current_app.logger.info(f'Supression de "{f}" dans static')
            os.remove(current_app.staticDownloadPath+'/'+f)
    # Scan instance and remove files that are not exposed in static.
    for f in filesInstance:
        if f not in filesStatic:
            current_app.logger.info(f'Supression de "{f}" dans instance')
            os.remove(current_app.downloadsPath+'/'+f)
    # Bring the database back in sync with the files on disk.
    # NOTE(review): filesStatic is not refreshed after the deletions above,
    # so rows for files just removed from static survive this pass --
    # confirm whether that is intended.
    filesInstance=os.listdir(current_app.downloadsPath)
    tout = db.execute(
        'SELECT * '
        'FROM downloads '
    ).fetchall()
    for ligne in tout:
        if ligne['fileName'] not in filesStatic:
            deleteDB(ligne['fileName'],save=False)
    # Read the external backup and re-download anything missing locally.
    for f in sauvegardeExterne:
        cherche = db.execute(
            'SELECT * '
            'FROM downloads '
            'WHERE fileName=?',(f['fileName'],)
        ).fetchall()
        if len(cherche)!=1:
            # NOTE(review): the log shows f['fileName'] while the download
            # below uses f['url'] -- `url=f['url']` was probably intended.
            url=f['fileName']
            current_app.logger.info(f'Téléchargement de {url}')
            error,fileName= telecharger(f['url'],gererSession=False)
            if error is None:
                insertDB(f['url'],f['fileName'])
| true | true |
f7f602edd39ee04a3b98e7ae3219cbf20882da94 | 1,865 | py | Python | docuploader/credentials.py | renovate-bot/docuploader | 64cb69e2a2f50f96a0b992ccbd78336eddc3c420 | [
"Apache-2.0"
] | 7 | 2019-06-18T12:07:31.000Z | 2021-05-31T05:48:57.000Z | docuploader/credentials.py | renovate-bot/docuploader | 64cb69e2a2f50f96a0b992ccbd78336eddc3c420 | [
"Apache-2.0"
] | 56 | 2019-03-07T23:15:58.000Z | 2022-01-19T14:34:30.000Z | docuploader/credentials.py | renovate-bot/docuploader | 64cb69e2a2f50f96a0b992ccbd78336eddc3c420 | [
"Apache-2.0"
] | 4 | 2019-06-18T07:31:07.000Z | 2021-05-04T05:11:57.000Z | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper for finding credentials.
When running this tool, there are two possible scenarios:
1. The tool is running as part of CI to publish the docs. In that case,
credentials should be at a well-known location or use ADC.
2. The tool is running locally as part of development. In that case,
the credentials should be passed into the command-line or via ADC.
(2) takes precedence. Command-line params also override local files.
"""
import os
from typing import List, Optional
from google.oauth2 import service_account
import google.auth
# Candidate credential locations used in CI (the Kokoro keystore).
_WELL_KNOWN_LOCATIONS: List[str] = []
_keystore_dir = os.environ.get("KOKORO_KEYSTORE_DIR")
if _keystore_dir is not None:
    _WELL_KNOWN_LOCATIONS.append(
        os.path.join(_keystore_dir, "73713_docuploader_service_account")
    )
def find_path():
    """Return the first existing well-known credentials file, or ""."""
    return next(
        (candidate for candidate in _WELL_KNOWN_LOCATIONS
         if os.path.exists(candidate)),
        "",
    )
def find(credentials_file: Optional[str] = ""):
    """Locate credentials and return a (credentials, project_id) pair.

    Resolution order: an explicitly supplied file, then a well-known CI
    location, then Application Default Credentials.
    """
    path = credentials_file or find_path()
    if path == "":
        # Nothing explicit and no well-known file: fall back to ADC.
        return google.auth.default()
    credentials = service_account.Credentials.from_service_account_file(path)
    return credentials, credentials.project_id
| 30.57377 | 82 | 0.724397 |
import os
from typing import List, Optional
from google.oauth2 import service_account
import google.auth
_WELL_KNOWN_LOCATIONS: List[str] = []
if "KOKORO_KEYSTORE_DIR" in os.environ:
_WELL_KNOWN_LOCATIONS.append(
os.path.join(
os.environ["KOKORO_KEYSTORE_DIR"], "73713_docuploader_service_account"
)
)
def find_path():
for location in _WELL_KNOWN_LOCATIONS:
if os.path.exists(location):
return location
return ""
def find(credentials_file: Optional[str] = ""):
if not credentials_file:
credentials_file = find_path()
if credentials_file != "":
credentials = service_account.Credentials.from_service_account_file(
credentials_file
)
return credentials, credentials.project_id
else:
return google.auth.default()
| true | true |
f7f603a9304c71ceb36c928b87382cc026476665 | 5,826 | py | Python | ethereum/transactions.py | c-castillo/pyethereum | 6e0e9a937c16d67faed16c5fce9a21bdef5a58ee | [
"MIT"
] | null | null | null | ethereum/transactions.py | c-castillo/pyethereum | 6e0e9a937c16d67faed16c5fce9a21bdef5a58ee | [
"MIT"
] | null | null | null | ethereum/transactions.py | c-castillo/pyethereum | 6e0e9a937c16d67faed16c5fce9a21bdef5a58ee | [
"MIT"
] | null | null | null | from bitcoin import encode_pubkey, N, P
try:
from c_secp256k1 import ecdsa_raw_sign, ecdsa_raw_recover
except ImportError:
from bitcoin import ecdsa_raw_sign, ecdsa_raw_recover
import rlp
from rlp.sedes import big_endian_int, binary
from rlp.utils import decode_hex, encode_hex
from ethereum import bloom
from ethereum import utils
from ethereum.processblock import mk_contract_address, intrinsic_gas_used
from ethereum.utils import TT256
from ethereum.exceptions import InvalidTransaction
from ethereum.slogging import get_logger
log = get_logger('eth.chain.tx')
# in the yellow paper it is specified that s should be smaller than secpk1n (eq.205)
secpk1n = 115792089237316195423570985008687907852837564279074904382605163141518161494337
class Transaction(rlp.Serializable):
    """
    A transaction is stored as:
    [nonce, gasprice, startgas, to, value, data, v, r, s]
    nonce is the number of transactions already sent by that account, encoded
    in binary form (eg. 0 -> '', 7 -> '\x07', 1000 -> '\x03\xd8').
    (v,r,s) is the raw Electrum-style signature of the transaction without the
    signature made with the private key corresponding to the sending account,
    with 0 <= v <= 3. From an Electrum-style signature (65 bytes) it is
    possible to extract the public key, and thereby the address, directly.
    A valid transaction is one where:
    (i) the signature is well-formed (ie. 0 <= v <= 3, 0 <= r < P, 0 <= s < N,
        0 <= r < P - N if v >= 2), and
    (ii) the sending account has enough funds to pay the fee and the value.
    """
    # RLP serialization schema; field order determines the tx hash.
    fields = [
        ('nonce', big_endian_int),
        ('gasprice', big_endian_int),
        ('startgas', big_endian_int),
        ('to', utils.address),
        ('value', big_endian_int),
        ('data', binary),
        ('v', big_endian_int),
        ('r', big_endian_int),
        ('s', big_endian_int),
    ]
    # Cached sender address, recovered lazily from the signature.
    _sender = None
    def __init__(self, nonce, gasprice, startgas, to, value, data, v=0, r=0, s=0):
        # An empty `to` denotes contract creation; otherwise a 20-byte address.
        to = utils.normalize_address(to, allow_blank=True)
        assert len(to) == 20 or len(to) == 0
        super(Transaction, self).__init__(nonce, gasprice, startgas, to, value, data, v, r, s)
        self.logs = []
        # Reject numeric fields that do not fit in 256 bits.
        if self.gasprice >= TT256 or self.startgas >= TT256 or \
                self.value >= TT256 or self.nonce >= TT256:
            raise InvalidTransaction("Values way too high!")
        if self.startgas < intrinsic_gas_used(self):
            raise InvalidTransaction("Startgas too low")
        log.debug('deserialized tx', tx=encode_hex(self.hash)[:8])
    @property
    def sender(self):
        """Sender address (20 bytes), recovered from (v, r, s) and cached.

        Raises InvalidTransaction on a malformed signature; an unsigned
        transaction (v == 0) yields 0.
        """
        if not self._sender:
            # Determine sender
            if self.v:
                # NOTE(review): v is checked against the 27/28 encoding here,
                # while the class docstring talks about the raw 0..3 recovery
                # id -- the docstring appears out of date.
                if self.r >= N or self.s >= N or self.v < 27 or self.v > 28 \
                        or self.r == 0 or self.s == 0:
                    raise InvalidTransaction("Invalid signature values!")
                log.debug('recovering sender')
                # Recover the public key from the hash of the unsigned tx.
                rlpdata = rlp.encode(self, UnsignedTransaction)
                rawhash = utils.sha3(rlpdata)
                pub = ecdsa_raw_recover(rawhash, (self.v, self.r, self.s))
                if pub is False:
                    raise InvalidTransaction("Invalid signature values (x^3+7 is non-residue)")
                if pub == (0, 0):
                    raise InvalidTransaction("Invalid signature (zero privkey cannot sign)")
                pub = encode_pubkey(pub, 'bin')
                # Address = last 20 bytes of sha3(pubkey without its first byte).
                self._sender = utils.sha3(pub[1:])[-20:]
                assert self.sender == self._sender
            else:
                self._sender = 0
        return self._sender
    @sender.setter
    def sender(self, value):
        # Allow callers (e.g. sign()) to pre-populate the cached sender.
        self._sender = value
    def sign(self, key):
        """Sign this transaction with a private key.
        A potentially already existing signature would be overridden.
        """
        if key in (0, '', '\x00' * 32):
            raise InvalidTransaction("Zero privkey cannot sign")
        rawhash = utils.sha3(rlp.encode(self, UnsignedTransaction))
        self.v, self.r, self.s = ecdsa_raw_sign(rawhash, key)
        self.sender = utils.privtoaddr(key)
        return self
    @property
    def hash(self):
        # sha3 of the RLP encoding of the full (signed) transaction.
        return utils.sha3(rlp.encode(self))
    def log_bloom(self):
        "returns int"
        bloomables = [x.bloomables() for x in self.logs]
        return bloom.bloom_from_list(utils.flatten(bloomables))
    def log_bloom_b64(self):
        # Base64 form of log_bloom().
        return bloom.b64(self.log_bloom())
    def to_dict(self):
        # TODO: previous version used printers
        d = {}
        for name, _ in self.__class__.fields:
            d[name] = getattr(self, name)
        d['sender'] = self.sender
        d['hash'] = encode_hex(self.hash)
        return d
    def log_dict(self):
        # to_dict() with sender/to hex-encoded, suitable for logging.
        d = self.to_dict()
        d['sender'] = encode_hex(d['sender'] or '')
        d['to'] = encode_hex(d['to'])
        return d
    @property
    def creates(self):
        "returns the address of a contract created by this tx"
        if self.to in (b'', '\0'*20):
            return mk_contract_address(self.sender, self.nonce)
    def __eq__(self, other):
        # Transactions are equal iff their hashes are equal.
        return isinstance(other, self.__class__) and self.hash == other.hash
    def __hash__(self):
        return utils.big_endian_to_int(self.hash)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __repr__(self):
        return '<Transaction(%s)>' % encode_hex(self.hash)[:4]
    def __structlog__(self):
        return encode_hex(self.hash)
UnsignedTransaction = Transaction.exclude(['v', 'r', 's'])
def contract(nonce, gasprice, startgas, endowment, code, v=0, r=0, s=0):
    """A contract is a special transaction without the `to` argument."""
    # Contract creation is encoded as a transaction with an empty `to`.
    return Transaction(nonce, gasprice, startgas, '', endowment, code, v, r, s)
| 35.096386 | 95 | 0.618778 | from bitcoin import encode_pubkey, N, P
try:
from c_secp256k1 import ecdsa_raw_sign, ecdsa_raw_recover
except ImportError:
from bitcoin import ecdsa_raw_sign, ecdsa_raw_recover
import rlp
from rlp.sedes import big_endian_int, binary
from rlp.utils import decode_hex, encode_hex
from ethereum import bloom
from ethereum import utils
from ethereum.processblock import mk_contract_address, intrinsic_gas_used
from ethereum.utils import TT256
from ethereum.exceptions import InvalidTransaction
from ethereum.slogging import get_logger
log = get_logger('eth.chain.tx')
secpk1n = 115792089237316195423570985008687907852837564279074904382605163141518161494337
class Transaction(rlp.Serializable):
fields = [
('nonce', big_endian_int),
('gasprice', big_endian_int),
('startgas', big_endian_int),
('to', utils.address),
('value', big_endian_int),
('data', binary),
('v', big_endian_int),
('r', big_endian_int),
('s', big_endian_int),
]
_sender = None
def __init__(self, nonce, gasprice, startgas, to, value, data, v=0, r=0, s=0):
to = utils.normalize_address(to, allow_blank=True)
assert len(to) == 20 or len(to) == 0
super(Transaction, self).__init__(nonce, gasprice, startgas, to, value, data, v, r, s)
self.logs = []
if self.gasprice >= TT256 or self.startgas >= TT256 or \
self.value >= TT256 or self.nonce >= TT256:
raise InvalidTransaction("Values way too high!")
if self.startgas < intrinsic_gas_used(self):
raise InvalidTransaction("Startgas too low")
log.debug('deserialized tx', tx=encode_hex(self.hash)[:8])
@property
def sender(self):
if not self._sender:
if self.v:
if self.r >= N or self.s >= N or self.v < 27 or self.v > 28 \
or self.r == 0 or self.s == 0:
raise InvalidTransaction("Invalid signature values!")
log.debug('recovering sender')
rlpdata = rlp.encode(self, UnsignedTransaction)
rawhash = utils.sha3(rlpdata)
pub = ecdsa_raw_recover(rawhash, (self.v, self.r, self.s))
if pub is False:
raise InvalidTransaction("Invalid signature values (x^3+7 is non-residue)")
if pub == (0, 0):
raise InvalidTransaction("Invalid signature (zero privkey cannot sign)")
pub = encode_pubkey(pub, 'bin')
self._sender = utils.sha3(pub[1:])[-20:]
assert self.sender == self._sender
else:
self._sender = 0
return self._sender
@sender.setter
def sender(self, value):
self._sender = value
def sign(self, key):
if key in (0, '', '\x00' * 32):
raise InvalidTransaction("Zero privkey cannot sign")
rawhash = utils.sha3(rlp.encode(self, UnsignedTransaction))
self.v, self.r, self.s = ecdsa_raw_sign(rawhash, key)
self.sender = utils.privtoaddr(key)
return self
@property
def hash(self):
return utils.sha3(rlp.encode(self))
def log_bloom(self):
bloomables = [x.bloomables() for x in self.logs]
return bloom.bloom_from_list(utils.flatten(bloomables))
def log_bloom_b64(self):
return bloom.b64(self.log_bloom())
def to_dict(self):
d = {}
for name, _ in self.__class__.fields:
d[name] = getattr(self, name)
d['sender'] = self.sender
d['hash'] = encode_hex(self.hash)
return d
def log_dict(self):
d = self.to_dict()
d['sender'] = encode_hex(d['sender'] or '')
d['to'] = encode_hex(d['to'])
return d
@property
def creates(self):
if self.to in (b'', '\0'*20):
return mk_contract_address(self.sender, self.nonce)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.hash == other.hash
def __hash__(self):
return utils.big_endian_to_int(self.hash)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return '<Transaction(%s)>' % encode_hex(self.hash)[:4]
def __structlog__(self):
return encode_hex(self.hash)
UnsignedTransaction = Transaction.exclude(['v', 'r', 's'])
def contract(nonce, gasprice, startgas, endowment, code, v=0, r=0, s=0):
tx = Transaction(nonce, gasprice, startgas, '', endowment, code, v, r, s)
return tx
| true | true |
f7f604520a4b79ffe1a606bacdcb748abbd4e508 | 1,656 | py | Python | frameworks/helloworld/tests/test_pre_reserved_sidecar.py | elezar/dcos-commons | b7b3aeec1d5b6dc9073ba07000d4e48784143846 | [
"Apache-2.0"
] | 7 | 2017-11-02T05:26:40.000Z | 2020-01-27T19:33:52.000Z | frameworks/helloworld/tests/test_pre_reserved_sidecar.py | elezar/dcos-commons | b7b3aeec1d5b6dc9073ba07000d4e48784143846 | [
"Apache-2.0"
] | 14 | 2017-09-20T22:47:48.000Z | 2020-09-11T19:54:25.000Z | frameworks/helloworld/tests/test_pre_reserved_sidecar.py | AlexRogalskiy/dcos-commons | 85711f05bc94172aabb6837f9ff529721437d20c | [
"Apache-2.0"
] | 9 | 2017-11-14T19:43:07.000Z | 2022-01-06T12:44:49.000Z | import logging
import pytest
import retrying
import sdk_cmd
import sdk_install
import sdk_marathon
import sdk_plan
import sdk_utils
from tests import config
log = logging.getLogger(__name__)
pytestmark = pytest.mark.skipif(
sdk_utils.is_strict_mode() and sdk_utils.dcos_version_less_than('1.11'),
reason="secure hierarchical roles are only supported on 1.11+")
@pytest.fixture(scope='module', autouse=True)
def configure_package(configure_security):
    """Install the package with the pre-reserved-sidecar yaml (1 hello,
    0 world tasks) for the whole module, uninstalling it afterwards."""
    try:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
        # This yaml deploys 1 hello and 0 world tasks.
        sdk_install.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            1,
            additional_options={"service": {"yaml": "pre-reserved-sidecar"}})
        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
@pytest.mark.sanity
@pytest.mark.dcos_min_version('1.10')
def test_deploy():
    # The service is installed by the module fixture; just wait until its
    # deploy plan completes.
    sdk_plan.wait_for_completed_deployment(config.SERVICE_NAME)
@pytest.mark.sanity
@pytest.mark.dcos_min_version('1.10')
def test_sidecar():
    # Run the 'sidecar' plan and verify its structure (see run_plan).
    run_plan('sidecar')
def run_plan(plan_name, params=None):
    """Start `plan_name` on the service, check that it has exactly one
    '<plan>-deploy' phase with a single step, then wait for completion."""
    sdk_plan.start_plan(config.SERVICE_NAME, plan_name, params)
    started_plan = sdk_plan.get_plan(config.SERVICE_NAME, plan_name)
    log.info("sidecar plan: " + str(started_plan))
    phases = started_plan['phases']
    assert len(phases) == 1
    assert phases[0]['name'] == plan_name + '-deploy'
    assert len(phases[0]['steps']) == 1
    sdk_plan.wait_for_completed_plan(config.SERVICE_NAME, plan_name)
| 27.147541 | 100 | 0.716184 | import logging
import pytest
import retrying
import sdk_cmd
import sdk_install
import sdk_marathon
import sdk_plan
import sdk_utils
from tests import config
log = logging.getLogger(__name__)
pytestmark = pytest.mark.skipif(
sdk_utils.is_strict_mode() and sdk_utils.dcos_version_less_than('1.11'),
reason="secure hierarchical roles are only supported on 1.11+")
@pytest.fixture(scope='module', autouse=True)
def configure_package(configure_security):
try:
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
options = {
"service": {
"yaml": "pre-reserved-sidecar"
}
}
sdk_install.install(config.PACKAGE_NAME, config.SERVICE_NAME, 1, additional_options=options)
yield
finally:
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
@pytest.mark.sanity
@pytest.mark.dcos_min_version('1.10')
def test_deploy():
sdk_plan.wait_for_completed_deployment(config.SERVICE_NAME)
@pytest.mark.sanity
@pytest.mark.dcos_min_version('1.10')
def test_sidecar():
run_plan('sidecar')
def run_plan(plan_name, params=None):
sdk_plan.start_plan(config.SERVICE_NAME, plan_name, params)
started_plan = sdk_plan.get_plan(config.SERVICE_NAME, plan_name)
log.info("sidecar plan: " + str(started_plan))
assert(len(started_plan['phases']) == 1)
assert(started_plan['phases'][0]['name'] == plan_name + '-deploy')
assert(len(started_plan['phases'][0]['steps']) == 1)
sdk_plan.wait_for_completed_plan(config.SERVICE_NAME, plan_name)
| true | true |
f7f604b4a41308c3a63999cb5082a40c7e2df948 | 3,153 | py | Python | datasets/data_prefetcher.py | Tarandro/TransTrack | 81c12d0737125052f3eb2773ac47be60144f6ccb | [
"MIT"
] | 466 | 2020-12-31T02:53:51.000Z | 2022-03-28T08:55:35.000Z | datasets/data_prefetcher.py | Tarandro/TransTrack | 81c12d0737125052f3eb2773ac47be60144f6ccb | [
"MIT"
] | 59 | 2021-01-01T06:27:55.000Z | 2022-03-30T10:28:23.000Z | datasets/data_prefetcher.py | Tarandro/TransTrack | 81c12d0737125052f3eb2773ac47be60144f6ccb | [
"MIT"
] | 90 | 2021-01-01T08:56:07.000Z | 2022-03-30T09:38:31.000Z | # ------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
import torch
def to_cuda(samples, targets, device):
    """Move a batch (samples plus per-image target dicts of tensors) to
    `device` with non-blocking copies; returns the moved pair."""
    moved_samples = samples.to(device, non_blocking=True)
    moved_targets = []
    for target in targets:
        moved_targets.append(
            {key: value.to(device, non_blocking=True) for key, value in target.items()})
    return moved_samples, moved_targets
class data_prefetcher():
    """Wraps a DataLoader iterator and, when `prefetch` is on, copies the
    next batch to the device on a side CUDA stream so host-to-device
    transfers overlap with compute on the main stream."""
    def __init__(self, loader, device, prefetch=True):
        self.loader = iter(loader)
        self.prefetch = prefetch
        self.device = device
        if prefetch:
            # Dedicated stream used only for the async copy of the next batch.
            self.stream = torch.cuda.Stream()
            self.preload()
    def preload(self):
        """Fetch the next batch from the loader and start copying it to the
        device on the side stream.  On exhaustion, sets None sentinels."""
        try:
            self.next_samples, self.next_targets = next(self.loader)
        except StopIteration:
            self.next_samples = None
            self.next_targets = None
            return
        # if record_stream() doesn't work, another option is to make sure device inputs are created
        # on the main stream.
        # self.next_input_gpu = torch.empty_like(self.next_input, device='cuda')
        # self.next_target_gpu = torch.empty_like(self.next_target, device='cuda')
        # Need to make sure the memory allocated for next_* is not still in use by the main stream
        # at the time we start copying to next_*:
        # self.stream.wait_stream(torch.cuda.current_stream())
        with torch.cuda.stream(self.stream):
            self.next_samples, self.next_targets = to_cuda(self.next_samples, self.next_targets, self.device)
            # more code for the alternative if record_stream() doesn't work:
            # copy_ will record the use of the pinned source tensor in this side stream.
            # self.next_input_gpu.copy_(self.next_input, non_blocking=True)
            # self.next_target_gpu.copy_(self.next_target, non_blocking=True)
            # self.next_input = self.next_input_gpu
            # self.next_target = self.next_target_gpu
            # With Amp, it isn't necessary to manually convert data to half.
            # if args.fp16:
            #     self.next_input = self.next_input.half()
            # else:
    def next(self):
        """Return the current batch -- (None, None) once exhausted -- and
        kick off prefetching of the following one."""
        if self.prefetch:
            # Make the main stream wait until the side-stream copy is done.
            torch.cuda.current_stream().wait_stream(self.stream)
            samples = self.next_samples
            targets = self.next_targets
            if samples is not None:
                # record_stream keeps the tensor's memory from being reused
                # until the main stream is done with it (it was produced on
                # self.stream).
                samples.record_stream(torch.cuda.current_stream())
            if targets is not None:
                for t in targets:
                    for k, v in t.items():
                        v.record_stream(torch.cuda.current_stream())
            self.preload()
        else:
            # Synchronous path: fetch and move the batch on demand.
            try:
                samples, targets = next(self.loader)
                samples, targets = to_cuda(samples, targets, self.device)
            except StopIteration:
                samples = None
                targets = None
        return samples, targets
| 45.042857 | 110 | 0.572154 |
import torch
def to_cuda(samples, targets, device):
samples = samples.to(device, non_blocking=True)
targets = [{k: v.to(device, non_blocking=True) for k, v in t.items()} for t in targets]
return samples, targets
class data_prefetcher():
def __init__(self, loader, device, prefetch=True):
self.loader = iter(loader)
self.prefetch = prefetch
self.device = device
if prefetch:
self.stream = torch.cuda.Stream()
self.preload()
def preload(self):
try:
self.next_samples, self.next_targets = next(self.loader)
except StopIteration:
self.next_samples = None
self.next_targets = None
return
# on the main stream.
# self.next_input_gpu = torch.empty_like(self.next_input, device='cuda')
# self.next_target_gpu = torch.empty_like(self.next_target, device='cuda')
# Need to make sure the memory allocated for next_* is not still in use by the main stream
# at the time we start copying to next_*:
# self.stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self.stream):
self.next_samples, self.next_targets = to_cuda(self.next_samples, self.next_targets, self.device)
# more code for the alternative if record_stream() doesn't work:
# if args.fp16:
# self.next_input = self.next_input.half()
# else:
def next(self):
if self.prefetch:
torch.cuda.current_stream().wait_stream(self.stream)
samples = self.next_samples
targets = self.next_targets
if samples is not None:
samples.record_stream(torch.cuda.current_stream())
if targets is not None:
for t in targets:
for k, v in t.items():
v.record_stream(torch.cuda.current_stream())
self.preload()
else:
try:
samples, targets = next(self.loader)
samples, targets = to_cuda(samples, targets, self.device)
except StopIteration:
samples = None
targets = None
return samples, targets
| true | true |
f7f6055ac8fa209475c1ae141e56a9189206bcf9 | 935 | py | Python | www/src/web_workers/test_worker.py | raspberrypieman/brython | 2cc23d1da6acda604d4a56b4c9d464eb7e374eda | [
"BSD-3-Clause"
] | 5,926 | 2015-01-01T07:45:08.000Z | 2022-03-31T12:34:38.000Z | www/src/web_workers/test_worker.py | raspberrypieman/brython | 2cc23d1da6acda604d4a56b4c9d464eb7e374eda | [
"BSD-3-Clause"
] | 1,728 | 2015-01-01T01:09:12.000Z | 2022-03-30T23:25:22.000Z | www/src/web_workers/test_worker.py | raspberrypieman/brython | 2cc23d1da6acda604d4a56b4c9d464eb7e374eda | [
"BSD-3-Clause"
] | 574 | 2015-01-02T01:36:10.000Z | 2022-03-26T10:18:48.000Z | """
An example of a simple worker
Run as follows:
from browser import webworker as ww
w = ww.WorkerParent('web_workers/test_worker.py',[1,2,3],{"test":"Ahoj"})
m = ww.Message('ping',"ahoj")
r = w.post_message(m,want_reply=True)
w.post_message(ww.Message('quit',None))
"""
from browser.webworker import current_worker, Message
from browser import console
from sys import argv
from os import environ
def pong(self, message, **_):
    # Handler for 'ping': log the incoming message and reply with a 'pong'
    # carrying the same payload back to the parent.
    print('Web worker received message (',message.id,')', message.name, message.data)
    current_worker.post_reply(message, Message('pong', message.data))
def quit(self, *args, **kwargs):
    # Handler for 'quit': stop this worker.  NOTE(review): shadows the
    # builtin `quit`; kept because the name is bound as a handler below.
    current_worker.terminate()
print("Starting test worker with args:", argv, "and environment", environ)
current_worker.bind_message('ping', pong)
current_worker.bind_message('quit', quit)
current_worker.exec()
| 29.21875 | 85 | 0.660963 | from browser.webworker import current_worker, Message
from browser import console
from sys import argv
from os import environ
def pong(self, message, **_):
print('Web worker received message (',message.id,')', message.name, message.data)
current_worker.post_reply(message, Message('pong', message.data))
def quit(self, *args, **kwargs):
current_worker.terminate()
print("Starting test worker with args:", argv, "and environment", environ)
current_worker.bind_message('ping', pong)
current_worker.bind_message('quit', quit)
current_worker.exec()
| true | true |
f7f606ac7779bb6b68b85c5fc253535bb673dbc0 | 243 | py | Python | itscsapp/carrers/models/__init__.py | danyRivC/itscsapp | 485309f41f477fcebf66899740a0b4a954f4b98b | [
"MIT"
] | null | null | null | itscsapp/carrers/models/__init__.py | danyRivC/itscsapp | 485309f41f477fcebf66899740a0b4a954f4b98b | [
"MIT"
] | null | null | null | itscsapp/carrers/models/__init__.py | danyRivC/itscsapp | 485309f41f477fcebf66899740a0b4a954f4b98b | [
"MIT"
] | null | null | null | from itscsapp.carrers.models.semesters import *
from itscsapp.carrers.models.carrer import *
from itscsapp.carrers.models.informations import *
from itscsapp.carrers.models.categories import *
from itscsapp.carrers.models.asignatures import *
| 40.5 | 50 | 0.835391 | from itscsapp.carrers.models.semesters import *
from itscsapp.carrers.models.carrer import *
from itscsapp.carrers.models.informations import *
from itscsapp.carrers.models.categories import *
from itscsapp.carrers.models.asignatures import *
| true | true |
f7f607330315df560da32c1258c9a8b9e09d78dd | 9,132 | py | Python | featuretools/computational_backends/feature_set.py | ridicolos/featuretools | 0af409da206e0b691ec64a3e0e618a43f1701dd9 | [
"BSD-3-Clause"
] | 942 | 2020-11-10T02:59:39.000Z | 2022-03-31T16:34:33.000Z | featuretools/computational_backends/feature_set.py | 167rgc911/featuretools | bbad3f7392b203b7b9c250a93465052e7fc06bbc | [
"BSD-3-Clause"
] | 721 | 2020-11-09T23:12:06.000Z | 2022-03-31T22:33:35.000Z | featuretools/computational_backends/feature_set.py | 167rgc911/featuretools | bbad3f7392b203b7b9c250a93465052e7fc06bbc | [
"BSD-3-Clause"
] | 127 | 2020-11-10T10:12:30.000Z | 2022-03-27T08:55:05.000Z | import itertools
import logging
from collections import defaultdict
from featuretools.entityset.relationship import RelationshipPath
from featuretools.feature_base import (
AggregationFeature,
FeatureOutputSlice,
GroupByTransformFeature,
TransformFeature
)
from featuretools.utils import Trie
logger = logging.getLogger('featuretools.computational_backend')
class FeatureSet(object):
    """
    Represents an immutable set of features to be calculated for a single dataframe, and their
    dependencies.
    """
    def __init__(self, features, approximate_feature_trie=None):
        """
        Args:
            features (list[Feature]): Features of the target dataframe.
            approximate_feature_trie (Trie[RelationshipPath, set[str]], optional): Dependency
                features to ignore because they have already been approximated. For example, if
                one of the target features is a direct feature of a feature A and A is included in
                approximate_feature_trie then neither A nor its dependencies will appear in
                FeatureSet.feature_trie.
        """
        self.target_df_name = features[0].dataframe_name
        self.target_features = features
        self.target_feature_names = {f.unique_name() for f in features}
        if not approximate_feature_trie:
            approximate_feature_trie = Trie(default=list,
                                            path_constructor=RelationshipPath)
        self.approximate_feature_trie = approximate_feature_trie
        # Maps the unique name of each feature to the actual feature. This is necessary
        # because features do not support equality and so cannot be used as
        # dictionary keys. The equality operator on features produces a new
        # feature (which will always be truthy).
        self.features_by_name = {f.unique_name(): f for f in features}
        # Walk the deep dependency graph once, recording for each feature
        # the names of the features that directly rely on it.
        feature_dependents = defaultdict(set)
        for f in features:
            deps = f.get_dependencies(deep=True)
            for dep in deps:
                feature_dependents[dep.unique_name()].add(f.unique_name())
                self.features_by_name[dep.unique_name()] = dep
                subdeps = dep.get_dependencies(deep=True)
                for sd in subdeps:
                    feature_dependents[sd.unique_name()].add(dep.unique_name())
        # feature names (keys) and the features that rely on them (values).
        self.feature_dependents = {
            fname: [self.features_by_name[dname] for dname in feature_dependents[fname]]
            for fname, f in self.features_by_name.items()}
        # Built lazily by the feature_trie property.
        self._feature_trie = None
    @property
    def feature_trie(self):
        """
        The target features and their dependencies organized into a trie by relationship path.
        This is built once when it is first called (to avoid building it if it is not needed) and
        then used for all subsequent calls.
        The edges of the trie are RelationshipPaths and the values are tuples of
        (bool, set[str], set[str]). The bool represents whether the full dataframe is needed at
        that node, the first set contains the names of features which are needed on the full
        dataframe, and the second set contains the names of the rest of the features
        Returns:
            Trie[RelationshipPath, (bool, set[str], set[str])]
        """
        if not self._feature_trie:
            self._feature_trie = self._build_feature_trie()
        return self._feature_trie
    def _build_feature_trie(self):
        """
        Build the feature trie by adding the target features and their dependencies recursively.
        """
        feature_trie = Trie(default=lambda: (False, set(), set()),
                            path_constructor=RelationshipPath)
        for f in self.target_features:
            self._add_feature_to_trie(feature_trie,
                                      f,
                                      self.approximate_feature_trie)
        return feature_trie
    def _add_feature_to_trie(self, trie, feature, approximate_feature_trie,
                             ancestor_needs_full_dataframe=False):
        """
        Add the given feature to the root of the trie, and recurse on its dependencies. If it is in
        approximate_feature_trie then it will not be added and we will not recurse on its dependencies.
        """
        node_needs_full_dataframe, full_features, not_full_features = trie.value
        needs_full_dataframe = ancestor_needs_full_dataframe or self.uses_full_dataframe(feature)
        name = feature.unique_name()
        # If this feature is ignored then don't add it or any of its dependencies.
        if name in approximate_feature_trie.value:
            return
        # Add the feature to one of the sets, depending on whether it needs the full dataframe.
        if needs_full_dataframe:
            full_features.add(name)
            # A feature lives in exactly one of the two sets; "full" wins.
            if name in not_full_features:
                not_full_features.remove(name)
            # Update needs_full_dataframe for this node.
            trie.value = (True, full_features, not_full_features)
            # Set every node in relationship path to needs_full_dataframe.
            sub_trie = trie
            for edge in feature.relationship_path:
                sub_trie = sub_trie.get_node([edge])
                (_, f1, f2) = sub_trie.value
                sub_trie.value = (True, f1, f2)
        else:
            if name not in full_features:
                not_full_features.add(name)
            sub_trie = trie.get_node(feature.relationship_path)
        sub_ignored_trie = approximate_feature_trie.get_node(feature.relationship_path)
        for dep_feat in feature.get_dependencies():
            if isinstance(dep_feat, FeatureOutputSlice):
                # A multi-output slice is computed via its base feature.
                dep_feat = dep_feat.base_feature
            self._add_feature_to_trie(sub_trie, dep_feat, sub_ignored_trie,
                                      ancestor_needs_full_dataframe=needs_full_dataframe)
    def group_features(self, feature_names):
        """
        Topologically sort the given features, then group by path,
        feature type, use_previous, and where.
        """
        features = [self.features_by_name[name] for name in feature_names]
        depths = self._get_feature_depths(features)
        def key_func(f):
            return (depths[f.unique_name()],
                    f.relationship_path_name(),
                    str(f.__class__),
                    _get_use_previous(f),
                    _get_where(f),
                    self.uses_full_dataframe(f),
                    _get_groupby(f))
        # Sort the list of features by the complex key function above, then
        # group them by the same key
        sort_feats = sorted(features, key=key_func)
        feature_groups = [list(g) for _, g in
                          itertools.groupby(sort_feats, key=key_func)]
        return feature_groups
    def _get_feature_depths(self, features):
        """
        Generate and return a mapping of {feature name -> depth} in the
        feature DAG for the given dataframe.
        """
        # Depths are <= 0: dependencies get strictly smaller values than
        # their dependents so that sorting by depth is a topological order.
        order = defaultdict(int)
        depths = {}
        queue = features[:]
        while queue:
            # Get the next feature.
            f = queue.pop(0)
            depths[f.unique_name()] = order[f.unique_name()]
            # Only look at dependencies if they are on the same dataframe.
            if not f.relationship_path:
                dependencies = f.get_dependencies()
                for dep in dependencies:
                    order[dep.unique_name()] = \
                        min(order[f.unique_name()] - 1, order[dep.unique_name()])
                    queue.append(dep)
        return depths
    def uses_full_dataframe(self, feature, check_dependents=False):
        # True when `feature` is a transform whose primitive needs the whole
        # dataframe at once (optionally also checking its dependents).
        if isinstance(feature, TransformFeature) and feature.primitive.uses_full_dataframe:
            return True
        return check_dependents and self._dependent_uses_full_dataframe(feature)
    def _dependent_uses_full_dataframe(self, feature):
        # True when any feature depending on `feature` is a full-dataframe
        # transform.
        for d in self.feature_dependents[feature.unique_name()]:
            if isinstance(d, TransformFeature) and d.primitive.uses_full_dataframe:
                return True
        return False
# These functions are used for sorting and grouping features
def _get_use_previous(f):  # TODO Sort and group features for DateOffset with two different temporal values
    """Grouping key for use_previous: a (unit, value) pair, or ("", -1) when
    the feature has no single-unit use_previous window.
    """
    sentinel = ("", -1)
    if not isinstance(f, AggregationFeature) or f.use_previous is None:
        return sentinel
    times = f.use_previous.times
    # Multi-unit offsets are not grouped (see TODO above).
    if len(times) > 1:
        return sentinel
    unit, value = next(iter(times.items()))
    return (unit, value)
def _get_where(f):
    """Grouping key for the where clause: its unique name, or '' if absent."""
    has_where = isinstance(f, AggregationFeature) and f.where is not None
    return f.where.unique_name() if has_where else ''
def _get_groupby(f):
    """Grouping key for the groupby feature: its unique name, or ''."""
    if not isinstance(f, GroupByTransformFeature):
        return ''
    return f.groupby.unique_name()
| 39.704348 | 107 | 0.633377 | import itertools
import logging
from collections import defaultdict
from featuretools.entityset.relationship import RelationshipPath
from featuretools.feature_base import (
AggregationFeature,
FeatureOutputSlice,
GroupByTransformFeature,
TransformFeature
)
from featuretools.utils import Trie
logger = logging.getLogger('featuretools.computational_backend')
class FeatureSet(object):
def __init__(self, features, approximate_feature_trie=None):
self.target_df_name = features[0].dataframe_name
self.target_features = features
self.target_feature_names = {f.unique_name() for f in features}
if not approximate_feature_trie:
approximate_feature_trie = Trie(default=list,
path_constructor=RelationshipPath)
self.approximate_feature_trie = approximate_feature_trie
self.features_by_name = {f.unique_name(): f for f in features}
feature_dependents = defaultdict(set)
for f in features:
deps = f.get_dependencies(deep=True)
for dep in deps:
feature_dependents[dep.unique_name()].add(f.unique_name())
self.features_by_name[dep.unique_name()] = dep
subdeps = dep.get_dependencies(deep=True)
for sd in subdeps:
feature_dependents[sd.unique_name()].add(dep.unique_name())
self.feature_dependents = {
fname: [self.features_by_name[dname] for dname in feature_dependents[fname]]
for fname, f in self.features_by_name.items()}
self._feature_trie = None
@property
def feature_trie(self):
if not self._feature_trie:
self._feature_trie = self._build_feature_trie()
return self._feature_trie
def _build_feature_trie(self):
feature_trie = Trie(default=lambda: (False, set(), set()),
path_constructor=RelationshipPath)
for f in self.target_features:
self._add_feature_to_trie(feature_trie,
f,
self.approximate_feature_trie)
return feature_trie
def _add_feature_to_trie(self, trie, feature, approximate_feature_trie,
ancestor_needs_full_dataframe=False):
node_needs_full_dataframe, full_features, not_full_features = trie.value
needs_full_dataframe = ancestor_needs_full_dataframe or self.uses_full_dataframe(feature)
name = feature.unique_name()
if name in approximate_feature_trie.value:
return
# Add the feature to one of the sets, depending on whether it needs the full dataframe.
if needs_full_dataframe:
full_features.add(name)
if name in not_full_features:
not_full_features.remove(name)
# Update needs_full_dataframe for this node.
trie.value = (True, full_features, not_full_features)
# Set every node in relationship path to needs_full_dataframe.
sub_trie = trie
for edge in feature.relationship_path:
sub_trie = sub_trie.get_node([edge])
(_, f1, f2) = sub_trie.value
sub_trie.value = (True, f1, f2)
else:
if name not in full_features:
not_full_features.add(name)
sub_trie = trie.get_node(feature.relationship_path)
sub_ignored_trie = approximate_feature_trie.get_node(feature.relationship_path)
for dep_feat in feature.get_dependencies():
if isinstance(dep_feat, FeatureOutputSlice):
dep_feat = dep_feat.base_feature
self._add_feature_to_trie(sub_trie, dep_feat, sub_ignored_trie,
ancestor_needs_full_dataframe=needs_full_dataframe)
def group_features(self, feature_names):
features = [self.features_by_name[name] for name in feature_names]
depths = self._get_feature_depths(features)
def key_func(f):
return (depths[f.unique_name()],
f.relationship_path_name(),
str(f.__class__),
_get_use_previous(f),
_get_where(f),
self.uses_full_dataframe(f),
_get_groupby(f))
# Sort the list of features by the complex key function above, then
# group them by the same key
sort_feats = sorted(features, key=key_func)
feature_groups = [list(g) for _, g in
itertools.groupby(sort_feats, key=key_func)]
return feature_groups
def _get_feature_depths(self, features):
order = defaultdict(int)
depths = {}
queue = features[:]
while queue:
# Get the next feature.
f = queue.pop(0)
depths[f.unique_name()] = order[f.unique_name()]
# Only look at dependencies if they are on the same dataframe.
if not f.relationship_path:
dependencies = f.get_dependencies()
for dep in dependencies:
order[dep.unique_name()] = \
min(order[f.unique_name()] - 1, order[dep.unique_name()])
queue.append(dep)
return depths
def uses_full_dataframe(self, feature, check_dependents=False):
if isinstance(feature, TransformFeature) and feature.primitive.uses_full_dataframe:
return True
return check_dependents and self._dependent_uses_full_dataframe(feature)
def _dependent_uses_full_dataframe(self, feature):
for d in self.feature_dependents[feature.unique_name()]:
if isinstance(d, TransformFeature) and d.primitive.uses_full_dataframe:
return True
return False
# These functions are used for sorting and grouping features
def _get_use_previous(f): # TODO Sort and group features for DateOffset with two different temporal values
if isinstance(f, AggregationFeature) and f.use_previous is not None:
if len(f.use_previous.times.keys()) > 1:
return ("", -1)
else:
unit = list(f.use_previous.times.keys())[0]
value = f.use_previous.times[unit]
return (unit, value)
else:
return ("", -1)
def _get_where(f):
if isinstance(f, AggregationFeature) and f.where is not None:
return f.where.unique_name()
else:
return ''
def _get_groupby(f):
if isinstance(f, GroupByTransformFeature):
return f.groupby.unique_name()
else:
return ''
| true | true |
f7f60830f317e8a79c8a9db2ca16560bb484746e | 3,345 | py | Python | pyalgotrade/optimizer/local.py | gbecedatxapo/pyalgotrade | b1015f2f20714ae02f834a90f25376e40c6f8ab9 | [
"Apache-2.0"
] | 1 | 2020-07-06T18:38:38.000Z | 2020-07-06T18:38:38.000Z | pyalgotrade/optimizer/local.py | gbecedatxapo/pyalgotrade | b1015f2f20714ae02f834a90f25376e40c6f8ab9 | [
"Apache-2.0"
] | null | null | null | pyalgotrade/optimizer/local.py | gbecedatxapo/pyalgotrade | b1015f2f20714ae02f834a90f25376e40c6f8ab9 | [
"Apache-2.0"
] | null | null | null | # PyAlgoTrade
#
# Copyright 2011-2014 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
import multiprocessing
import threading
import logging
import socket
import random
import os
from pyalgotrade.optimizer import server
from pyalgotrade.optimizer import worker
def server_thread(srv, barFeed, strategyParameters, port):
    # Thread target: run the optimizer server until all parameters have been
    # dispatched or srv.stop() is called.
    # NOTE(review): `port` is unused here; the server was already bound when
    # constructed — confirm no caller depends on it before removing.
    srv.serve(barFeed, strategyParameters)
def worker_process(strategyClass, port):
    """Process target: run one worker that backtests `strategyClass` against
    the local optimizer server listening on `port`.
    """

    class _StrategyWorker(worker.Worker):
        # Run the strategy over the received bar feed and report its result.
        def runStrategy(self, barFeed, *args, **kwargs):
            strat = strategyClass(barFeed, *args, **kwargs)
            strat.run()
            return strat.getResult()

    # Name the worker after its PID and run it until the server runs out of work.
    workerName = "worker-%s" % (os.getpid())
    w = _StrategyWorker("localhost", port, workerName)
    w.getLogger().setLevel(logging.ERROR)
    w.run()
def find_port():
    """Return a random localhost TCP port that is currently free to bind.

    Repeatedly picks a random candidate in the non-privileged range and
    returns the first one that accepts a bind. Note there is an inherent
    TOCTOU race: the port may be taken by another process between this
    probe and its actual use by the caller.
    """
    while True:
        # randint is inclusive on both ends; valid TCP ports end at 65535.
        # (The previous upper bound of 65536 could produce an invalid port,
        # whose bind() raises OverflowError — not caught by socket.error.)
        ret = random.randint(1025, 65535)
        s = socket.socket()
        try:
            s.bind(("localhost", ret))
            return ret
        except socket.error:
            # Port in use (or otherwise unbindable) — try another candidate.
            pass
        finally:
            # Always release the probe socket, even when bind() fails.
            s.close()
def run(strategyClass, barFeed, strategyParameters, workerCount=None):
    """Executes many instances of a strategy in parallel and finds the parameters that yield the best results.

    :param strategyClass: The strategy class.
    :param barFeed: The bar feed to use to backtest the strategy.
    :type barFeed: :class:`pyalgotrade.barfeed.BarFeed`.
    :param strategyParameters: The set of parameters to use for backtesting. An iterable object where **each element is a tuple that holds parameter values**.
    :param workerCount: The number of strategies to run in parallel. If None then as many workers as CPUs are used.
    :type workerCount: int.
    """

    assert(workerCount is None or workerCount > 0)
    # Default to one worker process per CPU core.
    if workerCount is None:
        workerCount = multiprocessing.cpu_count()

    workers = []
    port = find_port()
    # NOTE(review): find_port() loops until it succeeds and never returns
    # None, so this check is dead code — kept as a defensive guard.
    if port is None:
        raise Exception("Failed to find a port to listen")

    # Build and start the server thread before the worker processes. We'll manually stop the server once workers have finished.
    srv = server.Server("localhost", port, False)
    serverThread = threading.Thread(target=server_thread, args=(srv, barFeed, strategyParameters, port))
    serverThread.start()

    try:
        # Build the worker processes.
        for i in range(workerCount):
            workers.append(multiprocessing.Process(target=worker_process, args=(strategyClass, port)))

        # Start workers
        for process in workers:
            process.start()

        # Wait workers
        for process in workers:
            process.join()
    finally:
        # Stop and wait the server to finish.
        # The server is stopped even if starting/joining a worker raised.
        srv.stop()
        serverThread.join()
| 32.163462 | 158 | 0.686099 |
import multiprocessing
import threading
import logging
import socket
import random
import os
from pyalgotrade.optimizer import server
from pyalgotrade.optimizer import worker
def server_thread(srv, barFeed, strategyParameters, port):
srv.serve(barFeed, strategyParameters)
def worker_process(strategyClass, port):
class Worker(worker.Worker):
def runStrategy(self, barFeed, *args, **kwargs):
strat = strategyClass(barFeed, *args, **kwargs)
strat.run()
return strat.getResult()
name = "worker-%s" % (os.getpid())
w = Worker("localhost", port, name)
w.getLogger().setLevel(logging.ERROR)
w.run()
def find_port():
while True:
ret = random.randint(1025, 65536)
try:
s = socket.socket()
s.bind(("localhost", ret))
s.close()
return ret
except socket.error:
pass
def run(strategyClass, barFeed, strategyParameters, workerCount=None):
assert(workerCount is None or workerCount > 0)
if workerCount is None:
workerCount = multiprocessing.cpu_count()
workers = []
port = find_port()
if port is None:
raise Exception("Failed to find a port to listen")
srv = server.Server("localhost", port, False)
serverThread = threading.Thread(target=server_thread, args=(srv, barFeed, strategyParameters, port))
serverThread.start()
try:
# Build the worker processes.
for i in range(workerCount):
workers.append(multiprocessing.Process(target=worker_process, args=(strategyClass, port)))
# Start workers
for process in workers:
process.start()
# Wait workers
for process in workers:
process.join()
finally:
# Stop and wait the server to finish.
srv.stop()
serverThread.join()
| true | true |
f7f608e1afa0a7b744c0bcc7829f50cc2a075ca4 | 2,497 | py | Python | website/conferences/model.py | sf2ne/Playground | 95b2d222d7ac43baca0249acbfc34e043d6a95b3 | [
"Apache-2.0"
] | null | null | null | website/conferences/model.py | sf2ne/Playground | 95b2d222d7ac43baca0249acbfc34e043d6a95b3 | [
"Apache-2.0"
] | 13 | 2020-03-24T15:29:41.000Z | 2022-03-11T23:15:28.000Z | website/conferences/model.py | sf2ne/Playground | 95b2d222d7ac43baca0249acbfc34e043d6a95b3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import bson
from modularodm import fields, Q
from modularodm.exceptions import ModularOdmException
from framework.mongo import StoredObject
from website.conferences.exceptions import ConferenceError
class Conference(StoredObject):
    """A conference/meeting whose attendees submit material (posters, talks)
    by email; submissions become OSF projects under the conference endpoint.
    """

    #: Determines the email address for submission and the OSF url
    # Example: If endpoint is spsp2014, then submission email will be
    # spsp2014-talk@osf.io or spsp2014-poster@osf.io and the OSF url will
    # be osf.io/view/spsp2014
    endpoint = fields.StringField(primary=True, required=True, unique=True)
    #: Full name, e.g. "SPSP 2014"
    name = fields.StringField(required=True)
    #: Optional link with more information about the conference.
    info_url = fields.StringField(required=False, default=None)
    #: Optional URL of the conference logo.
    logo_url = fields.StringField(required=False, default=None)
    #: Whether the conference is active; inactive conferences are excluded
    #: by get_by_endpoint() unless active=False is passed.
    active = fields.BooleanField(required=True)
    #: Users with administrative rights over this conference.
    admins = fields.ForeignField('user', list=True, required=False, default=None)
    #: Whether to make submitted projects public
    public_projects = fields.BooleanField(required=False, default=True)
    #: Whether poster submissions are accepted.
    poster = fields.BooleanField(default=True)
    #: Whether talk submissions are accepted.
    talk = fields.BooleanField(default=True)

    # field_names are used to customize the text on the conference page, the categories
    # of submissions, and the email address to send material to.
    field_names = fields.DictionaryField(
        default=lambda: {
            'submission1': 'poster',
            'submission2': 'talk',
            'submission1_plural': 'posters',
            'submission2_plural': 'talks',
            'meeting_title_type': 'Posters & Talks',
            'add_submission': 'poster or talk',
            'mail_subject': 'Presentation title',
            'mail_message_body': 'Presentation abstract (if any)',
            'mail_attachment': 'Your presentation file (e.g., PowerPoint, PDF, etc.)'
        }
    )

    # Cached number of submissions
    num_submissions = fields.IntegerField(default=0)

    @classmethod
    def get_by_endpoint(cls, endpoint, active=True):
        """Return the conference whose endpoint matches (case-insensitive).

        :param endpoint: Endpoint string to look up.
        :param active: If True (default), only match active conferences.
        :raises ConferenceError: If no matching conference exists.
        """
        query = Q('endpoint', 'iexact', endpoint)
        if active:
            query &= Q('active', 'eq', True)
        try:
            return Conference.find_one(query)
        except ModularOdmException:
            raise ConferenceError('Endpoint {0} not found'.format(endpoint))
class MailRecord(StoredObject):
    """Stores the raw payload of one received submission email."""

    # Random ObjectId string used as the primary key.
    _id = fields.StringField(primary=True, default=lambda: str(bson.ObjectId()))
    # Parsed message contents.
    data = fields.DictionaryField()
    # Objects created while handling this message (backref 'created').
    records = fields.AbstractForeignField(list=True, backref='created')
| 40.274194 | 87 | 0.686424 |
import bson
from modularodm import fields, Q
from modularodm.exceptions import ModularOdmException
from framework.mongo import StoredObject
from website.conferences.exceptions import ConferenceError
class Conference(StoredObject):
endpoint = fields.StringField(primary=True, required=True, unique=True)
name = fields.StringField(required=True)
info_url = fields.StringField(required=False, default=None)
logo_url = fields.StringField(required=False, default=None)
active = fields.BooleanField(required=True)
admins = fields.ForeignField('user', list=True, required=False, default=None)
public_projects = fields.BooleanField(required=False, default=True)
poster = fields.BooleanField(default=True)
talk = fields.BooleanField(default=True)
field_names = fields.DictionaryField(
default=lambda: {
'submission1': 'poster',
'submission2': 'talk',
'submission1_plural': 'posters',
'submission2_plural': 'talks',
'meeting_title_type': 'Posters & Talks',
'add_submission': 'poster or talk',
'mail_subject': 'Presentation title',
'mail_message_body': 'Presentation abstract (if any)',
'mail_attachment': 'Your presentation file (e.g., PowerPoint, PDF, etc.)'
}
)
num_submissions = fields.IntegerField(default=0)
@classmethod
def get_by_endpoint(cls, endpoint, active=True):
query = Q('endpoint', 'iexact', endpoint)
if active:
query &= Q('active', 'eq', True)
try:
return Conference.find_one(query)
except ModularOdmException:
raise ConferenceError('Endpoint {0} not found'.format(endpoint))
class MailRecord(StoredObject):
_id = fields.StringField(primary=True, default=lambda: str(bson.ObjectId()))
data = fields.DictionaryField()
records = fields.AbstractForeignField(list=True, backref='created')
| true | true |
f7f60a19681ac9fb722fc906bd1313a2b340af29 | 4,950 | py | Python | vr_optimizers/opts.py | SamuelHorvath/Variance_Reduced_Optimizers_Pytorch | f75da9c53408df2403a5521b634e5d25ffd4a8dc | [
"Apache-2.0"
] | 1 | 2021-07-11T11:17:49.000Z | 2021-07-11T11:17:49.000Z | vr_optimizers/opts.py | SamuelHorvath/Variance_Reduced_Optimizers_Pytorch | f75da9c53408df2403a5521b634e5d25ffd4a8dc | [
"Apache-2.0"
] | null | null | null | vr_optimizers/opts.py | SamuelHorvath/Variance_Reduced_Optimizers_Pytorch | f75da9c53408df2403a5521b634e5d25ffd4a8dc | [
"Apache-2.0"
] | 1 | 2021-07-13T05:09:27.000Z | 2021-07-13T05:09:27.000Z | import argparse
from datetime import datetime
import os
def parse_args(args):
    """Build the CLI parser for variance-reduction experiments and parse it.

    NOTE(review): `args` is only forwarded to initialise_arg_parser (which
    passes it to the ArgumentParser constructor); parsing below always reads
    sys.argv because parser.parse_args() is called with no arguments —
    confirm callers rely on this before changing it.
    """
    parser = initialise_arg_parser(args, 'Variance Reduction.')

    # EXPERIMENT / OPTIMIZATION ARGUMENTS
    parser.add_argument(
        "--total-runs",
        type=int,
        default=3,
        help="Number of times to redo run, we increase seed by 1 if deterministic",
    )
    parser.add_argument(
        "--epochs",
        type=int,
        default=1,
        help="Number of epochs to run",
    )
    parser.add_argument(
        '--lr',
        type=float,
        default=0.1,
        help='Initial learning rate (default: .1)'
    )
    parser.add_argument(
        "--tune-lr",
        default=False,
        action='store_true',
        help="Whether to tune step size during optimization procedure, based on single run"
    )
    parser.add_argument(
        "-b", "--batch-size",
        type=int,
        default=32,
        help="Static batch size for computation, for speed select as large as possible"
    )
    parser.add_argument(
        "--method",
        type=str,
        required=True,
        help="Define which method to run"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        choices=[
            "mnist", "cifar10", "cifar100", "mushrooms", "w8a", "ijcnn1", "a9a", "phishing"],
        help="Define which dataset to load"
    )
    parser.add_argument(
        "--metric",
        type=str,
        default='top_1_acc',
        choices=["loss", "top_1_acc"],
        help="Define which metric to optimize."
    )
    parser.add_argument(
        "--train-metric",
        default=False,
        action='store_true',
        help="Whether to tune train or validation metric"
    )
    parser.add_argument(
        "--model",
        type=str,
        required=True,
        help="Define which model to load"
    )
    # NOTE(review): help text below looks copy-pasted from --model; it should
    # describe the loss function (CE/BCE).
    parser.add_argument(
        "--loss",
        type=str,
        default='CE',
        choices=['CE', 'BCE'],
        help="Define which model to load"
    )
    parser.add_argument(
        '--weight-decay',
        type=float,
        default=0.,
        help='Weight decay (default: 0.)'
    )
    parser.add_argument(
        "--track-grad-norm",
        default=False,
        action='store_true',
        help="Whether to track grad norm on validation set"
    )
    parser.add_argument(
        "--nc-regularizer",
        default=False,
        action='store_true',
        help="Whether to include non-convex regularizer"
    )
    parser.add_argument(
        "--nc-regularizer-value",
        type=float,
        default=1e-3,
        help="Non-convex regularizer coefficient"
    )
    # SETUP ARGUMENTS
    parser.add_argument(
        "--checkpoint-dir",
        type=str,
        default='../check_points',
        help="Directory to persist run meta data_preprocess, e.g. best/last models."
    )
    parser.add_argument(
        "--data-path",
        type=str,
        default="../data/",
        help="Base root directory for the dataset."
    )
    parser.add_argument(
        "--gpu",
        type=str,
        default="0",
        help="Define on which GPU to run the model (comma-separated for multiple). If -1, use CPU."
    )
    parser.add_argument(
        "-n", "--num-workers",
        type=int,
        default=4,
        help="Num workers for dataset loading"
    )
    parser.add_argument(
        "--deterministic",
        action="store_true",
        default=False,
        help="Run deterministically for reproducibility."
    )
    parser.add_argument(
        "--manual-seed",
        type=int,
        default=123,
        help="Random seed to use."
    )
    parser.add_argument(
        "--eval-every",
        type=int,
        default=1,
        help="How often to do validation."
    )
    parser.add_argument(
        "--run-id",
        type=str,
        required=True,
        help="Name of the Experiment (no default)"
    )
    parser.add_argument(
        "--loglevel",
        type=str,
        choices=["DEBUG", "INFO", "WARN", "ERROR", "CRITICAL"],
        default="INFO"
    )
    # Side effect at parse time: the logs directory is created and the default
    # logfile name is stamped with the current wall-clock time.
    now = datetime.now()
    now = now.strftime("%Y%m%d%H%M%S")
    os.makedirs("../logs/", exist_ok=True)
    parser.add_argument(
        "--logfile",
        type=str,
        default=f"../logs/log_{now}.txt"
    )

    # Evaluation mode, do not run training
    parser.add_argument("--evaluate", action='store_true', default=False, help="Evaluation or Training mode")

    # NOTE(review): parses sys.argv, not the `args` parameter (see docstring).
    args = parser.parse_args()
    # Normalize args.gpu in place ("-1" -> "cpu", "2" -> "cuda:2", "3,1" -> [1, 3]).
    transform_gpu_args(args)
    return args
def initialise_arg_parser(args, description):
    """Create the base ArgumentParser for the CLI.

    :param args: Raw argument list; accepted for interface compatibility but
        intentionally NOT forwarded to the parser. The previous code passed it
        as ArgumentParser's first positional parameter (``prog``), which made
        usage/help output display the argument list instead of the program name.
    :param description: Text shown in the parser's help output.
    :return: A configured :class:`argparse.ArgumentParser`.
    """
    parser = argparse.ArgumentParser(description=description)
    return parser
def transform_gpu_args(args):
    """Normalize ``args.gpu`` in place.

    "-1" becomes "cpu"; a single id "2" becomes "cuda:2"; a comma-separated
    list "3,1" becomes a sorted list of ints [1, 3].
    """
    if args.gpu == "-1":
        args.gpu = "cpu"
        return
    cards = args.gpu.split(',')
    if len(cards) > 1:
        args.gpu = sorted(int(card) for card in cards)
    else:
        args.gpu = f"cuda:{args.gpu}"
| 25.91623 | 109 | 0.56 | import argparse
from datetime import datetime
import os
def parse_args(args):
parser = initialise_arg_parser(args, 'Variance Reduction.')
parser.add_argument(
"--total-runs",
type=int,
default=3,
help="Number of times to redo run, we increase seed by 1 if deterministic",
)
parser.add_argument(
"--epochs",
type=int,
default=1,
help="Number of epochs to run",
)
parser.add_argument(
'--lr',
type=float,
default=0.1,
help='Initial learning rate (default: .1)'
)
parser.add_argument(
"--tune-lr",
default=False,
action='store_true',
help="Whether to tune step size during optimization procedure, based on single run"
)
parser.add_argument(
"-b", "--batch-size",
type=int,
default=32,
help="Static batch size for computation, for speed select as large as possible"
)
parser.add_argument(
"--method",
type=str,
required=True,
help="Define which method to run"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
choices=[
"mnist", "cifar10", "cifar100", "mushrooms", "w8a", "ijcnn1", "a9a", "phishing"],
help="Define which dataset to load"
)
parser.add_argument(
"--metric",
type=str,
default='top_1_acc',
choices=["loss", "top_1_acc"],
help="Define which metric to optimize."
)
parser.add_argument(
"--train-metric",
default=False,
action='store_true',
help="Whether to tune train or validation metric"
)
parser.add_argument(
"--model",
type=str,
required=True,
help="Define which model to load"
)
parser.add_argument(
"--loss",
type=str,
default='CE',
choices=['CE', 'BCE'],
help="Define which model to load"
)
parser.add_argument(
'--weight-decay',
type=float,
default=0.,
help='Weight decay (default: 0.)'
)
parser.add_argument(
"--track-grad-norm",
default=False,
action='store_true',
help="Whether to track grad norm on validation set"
)
parser.add_argument(
"--nc-regularizer",
default=False,
action='store_true',
help="Whether to include non-convex regularizer"
)
parser.add_argument(
"--nc-regularizer-value",
type=float,
default=1e-3,
help="Non-convex regularizer coefficient"
)
parser.add_argument(
"--checkpoint-dir",
type=str,
default='../check_points',
help="Directory to persist run meta data_preprocess, e.g. best/last models."
)
parser.add_argument(
"--data-path",
type=str,
default="../data/",
help="Base root directory for the dataset."
)
parser.add_argument(
"--gpu",
type=str,
default="0",
help="Define on which GPU to run the model (comma-separated for multiple). If -1, use CPU."
)
parser.add_argument(
"-n", "--num-workers",
type=int,
default=4,
help="Num workers for dataset loading"
)
parser.add_argument(
"--deterministic",
action="store_true",
default=False,
help="Run deterministically for reproducibility."
)
parser.add_argument(
"--manual-seed",
type=int,
default=123,
help="Random seed to use."
)
parser.add_argument(
"--eval-every",
type=int,
default=1,
help="How often to do validation."
)
parser.add_argument(
"--run-id",
type=str,
required=True,
help="Name of the Experiment (no default)"
)
parser.add_argument(
"--loglevel",
type=str,
choices=["DEBUG", "INFO", "WARN", "ERROR", "CRITICAL"],
default="INFO"
)
now = datetime.now()
now = now.strftime("%Y%m%d%H%M%S")
os.makedirs("../logs/", exist_ok=True)
parser.add_argument(
"--logfile",
type=str,
default=f"../logs/log_{now}.txt"
)
parser.add_argument("--evaluate", action='store_true', default=False, help="Evaluation or Training mode")
args = parser.parse_args()
transform_gpu_args(args)
return args
def initialise_arg_parser(args, description):
parser = argparse.ArgumentParser(args, description=description)
return parser
def transform_gpu_args(args):
if args.gpu == "-1":
args.gpu = "cpu"
else:
gpu_str_arg = args.gpu.split(',')
if len(gpu_str_arg) > 1:
args.gpu = sorted([int(card) for card in gpu_str_arg])
else:
args.gpu = f"cuda:{args.gpu}"
| true | true |
f7f60a75acdd9326848b43492463db3bfa171e55 | 5,541 | py | Python | IPython/frontend/html/notebook/azurenbmanager.py | 3kwa/ipython | a5922fd39ed4b2067d64b285125278a850bb129f | [
"BSD-3-Clause-Clear"
] | 1 | 2022-03-13T23:06:43.000Z | 2022-03-13T23:06:43.000Z | IPython/frontend/html/notebook/azurenbmanager.py | andreasjansson/ipython | 09b4311726f46945b936c699f7a6489d74d7397f | [
"BSD-3-Clause-Clear"
] | null | null | null | IPython/frontend/html/notebook/azurenbmanager.py | andreasjansson/ipython | 09b4311726f46945b936c699f7a6489d74d7397f | [
"BSD-3-Clause-Clear"
] | null | null | null | """A notebook manager that uses Azure blob storage.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2012 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import datetime
import azure
from azure.storage import BlobService
from tornado import web
from .nbmanager import NotebookManager
from IPython.nbformat import current
from IPython.utils.traitlets import Unicode, Instance
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class AzureNotebookManager(NotebookManager):
    """Notebook manager backed by Azure blob storage: each notebook is one
    blob in a container, keyed by notebook_id, with the human-readable name
    kept in blob metadata (x-ms-meta-nbname).
    """

    account_name = Unicode('', config=True, help='Azure storage account name.')
    account_key = Unicode('', config=True, help='Azure storage account key.')
    container = Unicode('', config=True, help='Container name for notebooks.')
    blob_service_host_base = Unicode('.blob.core.windows.net', config=True,
        help='The basename for the blob service URL. If running on the preview site this '
             'will be .blob.core.azure-preview.com.')

    # Traitlets change handler (magic name): keep the azure module's global
    # host base in sync when the trait is reassigned.
    def _blob_service_host_base_changed(self, new):
        self._update_service_host_base(new)

    blob_service = Instance('azure.storage.BlobService')

    # Traitlets default factory (magic name): lazily build the blob client
    # from the configured account credentials.
    def _blob_service_default(self):
        return BlobService(account_name=self.account_name, account_key=self.account_key)

    def __init__(self, **kwargs):
        super(AzureNotebookManager, self).__init__(**kwargs)
        self._update_service_host_base(self.blob_service_host_base)
        self._create_container()

    def _update_service_host_base(self, shb):
        # Module-level global: affects every BlobService created afterwards.
        azure.BLOB_SERVICE_HOST_BASE = shb

    def _create_container(self):
        # Idempotent: creating an existing container is a no-op server-side.
        self.blob_service.create_container(self.container)

    def load_notebook_names(self):
        """On startup load the notebook ids and names from Azure.

        The blob names are the notebook ids and the notebook names are stored
        as blob metadata.
        """
        self.mapping = {}
        blobs = self.blob_service.list_blobs(self.container)
        ids = [blob.name for blob in blobs]
        # One metadata round-trip per notebook.
        for id in ids:
            md = self.blob_service.get_blob_metadata(self.container, id)
            name = md['x-ms-meta-nbname']
            self.mapping[id] = name

    def list_notebooks(self):
        """List all notebooks in the container.

        This version uses `self.mapping` as the authoritative notebook list.
        """
        data = [dict(notebook_id=id,name=name) for id, name in self.mapping.items()]
        data = sorted(data, key=lambda item: item['name'])
        return data

    def read_notebook_object(self, notebook_id):
        """Get the object representation of a notebook by notebook_id."""
        if not self.notebook_exists(notebook_id):
            raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
        try:
            s = self.blob_service.get_blob(self.container, notebook_id)
        # NOTE(review): bare except masks all errors (incl. KeyboardInterrupt)
        # as a 500 — consider narrowing to the azure client's exception types.
        except:
            raise web.HTTPError(500, u'Notebook cannot be read.')
        try:
            # v1 and v2 and json in the .ipynb files.
            nb = current.reads(s, u'json')
        except:
            raise web.HTTPError(500, u'Unreadable JSON notebook.')
        # Todo: The last modified should actually be saved in the notebook document.
        # We are just using the current datetime until that is implemented.
        last_modified = datetime.datetime.utcnow()
        return last_modified, nb

    def write_notebook_object(self, nb, notebook_id=None):
        """Save an existing notebook object by notebook_id."""
        try:
            new_name = nb.metadata.name
        except AttributeError:
            raise web.HTTPError(400, u'Missing notebook name')
        # No id means this is a brand-new notebook.
        if notebook_id is None:
            notebook_id = self.new_notebook_id(new_name)
        if notebook_id not in self.mapping:
            raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
        try:
            data = current.writes(nb, u'json')
        except Exception as e:
            raise web.HTTPError(400, u'Unexpected error while saving notebook: %s' % e)
        # Notebook display name travels as blob metadata, not content.
        metadata = {'nbname': new_name}
        try:
            self.blob_service.put_blob(self.container, notebook_id, data, 'BlockBlob', x_ms_meta_name_values=metadata)
        except Exception as e:
            raise web.HTTPError(400, u'Unexpected error while saving notebook: %s' % e)
        # Keep the in-memory id->name cache in sync with the blob store.
        self.mapping[notebook_id] = new_name
        return notebook_id

    def delete_notebook(self, notebook_id):
        """Delete notebook by notebook_id."""
        if not self.notebook_exists(notebook_id):
            raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
        try:
            self.blob_service.delete_blob(self.container, notebook_id)
        except Exception as e:
            raise web.HTTPError(400, u'Unexpected error while deleting notebook: %s' % e)
        else:
            # Only drop the local id mapping after the blob delete succeeded.
            self.delete_notebook_id(notebook_id)

    def info_string(self):
        # Shown at server startup to identify the storage backend.
        return "Serving notebooks from Azure storage: %s, %s" % (self.account_name, self.container)
| 38.479167 | 118 | 0.608554 |
import datetime
import azure
from azure.storage import BlobService
from tornado import web
from .nbmanager import NotebookManager
from IPython.nbformat import current
from IPython.utils.traitlets import Unicode, Instance
class AzureNotebookManager(NotebookManager):
account_name = Unicode('', config=True, help='Azure storage account name.')
account_key = Unicode('', config=True, help='Azure storage account key.')
container = Unicode('', config=True, help='Container name for notebooks.')
blob_service_host_base = Unicode('.blob.core.windows.net', config=True,
help='The basename for the blob service URL. If running on the preview site this '
'will be .blob.core.azure-preview.com.')
def _blob_service_host_base_changed(self, new):
self._update_service_host_base(new)
blob_service = Instance('azure.storage.BlobService')
def _blob_service_default(self):
return BlobService(account_name=self.account_name, account_key=self.account_key)
def __init__(self, **kwargs):
super(AzureNotebookManager, self).__init__(**kwargs)
self._update_service_host_base(self.blob_service_host_base)
self._create_container()
def _update_service_host_base(self, shb):
azure.BLOB_SERVICE_HOST_BASE = shb
def _create_container(self):
self.blob_service.create_container(self.container)
def load_notebook_names(self):
self.mapping = {}
blobs = self.blob_service.list_blobs(self.container)
ids = [blob.name for blob in blobs]
for id in ids:
md = self.blob_service.get_blob_metadata(self.container, id)
name = md['x-ms-meta-nbname']
self.mapping[id] = name
def list_notebooks(self):
data = [dict(notebook_id=id,name=name) for id, name in self.mapping.items()]
data = sorted(data, key=lambda item: item['name'])
return data
def read_notebook_object(self, notebook_id):
if not self.notebook_exists(notebook_id):
raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
try:
s = self.blob_service.get_blob(self.container, notebook_id)
except:
raise web.HTTPError(500, u'Notebook cannot be read.')
try:
nb = current.reads(s, u'json')
except:
raise web.HTTPError(500, u'Unreadable JSON notebook.')
last_modified = datetime.datetime.utcnow()
return last_modified, nb
def write_notebook_object(self, nb, notebook_id=None):
try:
new_name = nb.metadata.name
except AttributeError:
raise web.HTTPError(400, u'Missing notebook name')
if notebook_id is None:
notebook_id = self.new_notebook_id(new_name)
if notebook_id not in self.mapping:
raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
try:
data = current.writes(nb, u'json')
except Exception as e:
raise web.HTTPError(400, u'Unexpected error while saving notebook: %s' % e)
metadata = {'nbname': new_name}
try:
self.blob_service.put_blob(self.container, notebook_id, data, 'BlockBlob', x_ms_meta_name_values=metadata)
except Exception as e:
raise web.HTTPError(400, u'Unexpected error while saving notebook: %s' % e)
self.mapping[notebook_id] = new_name
return notebook_id
def delete_notebook(self, notebook_id):
    """Delete the blob backing ``notebook_id`` and drop it from the index."""
    if not self.notebook_exists(notebook_id):
        raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
    try:
        self.blob_service.delete_blob(self.container, notebook_id)
    except Exception as err:
        raise web.HTTPError(400, u'Unexpected error while deleting notebook: %s' % err)
    # Reached only when the remote delete succeeded.
    self.delete_notebook_id(notebook_id)
def info_string(self):
    """One-line description of where notebooks are being served from."""
    details = (self.account_name, self.container)
    return "Serving notebooks from Azure storage: %s, %s" % details
| true | true |
f7f60b62d5a1159087e0cfa1e581c8142c64677a | 9,448 | py | Python | pyro/infer/importance.py | patrickeganfoley/pyro | 3bd5e099e85f3686c66fc3b53476c3b009a77a02 | [
"Apache-2.0"
] | 2 | 2020-06-05T20:40:50.000Z | 2020-09-05T15:39:48.000Z | pyro/infer/importance.py | patrickeganfoley/pyro | 3bd5e099e85f3686c66fc3b53476c3b009a77a02 | [
"Apache-2.0"
] | 1 | 2020-05-12T16:26:21.000Z | 2020-05-12T17:23:13.000Z | pyro/infer/importance.py | patrickeganfoley/pyro | 3bd5e099e85f3686c66fc3b53476c3b009a77a02 | [
"Apache-2.0"
] | 1 | 2021-04-11T21:37:25.000Z | 2021-04-11T21:37:25.000Z | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import math
import warnings
import torch
import pyro
import pyro.poutine as poutine
from pyro.ops.stats import fit_generalized_pareto
from .abstract_infer import TracePosterior
from .enum import get_importance_trace
class Importance(TracePosterior):
    """
    :param model: probabilistic model defined as a function
    :param guide: guide used for sampling defined as a function
    :param num_samples: number of samples to draw from the guide (default 10)

    This method performs posterior inference by importance sampling
    using the guide as the proposal distribution.
    If no guide is provided, it defaults to proposing from the model's prior.
    """

    def __init__(self, model, guide=None, num_samples=None):
        """
        Constructor. default to num_samples = 10, guide = model
        """
        super().__init__()
        if num_samples is None:
            num_samples = 10
            warnings.warn("num_samples not provided, defaulting to {}".format(num_samples))
        if guide is None:
            # propose from the prior by making a guide from the model by hiding observes
            guide = poutine.block(model, hide_types=["observe"])
        self.num_samples = num_samples
        self.model = model
        self.guide = guide

    def _traces(self, *args, **kwargs):
        """
        Generator of weighted samples from the proposal distribution.

        Yields ``(model_trace, log_weight)`` pairs, one per sample.
        """
        for i in range(self.num_samples):
            guide_trace = poutine.trace(self.guide).get_trace(*args, **kwargs)
            # Replay the model against the guide's trace so both share the
            # same latent sample values.
            model_trace = poutine.trace(
                poutine.replay(self.model, trace=guide_trace)).get_trace(*args, **kwargs)
            # log importance weight = log p(x, z) - log q(z)
            log_weight = model_trace.log_prob_sum() - guide_trace.log_prob_sum()
            yield (model_trace, log_weight)

    def get_log_normalizer(self):
        """
        Estimator of the normalizing constant of the target distribution.
        (mean of the unnormalized weights)

        Returns None (after a warning) when no samples have been drawn yet.
        """
        # ensure list is not empty
        if self.log_weights:
            log_w = torch.tensor(self.log_weights)
            log_num_samples = torch.log(torch.tensor(self.num_samples * 1.))
            # log mean(exp(log_w)), computed stably in log space.
            return torch.logsumexp(log_w - log_num_samples, 0)
        else:
            warnings.warn("The log_weights list is empty, can not compute normalizing constant estimate.")

    def get_normalized_weights(self, log_scale=False):
        """
        Compute the self-normalized importance weights.

        :param log_scale: if True return log-weights rather than weights.
        """
        if self.log_weights:
            log_w = torch.tensor(self.log_weights)
            log_w_norm = log_w - torch.logsumexp(log_w, 0)
            return log_w_norm if log_scale else torch.exp(log_w_norm)
        else:
            warnings.warn("The log_weights list is empty. There is nothing to normalize.")

    def get_ESS(self):
        """
        Compute (Importance Sampling) Effective Sample Size (ESS).

        ESS = 1 / sum(w_i^2) for normalized weights w_i; 0 when no weights
        have been collected yet.
        """
        if self.log_weights:
            log_w_norm = self.get_normalized_weights(log_scale=True)
            ess = torch.exp(-torch.logsumexp(2*log_w_norm, 0))
        else:
            warnings.warn("The log_weights list is empty, effective sample size is zero.")
            ess = 0
        return ess
def vectorized_importance_weights(model, guide, *args, **kwargs):
    """
    :param model: probabilistic model defined as a function
    :param guide: guide used for sampling defined as a function
    :param num_samples: number of samples to draw from the guide (default 1)
    :param int max_plate_nesting: Bound on max number of nested :func:`pyro.plate` contexts.
    :param bool normalized: set to True to return self-normalized importance weights
    :returns: returns a ``(num_samples,)``-shaped tensor of importance weights
        and the model and guide traces that produced them

    Vectorized computation of importance weights for models with static structure::

        log_weights, model_trace, guide_trace = \\
            vectorized_importance_weights(model, guide, *args,
                                          num_samples=1000,
                                          max_plate_nesting=4,
                                          normalized=False)
    """
    num_samples = kwargs.pop("num_samples", 1)
    max_plate_nesting = kwargs.pop("max_plate_nesting", None)
    normalized = kwargs.pop("normalized", False)

    if max_plate_nesting is None:
        raise ValueError("must provide max_plate_nesting")
    # Reserve one extra dimension for the particle plate added below.
    max_plate_nesting += 1

    def vectorize(fn):
        # Wrap the model/guide so every sample site gains a leading
        # particle dimension of size num_samples.
        def _fn(*args, **kwargs):
            with pyro.plate("num_particles_vectorized", num_samples, dim=-max_plate_nesting):
                return fn(*args, **kwargs)
        return _fn

    model_trace, guide_trace = get_importance_trace(
        "flat", max_plate_nesting, vectorize(model), vectorize(guide), args, kwargs)

    guide_trace.pack_tensors()
    model_trace.pack_tensors(guide_trace.plate_to_symbol)

    if num_samples == 1:
        log_weights = model_trace.log_prob_sum() - guide_trace.log_prob_sum()
    else:
        wd = guide_trace.plate_to_symbol["num_particles_vectorized"]
        log_weights = 0.
        for site in model_trace.nodes.values():
            if site["type"] != "sample":
                continue
            # Sum each site's log-prob over every dim except the particle dim.
            log_weights += torch.einsum(site["packed"]["log_prob"]._pyro_dims + "->" + wd,
                                        [site["packed"]["log_prob"]])

        for site in guide_trace.nodes.values():
            if site["type"] != "sample":
                continue
            log_weights -= torch.einsum(site["packed"]["log_prob"]._pyro_dims + "->" + wd,
                                        [site["packed"]["log_prob"]])

    if normalized:
        # BUG FIX: torch.logsumexp requires a ``dim`` argument; the previous
        # call ``torch.logsumexp(log_weights)`` raised a TypeError whenever
        # normalized=True was requested.
        log_weights = log_weights - torch.logsumexp(log_weights, 0)
    return log_weights, model_trace, guide_trace
@torch.no_grad()
def psis_diagnostic(model, guide, *args, **kwargs):
    """
    Computes the Pareto tail index k for a model/guide pair using the technique
    described in [1], which builds on previous work in [2]. If :math:`0 < k < 0.5`
    the guide is a good approximation to the model posterior, in the sense
    described in [1]. If :math:`0.5 \\le k \\le 0.7`, the guide provides a suboptimal
    approximation to the posterior, but may still be useful in practice. If
    :math:`k > 0.7` the guide program provides a poor approximation to the full
    posterior, and caution should be used when using the guide. Note, however,
    that a guide may be a poor fit to the full posterior while still yielding
    reasonable model predictions. If :math:`k < 0.0` the importance weights
    corresponding to the model and guide appear to be bounded from above; this
    would be a bizarre outcome for a guide trained via ELBO maximization. Please
    see [1] for a more complete discussion of how the tail index k should be
    interpreted.

    Please be advised that a large number of samples may be required for an
    accurate estimate of k.

    Note that we assume that the model and guide are both vectorized and have
    static structure. As is canonical in Pyro, the args and kwargs are passed
    to the model and guide.

    References
    [1] 'Yes, but Did It Work?: Evaluating Variational Inference.'
    Yuling Yao, Aki Vehtari, Daniel Simpson, Andrew Gelman
    [2] 'Pareto Smoothed Importance Sampling.'
    Aki Vehtari, Andrew Gelman, Jonah Gabry

    :param callable model: the model program.
    :param callable guide: the guide program.
    :param int num_particles: the total number of times we run the model and guide in
        order to compute the diagnostic. defaults to 1000.
    :param max_simultaneous_particles: the maximum number of simultaneous samples drawn
        from the model and guide. defaults to `num_particles`. `num_particles` must be
        divisible by `max_simultaneous_particles`.
    :param int max_plate_nesting: optional bound on max number of nested :func:`pyro.plate`
        contexts in the model/guide. defaults to 7.

    :returns float: the PSIS diagnostic k
    """
    num_particles = kwargs.pop('num_particles', 1000)
    max_simultaneous_particles = kwargs.pop('max_simultaneous_particles', num_particles)
    max_plate_nesting = kwargs.pop('max_plate_nesting', 7)

    if num_particles % max_simultaneous_particles != 0:
        raise ValueError("num_particles must be divisible by max_simultaneous_particles.")

    # Draw the particles in N equally sized batches to bound peak memory.
    N = num_particles // max_simultaneous_particles
    log_weights = [vectorized_importance_weights(model, guide, num_samples=max_simultaneous_particles,
                                                 max_plate_nesting=max_plate_nesting,
                                                 *args, **kwargs)[0] for _ in range(N)]
    log_weights = torch.cat(log_weights)
    # Shift by the max before exponentiation for numerical stability; the
    # shift leaves the fitted tail index unchanged.
    log_weights -= log_weights.max()
    log_weights = torch.sort(log_weights, descending=False)[0]

    # Tail size follows [2]: roughly min(20% of samples, 3 * sqrt(M)).
    cutoff_index = - int(math.ceil(min(0.2 * num_particles, 3.0 * math.sqrt(num_particles)))) - 1
    lw_cutoff = max(math.log(1.0e-15), log_weights[cutoff_index])
    lw_tail = log_weights[log_weights > lw_cutoff]

    if len(lw_tail) < 10:
        warnings.warn("Not enough tail samples to compute PSIS diagnostic; increase num_particles.")
        k = float('inf')
    else:
        # Fit a generalized Pareto distribution to the shifted tail weights;
        # the shape parameter k is the diagnostic.
        k, _ = fit_generalized_pareto(lw_tail.exp() - math.exp(lw_cutoff))

    return k
| 43.141553 | 106 | 0.662257 |
import math
import warnings
import torch
import pyro
import pyro.poutine as poutine
from pyro.ops.stats import fit_generalized_pareto
from .abstract_infer import TracePosterior
from .enum import get_importance_trace
class Importance(TracePosterior):
def __init__(self, model, guide=None, num_samples=None):
super().__init__()
if num_samples is None:
num_samples = 10
warnings.warn("num_samples not provided, defaulting to {}".format(num_samples))
if guide is None:
guide = poutine.block(model, hide_types=["observe"])
self.num_samples = num_samples
self.model = model
self.guide = guide
def _traces(self, *args, **kwargs):
for i in range(self.num_samples):
guide_trace = poutine.trace(self.guide).get_trace(*args, **kwargs)
model_trace = poutine.trace(
poutine.replay(self.model, trace=guide_trace)).get_trace(*args, **kwargs)
log_weight = model_trace.log_prob_sum() - guide_trace.log_prob_sum()
yield (model_trace, log_weight)
def get_log_normalizer(self):
if self.log_weights:
log_w = torch.tensor(self.log_weights)
log_num_samples = torch.log(torch.tensor(self.num_samples * 1.))
return torch.logsumexp(log_w - log_num_samples, 0)
else:
warnings.warn("The log_weights list is empty, can not compute normalizing constant estimate.")
def get_normalized_weights(self, log_scale=False):
if self.log_weights:
log_w = torch.tensor(self.log_weights)
log_w_norm = log_w - torch.logsumexp(log_w, 0)
return log_w_norm if log_scale else torch.exp(log_w_norm)
else:
warnings.warn("The log_weights list is empty. There is nothing to normalize.")
def get_ESS(self):
if self.log_weights:
log_w_norm = self.get_normalized_weights(log_scale=True)
ess = torch.exp(-torch.logsumexp(2*log_w_norm, 0))
else:
warnings.warn("The log_weights list is empty, effective sample size is zero.")
ess = 0
return ess
def vectorized_importance_weights(model, guide, *args, **kwargs):
num_samples = kwargs.pop("num_samples", 1)
max_plate_nesting = kwargs.pop("max_plate_nesting", None)
normalized = kwargs.pop("normalized", False)
if max_plate_nesting is None:
raise ValueError("must provide max_plate_nesting")
max_plate_nesting += 1
def vectorize(fn):
def _fn(*args, **kwargs):
with pyro.plate("num_particles_vectorized", num_samples, dim=-max_plate_nesting):
return fn(*args, **kwargs)
return _fn
model_trace, guide_trace = get_importance_trace(
"flat", max_plate_nesting, vectorize(model), vectorize(guide), args, kwargs)
guide_trace.pack_tensors()
model_trace.pack_tensors(guide_trace.plate_to_symbol)
if num_samples == 1:
log_weights = model_trace.log_prob_sum() - guide_trace.log_prob_sum()
else:
wd = guide_trace.plate_to_symbol["num_particles_vectorized"]
log_weights = 0.
for site in model_trace.nodes.values():
if site["type"] != "sample":
continue
log_weights += torch.einsum(site["packed"]["log_prob"]._pyro_dims + "->" + wd,
[site["packed"]["log_prob"]])
for site in guide_trace.nodes.values():
if site["type"] != "sample":
continue
log_weights -= torch.einsum(site["packed"]["log_prob"]._pyro_dims + "->" + wd,
[site["packed"]["log_prob"]])
if normalized:
log_weights = log_weights - torch.logsumexp(log_weights)
return log_weights, model_trace, guide_trace
@torch.no_grad()
def psis_diagnostic(model, guide, *args, **kwargs):
num_particles = kwargs.pop('num_particles', 1000)
max_simultaneous_particles = kwargs.pop('max_simultaneous_particles', num_particles)
max_plate_nesting = kwargs.pop('max_plate_nesting', 7)
if num_particles % max_simultaneous_particles != 0:
raise ValueError("num_particles must be divisible by max_simultaneous_particles.")
N = num_particles // max_simultaneous_particles
log_weights = [vectorized_importance_weights(model, guide, num_samples=max_simultaneous_particles,
max_plate_nesting=max_plate_nesting,
*args, **kwargs)[0] for _ in range(N)]
log_weights = torch.cat(log_weights)
log_weights -= log_weights.max()
log_weights = torch.sort(log_weights, descending=False)[0]
cutoff_index = - int(math.ceil(min(0.2 * num_particles, 3.0 * math.sqrt(num_particles)))) - 1
lw_cutoff = max(math.log(1.0e-15), log_weights[cutoff_index])
lw_tail = log_weights[log_weights > lw_cutoff]
if len(lw_tail) < 10:
warnings.warn("Not enough tail samples to compute PSIS diagnostic; increase num_particles.")
k = float('inf')
else:
k, _ = fit_generalized_pareto(lw_tail.exp() - math.exp(lw_cutoff))
return k
| true | true |
f7f60b9cee51f3d268a030062f033bd33c469b0b | 924 | py | Python | docs/conf.py | datadesk/python-muckrock | fcea70270e9ff596e18d03892f60bfc98e171d30 | [
"MIT"
] | 4 | 2019-01-29T15:41:29.000Z | 2019-12-03T18:07:13.000Z | docs/conf.py | datadesk/python-muckrock | fcea70270e9ff596e18d03892f60bfc98e171d30 | [
"MIT"
] | 8 | 2019-01-29T16:56:23.000Z | 2019-02-07T04:17:05.000Z | docs/conf.py | datadesk/python-muckrock | fcea70270e9ff596e18d03892f60bfc98e171d30 | [
"MIT"
] | 1 | 2019-01-29T15:42:06.000Z | 2019-01-29T15:42:06.000Z | """Configure Sphinx configuration."""
import os
import sys
from datetime import datetime

# Insert the parent directory into the path so autodoc can import the package.
sys.path.insert(0, os.path.abspath(".."))

# Sphinx extensions: Markdown support, API docs, Google/NumPy docstrings,
# and click CLI documentation.
extensions = [
    "myst_parser",
    "sphinx.ext.autodoc",
    "sphinxcontrib.napoleon",
    "sphinx_click",
]
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
project = "python-muckrock"
# Keep the copyright year current without manual edits.
year = datetime.now().year
copyright = f"{year} Ben Welsh"
exclude_patterns = ["_build"]

# HTML output configuration (alabaster theme).
html_theme = "alabaster"
html_sidebars = {
    "**": [
        # "about.html",
        # "navigation.html",
        "relations.html",
        "searchbox.html",
        "donate.html",
    ]
}
html_theme_options = {
    "canonical_url": f"https://palewi.re/docs/{project}/",
    "show_powered_by": False,
    "show_relbar_bottom": True,
}
html_static_path = ["_static"]
html_css_files = [
    "css/custom.css",
]
pygments_style = "sphinx"
| 19.659574 | 58 | 0.649351 | import os
import sys
from datetime import datetime
sys.path.insert(0, os.path.abspath(".."))
extensions = [
"myst_parser",
"sphinx.ext.autodoc",
"sphinxcontrib.napoleon",
"sphinx_click",
]
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
project = "python-muckrock"
year = datetime.now().year
copyright = f"{year} Ben Welsh"
exclude_patterns = ["_build"]
html_theme = "alabaster"
html_sidebars = {
"**": [
"relations.html",
"searchbox.html",
"donate.html",
]
}
html_theme_options = {
"canonical_url": f"https://palewi.re/docs/{project}/",
"show_powered_by": False,
"show_relbar_bottom": True,
}
html_static_path = ["_static"]
html_css_files = [
"css/custom.css",
]
pygments_style = "sphinx"
| true | true |
f7f60bcda4ea0152233fd99b1b9eeda41b0a4739 | 3,056 | py | Python | quantum/tests/unit/test_quantum_manager.py | ericwanghp/quantum | 1c0d543552c38a5eac6dd08580b73725c5757876 | [
"Apache-2.0"
] | 1 | 2021-04-18T15:23:19.000Z | 2021-04-18T15:23:19.000Z | quantum/tests/unit/test_quantum_manager.py | ericwanghp/quantum | 1c0d543552c38a5eac6dd08580b73725c5757876 | [
"Apache-2.0"
] | null | null | null | quantum/tests/unit/test_quantum_manager.py | ericwanghp/quantum | 1c0d543552c38a5eac6dd08580b73725c5757876 | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import types
import fixtures
from oslo.config import cfg
from quantum.common import config
from quantum.common.test_lib import test_config
from quantum.manager import QuantumManager
from quantum.openstack.common import log as logging
from quantum.plugins.common import constants
from quantum.tests import base
from quantum.tests.unit import dummy_plugin
LOG = logging.getLogger(__name__)
DB_PLUGIN_KLASS = 'quantum.db.db_base_plugin_v2.QuantumDbPluginV2'
ROOTDIR = os.path.dirname(os.path.dirname(__file__))
ETCDIR = os.path.join(ROOTDIR, 'etc')
def etcdir(*p):
    """Join path components *p* onto the test ``etc`` directory."""
    return os.path.join(ETCDIR, *p)
class QuantumManagerTestCase(base.BaseTestCase):
    """Tests for QuantumManager service-plugin loading behaviour."""

    def setUp(self):
        super(QuantumManagerTestCase, self).setUp()
        args = ['--config-file', etcdir('quantum.conf.test')]
        # If test_config specifies some config-file, use it, as well
        config.parse(args=args)
        self.addCleanup(cfg.CONF.reset)
        # Reset the QuantumManager singleton between tests so every test
        # loads plugins from its own configuration overrides.
        self.useFixture(
            fixtures.MonkeyPatch('quantum.manager.QuantumManager._instance'))

    def test_service_plugin_is_loaded(self):
        """A configured service plugin is importable via its service type."""
        cfg.CONF.set_override("core_plugin",
                              test_config.get('plugin_name_v2',
                                              DB_PLUGIN_KLASS))
        cfg.CONF.set_override("service_plugins",
                              ["quantum.tests.unit.dummy_plugin."
                               "DummyServicePlugin"])
        mgr = QuantumManager.get_instance()
        plugin = mgr.get_service_plugins()[constants.DUMMY]
        # NOTE(review): ``types.ClassType`` exists only on Python 2; this
        # isinstance check would need updating for Python 3.
        self.assertTrue(
            isinstance(plugin,
                       (dummy_plugin.DummyServicePlugin, types.ClassType)),
            "loaded plugin should be of type QuantumDummyPlugin")

    def test_multiple_plugins_specified_for_service_type(self):
        """Loading two plugins for one service type must raise."""
        cfg.CONF.set_override("service_plugins",
                              ["quantum.tests.unit.dummy_plugin."
                               "QuantumDummyPlugin",
                               "quantum.tests.unit.dummy_plugin."
                               "QuantumDummyPlugin"])
        try:
            QuantumManager.get_instance().get_service_plugins()
            # Reaching this line means no exception was raised: fail loudly.
            self.assertTrue(False,
                            "Shouldn't load multiple plugins "
                            "for the same type")
        except Exception as e:
            LOG.debug(str(e))
| 36.380952 | 78 | 0.64627 |
import os
import types
import fixtures
from oslo.config import cfg
from quantum.common import config
from quantum.common.test_lib import test_config
from quantum.manager import QuantumManager
from quantum.openstack.common import log as logging
from quantum.plugins.common import constants
from quantum.tests import base
from quantum.tests.unit import dummy_plugin
LOG = logging.getLogger(__name__)
DB_PLUGIN_KLASS = 'quantum.db.db_base_plugin_v2.QuantumDbPluginV2'
ROOTDIR = os.path.dirname(os.path.dirname(__file__))
ETCDIR = os.path.join(ROOTDIR, 'etc')
def etcdir(*p):
return os.path.join(ETCDIR, *p)
class QuantumManagerTestCase(base.BaseTestCase):
def setUp(self):
super(QuantumManagerTestCase, self).setUp()
args = ['--config-file', etcdir('quantum.conf.test')]
config.parse(args=args)
self.addCleanup(cfg.CONF.reset)
self.useFixture(
fixtures.MonkeyPatch('quantum.manager.QuantumManager._instance'))
def test_service_plugin_is_loaded(self):
cfg.CONF.set_override("core_plugin",
test_config.get('plugin_name_v2',
DB_PLUGIN_KLASS))
cfg.CONF.set_override("service_plugins",
["quantum.tests.unit.dummy_plugin."
"DummyServicePlugin"])
mgr = QuantumManager.get_instance()
plugin = mgr.get_service_plugins()[constants.DUMMY]
self.assertTrue(
isinstance(plugin,
(dummy_plugin.DummyServicePlugin, types.ClassType)),
"loaded plugin should be of type QuantumDummyPlugin")
def test_multiple_plugins_specified_for_service_type(self):
cfg.CONF.set_override("service_plugins",
["quantum.tests.unit.dummy_plugin."
"QuantumDummyPlugin",
"quantum.tests.unit.dummy_plugin."
"QuantumDummyPlugin"])
try:
QuantumManager.get_instance().get_service_plugins()
self.assertTrue(False,
"Shouldn't load multiple plugins "
"for the same type")
except Exception as e:
LOG.debug(str(e))
| true | true |
f7f60da0e90f8c3d5929c7b3c497a48882ce112c | 4,709 | py | Python | datasets/binary.py | shuxiaobo/TextTransferLearning | 6fb93bd43dde7012ece1bbe7a5beee0a991ccc43 | [
"MIT"
] | null | null | null | datasets/binary.py | shuxiaobo/TextTransferLearning | 6fb93bd43dde7012ece1bbe7a5beee0a991ccc43 | [
"MIT"
] | null | null | null | datasets/binary.py | shuxiaobo/TextTransferLearning | 6fb93bd43dde7012ece1bbe7a5beee0a991ccc43 | [
"MIT"
] | null | null | null | '''
Binary classifier and corresponding datasets : MR, CR, SUBJ, MPQA...
'''
from __future__ import absolute_import, division, unicode_literals
import io
import os
import logging
from utils.util import logger
from torch.utils.data import Dataset
from utils.util import prepare_dictionary
from tensorflow.python.keras.preprocessing import sequence
class BinaryClassifierEval(Dataset):
    """Dataset for binary text classification (MR, CR, SUBJ, MPQA, ...).

    Loads whitespace-tokenized lines of the form ``token ... label`` and
    maps tokens to ids through a (possibly cached) vocabulary file.
    """

    def __init__(self, args, num_class=2, seed=1111, filename=None):
        """
        :param args: experiment arguments namespace
        :param num_class: result class number
        :param seed: random seed
        :param filename: train | valid | test filename, default is train
        """
        self.seed = seed
        self.args = args
        self.num_class = num_class
        self.max_len = 0
        filename = filename if filename else args.train_file
        self.data_x, self.data_y = self.load_file(os.path.join(self.args.tmp_dir, self.__class__.__name__, filename))
        self.n_samples = len(self.data_x)
        self.word_file = os.path.join(args.tmp_dir, self.__class__.__name__, args.word_file)
        # Reuse an existing, non-empty vocabulary file; otherwise build one.
        if os.path.isfile(self.word_file) and os.path.getsize(self.word_file) > 0:
            self.word2id = self.get_word_index(self.word_file)
        else:
            self.word2id = self.prepare_dict(self.word_file)

    def load_file(self, fpath):
        """
        Load a data file with the format ``token token ... label``.

        Note: labels should be sparse ids starting from 0; e.g. a 3-class
        problem uses labels in (0, 1, 2).

        :param fpath: file path
        :return: (data_x, data_y)
        """
        with io.open(fpath, 'r', encoding='utf-8') as f:
            data_x = list()
            data_y = list()
            for line in f.read().splitlines():
                line = line.strip().split(' ')
                data_x.append(line[:-1])
                data_y.append(int(line[-1]))
                # Track the longest token sequence seen so far.
                self.max_len = max(self.max_len, len(line[:-1]))
        return data_x, data_y

    def prepare_dict(self, file_name):
        """Build the word->id dictionary from the loaded data and persist it."""
        logger("Prepare the dictionary for the {}...".format(self.__class__.__name__))
        word2id = prepare_dictionary(data=self.data_x, dict_path=file_name, exclude_n=self.args.skip_top, max_size=self.args.num_words)
        logger("Word2id size : %d" % len(word2id))
        return word2id

    def get_word_index(self, path=None):
        """Load the word->id dictionary from ``path`` (one word per line)."""
        if not path:
            # BUG FIX: the fallback previously concatenated the path pieces
            # without separators; join them the same way __init__ does.
            path = os.path.join(self.args.tmp_dir, self.__class__.__name__, self.args.word_file)
        word2id = dict()
        with open(path, mode='r', encoding='utf-8') as f:
            for l in f:
                word2id.setdefault(l.strip(), len(word2id))
        logger('Word2id size : %d' % len(word2id))
        return word2id

    @staticmethod
    def batchfy_fn(data):
        """Collate (token_ids, label) pairs: pad ids to the batch max length."""
        x = [d[0] for d in data]
        y = [d[1] for d in data]
        max_len = max(map(len, x))
        return sequence.pad_sequences(x, maxlen=max_len, padding='post'), y

    def __getitem__(self, index):
        # BUG FIX: the previous membership test used ``word2id.get(d)``
        # truthiness, so the word with id 0 was wrongly mapped to the
        # unknown token; test membership explicitly instead.
        result = [self.word2id[d] if d in self.word2id else self.word2id['_<UNKNOW>'] for d in self.data_x[index]]
        return result, self.data_y[index]

    def __len__(self):
        return self.n_samples
class CR(BinaryClassifierEval):
    """Customer-review binary classification task."""

    def __init__(self, args, seed=1111, filename=None):
        logging.debug('***** Task : ' + self.__class__.__name__ + ' *****\n\n')
        # BUG FIX: the positional call ``super(...).__init__(args, seed, filename)``
        # passed ``seed`` into the parent's ``num_class`` slot and ``filename``
        # into ``seed``; forward them by keyword instead.
        super(CR, self).__init__(args, seed=seed, filename=filename)
class MR(BinaryClassifierEval):
    """Movie-review binary classification task."""

    def __init__(self, args, seed=1111, filename=None):
        logging.debug('***** Task : ' + self.__class__.__name__ + ' *****\n\n')
        # BUG FIX: positional super() call mis-aligned ``seed``/``filename``
        # with the parent's ``num_class``/``seed`` parameters.
        super(MR, self).__init__(args, seed=seed, filename=filename)
class SUBJ(BinaryClassifierEval):
    """Subjectivity binary classification task."""

    def __init__(self, args, seed=1111, filename=None):
        logging.debug('***** Task : ' + self.__class__.__name__ + ' *****\n\n')
        # BUG FIX: positional super() call mis-aligned ``seed``/``filename``
        # with the parent's ``num_class``/``seed`` parameters.
        super(SUBJ, self).__init__(args, seed=seed, filename=filename)
class MPQA(BinaryClassifierEval):
    """MPQA opinion-polarity binary classification task."""

    def __init__(self, args, seed=1111, filename=None):
        logging.debug('***** Task : ' + self.__class__.__name__ + ' *****\n\n')
        # BUG FIX: positional super() call mis-aligned ``seed``/``filename``
        # with the parent's ``num_class``/``seed`` parameters.
        super(MPQA, self).__init__(args, seed=seed, filename=filename)
class Kaggle(BinaryClassifierEval):
    """Kaggle text classification task."""

    def __init__(self, args, seed=1111, filename=None):
        logging.debug('***** Task : ' + self.__class__.__name__ + ' *****\n\n')
        # BUG FIX: positional super() call mis-aligned ``seed``/``filename``
        # with the parent's ``num_class``/``seed`` parameters.
        super(Kaggle, self).__init__(args, seed=seed, filename=filename)
class TREC(BinaryClassifierEval):
    """TREC question-type classification task."""

    def __init__(self, args, seed=1111, filename=None):
        logging.debug('***** Task : ' + self.__class__.__name__ + ' *****\n\n')
        # BUG FIX: positional super() call mis-aligned ``seed``/``filename``
        # with the parent's ``num_class``/``seed`` parameters.
        super(TREC, self).__init__(args, seed=seed, filename=filename)
| 38.284553 | 143 | 0.62285 | from __future__ import absolute_import, division, unicode_literals
import io
import os
import logging
from utils.util import logger
from torch.utils.data import Dataset
from utils.util import prepare_dictionary
from tensorflow.python.keras.preprocessing import sequence
class BinaryClassifierEval(Dataset):
def __init__(self, args, num_class = 2, seed = 1111, filename = None):
self.seed = seed
self.args = args
self.num_class = num_class
self.max_len = 0
filename = filename if filename else args.train_file
self.data_x, self.data_y = self.load_file(os.path.join(self.args.tmp_dir, self.__class__.__name__, filename))
self.n_samples = len(self.data_x)
self.word_file = os.path.join(args.tmp_dir, self.__class__.__name__, args.word_file)
if os.path.isfile(self.word_file) and os.path.getsize(self.word_file) > 0:
self.word2id = self.get_word_index(self.word_file)
else:
self.word2id = self.prepare_dict(self.word_file)
def load_file(self, fpath):
with io.open(fpath, 'r', encoding = 'utf-8') as f:
data_x = list()
data_y = list()
for line in f.read().splitlines():
line = line.strip().split(' ')
data_x.append(line[:-1])
data_y.append(int(line[-1]))
self.max_len = len(line[:-1]) if len(line[:-1]) > self.max_len else self.max_len
return data_x, data_y
def prepare_dict(self, file_name):
logger("Prepare the dictionary for the {}...".format(self.__class__.__name__))
word2id = prepare_dictionary(data = self.data_x, dict_path = file_name, exclude_n = self.args.skip_top, max_size = self.args.num_words)
logger("Word2id size : %d" % len(word2id))
return word2id
def get_word_index(self, path = None):
if not path:
path = self.args.tmp_dir + self.__class__.__name__ + self.args.word_file
word2id = dict()
with open(path, mode = 'r', encoding = 'utf-8') as f:
for l in f:
word2id.setdefault(l.strip(), len(word2id))
logger('Word2id size : %d' % len(word2id))
return word2id
@staticmethod
def batchfy_fn(data):
x = [d[0] for d in data]
y = [d[1] for d in data]
max_len = max(map(len, x))
return sequence.pad_sequences(x, maxlen = max_len, padding = 'post'), y
def __getitem__(self, index):
result = [self.word2id[d] if self.word2id.get(d) else self.word2id['_<UNKNOW>'] for d in self.data_x[index]]
return result, self.data_y[index]
def __len__(self):
return self.n_samples
class CR(BinaryClassifierEval):
def __init__(self, args, seed = 1111, filename = None):
logging.debug('***** Task : ' + self.__class__.__name__ + ' *****\n\n')
super(self.__class__, self).__init__(args, seed, filename)
class MR(BinaryClassifierEval):
def __init__(self, args, seed = 1111, filename = None):
logging.debug('***** Task : ' + self.__class__.__name__ + ' *****\n\n')
super(self.__class__, self).__init__(args, seed, filename)
class SUBJ(BinaryClassifierEval):
def __init__(self, args, seed = 1111, filename = None):
logging.debug('***** Task : ' + self.__class__.__name__ + ' *****\n\n')
super(self.__class__, self).__init__(args, seed, filename)
class MPQA(BinaryClassifierEval):
def __init__(self, args, seed = 1111, filename = None):
logging.debug('***** Task : ' + self.__class__.__name__ + ' *****\n\n')
super(self.__class__, self).__init__(args, seed, filename)
class Kaggle(BinaryClassifierEval):
def __init__(self, args, seed = 1111, filename = None):
logging.debug('***** Task : ' + self.__class__.__name__ + ' *****\n\n')
super(self.__class__, self).__init__(args, seed, filename)
class TREC(BinaryClassifierEval):
def __init__(self, args, seed = 1111, filename = None):
logging.debug('***** Task : ' + self.__class__.__name__ + ' *****\n\n')
super(self.__class__, self).__init__(args, seed, filename)
| true | true |
f7f60dcda64a6130fe6bd3fb40b538c0c91ba495 | 43 | py | Python | tests/__init__.py | DavidVinicius/python_artmap | 358d4258a80ebaf0418d48f6dd5370832774f07f | [
"MIT"
] | 2 | 2021-11-17T13:21:04.000Z | 2022-03-30T01:58:14.000Z | tests/__init__.py | DavidVinicius/python_artmap | 358d4258a80ebaf0418d48f6dd5370832774f07f | [
"MIT"
] | null | null | null | tests/__init__.py | DavidVinicius/python_artmap | 358d4258a80ebaf0418d48f6dd5370832774f07f | [
"MIT"
] | 2 | 2021-12-21T00:43:58.000Z | 2022-03-07T09:34:12.000Z | """Unit test package for python_artmap."""
| 21.5 | 42 | 0.72093 | true | true | |
f7f60e058067e9ff91e5a961528a3bbd7aa12b7d | 1,012 | py | Python | stubs.min/System/ComponentModel/__init___parts/EditorBrowsableState.py | ricardyn/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | 1 | 2021-02-02T13:39:16.000Z | 2021-02-02T13:39:16.000Z | stubs.min/System/ComponentModel/__init___parts/EditorBrowsableState.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | stubs.min/System/ComponentModel/__init___parts/EditorBrowsableState.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | class EditorBrowsableState(Enum,IComparable,IFormattable,IConvertible):
"""
Specifies the browsable state of a property or method from within an editor.
enum EditorBrowsableState,values: Advanced (2),Always (0),Never (1)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
Advanced=None
Always=None
Never=None
value__=None
| 28.914286 | 215 | 0.670949 | class EditorBrowsableState(Enum,IComparable,IFormattable,IConvertible):
pass
""" __format__(formattable: IFormattable,format: str) -> str """
pass
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
Advanced=None
Always=None
Never=None
value__=None
| true | true |
f7f60f1a930ca663a148143743590384100e3f24 | 8,621 | py | Python | tests/test_data_loaders.py | ammokhov/cloudformation-cli | 419d22a2b9aaaa8b12ec2b4cd0d6b04058b9f8f5 | [
"Apache-2.0"
] | null | null | null | tests/test_data_loaders.py | ammokhov/cloudformation-cli | 419d22a2b9aaaa8b12ec2b4cd0d6b04058b9f8f5 | [
"Apache-2.0"
] | null | null | null | tests/test_data_loaders.py | ammokhov/cloudformation-cli | 419d22a2b9aaaa8b12ec2b4cd0d6b04058b9f8f5 | [
"Apache-2.0"
] | null | null | null | # fixture and parameter have the same name
# pylint: disable=redefined-outer-name
# pylint: disable=import-outside-toplevel
import json
from contextlib import contextmanager
from io import BytesIO, StringIO
from pathlib import Path
from subprocess import check_output
from unittest.mock import ANY, create_autospec, patch
import pytest
import yaml
from jsonschema.exceptions import RefResolutionError, ValidationError
from pytest_localserver.http import Request, Response, WSGIServer
from rpdk.core.data_loaders import (
STDIN_NAME,
get_file_base_uri,
load_resource_spec,
make_validator,
resource_json,
resource_stream,
resource_yaml,
)
from rpdk.core.exceptions import InternalError, SpecValidationError
from rpdk.core.plugin_base import LanguagePlugin
BASEDIR = Path(__file__).parent  # tests/test_data_loaders.py -> tests/
# Lonely continuation byte is invalid
# https://www.cl.cam.ac.uk/~mgk25/ucs/examples/UTF-8-test.txt
INVALID_UTF8 = b"\x80"
# Smallest schema accepted by load_resource_spec; tests below copy and mutate it.
BASIC_SCHEMA = {
    "typeName": "AWS::FOO::BAR",
    "description": "test schema",
    "properties": {"foo": {"type": "string"}},
    "primaryIdentifier": ["/properties/foo"],
    "additionalProperties": False,
}
def json_s(obj):
    """Return a file-like StringIO containing *obj* serialized as JSON."""
    encoded = json.dumps(obj)
    return StringIO(encoded)
@contextmanager
def wsgi_serve(application):
    """Yield a started local WSGI server for *application*; always stop it."""
    server = WSGIServer(application=application)
    try:
        server.start()
        yield server
    finally:
        server.stop()
def test_load_resource_spec_invalid_json():
    """Malformed JSON is rejected with the offending position in the message."""
    with pytest.raises(SpecValidationError) as exc_info:
        load_resource_spec(StringIO('{"foo": "aaaaa}'))
    message = str(exc_info.value)
    assert "line 1" in message
    assert "column 9" in message
def test_load_resource_spec_empty_is_invalid():
    """An empty document is not a valid resource spec."""
    empty = StringIO("")
    with pytest.raises(SpecValidationError):
        load_resource_spec(empty)
def test_load_resource_spec_boolean_is_invalid():
    """A bare JSON boolean is not a valid resource spec."""
    spec = json_s(True)
    with pytest.raises(SpecValidationError):
        load_resource_spec(spec)
def test_load_resource_spec_empty_object_is_invalid():
    """An empty JSON object lacks all required keys and is rejected."""
    spec = json_s({})
    with pytest.raises(SpecValidationError):
        load_resource_spec(spec)
def json_files_params(path, glob="*.json"):
    """Build pytest params (id = file name) for every file under *path* matching *glob*."""
    params = [pytest.param(p, id=p.name) for p in path.glob(glob)]
    return tuple(params)
@pytest.mark.parametrize(
    "example", json_files_params(BASEDIR.parent / "examples" / "schema", "*-*-*.json")
)
def test_load_resource_spec_example_spec_is_valid(example):
    """Each shipped example schema must load without errors."""
    with example.open("r", encoding="utf-8") as spec_file:
        result = load_resource_spec(spec_file)
    assert result
@pytest.mark.parametrize(
    "example", json_files_params(BASEDIR / "data" / "schema" / "valid")
)
def test_load_resource_spec_valid_snippets(example):
    """Each known-good schema snippet must load without errors."""
    with example.open("r", encoding="utf-8") as spec_file:
        result = load_resource_spec(spec_file)
    assert result
@pytest.mark.parametrize(
    "example", json_files_params(BASEDIR / "data" / "schema" / "invalid")
)
def test_load_resource_spec_invalid_snippets(example):
    """Each known-bad schema snippet must be rejected."""
    with example.open("r", encoding="utf-8") as spec_file, pytest.raises(
        SpecValidationError
    ):
        load_resource_spec(spec_file)
def test_load_resource_spec_remote_key_is_invalid():
    """User-supplied schemas may not define the reserved top-level key."""
    schema = {
        "typeName": "AWS::FOO::BAR",
        "description": "test schema",
        "properties": {"foo": {"type": "string"}},
        "primaryIdentifier": ["/properties/foo"],
        "remote": {},
    }
    with pytest.raises(SpecValidationError) as exc_info:
        load_resource_spec(json_s(schema))
    assert "remote" in str(exc_info.value)
def test_argparse_stdin_name():
    """Check argparse's magic file name for stdin in a pristine interpreter.

    pytest replaces stdin/stdout, which prevents testing this in-process, so
    a separate python3 process reports the name argparse gives the ``-`` file.
    """
    code = "; ".join(
        """import argparse
parser = argparse.ArgumentParser()
parser.add_argument("file", type=argparse.FileType("r"))
args = parser.parse_args(["-"])
print(args.file.name)
""".splitlines()
    )
    output = check_output(["python3", "-c", code])
    reported = output.rstrip().decode("utf-8")  # remove trailing newline
    assert reported == STDIN_NAME
def test_get_file_base_uri_file_object_no_name():
    """A nameless file object falls back to '<cwd>/-' as its base URI."""
    source = json_s(BASIC_SCHEMA)
    assert not hasattr(source, "name")
    expected = (Path.cwd() / "-").resolve().as_uri()
    assert get_file_base_uri(source) == expected
def test_load_resource_spec_file_object_stdin():
    """A file object named like stdin also maps to '<cwd>/-'."""
    source = json_s(BASIC_SCHEMA)
    source.name = STDIN_NAME
    expected = (Path.cwd() / "-").resolve().as_uri()
    assert get_file_base_uri(source) == expected
def test_load_resource_spec_file_object_has_name(tmpdir):
    """A named file object uses its own resolved path as the base URI."""
    source = json_s(BASIC_SCHEMA)
    source.name = tmpdir.join("test.json")
    expected = Path(source.name).resolve().as_uri()
    assert get_file_base_uri(source) == expected
@pytest.mark.parametrize(
    "ref_fn",
    (
        lambda server: server.url + "/bar",  # absolute
        lambda _server: "./bar",  # relative
    ),
)
def test_load_resource_spec_uses_id_if_id_is_set(ref_fn):
    """$refs resolve against the schema's $id, whether absolute or relative."""
    @Request.application
    def application(_request):
        body = json.dumps({"type": "string"})
        return Response(body, mimetype="application/json")

    with wsgi_serve(application) as server:
        schema = dict(BASIC_SCHEMA)
        schema["$id"] = server.url + "/foo"
        schema["properties"] = {"foo": {"$ref": ref_fn(server)}}
        inlined = load_resource_spec(json_s(schema))
        assert inlined["remote"]["schema0"]["type"] == "string"
def test_load_resource_spec_inliner_produced_invalid_schema():
    """If the ref inliner emits garbage, an InternalError wraps the cause."""
    with patch("rpdk.core.data_loaders.RefInliner", autospec=True) as mock_inliner:
        mock_inliner.return_value.inline.return_value = {}
        with pytest.raises(InternalError) as exc_info:
            load_resource_spec(json_s(BASIC_SCHEMA))
        mock_inliner.assert_called_once_with(ANY, BASIC_SCHEMA)
    cause = exc_info.value.__cause__
    assert cause
    assert isinstance(cause, ValidationError)
def test_load_resource_spec_invalid_ref():
    """A dangling local $ref surfaces as a SpecValidationError caused by a RefResolutionError."""
    broken = json.loads(json.dumps(BASIC_SCHEMA))  # deep copy via JSON round-trip
    broken["properties"]["foo"] = {"$ref": "#/bar"}
    with pytest.raises(SpecValidationError) as exc_info:
        load_resource_spec(json_s(broken))
    cause = exc_info.value.__cause__
    assert cause
    assert isinstance(cause, RefResolutionError)
    assert "bar" in str(cause)
@pytest.fixture
def plugin():
    """Autospecced LanguagePlugin whose settings come from bundled test data."""
    mock_plugin = create_autospec(LanguagePlugin)
    defaults = resource_stream(__name__, "data/project_defaults.yaml")
    schema = resource_json(__name__, "data/project_schema.json")
    mock_plugin.project_settings_defaults.return_value = defaults
    mock_plugin.project_settings_schema.return_value = schema
    return mock_plugin
def test_make_validator_handlers_use_local_meta_schema():
    """Validator construction must not reach out to remote meta-schemas.

    The $ref target is deliberately unreachable; if the validator tried to
    fetch it over the network, this would raise.
    """
    try:
        validator = make_validator(
            {"$ref": "https://somewhere/does/not/exist"}, base_uri="http://localhost/"
        )
        validator.validate(True)
    except Exception:  # pylint: disable=broad-except
        # Fixed typo in the failure message ("Unexpect error, should success").
        pytest.fail("Unexpected error, should succeed")
def mock_pkg_resource_stream(bytes_in, func=resource_stream):
    """Invoke *func* with pkg_resources patched to serve *bytes_in* as the stream."""
    resource_name = "data/test.utf-8"
    target = "rpdk.core.data_loaders.pkg_resources.resource_stream"
    fake_stream = BytesIO(bytes_in)
    with patch(target, autospec=True, return_value=fake_stream) as mock_stream:
        result = func(__name__, resource_name)
    mock_stream.assert_called_once_with(__name__, resource_name)
    return result
def test_resource_stream_decoding_valid():
    """A valid UTF-8 payload decodes transparently."""
    santa = "🎅"
    stream = mock_pkg_resource_stream(santa.encode("utf-8"))
    assert stream.read() == santa
def test_resource_stream_decoding_invalid():
    """Invalid UTF-8 raises only once the lazily-decoded stream is read."""
    stream = mock_pkg_resource_stream(INVALID_UTF8)
    # stream is lazily decoded
    with pytest.raises(UnicodeDecodeError) as exc_info:
        stream.read()
    error = exc_info.value
    assert error.encoding == "utf-8"
    assert error.object == INVALID_UTF8
def test_resource_stream_universal_newlines():
    """Windows line endings are normalized to a bare newline."""
    stream = mock_pkg_resource_stream(b"Windows\r\n")
    assert stream.read() == "Windows\n"
def test_resource_stream_with_statement():
    """The stream works as a context manager and is closed on exit."""
    text = "Hello, World"
    with mock_pkg_resource_stream(text.encode("utf-8")) as stream:
        assert stream.read() == text
    with pytest.raises(ValueError) as exc_info:
        stream.read()
    assert "I/O operation on closed file" in str(exc_info.value)
def test_resource_json():
    """resource_json parses the streamed bytes as JSON."""
    expected = {"foo": "bar"}
    payload = json.dumps(expected).encode("utf-8")
    assert mock_pkg_resource_stream(payload, func=resource_json) == expected
def test_resource_yaml():
    """resource_yaml parses the streamed bytes as YAML."""
    expected = {"foo": "bar"}
    payload = yaml.dump(expected).encode("utf-8")
    assert mock_pkg_resource_stream(payload, func=resource_yaml) == expected
| 30.143357 | 86 | 0.702239 |
import json
from contextlib import contextmanager
from io import BytesIO, StringIO
from pathlib import Path
from subprocess import check_output
from unittest.mock import ANY, create_autospec, patch
import pytest
import yaml
from jsonschema.exceptions import RefResolutionError, ValidationError
from pytest_localserver.http import Request, Response, WSGIServer
from rpdk.core.data_loaders import (
STDIN_NAME,
get_file_base_uri,
load_resource_spec,
make_validator,
resource_json,
resource_stream,
resource_yaml,
)
from rpdk.core.exceptions import InternalError, SpecValidationError
from rpdk.core.plugin_base import LanguagePlugin
BASEDIR = Path(__file__).parent
INVALID_UTF8 = b"\x80"
BASIC_SCHEMA = {
"typeName": "AWS::FOO::BAR",
"description": "test schema",
"properties": {"foo": {"type": "string"}},
"primaryIdentifier": ["/properties/foo"],
"additionalProperties": False,
}
def json_s(obj):
return StringIO(json.dumps(obj))
@contextmanager
def wsgi_serve(application):
server = WSGIServer(application=application)
try:
server.start()
yield server
finally:
server.stop()
def test_load_resource_spec_invalid_json():
with pytest.raises(SpecValidationError) as excinfo:
load_resource_spec(StringIO('{"foo": "aaaaa}'))
assert "line 1" in str(excinfo.value)
assert "column 9" in str(excinfo.value)
def test_load_resource_spec_empty_is_invalid():
with pytest.raises(SpecValidationError):
load_resource_spec(StringIO(""))
def test_load_resource_spec_boolean_is_invalid():
with pytest.raises(SpecValidationError):
load_resource_spec(json_s(True))
def test_load_resource_spec_empty_object_is_invalid():
with pytest.raises(SpecValidationError):
load_resource_spec(json_s({}))
def json_files_params(path, glob="*.json"):
return tuple(pytest.param(p, id=p.name) for p in path.glob(glob))
@pytest.mark.parametrize(
"example", json_files_params(BASEDIR.parent / "examples" / "schema", "*-*-*.json")
)
def test_load_resource_spec_example_spec_is_valid(example):
with example.open("r", encoding="utf-8") as f:
assert load_resource_spec(f)
@pytest.mark.parametrize(
"example", json_files_params(BASEDIR / "data" / "schema" / "valid")
)
def test_load_resource_spec_valid_snippets(example):
with example.open("r", encoding="utf-8") as f:
assert load_resource_spec(f)
@pytest.mark.parametrize(
"example", json_files_params(BASEDIR / "data" / "schema" / "invalid")
)
def test_load_resource_spec_invalid_snippets(example):
with example.open("r", encoding="utf-8") as f:
with pytest.raises(SpecValidationError):
load_resource_spec(f)
def test_load_resource_spec_remote_key_is_invalid():
schema = {
"typeName": "AWS::FOO::BAR",
"description": "test schema",
"properties": {"foo": {"type": "string"}},
"primaryIdentifier": ["/properties/foo"],
"remote": {},
}
with pytest.raises(SpecValidationError) as excinfo:
load_resource_spec(json_s(schema))
assert "remote" in str(excinfo.value)
def test_argparse_stdin_name():
code = "; ".join(
"""import argparse
parser = argparse.ArgumentParser()
parser.add_argument("file", type=argparse.FileType("r"))
args = parser.parse_args(["-"])
print(args.file.name)
""".splitlines()
)
raw = check_output(["python3", "-c", code])
result = raw.rstrip().decode("utf-8") # remove trailing newline
assert result == STDIN_NAME
def test_get_file_base_uri_file_object_no_name():
f = json_s(BASIC_SCHEMA)
assert not hasattr(f, "name")
expected = (Path.cwd() / "-").resolve().as_uri()
actual = get_file_base_uri(f)
assert actual == expected
def test_load_resource_spec_file_object_stdin():
f = json_s(BASIC_SCHEMA)
f.name = STDIN_NAME
expected = (Path.cwd() / "-").resolve().as_uri()
actual = get_file_base_uri(f)
assert actual == expected
def test_load_resource_spec_file_object_has_name(tmpdir):
f = json_s(BASIC_SCHEMA)
f.name = tmpdir.join("test.json")
expected = Path(f.name).resolve().as_uri()
actual = get_file_base_uri(f)
assert actual == expected
@pytest.mark.parametrize(
"ref_fn",
(
lambda server: server.url + "/bar", # absolute
lambda _server: "./bar", # relative
),
)
def test_load_resource_spec_uses_id_if_id_is_set(ref_fn):
@Request.application
def application(_request):
return Response(json.dumps({"type": "string"}), mimetype="application/json")
with wsgi_serve(application) as server:
schema = {
**BASIC_SCHEMA,
"$id": server.url + "/foo",
"properties": {"foo": {"$ref": ref_fn(server)}},
}
inlined = load_resource_spec(json_s(schema))
assert inlined["remote"]["schema0"]["type"] == "string"
def test_load_resource_spec_inliner_produced_invalid_schema():
with patch("rpdk.core.data_loaders.RefInliner", autospec=True) as mock_inliner:
mock_inliner.return_value.inline.return_value = {}
with pytest.raises(InternalError) as excinfo:
load_resource_spec(json_s(BASIC_SCHEMA))
mock_inliner.assert_called_once_with(ANY, BASIC_SCHEMA)
cause = excinfo.value.__cause__
assert cause
assert isinstance(cause, ValidationError)
def test_load_resource_spec_invalid_ref():
copy = json.loads(json.dumps(BASIC_SCHEMA))
copy["properties"]["foo"] = {"$ref": "
with pytest.raises(SpecValidationError) as excinfo:
load_resource_spec(json_s(copy))
cause = excinfo.value.__cause__
assert cause
assert isinstance(cause, RefResolutionError)
assert "bar" in str(cause)
@pytest.fixture
def plugin():
mock_plugin = create_autospec(LanguagePlugin)
mock_plugin.project_settings_defaults.return_value = resource_stream(
__name__, "data/project_defaults.yaml"
)
mock_plugin.project_settings_schema.return_value = resource_json(
__name__, "data/project_schema.json"
)
return mock_plugin
def test_make_validator_handlers_use_local_meta_schema():
try:
validator = make_validator(
{"$ref": "https://somewhere/does/not/exist"}, base_uri="http://localhost/"
)
validator.validate(True)
except Exception: # pylint: disable=broad-except
pytest.fail("Unexpect error, should success")
def mock_pkg_resource_stream(bytes_in, func=resource_stream):
resource_name = "data/test.utf-8"
target = "rpdk.core.data_loaders.pkg_resources.resource_stream"
with patch(target, autospec=True, return_value=BytesIO(bytes_in)) as mock_stream:
f = func(__name__, resource_name)
mock_stream.assert_called_once_with(__name__, resource_name)
return f
def test_resource_stream_decoding_valid():
emoji_santa = "🎅"
f = mock_pkg_resource_stream(emoji_santa.encode("utf-8"))
assert f.read() == emoji_santa
def test_resource_stream_decoding_invalid():
f = mock_pkg_resource_stream(INVALID_UTF8)
# stream is lazily decoded
with pytest.raises(UnicodeDecodeError) as excinfo:
f.read()
assert excinfo.value.encoding == "utf-8"
assert excinfo.value.object == INVALID_UTF8
def test_resource_stream_universal_newlines():
f = mock_pkg_resource_stream(b"Windows\r\n")
assert f.read() == "Windows\n"
def test_resource_stream_with_statement():
string = "Hello, World"
with mock_pkg_resource_stream(string.encode("utf-8")) as f:
assert f.read() == string
with pytest.raises(ValueError) as excinfo:
f.read()
assert "I/O operation on closed file" in str(excinfo.value)
def test_resource_json():
obj = {"foo": "bar"}
encoded = json.dumps(obj).encode("utf-8")
result = mock_pkg_resource_stream(encoded, func=resource_json)
assert result == obj
def test_resource_yaml():
obj = {"foo": "bar"}
encoded = yaml.dump(obj).encode("utf-8")
result = mock_pkg_resource_stream(encoded, func=resource_yaml)
assert result == obj
| true | true |
f7f610cb2c1f36ae5aef88883cea356ae96bd09a | 1,174 | py | Python | runtests.py | StreetVoice/django-celery-ses | db25ad23b0ea01d9054e7d3f2481387488dcb809 | [
"MIT"
] | 18 | 2015-05-14T09:54:15.000Z | 2022-03-08T06:59:32.000Z | runtests.py | StreetVoice/django-celery-ses | db25ad23b0ea01d9054e7d3f2481387488dcb809 | [
"MIT"
] | 14 | 2017-02-23T08:49:17.000Z | 2021-03-26T07:22:52.000Z | runtests.py | StreetVoice/django-celery-ses | db25ad23b0ea01d9054e7d3f2481387488dcb809 | [
"MIT"
] | 9 | 2015-06-15T09:31:36.000Z | 2021-12-25T08:49:05.000Z | #!/usr/bin/env python
import sys
from os.path import dirname, abspath
from django.conf import settings
import django
# Minimal standalone Django configuration so the djcelery_ses test suite can
# run without a host project.
settings.configure(
    DATABASES = {
        'default': {
            # Throwaway in-memory SQLite database.
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': ':memory:'
        }
    },
    # NOTE(review): MIDDLEWARE_CLASSES is the pre-Django-1.10 setting name;
    # confirm which Django versions this runner must support.
    MIDDLEWARE_CLASSES=[],
    INSTALLED_APPS=[
        'django.contrib.admin',
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'django.contrib.sessions',
        'django.contrib.sites',
        'djcelery',
        'djcelery_ses',
    ],
    SITE_ID=1,
    DEBUG=False,
    ROOT_URLCONF='djcelery_ses.urls',
    # Capture outgoing mail in memory instead of sending it.
    CELERY_EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend',
    TEST_RUNNER='djcelery.contrib.test_runner.CeleryTestSuiteRunner',
)
def runtests(*test_args):
    """Set up Django, run the djcelery_ses test suite, and exit the process.

    The exit status is the number of test failures.  The signature was
    ``**test_args`` while the ``__main__`` guard calls ``runtests(*sys.argv[1:])``
    with positional arguments, which raised TypeError whenever any CLI argument
    was supplied; accepting ``*test_args`` fixes that and keeps the zero-arg
    call behaviorally identical (an empty tuple is as falsy as an empty dict
    when handed to ``run_tests``).
    """
    from django.test.utils import get_runner
    django.setup()
    # Make the package importable when this script is run from elsewhere.
    parent = dirname(abspath(__file__))
    sys.path.insert(0, parent)
    TestRunner = get_runner(settings)
    test_runner = TestRunner(verbosity=1, interactive=True)
    failures = test_runner.run_tests(['djcelery_ses'], test_args)
    sys.exit(failures)
# Allow direct execution: forward any CLI arguments to the test runner.
if __name__ == '__main__':
    runtests(*sys.argv[1:])
| 23.48 | 73 | 0.653322 |
import sys
from os.path import dirname, abspath
from django.conf import settings
import django
settings.configure(
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
}
},
MIDDLEWARE_CLASSES=[],
INSTALLED_APPS=[
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'djcelery',
'djcelery_ses',
],
SITE_ID=1,
DEBUG=False,
ROOT_URLCONF='djcelery_ses.urls',
CELERY_EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend',
TEST_RUNNER='djcelery.contrib.test_runner.CeleryTestSuiteRunner',
)
def runtests(**test_args):
from django.test.utils import get_runner
django.setup()
parent = dirname(abspath(__file__))
sys.path.insert(0, parent)
TestRunner = get_runner(settings)
test_runner = TestRunner(verbosity=1, interactive=True)
failures = test_runner.run_tests(['djcelery_ses'], test_args)
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
| true | true |
f7f6112f0313a638854d1e74f9eb0e36796d4115 | 5,828 | py | Python | codes/torch2onnx.py | sanchitvohra/EGVSR | c8927c3aa876fac8ce2508b00c6ed3a3d6df226c | [
"MIT"
] | 709 | 2021-04-27T12:12:32.000Z | 2022-03-31T02:02:29.000Z | codes/torch2onnx.py | sanchitvohra/EGVSR | c8927c3aa876fac8ce2508b00c6ed3a3d6df226c | [
"MIT"
] | 14 | 2021-07-13T12:04:42.000Z | 2022-03-17T12:10:51.000Z | codes/torch2onnx.py | riverlight/egvsr | 3c0b478179f772d7fe7521655008a2d79a6b6185 | [
"MIT"
] | 90 | 2021-07-07T18:35:18.000Z | 2022-03-30T07:01:44.000Z | import os
import os.path as osp
import argparse
import yaml
import time
import torch
from data import create_dataloader, prepare_data
from models import define_model
from models.networks import define_generator
from utils import base_utils, data_utils
from metrics.model_summary import register, profile_model
def test(opt):
    """Run inference for every generator checkpoint in ``opt`` and optionally
    save the super-resolved sequences for each ``test*`` dataset.

    NOTE(review): this helper is not called by the ``__main__`` block below,
    which benchmarks FNet/SRNet directly instead — confirm whether it is
    still needed.
    """
    # logging
    logger = base_utils.get_logger('base')
    if opt['verbose']:
        logger.info('{} Configurations {}'.format('=' * 20, '=' * 20))
        base_utils.print_options(opt, logger)
    # infer and evaluate performance for each model
    for load_path in opt['model']['generator']['load_path_lst']:
        # setup model index (checkpoint file name without extension)
        model_idx = osp.splitext(osp.split(load_path)[-1])[0]
        # log
        logger.info('=' * 40)
        logger.info('Testing model: {}'.format(model_idx))
        logger.info('=' * 40)
        # create model for this checkpoint
        opt['model']['generator']['load_path'] = load_path
        model = define_model(opt)
        # for each test dataset
        for dataset_idx in sorted(opt['dataset'].keys()):
            # use dataset with prefix `test`
            if not dataset_idx.startswith('test'):
                continue
            ds_name = opt['dataset'][dataset_idx]['name']
            logger.info('Testing on {}: {}'.format(dataset_idx, ds_name))
            # create data loader
            test_loader = create_dataloader(opt, dataset_idx=dataset_idx)
            # infer and store results for each sequence
            for i, data in enumerate(test_loader):
                # fetch data (drop the batch dimension)
                lr_data = data['lr'][0]
                seq_idx = data['seq_idx'][0]
                frm_idx = [frm_idx[0] for frm_idx in data['frm_idx']]
                # infer
                hr_seq = model.infer(lr_data)  # thwc|rgb|uint8
                # save results (optional)
                if opt['test']['save_res']:
                    res_dir = osp.join(
                        opt['test']['res_dir'], ds_name, model_idx)
                    res_seq_dir = osp.join(res_dir, seq_idx)
                    data_utils.save_sequence(
                        res_seq_dir, hr_seq, frm_idx, to_bgr=True)
            logger.info('-' * 40)
    # logging
    logger.info('Finish testing')
    logger.info('=' * 40)
if __name__ == '__main__':
    # ----------------- parse arguments ----------------- #
    parser = argparse.ArgumentParser()
    parser.add_argument('--exp_dir', type=str, default="../../experiments_BI/TecoGAN/001")
    parser.add_argument('--mode', type=str, default="test")
    parser.add_argument('--model', type=str, default="TecoGAN")
    parser.add_argument('--opt', type=str, default="test_onnx.yml")
    parser.add_argument('--gpu_id', type=int, default=0)
    # low-res input size as CxWxH (parsed below by splitting on 'x')
    parser.add_argument('--lr_size', type=str, default='3x960x540')
    parser.add_argument('--test_speed', action='store_true')
    args = parser.parse_args()
    # ----------------- get options ----------------- #
    print(args.exp_dir)
    with open(osp.join(args.exp_dir, args.opt), 'r') as f:
        opt = yaml.load(f.read(), Loader=yaml.FullLoader)
    # ----------------- general configs ----------------- #
    # experiment dir
    opt['exp_dir'] = args.exp_dir
    # random seed
    base_utils.setup_random_seed(opt['manual_seed'])
    # logger
    base_utils.setup_logger('base')
    opt['verbose'] = opt.get('verbose', False)
    # device: gpu_id < 0 forces CPU; otherwise use CUDA if available
    if args.gpu_id >= 0:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)
        if torch.cuda.is_available():
            # TODO: torch.backends.cudnn.benchmark setting
            # torch.backends.cudnn.deterministic = True
            # torch.backends.cudnn.benchmark = False
            torch.backends.cudnn.benchmark = True
            opt['device'] = 'cuda'
        else:
            opt['device'] = 'cpu'
    else:
        opt['device'] = 'cpu'
    # ----------------- test ----------------- #
    # basic configs
    scale = opt['scale']
    device = torch.device(opt['device'])
    # create model (net_G only used to generate dummy inputs below)
    net_G = define_generator(opt).to(device)
    from models.networks.tecogan_nets import FNet, SRNet
    fnet = FNet(in_nc=opt['model']['generator']['in_nc']).to(device)
    srnet = SRNet(in_nc=opt['model']['generator']['in_nc'], out_nc=3, nf=64, nb=10, upsample_func=None, scale=4).to(device)
    # get dummy input
    lr_size = tuple(map(int, args.lr_size.split('x')))
    dummy_input_dict = net_G.generate_dummy_input(lr_size)
    for key in dummy_input_dict.keys():
        dummy_input_dict[key] = dummy_input_dict[key].to(device)
    lr_curr = dummy_input_dict['lr_curr']
    lr_prev = dummy_input_dict['lr_prev']
    hr_prev = dummy_input_dict['hr_prev']
    # NOTE(review): assumes SRNet's second input is a (1, 3*s*s, W, H) warped
    # previous HR frame with s=4 — confirm against SRNet.forward.
    hr_prev_warp = torch.rand(1, 3*16, 960, 540, dtype=torch.float32).to(device)
    # test running speed
    n_test = 30
    tot_time = 0
    # NOTE(review): fnet is put in eval mode but the timed forward pass below
    # runs srnet — presumably srnet.eval() was intended; confirm.
    fnet.eval()
    for i in range(n_test):
        print('run num:', i)
        start_time = time.time()
        with torch.no_grad():
            try:
                # rst = net_G(**dummy_input_dict)
                # rst = fnet(lr_curr, lr_prev)
                rst = srnet(lr_curr, hr_prev_warp)
            except RuntimeError as e:
                if 'out of memory' in str(e):
                    print('| WARNING: ran out of memory')
                    if hasattr(torch.cuda, 'empty_cache'):
                        torch.cuda.empty_cache()
                else:
                    raise e
        end_time = time.time()
        tot_time += end_time - start_time
    print('Speed (FPS): {:.3f} (averaged for {} runs)'.format(n_test / tot_time, n_test))
    print('-' * 40)
    # torch to onnx
    # input_fnet = (lr_curr, lr_prev)
    # input_srnet = (lr_curr, hr_prev_warp)
    # torch.onnx.export(fnet, input_fnet, "fnet.onnx", verbose=True, opset_version=11)
| 34.081871 | 123 | 0.579444 | import os
import os.path as osp
import argparse
import yaml
import time
import torch
from data import create_dataloader, prepare_data
from models import define_model
from models.networks import define_generator
from utils import base_utils, data_utils
from metrics.model_summary import register, profile_model
def test(opt):
logger = base_utils.get_logger('base')
if opt['verbose']:
logger.info('{} Configurations {}'.format('=' * 20, '=' * 20))
base_utils.print_options(opt, logger)
for load_path in opt['model']['generator']['load_path_lst']:
model_idx = osp.splitext(osp.split(load_path)[-1])[0]
logger.info('=' * 40)
logger.info('Testing model: {}'.format(model_idx))
logger.info('=' * 40)
opt['model']['generator']['load_path'] = load_path
model = define_model(opt)
for dataset_idx in sorted(opt['dataset'].keys()):
if not dataset_idx.startswith('test'):
continue
ds_name = opt['dataset'][dataset_idx]['name']
logger.info('Testing on {}: {}'.format(dataset_idx, ds_name))
test_loader = create_dataloader(opt, dataset_idx=dataset_idx)
for i, data in enumerate(test_loader):
lr_data = data['lr'][0]
seq_idx = data['seq_idx'][0]
frm_idx = [frm_idx[0] for frm_idx in data['frm_idx']]
hr_seq = model.infer(lr_data)
if opt['test']['save_res']:
res_dir = osp.join(
opt['test']['res_dir'], ds_name, model_idx)
res_seq_dir = osp.join(res_dir, seq_idx)
data_utils.save_sequence(
res_seq_dir, hr_seq, frm_idx, to_bgr=True)
logger.info('-' * 40)
logger.info('Finish testing')
logger.info('=' * 40)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--exp_dir', type=str, default="../../experiments_BI/TecoGAN/001")
parser.add_argument('--mode', type=str, default="test")
parser.add_argument('--model', type=str, default="TecoGAN")
parser.add_argument('--opt', type=str, default="test_onnx.yml")
parser.add_argument('--gpu_id', type=int, default=0)
parser.add_argument('--lr_size', type=str, default='3x960x540')
parser.add_argument('--test_speed', action='store_true')
args = parser.parse_args()
print(args.exp_dir)
with open(osp.join(args.exp_dir, args.opt), 'r') as f:
opt = yaml.load(f.read(), Loader=yaml.FullLoader)
opt['exp_dir'] = args.exp_dir
base_utils.setup_random_seed(opt['manual_seed'])
base_utils.setup_logger('base')
opt['verbose'] = opt.get('verbose', False)
if args.gpu_id >= 0:
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)
if torch.cuda.is_available():
torch.backends.cudnn.benchmark = True
opt['device'] = 'cuda'
else:
opt['device'] = 'cpu'
else:
opt['device'] = 'cpu'
scale = opt['scale']
device = torch.device(opt['device'])
net_G = define_generator(opt).to(device)
from models.networks.tecogan_nets import FNet, SRNet
fnet = FNet(in_nc=opt['model']['generator']['in_nc']).to(device)
srnet = SRNet(in_nc=opt['model']['generator']['in_nc'], out_nc=3, nf=64, nb=10, upsample_func=None, scale=4).to(device)
lr_size = tuple(map(int, args.lr_size.split('x')))
dummy_input_dict = net_G.generate_dummy_input(lr_size)
for key in dummy_input_dict.keys():
dummy_input_dict[key] = dummy_input_dict[key].to(device)
lr_curr = dummy_input_dict['lr_curr']
lr_prev = dummy_input_dict['lr_prev']
hr_prev = dummy_input_dict['hr_prev']
hr_prev_warp = torch.rand(1, 3*16, 960, 540, dtype=torch.float32).to(device)
n_test = 30
tot_time = 0
fnet.eval()
for i in range(n_test):
print('run num:', i)
start_time = time.time()
with torch.no_grad():
try:
rst = srnet(lr_curr, hr_prev_warp)
except RuntimeError as e:
if 'out of memory' in str(e):
print('| WARNING: ran out of memory')
if hasattr(torch.cuda, 'empty_cache'):
torch.cuda.empty_cache()
else:
raise e
end_time = time.time()
tot_time += end_time - start_time
print('Speed (FPS): {:.3f} (averaged for {} runs)'.format(n_test / tot_time, n_test))
print('-' * 40)
| true | true |
f7f611790b6f8332813be1710663a28bfb2d2f53 | 122 | py | Python | CodeChef/life_the_universe_and_everything.py | pybae/etc | ba3d6291ed5dd8e6b6ee18b186a09600def56505 | [
"MIT"
] | null | null | null | CodeChef/life_the_universe_and_everything.py | pybae/etc | ba3d6291ed5dd8e6b6ee18b186a09600def56505 | [
"MIT"
] | null | null | null | CodeChef/life_the_universe_and_everything.py | pybae/etc | ba3d6291ed5dd8e6b6ee18b186a09600def56505 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
num = int(input())
while (num != 42):
print num
num = int(input())
| 15.25 | 23 | 0.532787 |
num = int(input())
while (num != 42):
print num
num = int(input())
| false | true |
f7f6117cd34c38631d51b40d275abe103ca922f6 | 221 | py | Python | recommender/recommender/utils.py | cnam0203/trivi-backend | d6a4c6c600bdf22fd45c72c25c7ab55281339a0c | [
"MIT"
] | 1 | 2020-06-24T04:44:33.000Z | 2020-06-24T04:44:33.000Z | recommender/recommender/utils.py | cnam0203/trivi-backend | d6a4c6c600bdf22fd45c72c25c7ab55281339a0c | [
"MIT"
] | 23 | 2020-08-15T15:18:32.000Z | 2022-02-26T13:49:05.000Z | recommender/recommender/utils.py | cnam0203/trivi-backend | d6a4c6c600bdf22fd45c72c25c7ab55281339a0c | [
"MIT"
] | 1 | 2021-05-12T19:08:52.000Z | 2021-05-12T19:08:52.000Z | from core.serializers import UserSerializer
def my_jwt_response_handler(token, user=None, request=None):
    """Shape the JWT login response: the token plus the serialized user."""
    serialized_user = UserSerializer(user, context={'request': request}).data
    return {
        'token': token,
        'user': serialized_user,
    }
def my_jwt_response_handler(token, user=None, request=None):
return {
'token': token,
'user': UserSerializer(user, context={'request': request}).data
} | true | true |
f7f6122f701a186a7b0edb689dc21091a0299482 | 2,469 | py | Python | simple_weather_bot/api.py | hexfaker/simple-weather-bot | 330a4bc2fcf1b7892f514ee5a198d62a9f7b4039 | [
"MIT"
] | 2 | 2020-08-15T04:54:01.000Z | 2020-08-15T07:37:35.000Z | simple_weather_bot/api.py | hexfaker/simple-weather-bot | 330a4bc2fcf1b7892f514ee5a198d62a9f7b4039 | [
"MIT"
] | 5 | 2020-08-17T06:05:13.000Z | 2020-09-07T05:52:24.000Z | simple_weather_bot/api.py | hexfaker/simple-weather-bot | 330a4bc2fcf1b7892f514ee5a198d62a9f7b4039 | [
"MIT"
] | null | null | null | import datetime as dt
from datetime import datetime
from typing import List, Tuple
import httpx
import pytz
from pydantic import BaseModel, Extra
class Coords(BaseModel):
    """Geographic point; field names mirror MapQuest's ``latLng`` keys."""
    lat: float
    lng: float
class Temp(BaseModel):
    """Per-day temperatures from OpenWeatherMap's ``daily.temp`` object.

    Units follow the API request (``units=metric`` below, so °C).
    """
    day: float
    min: float
    max: float
    night: float
    eve: float
    morn: float
class FeelsLike(BaseModel):
    """"Feels like" temperatures for the day's four periods (``daily.feels_like``)."""
    day: float
    night: float
    eve: float
    morn: float
class WeatherDesc(BaseModel):
    """Short weather condition text; extra API fields are ignored."""
    class Config:
        extra = Extra.ignore
    main: str
    description: str
class Weather(BaseModel):
    """One day's forecast from the One Call ``daily`` array (extra fields ignored)."""
    class Config:
        extra = Extra.ignore
    temp: Temp
    feels_like: FeelsLike
    humidity: int
    wind_speed: float
    weather: List[WeatherDesc]
def get_today_at_timezone(tzname: str) -> Tuple[dt.date, dt.tzinfo]:
    """Return (today's date, tzinfo) for the IANA timezone name *tzname*."""
    # pytz.timezone returns a datetime.tzinfo subclass, not datetime.timezone,
    # so the return annotation uses the base class (was wrongly dt.timezone).
    tz = pytz.timezone(tzname)
    now = datetime.now(tz)
    return now.date(), tz
class GeocodingAPI:
    """Thin wrapper around the MapQuest geocoding endpoint."""

    def __init__(self, key: str):
        self.key = key

    def get_coordinates(self, address: str):
        """Geocode *address*; return its Coords, or None when nothing matched."""
        payload = httpx.get(
            "http://www.mapquestapi.com/geocoding/v1/address",
            params={"key": self.key, "location": address},
        ).json()
        results = payload["results"]
        if not results:
            return None
        best = results[0]["locations"][0]
        return Coords(**best["latLng"])
class WeatherAPI:
    """Wrapper around the OpenWeatherMap One Call endpoint."""

    def __init__(self, key: str):
        self.key = key

    def get_weather_today(self, coords: Coords) -> Weather:
        """Fetch the daily forecast for *coords* and return today's Weather.

        "Today" is evaluated in the location's own timezone, as reported in
        the API response.
        """
        resp = httpx.get(
            "https://api.openweathermap.org/data/2.5/onecall",
            params={
                "lat": coords.lat,
                "lon": coords.lng,
                "part": "daily",
                "appid": self.key,
                "units": "metric",
                "lang": "ru",
            },
        ).json()
        today, tz = get_today_at_timezone(resp["timezone"])
        # Fix: convert each epoch timestamp directly into the target timezone.
        # The previous utcfromtimestamp(...).astimezone(tz) chain produced a
        # *naive* datetime, which .astimezone() interprets in the server's
        # local timezone — yielding wrong dates on non-UTC hosts.
        weather_by_day = {
            datetime.fromtimestamp(float(day["dt"]), tz).date(): day
            for day in resp["daily"]
        }
        return Weather.parse_obj(weather_by_day[today])
class GeoWeatherAPI:
    """Facade combining geocoding (MapQuest) and weather (OpenWeatherMap)."""

    def __init__(self, mapquest_key: str, openweathermap_key: str):
        self.geo = GeocodingAPI(mapquest_key)
        self.weather = WeatherAPI(openweathermap_key)

    def get_today_weather_for_address(self, address: str) -> Weather:
        """Return today's weather at *address*.

        Raises:
            ValueError: if *address* cannot be geocoded.  (Previously a
                failed geocode passed None through and surfaced as an opaque
                AttributeError inside the weather call.)
        """
        coords = self.geo.get_coordinates(address)
        if coords is None:
            raise ValueError("could not geocode address: {!r}".format(address))
        return self.weather.get_weather_today(coords)
| 22.044643 | 80 | 0.594168 | import datetime as dt
from datetime import datetime
from typing import List, Tuple
import httpx
import pytz
from pydantic import BaseModel, Extra
class Coords(BaseModel):
lat: float
lng: float
class Temp(BaseModel):
day: float
min: float
max: float
night: float
eve: float
morn: float
class FeelsLike(BaseModel):
day: float
night: float
eve: float
morn: float
class WeatherDesc(BaseModel):
class Config:
extra = Extra.ignore
main: str
description: str
class Weather(BaseModel):
class Config:
extra = Extra.ignore
temp: Temp
feels_like: FeelsLike
humidity: int
wind_speed: float
weather: List[WeatherDesc]
def get_today_at_timezone(tzname: str) -> Tuple[dt.date, dt.timezone]:
tz = pytz.timezone(tzname)
now = datetime.now(tz)
return now.date(), tz
class GeocodingAPI:
def __init__(self, key: str):
self.key = key
def get_coordinates(self, address: str):
resp = httpx.get(
"http://www.mapquestapi.com/geocoding/v1/address",
params={"key": self.key, "location": address},
).json()
if len(resp["results"]) > 0:
locations = resp["results"][0]["locations"][0]
return Coords(**locations["latLng"])
return None
class WeatherAPI:
def __init__(self, key: str):
self.key = key
def get_weather_today(self, coords: Coords):
resp = httpx.get(
"https://api.openweathermap.org/data/2.5/onecall",
params={
"lat": coords.lat,
"lon": coords.lng,
"part": "daily",
"appid": self.key,
"units": "metric",
"lang": "ru",
},
).json()
today, tz = get_today_at_timezone(resp["timezone"])
weather_by_day = {
datetime.utcfromtimestamp(float(day["dt"]))
.astimezone(tz)
.date(): day
for day in resp["daily"]
}
res = weather_by_day[today]
return Weather.parse_obj(res)
class GeoWeatherAPI:
def __init__(self, mapquest_key: str, openweathermap_key: str):
self.geo = GeocodingAPI(mapquest_key)
self.weather = WeatherAPI(openweathermap_key)
def get_today_weather_for_address(self, address: str) -> Weather:
return self.weather.get_weather_today(self.geo.get_coordinates(address))
| true | true |
f7f6130f1583680d3c8dfebc6cfa38c56942fa01 | 2,801 | py | Python | scripts/shrink_multiframe.py | vsaase/dicom2nifti | 6722420a7673d36437e4358ce3cb2a7c77c91820 | [
"MIT"
] | 197 | 2016-04-05T15:24:23.000Z | 2022-03-25T17:37:10.000Z | scripts/shrink_multiframe.py | vsaase/dicom2nifti | 6722420a7673d36437e4358ce3cb2a7c77c91820 | [
"MIT"
] | 102 | 2017-05-12T07:08:48.000Z | 2022-03-22T00:21:54.000Z | scripts/shrink_multiframe.py | vsaase/dicom2nifti | 6722420a7673d36437e4358ce3cb2a7c77c91820 | [
"MIT"
] | 60 | 2016-12-13T22:11:56.000Z | 2022-03-30T22:55:52.000Z | # -*- coding: utf-8 -*-
"""
dicom2nifti
@author: abrys
"""
import dicom
import os
import logging
import numpy
import dicom2nifti.compressed_dicom as compressed_dicom
from dicom2nifti.convert_philips import _is_multiframe_diffusion_imaging, _is_multiframe_4d
import dicom2nifti.common as common
def shrink_multiframe(input_file, output_file=None, slice_count=8, timepoint_count=4):
if output_file is None:
output_file = input_file
# Load dicom_file_in
dicom_in = compressed_dicom.read_file(input_file)
if _is_multiframe_diffusion_imaging([dicom_in]) or _is_multiframe_4d([dicom_in]):
number_of_stack_slices = int(common.get_ss_value(dicom_in[(0x2001, 0x105f)][0][(0x2001, 0x102d)]))
number_of_stacks = int(int(dicom_in.NumberOfFrames) / number_of_stack_slices)
# We create a numpy array
size_x = dicom_in.pixel_array.shape[2]
size_y = dicom_in.pixel_array.shape[1]
size_t = number_of_stacks
frame_info = dicom_in.PerFrameFunctionalGroupsSequence
data_4d = numpy.zeros((slice_count * timepoint_count, size_x, size_y), dtype=common.get_numpy_type(dicom_in))
new_frame_info = [None] * slice_count * timepoint_count
for index_z in range(0, slice_count):
for index_t in range(0, timepoint_count):
slice_index = int(size_t * index_z + index_t)
new_slice_index = int(timepoint_count * index_z + index_t)
z_location = frame_info[slice_index].FrameContentSequence[0].InStackPositionNumber - 1
new_frame_info[new_slice_index] = frame_info[slice_index]
logging.info('Importing slice on position %s %s %s' % (slice_index, z_location, index_t))
data_4d[new_slice_index, :, :] = dicom_in.pixel_array[slice_index, :, :]
dicom_in.PixelData = data_4d.tostring()
common.set_ss_value(dicom_in[(0x2001, 0x105f)][0][(0x2001, 0x102d)], slice_count)
setattr(dicom_in, 'NumberOfFrames', slice_count * timepoint_count)
setattr(dicom_in, 'PerFrameFunctionalGroupsSequence', new_frame_info)
else:
# truncate the data
dicom_in.PixelData = dicom_in.pixel_array[:slice_count, :, :].tostring()
# set number of frames
common.set_ss_value(dicom_in[(0x2001, 0x105f)][0][(0x2001, 0x102d)], slice_count)
setattr(dicom_in, 'NumberOfFrames', slice_count)
# truncate the pre frame groups sequence
setattr(dicom_in, 'PerFrameFunctionalGroupsSequence', dicom_in.PerFrameFunctionalGroupsSequence[:slice_count])
# Save the file
dicom_in.save_as(output_file)
def main():
shrink_multiframe('/Users/abrys/Documents/data/philips_implicit/IM1.dcm',timepoint_count=1)
pass
if __name__ == "__main__":
main()
| 37.346667 | 118 | 0.706176 |
import dicom
import os
import logging
import numpy
import dicom2nifti.compressed_dicom as compressed_dicom
from dicom2nifti.convert_philips import _is_multiframe_diffusion_imaging, _is_multiframe_4d
import dicom2nifti.common as common
def shrink_multiframe(input_file, output_file=None, slice_count=8, timepoint_count=4):
if output_file is None:
output_file = input_file
dicom_in = compressed_dicom.read_file(input_file)
if _is_multiframe_diffusion_imaging([dicom_in]) or _is_multiframe_4d([dicom_in]):
number_of_stack_slices = int(common.get_ss_value(dicom_in[(0x2001, 0x105f)][0][(0x2001, 0x102d)]))
number_of_stacks = int(int(dicom_in.NumberOfFrames) / number_of_stack_slices)
size_x = dicom_in.pixel_array.shape[2]
size_y = dicom_in.pixel_array.shape[1]
size_t = number_of_stacks
frame_info = dicom_in.PerFrameFunctionalGroupsSequence
data_4d = numpy.zeros((slice_count * timepoint_count, size_x, size_y), dtype=common.get_numpy_type(dicom_in))
new_frame_info = [None] * slice_count * timepoint_count
for index_z in range(0, slice_count):
for index_t in range(0, timepoint_count):
slice_index = int(size_t * index_z + index_t)
new_slice_index = int(timepoint_count * index_z + index_t)
z_location = frame_info[slice_index].FrameContentSequence[0].InStackPositionNumber - 1
new_frame_info[new_slice_index] = frame_info[slice_index]
logging.info('Importing slice on position %s %s %s' % (slice_index, z_location, index_t))
data_4d[new_slice_index, :, :] = dicom_in.pixel_array[slice_index, :, :]
dicom_in.PixelData = data_4d.tostring()
common.set_ss_value(dicom_in[(0x2001, 0x105f)][0][(0x2001, 0x102d)], slice_count)
setattr(dicom_in, 'NumberOfFrames', slice_count * timepoint_count)
setattr(dicom_in, 'PerFrameFunctionalGroupsSequence', new_frame_info)
else:
dicom_in.PixelData = dicom_in.pixel_array[:slice_count, :, :].tostring()
common.set_ss_value(dicom_in[(0x2001, 0x105f)][0][(0x2001, 0x102d)], slice_count)
setattr(dicom_in, 'NumberOfFrames', slice_count)
setattr(dicom_in, 'PerFrameFunctionalGroupsSequence', dicom_in.PerFrameFunctionalGroupsSequence[:slice_count])
dicom_in.save_as(output_file)
def main():
shrink_multiframe('/Users/abrys/Documents/data/philips_implicit/IM1.dcm',timepoint_count=1)
pass
if __name__ == "__main__":
main()
| true | true |
f7f613558782a2fe3923d40460c1a5481f291029 | 1,748 | py | Python | examples/libs_and_modules_usage_example.py | dariaomelkina/UCU-Semester-Homework | c980d467e9eba3ab64c4db36c66c20ac92c7ba00 | [
"MIT"
] | 1 | 2020-05-19T09:03:46.000Z | 2020-05-19T09:03:46.000Z | examples/libs_and_modules_usage_example.py | dariaomelkina/UCU-Semester-Homework | c980d467e9eba3ab64c4db36c66c20ac92c7ba00 | [
"MIT"
] | 3 | 2020-04-04T17:17:29.000Z | 2020-05-19T09:24:51.000Z | examples/libs_and_modules_usage_example.py | dariaomelkina/UCU-Semester-Homework | c980d467e9eba3ab64c4db36c66c20ac92c7ba00 | [
"MIT"
] | 1 | 2020-05-19T09:20:06.000Z | 2020-05-19T09:20:06.000Z | import json
import dash
import urllib.request
import urllib.parse
import dash_core_components as dcc
import dash_html_components as html
import pandas
import plotly.express as px
BASE_URL = "https://api.nasa.gov/insight_weather/?api_key=DEMO_KEY&feedtype=json&ver=1.0"
def get_data_from_URL(base_url):
with urllib.request.urlopen(base_url) as response:
data = response.read()
data = data.decode("utf-8")
data = json.loads(data)
return data
ready_data = get_data_from_URL(BASE_URL)
lst = []
for key in ready_data.keys():
if key != "sol_keys" or key != "validity_checks":
try:
df = pandas.DataFrame(ready_data[key])
lst.append(df["AT"]["av"])
except KeyError:
break
inf_series = pandas.DataFrame(list(zip(lst, ready_data['sol_keys'])), columns=["average temperature", "day"])
fig = px.bar(inf_series, x='day', y='average temperature', text='average temperature',
hover_data=['average temperature', 'day'], color='average temperature',
labels={'pop': 'temperature'}, height=400)
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
colors = {
'text': '#91165a'
}
app.layout = html.Div(children=[
html.H1(
children='Dash Experiment',
style={
'textAlign': 'center',
'color': colors['text']
}
),
html.Div(children='There are average temperatures on Mars by days.', style={
'textAlign': 'center',
'color': colors['text']
}),
dcc.Graph(
id='example-graph-2',
figure=fig,
)
])
if __name__ == '__main__':
app.run_server(debug=True)
| 26.089552 | 109 | 0.64016 | import json
import dash
import urllib.request
import urllib.parse
import dash_core_components as dcc
import dash_html_components as html
import pandas
import plotly.express as px
BASE_URL = "https://api.nasa.gov/insight_weather/?api_key=DEMO_KEY&feedtype=json&ver=1.0"
def get_data_from_URL(base_url):
with urllib.request.urlopen(base_url) as response:
data = response.read()
data = data.decode("utf-8")
data = json.loads(data)
return data
ready_data = get_data_from_URL(BASE_URL)
lst = []
for key in ready_data.keys():
if key != "sol_keys" or key != "validity_checks":
try:
df = pandas.DataFrame(ready_data[key])
lst.append(df["AT"]["av"])
except KeyError:
break
inf_series = pandas.DataFrame(list(zip(lst, ready_data['sol_keys'])), columns=["average temperature", "day"])
fig = px.bar(inf_series, x='day', y='average temperature', text='average temperature',
hover_data=['average temperature', 'day'], color='average temperature',
labels={'pop': 'temperature'}, height=400)
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
colors = {
'text': '#91165a'
}
app.layout = html.Div(children=[
html.H1(
children='Dash Experiment',
style={
'textAlign': 'center',
'color': colors['text']
}
),
html.Div(children='There are average temperatures on Mars by days.', style={
'textAlign': 'center',
'color': colors['text']
}),
dcc.Graph(
id='example-graph-2',
figure=fig,
)
])
if __name__ == '__main__':
app.run_server(debug=True)
| true | true |
f7f613613fa113d1598211dd71d00a197d13b649 | 1,032 | py | Python | vdgnn/options/train_options.py | HCY123902/visdial-gnn | c38090c672cdf04a4fabe139f96d944fd82cb123 | [
"MIT"
] | 44 | 2019-04-24T22:44:52.000Z | 2022-03-15T07:09:38.000Z | vdgnn/options/train_options.py | HCY123902/visdial-gnn | c38090c672cdf04a4fabe139f96d944fd82cb123 | [
"MIT"
] | 1 | 2019-06-18T15:38:10.000Z | 2020-01-14T01:13:26.000Z | vdgnn/options/train_options.py | HCY123902/visdial-gnn | c38090c672cdf04a4fabe139f96d944fd82cb123 | [
"MIT"
] | 5 | 2019-04-25T07:13:49.000Z | 2021-06-22T16:33:54.000Z | from .base_options import BaseOptions
class TrainOptions(BaseOptions):
"""Training Options"""
def initialize(self, parser):
paser = BaseOptions.initialize(self, parser)
# Optimization settings
parser.add_argument_group('Optimization specific arguments')
parser.add_argument('--num_epochs', default=20, type=int, help='maximum epoch for training')
parser.add_argument('--batch_size', default=32, type=int, help='training batch size')
parser.add_argument('--lr', default=1e-3, type=float, help='initial learning rate')
parser.add_argument('--lr_decay_rate', default=0.9997592083, type=float, help='decay for lr')
parser.add_argument('--min_lr', default=5e-5, type=float, help='minimum learning rate')
# Logging settings
parser.add_argument_group('Logging specific arguments')
parser.add_argument('--log_step', default=100, type=int, help='save checkpoint after every save_step epochs')
self.isTrain = True
return parser
| 51.6 | 117 | 0.697674 | from .base_options import BaseOptions
class TrainOptions(BaseOptions):
def initialize(self, parser):
paser = BaseOptions.initialize(self, parser)
parser.add_argument_group('Optimization specific arguments')
parser.add_argument('--num_epochs', default=20, type=int, help='maximum epoch for training')
parser.add_argument('--batch_size', default=32, type=int, help='training batch size')
parser.add_argument('--lr', default=1e-3, type=float, help='initial learning rate')
parser.add_argument('--lr_decay_rate', default=0.9997592083, type=float, help='decay for lr')
parser.add_argument('--min_lr', default=5e-5, type=float, help='minimum learning rate')
parser.add_argument_group('Logging specific arguments')
parser.add_argument('--log_step', default=100, type=int, help='save checkpoint after every save_step epochs')
self.isTrain = True
return parser
| true | true |
f7f6140a72358545584918dc768ff8897df5c7b2 | 1,921 | py | Python | src/paralleling/train_predict/cross_validation.py | tpinhoda/Graph-Based_Spatial_Cross_Validation | 19300a715d3d03580232926bbc1f6ea8800b23e3 | [
"MIT"
] | null | null | null | src/paralleling/train_predict/cross_validation.py | tpinhoda/Graph-Based_Spatial_Cross_Validation | 19300a715d3d03580232926bbc1f6ea8800b23e3 | [
"MIT"
] | null | null | null | src/paralleling/train_predict/cross_validation.py | tpinhoda/Graph-Based_Spatial_Cross_Validation | 19300a715d3d03580232926bbc1f6ea8800b23e3 | [
"MIT"
] | null | null | null | import contextlib
import os
import sys
import pandas as pd
import geopandas as gpd
from weka.core import jvm
from src import utils
from src.pipeline import Pipeline
from src.visualization.performance import VizMetrics
from src.visualization.dependence import VizDependence
# Set pipeline switchers
SWITCHERS = {
"scv": False,
"fs": False,
"train": True,
"predict": True,
"evaluate": False,
}
ml_methods = [
"KNN",
"OLS",
"Lasso",
"Ridge",
"ElasticNet",
"DT",
"LGBM",
"RF",
"MLP",
"SVM",
]
def main(root_path, dataset, fs_method, index_col, index_fold, target_col, ml_method):
"""Runs main script"""
utils.initialize_coloredlog()
utils.initialize_rich_tracerback()
utils.initialize_logging()
data_path = os.path.join(root_path, dataset, "data.csv")
# Load data
data = pd.read_csv(data_path, index_col=index_col, low_memory=False)
with contextlib.suppress(KeyError):
data.drop(columns=["[GEO]_LATITUDE", "[GEO]_LONGITUDE"], inplace=True)
# Run pipeline
CrossValidation = Pipeline(
root_path=os.path.join(root_path, dataset),
data=data,
meshblocks=None,
index_col=index_col,
fold_col=index_fold,
target_col=target_col,
scv_method="CrossValidation",
fs_method=fs_method,
ml_method=ml_method,
switchers=SWITCHERS,
)
print(
f"Running the CrossValidation SCV approach for dataset: {dataset} ML Method = {ml_method}"
)
CrossValidation.run()
if __name__ == "__main__":
root_path = sys.argv[1]
dataset = sys.argv[2]
fs_method = sys.argv[3]
index_col = sys.argv[4]
fold_col = sys.argv[5]
target_col = sys.argv[6]
ml_method = sys.argv[7]
print(dataset, fs_method, index_col, fold_col, target_col)
main(root_path, dataset, fs_method, index_col, fold_col, target_col, ml_method)
| 25.276316 | 98 | 0.665799 | import contextlib
import os
import sys
import pandas as pd
import geopandas as gpd
from weka.core import jvm
from src import utils
from src.pipeline import Pipeline
from src.visualization.performance import VizMetrics
from src.visualization.dependence import VizDependence
SWITCHERS = {
"scv": False,
"fs": False,
"train": True,
"predict": True,
"evaluate": False,
}
ml_methods = [
"KNN",
"OLS",
"Lasso",
"Ridge",
"ElasticNet",
"DT",
"LGBM",
"RF",
"MLP",
"SVM",
]
def main(root_path, dataset, fs_method, index_col, index_fold, target_col, ml_method):
utils.initialize_coloredlog()
utils.initialize_rich_tracerback()
utils.initialize_logging()
data_path = os.path.join(root_path, dataset, "data.csv")
data = pd.read_csv(data_path, index_col=index_col, low_memory=False)
with contextlib.suppress(KeyError):
data.drop(columns=["[GEO]_LATITUDE", "[GEO]_LONGITUDE"], inplace=True)
CrossValidation = Pipeline(
root_path=os.path.join(root_path, dataset),
data=data,
meshblocks=None,
index_col=index_col,
fold_col=index_fold,
target_col=target_col,
scv_method="CrossValidation",
fs_method=fs_method,
ml_method=ml_method,
switchers=SWITCHERS,
)
print(
f"Running the CrossValidation SCV approach for dataset: {dataset} ML Method = {ml_method}"
)
CrossValidation.run()
if __name__ == "__main__":
root_path = sys.argv[1]
dataset = sys.argv[2]
fs_method = sys.argv[3]
index_col = sys.argv[4]
fold_col = sys.argv[5]
target_col = sys.argv[6]
ml_method = sys.argv[7]
print(dataset, fs_method, index_col, fold_col, target_col)
main(root_path, dataset, fs_method, index_col, fold_col, target_col, ml_method)
| true | true |
f7f615c49cc2c48e234c32557bb12b8ab262ca77 | 3,573 | py | Python | util/kalman2d.py | jingliinpurdue/Fast-and-Robust-UAV-to-UAV-Detection-and-Tracking | 317e85a03f5c374ef8ec53b543208ec36655fa07 | [
"BSD-3-Clause"
] | 7 | 2020-02-12T15:34:20.000Z | 2022-02-26T19:57:22.000Z | util/kalman2d.py | jingliinpurdue/Fast-and-Robust-UAV-to-UAV-Detection-and-Tracking | 317e85a03f5c374ef8ec53b543208ec36655fa07 | [
"BSD-3-Clause"
] | null | null | null | util/kalman2d.py | jingliinpurdue/Fast-and-Robust-UAV-to-UAV-Detection-and-Tracking | 317e85a03f5c374ef8ec53b543208ec36655fa07 | [
"BSD-3-Clause"
] | 9 | 2019-08-30T00:44:37.000Z | 2021-11-22T07:16:26.000Z | # Opencv 2---Version
# -*- coding: utf-8 -*-
'''
kalman2d - 2D Kalman filter using OpenCV
Based on http://jayrambhia.wordpress.com/2012/07/26/kalman-filter/
Copyright (C) 2014 Simon D. Levy
This code is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This code is distributed in the hope that it will be useful,
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this code. If not, see <http://www.gnu.org/licenses/>.
'''
#from cv2 import cv
import cv2
import numpy as np
class Kalman2D(object):
'''
A class for 2D Kalman filtering
'''
def __init__(self, processNoiseCovariance=1e-4, measurementNoiseCovariance=1e-1, errorCovariancePost=0.1):
#def __init__(self,processNoiseCovariance=1e-1, measurementNoiseCovariance=1e1, errorCovariancePost=1e4):
'''
Constructs a new Kalman2D object.
For explanation of the error covariances see
http://en.wikipedia.org/wiki/Kalman_filter
'''
# state space:location--2d,speed--2d
#self.kalman = cv.CreateKalman(4, 2, 0)
self.kalman = cv2.KalmanFilter(4, 2, 0)
self.kalman_measurement = np.array([[1.],[1.]],np.float32)
self.kalman.transitionMatrix = np.array([[1.,0., 1.,0.], [0., 1., 0., 1.], [0., 0., 1., 0.], [0., 0., 0., 1.]],np.float32)
self.kalman.measurementMatrix = np.array([[1.,0.,0.,0.],[0.,1.,0.,0.]],np.float32)
self.kalman.processNoiseCov = processNoiseCovariance * np.array([[1.,0.,0.,0.],[0.,1.,0.,0.],[0.,0.,1.,0.],[0.,0.,0.,1.]],np.float32)
self.kalman.measurementNoiseCov = np.array([[1.,0.],[0.,1.]],np.float32) * measurementNoiseCovariance
self.kalman.errorCovPost = np.array([[1.,0., 0, 0],[0.,1., 0, 0],[0.,0, 1, 0],[0.,0, 0, 1]],np.float32) * errorCovariancePost
#cv.SetIdentity(self.kalman.measurement_matrix)
#Initialize identity matrix
#cv.SetIdentity(self.kalman.process_noise_cov, cv.RealScalar(processNoiseCovariance))
#cv.SetIdentity(self.kalman.measurement_noise_cov, cv.RealScalar(measurementNoiseCovariance))
#cv.SetIdentity(self.kalman.error_cov_post, cv.RealScalar(errorCovariancePost))
self.predicted = np.array((2,1), np.float32)
self.corrected = np.zeros((2,1), np.float32)
def update(self, x, y):
'''
Updates the filter with a new X,Y measurement
'''
self.kalman_measurement = np.array([[np.float32(x)],[np.float32(y)]])
#self.kalman_measurement[0, 0] = x
#self.kalman_measurement[1, 0] = y
#print self.kalman.predict()
self.predicted = self.kalman.predict()
self.corrected = self.kalman.correct(self.kalman_measurement)
#self.corrected = cv.KalmanCorrect(self.kalman, self.kalman_measurement)
def getEstimate(self):
'''
Returns the current X,Y estimate.
'''
return self.corrected[0,0], self.corrected[1,0]
def getPrediction(self):
'''
Returns the current X,Y prediction.
'''
return self.predicted[0,0], self.predicted[1,0]
| 41.068966 | 141 | 0.623286 |
import cv2
import numpy as np
class Kalman2D(object):
def __init__(self, processNoiseCovariance=1e-4, measurementNoiseCovariance=1e-1, errorCovariancePost=0.1):
self.kalman = cv2.KalmanFilter(4, 2, 0)
self.kalman_measurement = np.array([[1.],[1.]],np.float32)
self.kalman.transitionMatrix = np.array([[1.,0., 1.,0.], [0., 1., 0., 1.], [0., 0., 1., 0.], [0., 0., 0., 1.]],np.float32)
self.kalman.measurementMatrix = np.array([[1.,0.,0.,0.],[0.,1.,0.,0.]],np.float32)
self.kalman.processNoiseCov = processNoiseCovariance * np.array([[1.,0.,0.,0.],[0.,1.,0.,0.],[0.,0.,1.,0.],[0.,0.,0.,1.]],np.float32)
self.kalman.measurementNoiseCov = np.array([[1.,0.],[0.,1.]],np.float32) * measurementNoiseCovariance
self.kalman.errorCovPost = np.array([[1.,0., 0, 0],[0.,1., 0, 0],[0.,0, 1, 0],[0.,0, 0, 1]],np.float32) * errorCovariancePost
self.predicted = np.array((2,1), np.float32)
self.corrected = np.zeros((2,1), np.float32)
def update(self, x, y):
self.kalman_measurement = np.array([[np.float32(x)],[np.float32(y)]])
self.predicted = self.kalman.predict()
self.corrected = self.kalman.correct(self.kalman_measurement)
def getEstimate(self):
return self.corrected[0,0], self.corrected[1,0]
def getPrediction(self):
return self.predicted[0,0], self.predicted[1,0]
| true | true |
f7f615c5498020c52d6fd362f6b4133390c793ea | 14,407 | py | Python | pavement.py | illume/numpy3k | 42171a679b0ef24932fe08fc88cce039abf6de2b | [
"BSD-3-Clause"
] | 2 | 2020-07-03T12:00:29.000Z | 2021-04-18T06:54:30.000Z | pavement.py | plaes/numpy | 209866bc55eee56e92692307c4437af024bae87d | [
"BSD-3-Clause"
] | null | null | null | pavement.py | plaes/numpy | 209866bc55eee56e92692307c4437af024bae87d | [
"BSD-3-Clause"
] | null | null | null | """
This paver file is intented to help with the release process as much as
possible. It relies on virtualenv to generate 'bootstrap' environments as
independent from the user system as possible (e.g. to make sure the sphinx doc
is built against the built numpy, not an installed one).
Building a fancy dmg from scratch
=================================
Clone the numpy-macosx-installer git repo from GitHub into the source tree
(numpy-macosx-installer should be in the same directory as setup.py). Then, do
as follows::
git clone git://github.com/cournape/macosx-numpy-installer
# remove build dir, and everything generated by previous paver calls
# (included generated installers). Use with care !
paver nuke
    paver bootstrap && source bootstrap/bin/activate
# Installing numpy is necessary to build the correct documentation (because
# of autodoc)
python setupegg.py install
paver dmg
Building a simple (no-superpack) windows installer from wine
============================================================
It assumes that blas/lapack are in c:\local\lib inside drive_c. Build python
2.5 and python 2.6 installers.
paver bdist_wininst_simple
You will have to configure your wine python locations (WINE_PYS).
The superpack requires all the atlas libraries for every arch to be installed
(see SITECFG), and can then be built as follows::
paver bdist_superpack
Building changelog + notes
==========================
Assumes you have git and the binaries/tarballs in installers/::
paver write_release
paver write_note
This automatically puts the checksum into NOTES.txt, and writes the Changelog
which can be uploaded to sourceforge.
TODO
====
- the script is messy, lots of global variables
- make it more easily customizable (through command line args)
- missing targets: install & test, sdist test, debian packaging
- fix bdist_mpkg: we build the same source twice -> how to make sure we use
the same underlying python for egg install in venv and for bdist_mpkg
"""
import os
import sys
import subprocess
import re
import shutil
# NOTE(review): there is no stdlib module named 'hash', so this first import
# always fails -- presumably 'hashlib' was meant.  In practice the deprecated
# python2 'md5' *module* fallback is what gets imported, and downstream code
# uses the module form (md5.md5); confirm before changing the first line.
try:
    from hash import md5
except ImportError:
    import md5
import distutils
try:
from paver.tasks import VERSION as _PVER
if not _PVER >= '1.0':
raise RuntimeError("paver version >= 1.0 required (was %s)" % _PVER)
except ImportError, e:
raise RuntimeError("paver version >= 1.0 required")
import paver
import paver.doctools
import paver.path
from paver.easy import options, Bunch, task, needs, dry, sh, call_task
# Pull the version string out of setup.py so installer/tarball names stay in
# sync with the build.
setup_py = __import__("setup")
FULLVERSION = setup_py.FULLVERSION
# Wine config for win32 builds
# Default environment for the wine python subprocess.  NOTE(review):
# subprocess expects a mapping for ``env``, not a string; in-tree callers
# always pass a dict from SITECFG -- confirm the default path is ever used.
WINE_SITE_CFG = ""
if sys.platform == "darwin":
    WINE_PY25 = "/Applications/Darwine/Wine.bundle/Contents/bin/wine /Users/david/.wine/drive_c/Python25/python.exe"
    WINE_PY26 = "/Applications/Darwine/Wine.bundle/Contents/bin/wine /Users/david/.wine/drive_c/Python26/python.exe"
else:
    WINE_PY25 = "/home/david/.wine/drive_c/Python25/python.exe"
    WINE_PY26 = "/home/david/.wine/drive_c/Python26/python.exe"
# Map "major.minor" python version -> wine python executable.
WINE_PYS = {'2.6' : WINE_PY26, '2.5': WINE_PY25}
# Scratch area where arch-specific installers are collected before the
# superpack installer is assembled.
SUPERPACK_BUILD = 'build-superpack'
SUPERPACK_BINDIR = os.path.join(SUPERPACK_BUILD, 'binaries')
# Where to put built documentation (where it will picked up for copy to
# binaries)
PDF_DESTDIR = paver.path.path('build') / 'pdf'
HTML_DESTDIR = paver.path.path('build') / 'html'
# Sphinx documentation tree layout.
DOC_ROOT = paver.path.path("doc")
DOC_SRC = DOC_ROOT / "source"
DOC_BLD = DOC_ROOT / "build"
DOC_BLD_LATEX = DOC_BLD / "latex"
# Source of the release notes
RELEASE = 'doc/release/1.3.0-notes.rst'
# Start/end of the log (from git)
LOG_START = 'tags/1.2.0'
LOG_END = 'master'
# Virtualenv bootstrap stuff
BOOTSTRAP_DIR = "bootstrap"
BOOTSTRAP_PYEXEC = "%s/bin/python" % BOOTSTRAP_DIR
BOOTSTRAP_SCRIPT = "%s/bootstrap.py" % BOOTSTRAP_DIR
# Staging directory used when building the OS X dmg image.
DMG_CONTENT = paver.path.path('numpy-macosx-installer') / 'content'
# Where to put the final installers, as put on sourceforge
RELEASE_DIR = 'release'
INSTALLERS_DIR = os.path.join(RELEASE_DIR, 'installers')
# XXX: fix this in a sane way
# Framework pythons used by bdist_mpkg, keyed by "majorminor".
MPKG_PYTHON = {"25": "/Library/Frameworks/Python.framework/Versions/2.5/bin/python",
               "26": "/Library/Frameworks/Python.framework/Versions/2.6/bin/python"}
# Default paver options for the sphinx, virtualenv and wininst tasks.
options(sphinx=Bunch(builddir="build", sourcedir="source", docroot='doc'),
        virtualenv=Bunch(script_name=BOOTSTRAP_SCRIPT,packages_to_install=["sphinx==0.6.1"]),
        wininst=Bunch(pyver="2.5", scratch=True))
# Bootstrap stuff
@task
def bootstrap():
    """Create a virtualenv bootstrap environment in ./bootstrap."""
    target_dir = paver.path.path(BOOTSTRAP_DIR)
    if not target_dir.exists():
        target_dir.mkdir()
    # Generate bootstrap.py via paver's virtualenv support, then run it
    # inside the bootstrap directory with the current interpreter.
    call_task('paver.virtual.bootstrap')
    sh('cd %s; %s bootstrap.py' % (BOOTSTRAP_DIR, sys.executable))
@task
def clean():
    """Remove build, dist and egg-info directories plus the sphinx build tree."""
    for garbage in ('build', 'dist', 'numpy.egg-info'):
        paver.path.path(garbage).rmtree()
    (paver.path.path('doc') / options.sphinx.builddir).rmtree()
@task
def clean_bootstrap():
    """Remove the virtualenv bootstrap directory."""
    # Use the shared BOOTSTRAP_DIR constant (same value, "bootstrap") instead
    # of a duplicated hard-coded path, so this stays in sync with the
    # bootstrap task if the directory ever moves.
    paver.path.path(BOOTSTRAP_DIR).rmtree()
@task
@needs('clean', 'clean_bootstrap')
def nuke():
    """Remove everything: build dir, installers, bootstrap dirs, etc..."""
    # clean/clean_bootstrap (via @needs) handle the rest; only the superpack
    # scratch area and the collected installers remain.
    for leftover in (SUPERPACK_BUILD, INSTALLERS_DIR):
        paver.path.path(leftover).rmtree()
# NOTES/Changelog stuff
def compute_md5():
    """Return a list of '<md5hex> <filename>' strings, one per file in the
    installers directory."""
    # Local import: the module-level md5 import relies on the deprecated
    # python2 'md5' module; hashlib works on both 2.5+ and 3.x.
    import hashlib
    released = paver.path.path(INSTALLERS_DIR).listdir()
    checksums = []
    for fn in released:
        # Read in binary mode: installers are binary files, and text mode
        # would corrupt the digest on Windows.  Close the handle explicitly
        # instead of leaking it.
        f = open(fn, 'rb')
        try:
            m = hashlib.md5(f.read())
        finally:
            f.close()
        checksums.append('%s %s' % (m.hexdigest(), fn))
    return checksums
def write_release_task(filename='NOTES.txt'):
    """Copy the release notes to *filename* and append a Checksums section
    with the md5 of every file in the installers directory."""
    source = paver.path.path(RELEASE)
    target = paver.path.path(filename)
    if target.exists():
        target.remove()
    source.copy(target)
    # Close the handle explicitly (the original leaked it) so the appended
    # section is flushed even if checksum computation raises.
    ftarget = open(str(target), 'a')
    try:
        ftarget.writelines("""
Checksums
=========

""")
        ftarget.writelines(['%s\n' % c for c in compute_md5()])
    finally:
        ftarget.close()
def write_log_task(filename='Changelog'):
    """Dump the git-svn log between LOG_START and LOG_END into *filename*."""
    proc = subprocess.Popen(
        ['git', 'svn', 'log', '%s..%s' % (LOG_START, LOG_END)],
        stdout=subprocess.PIPE)
    log_text = proc.communicate()[0]
    out = open(filename, 'w')
    out.write(log_text)
    out.close()
@task
def write_release():
    """Write NOTES.txt: release notes plus md5 checksums of the installers."""
    write_release_task()
@task
def write_log():
    """Write the Changelog file from the git-svn log."""
    write_log_task()
# Doc stuff
@task
def html(options):
    """Build the numpy html documentation and copy it into build/html."""
    # Invoke sphinx through the doc Makefile rather than paver's own html
    # target: the latter runs into numpy bootstrapping problems.
    subprocess.check_call(["make", "html"], cwd="doc")
    built = paver.path.path("doc") / options.sphinx.builddir / "html"
    HTML_DESTDIR.rmtree()
    built.copytree(HTML_DESTDIR)
@task
def latex():
    """Build numpy documentation in latex format."""
    # Delegates to the doc Makefile; output lands under doc/build/latex.
    subprocess.check_call(["make", "latex"], cwd="doc")
@task
@needs('latex')
def pdf():
    """Build the pdf user guide and reference and copy them into build/pdf."""
    def run_latex_pdf():
        subprocess.check_call(["make", "all-pdf"], cwd=str(DOC_BLD_LATEX))
    # Go through paver's dry() so --dry-run skips the latex build.
    dry("Build pdf doc", run_latex_pdf)
    PDF_DESTDIR.rmtree()
    PDF_DESTDIR.makedirs()
    # Rename the generated pdfs to their distribution names.
    (DOC_BLD_LATEX / "numpy-user.pdf").copy(PDF_DESTDIR / "userguide.pdf")
    (DOC_BLD_LATEX / "numpy-ref.pdf").copy(PDF_DESTDIR / "reference.pdf")
def tarball_name(type='gztar'):
    """Return the source distribution filename for archive *type*
    ('gztar' or 'zip'); raise ValueError for anything else."""
    root = 'numpy-%s' % FULLVERSION
    extensions = {'gztar': '.tar.gz', 'zip': '.zip'}
    if type not in extensions:
        raise ValueError("Unknown type %s" % type)
    return root + extensions[type]
@task
def sdist():
    """Build the gztar and zip source distributions and copy them into the
    installers directory."""
    # Bypass paver and call setup.py directly: paver and numpy.distutils do
    # not play well together.
    sh('python setup.py sdist --formats=gztar,zip')
    if not os.path.exists(INSTALLERS_DIR):
        os.makedirs(INSTALLERS_DIR)
    for archive_type in ('gztar', 'zip'):
        name = tarball_name(archive_type)
        shutil.copy(os.path.join('dist', name),
                    os.path.join(INSTALLERS_DIR, name))
#------------------
# Wine-based builds
#------------------
# Per-arch environment overrides pointing BLAS/LAPACK at the ATLAS binaries
# built for each instruction set inside the wine drive_c.
SSE3_CFG = {'BLAS': r'C:\local\lib\yop\sse3', 'LAPACK': r'C:\local\lib\yop\sse3'}
SSE2_CFG = {'BLAS': r'C:\local\lib\yop\sse2', 'LAPACK': r'C:\local\lib\yop\sse2'}
NOSSE_CFG = {'BLAS': r'C:\local\lib\yop\nosse', 'LAPACK': r'C:\local\lib\yop\nosse'}
# Arch name -> environment dict, consumed by bdist_wininst_arch.
SITECFG = {"sse2" : SSE2_CFG, "sse3" : SSE3_CFG, "nosse" : NOSSE_CFG}
def internal_wininst_name(arch, ismsi=False):
    """Return the name of the wininst as it will be inside the superpack
    (i.e. with the arch encoded)."""
    ext = '.msi' if ismsi else '.exe'
    return "numpy-%s-%s%s" % (FULLVERSION, arch, ext)
def wininst_name(pyver, ismsi=False):
    """Return the name of the installer built by the wininst command."""
    # distutils hardcodes this naming scheme; reproduce it here.
    ext = '.msi' if ismsi else '.exe'
    return "numpy-%s.win32-py%s%s" % (FULLVERSION, pyver, ext)
def bdist_wininst_arch(pyver, arch, scratch=True):
    """Build a wininst installer for one arch (nosse/sse2/sse3) and move it
    into the superpack binaries directory."""
    if scratch:
        # Start from a pristine build directory.
        paver.path.path('build').rmtree()
    if not os.path.exists(SUPERPACK_BINDIR):
        os.makedirs(SUPERPACK_BINDIR)
    _bdist_wininst(pyver, SITECFG[arch])
    built = os.path.join('dist', wininst_name(pyver))
    dest = os.path.join(SUPERPACK_BINDIR, internal_wininst_name(arch))
    if os.path.exists(dest):
        os.remove(dest)
    os.rename(built, dest)
def superpack_name(pyver, numver):
    """Return the filename of the arch-switching superpack installer."""
    return 'numpy-' + numver + '-win32-superpack-python' + pyver + '.exe'
def prepare_nsis_script(pyver, numver):
    """Instantiate the NSIS superpack installer script template, substituting
    the installer name and per-arch binary names, into SUPERPACK_BUILD."""
    if not os.path.exists(SUPERPACK_BUILD):
        os.makedirs(SUPERPACK_BUILD)

    tpl = os.path.join('tools/win32build/nsis_scripts', 'numpy-superinstaller.nsi.in')
    # Close both file handles explicitly (the original leaked them).
    source = open(tpl, 'r')
    try:
        cnt = source.read()
    finally:
        source.close()

    installer_name = superpack_name(pyver, numver)
    cnt = cnt.replace('@NUMPY_INSTALLER_NAME@', installer_name)
    for arch in ['nosse', 'sse2', 'sse3']:
        cnt = cnt.replace('@%s_BINARY@' % arch.upper(),
                          internal_wininst_name(arch))

    target = open(os.path.join(SUPERPACK_BUILD, 'numpy-superinstaller.nsi'), 'w')
    try:
        target.write(cnt)
    finally:
        target.close()
@task
def bdist_wininst_nosse(options):
    """Build the nosse wininst installer."""
    # Links against the generic (no SSE) ATLAS binaries; see SITECFG.
    bdist_wininst_arch(options.wininst.pyver, 'nosse', scratch=options.wininst.scratch)
@task
def bdist_wininst_sse2(options):
    """Build the sse2 wininst installer."""
    # Links against the SSE2 ATLAS binaries; see SITECFG.
    bdist_wininst_arch(options.wininst.pyver, 'sse2', scratch=options.wininst.scratch)
@task
def bdist_wininst_sse3(options):
    """Build the sse3 wininst installer."""
    # Links against the SSE3 ATLAS binaries; see SITECFG.
    bdist_wininst_arch(options.wininst.pyver, 'sse3', scratch=options.wininst.scratch)
@task
@needs('bdist_wininst_nosse', 'bdist_wininst_sse2', 'bdist_wininst_sse3')
def bdist_superpack(options):
    """Build all arch specific wininst installers and wrap them into the
    arch-switching superpack installer."""
    prepare_nsis_script(options.wininst.pyver, FULLVERSION)
    subprocess.check_call(['makensis', 'numpy-superinstaller.nsi'],
                          cwd=SUPERPACK_BUILD)
    # Stash the freshly built superpack in the installers directory.
    if not os.path.exists(INSTALLERS_DIR):
        os.makedirs(INSTALLERS_DIR)
    installer = superpack_name(options.wininst.pyver, FULLVERSION)
    shutil.copy(os.path.join(SUPERPACK_BUILD, installer),
                os.path.join(INSTALLERS_DIR, installer))
@task
@needs('clean', 'bdist_wininst')
def bdist_wininst_simple():
"""Simple wininst-based installer."""
_bdist_wininst(pyver=options.wininst.pyver)
def _bdist_wininst(pyver, cfg_env=WINE_SITE_CFG):
subprocess.check_call([WINE_PYS[pyver], 'setup.py', 'build', '-c', 'mingw32', 'bdist_wininst'], env=cfg_env)
#-------------------
# Mac OS X installer
#-------------------
def macosx_version():
if not sys.platform == 'darwin':
raise ValueError("Not darwin ??")
st = subprocess.Popen(["sw_vers"], stdout=subprocess.PIPE)
out = st.stdout.readlines()
ver = re.compile("ProductVersion:\s+([0-9]+)\.([0-9]+)\.([0-9]+)")
for i in out:
m = ver.match(i)
if m:
return m.groups()
def mpkg_name():
maj, min = macosx_version()[:2]
pyver = ".".join([str(i) for i in sys.version_info[:2]])
return "numpy-%s-py%s-macosx%s.%s.mpkg" % \
(FULLVERSION, pyver, maj, min)
def dmg_name():
maj, min = macosx_version()[:2]
pyver = ".".join([str(i) for i in sys.version_info[:2]])
return "numpy-%s-py%s-macosx%s.%s.dmg" % \
(FULLVERSION, pyver, maj, min)
@task
def bdist_mpkg():
call_task("clean")
pyver = "".join([str(i) for i in sys.version_info[:2]])
sh("%s setupegg.py bdist_mpkg" % MPKG_PYTHON[pyver])
@task
@needs("bdist_mpkg", "pdf")
def dmg():
pyver = ".".join([str(i) for i in sys.version_info[:2]])
dmg_n = dmg_name()
dmg = paver.path.path('numpy-macosx-installer') / dmg_n
if dmg.exists():
dmg.remove()
# Clean the image source
content = DMG_CONTENT
content.rmtree()
content.mkdir()
# Copy mpkg into image source
mpkg_n = mpkg_name()
mpkg_tn = "numpy-%s-py%s.mpkg" % (FULLVERSION, pyver)
mpkg_source = paver.path.path("dist") / mpkg_n
mpkg_target = content / mpkg_tn
mpkg_source.copytree(content / mpkg_tn)
# Copy docs into image source
#html_docs = HTML_DESTDIR
#html_docs.copytree(content / "Documentation" / "html")
pdf_docs = DMG_CONTENT / "Documentation"
pdf_docs.rmtree()
pdf_docs.makedirs()
user = PDF_DESTDIR / "userguide.pdf"
user.copy(pdf_docs / "userguide.pdf")
ref = PDF_DESTDIR / "reference.pdf"
ref.copy(pdf_docs / "reference.pdf")
# Build the dmg
cmd = ["./create-dmg", "--window-size", "500", "500", "--background",
"art/dmgbackground.png", "--icon-size", "128", "--icon", mpkg_tn,
"125", "320", "--icon", "Documentation", "375", "320", "--volname", "numpy",
dmg_n, "./content"]
subprocess.check_call(cmd, cwd="numpy-macosx-installer")
@task
def simple_dmg():
# Build the dmg
image_name = "numpy-%s.dmg" % FULLVERSION
image = paver.path.path(image_name)
image.remove()
cmd = ["hdiutil", "create", image_name, "-srcdir", str(builddir)]
sh(" ".join(cmd))
@task
def write_note_changelog():
write_release_task(os.path.join(RELEASE_DIR, 'NOTES.txt'))
write_log_task(os.path.join(RELEASE_DIR, 'Changelog'))
| 31.873894 | 116 | 0.66669 | """
This paver file is intented to help with the release process as much as
possible. It relies on virtualenv to generate 'bootstrap' environments as
independent from the user system as possible (e.g. to make sure the sphinx doc
is built against the built numpy, not an installed one).
Building a fancy dmg from scratch
=================================
Clone the numpy-macosx-installer git repo from on github into the source tree
(numpy-macosx-installer should be in the same directory as setup.py). Then, do
as follows::
git clone git://github.com/cournape/macosx-numpy-installer
# remove build dir, and everything generated by previous paver calls
# (included generated installers). Use with care !
paver nuke
paver bootstrap && source boostrap/bin/activate
# Installing numpy is necessary to build the correct documentation (because
# of autodoc)
python setupegg.py install
paver dmg
Building a simple (no-superpack) windows installer from wine
============================================================
It assumes that blas/lapack are in c:\local\lib inside drive_c. Build python
2.5 and python 2.6 installers.
paver bdist_wininst_simple
You will have to configure your wine python locations (WINE_PYS).
The superpack requires all the atlas libraries for every arch to be installed
(see SITECFG), and can then be built as follows::
paver bdist_superpack
Building changelog + notes
==========================
Assumes you have git and the binaries/tarballs in installers/::
paver write_release
paver write_note
This automatically put the checksum into NOTES.txt, and write the Changelog
which can be uploaded to sourceforge.
TODO
====
- the script is messy, lots of global variables
- make it more easily customizable (through command line args)
- missing targets: install & test, sdist test, debian packaging
- fix bdist_mpkg: we build the same source twice -> how to make sure we use
the same underlying python for egg install in venv and for bdist_mpkg
"""
import os
import sys
import subprocess
import re
import shutil
try:
from hash import md5
except ImportError:
import md5
import distutils
try:
from paver.tasks import VERSION as _PVER
if not _PVER >= '1.0':
raise RuntimeError("paver version >= 1.0 required (was %s)" % _PVER)
except ImportError, e:
raise RuntimeError("paver version >= 1.0 required")
import paver
import paver.doctools
import paver.path
from paver.easy import options, Bunch, task, needs, dry, sh, call_task
setup_py = __import__("setup")
FULLVERSION = setup_py.FULLVERSION
WINE_SITE_CFG = ""
if sys.platform == "darwin":
WINE_PY25 = "/Applications/Darwine/Wine.bundle/Contents/bin/wine /Users/david/.wine/drive_c/Python25/python.exe"
WINE_PY26 = "/Applications/Darwine/Wine.bundle/Contents/bin/wine /Users/david/.wine/drive_c/Python26/python.exe"
else:
WINE_PY25 = "/home/david/.wine/drive_c/Python25/python.exe"
WINE_PY26 = "/home/david/.wine/drive_c/Python26/python.exe"
WINE_PYS = {'2.6' : WINE_PY26, '2.5': WINE_PY25}
SUPERPACK_BUILD = 'build-superpack'
SUPERPACK_BINDIR = os.path.join(SUPERPACK_BUILD, 'binaries')
PDF_DESTDIR = paver.path.path('build') / 'pdf'
HTML_DESTDIR = paver.path.path('build') / 'html'
DOC_ROOT = paver.path.path("doc")
DOC_SRC = DOC_ROOT / "source"
DOC_BLD = DOC_ROOT / "build"
DOC_BLD_LATEX = DOC_BLD / "latex"
RELEASE = 'doc/release/1.3.0-notes.rst'
LOG_START = 'tags/1.2.0'
LOG_END = 'master'
BOOTSTRAP_DIR = "bootstrap"
BOOTSTRAP_PYEXEC = "%s/bin/python" % BOOTSTRAP_DIR
BOOTSTRAP_SCRIPT = "%s/bootstrap.py" % BOOTSTRAP_DIR
DMG_CONTENT = paver.path.path('numpy-macosx-installer') / 'content'
RELEASE_DIR = 'release'
INSTALLERS_DIR = os.path.join(RELEASE_DIR, 'installers')
MPKG_PYTHON = {"25": "/Library/Frameworks/Python.framework/Versions/2.5/bin/python",
"26": "/Library/Frameworks/Python.framework/Versions/2.6/bin/python"}
options(sphinx=Bunch(builddir="build", sourcedir="source", docroot='doc'),
virtualenv=Bunch(script_name=BOOTSTRAP_SCRIPT,packages_to_install=["sphinx==0.6.1"]),
wininst=Bunch(pyver="2.5", scratch=True))
@task
def bootstrap():
"""create virtualenv in ./install"""
install = paver.path.path(BOOTSTRAP_DIR)
if not install.exists():
install.mkdir()
call_task('paver.virtual.bootstrap')
sh('cd %s; %s bootstrap.py' % (BOOTSTRAP_DIR, sys.executable))
@task
def clean():
"""Remove build, dist, egg-info garbage."""
d = ['build', 'dist', 'numpy.egg-info']
for i in d:
paver.path.path(i).rmtree()
(paver.path.path('doc') / options.sphinx.builddir).rmtree()
@task
def clean_bootstrap():
paver.path.path('bootstrap').rmtree()
@task
@needs('clean', 'clean_bootstrap')
def nuke():
"""Remove everything: build dir, installers, bootstrap dirs, etc..."""
d = [SUPERPACK_BUILD, INSTALLERS_DIR]
for i in d:
paver.path.path(i).rmtree()
def compute_md5():
released = paver.path.path(INSTALLERS_DIR).listdir()
checksums = []
for f in released:
m = md5.md5(open(f, 'r').read())
checksums.append('%s %s' % (m.hexdigest(), f))
return checksums
def write_release_task(filename='NOTES.txt'):
source = paver.path.path(RELEASE)
target = paver.path.path(filename)
if target.exists():
target.remove()
source.copy(target)
ftarget = open(str(target), 'a')
ftarget.writelines("""
Checksums
=========
""")
ftarget.writelines(['%s\n' % c for c in compute_md5()])
def write_log_task(filename='Changelog'):
st = subprocess.Popen(
['git', 'svn', 'log', '%s..%s' % (LOG_START, LOG_END)],
stdout=subprocess.PIPE)
out = st.communicate()[0]
a = open(filename, 'w')
a.writelines(out)
a.close()
@task
def write_release():
write_release_task()
@task
def write_log():
write_log_task()
@task
def html(options):
"""Build numpy documentation and put it into build/docs"""
subprocess.check_call(["make", "html"], cwd="doc")
builtdocs = paver.path.path("doc") / options.sphinx.builddir / "html"
HTML_DESTDIR.rmtree()
builtdocs.copytree(HTML_DESTDIR)
@task
def latex():
"""Build numpy documentation in latex format."""
subprocess.check_call(["make", "latex"], cwd="doc")
@task
@needs('latex')
def pdf():
def build_pdf():
subprocess.check_call(["make", "all-pdf"], cwd=str(DOC_BLD_LATEX))
dry("Build pdf doc", build_pdf)
PDF_DESTDIR.rmtree()
PDF_DESTDIR.makedirs()
user = DOC_BLD_LATEX / "numpy-user.pdf"
user.copy(PDF_DESTDIR / "userguide.pdf")
ref = DOC_BLD_LATEX / "numpy-ref.pdf"
ref.copy(PDF_DESTDIR / "reference.pdf")
def tarball_name(type='gztar'):
root = 'numpy-%s' % FULLVERSION
if type == 'gztar':
return root + '.tar.gz'
elif type == 'zip':
return root + '.zip'
raise ValueError("Unknown type %s" % type)
@task
def sdist():
# To be sure to bypass paver when building sdist... paver + numpy.distutils
# do not play well together.
sh('python setup.py sdist --formats=gztar,zip')
# Copy the superpack into installers dir
if not os.path.exists(INSTALLERS_DIR):
os.makedirs(INSTALLERS_DIR)
for t in ['gztar', 'zip']:
source = os.path.join('dist', tarball_name(t))
target = os.path.join(INSTALLERS_DIR, tarball_name(t))
shutil.copy(source, target)
#------------------
# Wine-based builds
#------------------
SSE3_CFG = {'BLAS': r'C:\local\lib\yop\sse3', 'LAPACK': r'C:\local\lib\yop\sse3'}
SSE2_CFG = {'BLAS': r'C:\local\lib\yop\sse2', 'LAPACK': r'C:\local\lib\yop\sse2'}
NOSSE_CFG = {'BLAS': r'C:\local\lib\yop\nosse', 'LAPACK': r'C:\local\lib\yop\nosse'}
SITECFG = {"sse2" : SSE2_CFG, "sse3" : SSE3_CFG, "nosse" : NOSSE_CFG}
def internal_wininst_name(arch, ismsi=False):
"""Return the name of the wininst as it will be inside the superpack (i.e.
with the arch encoded."""
if ismsi:
ext = '.msi'
else:
ext = '.exe'
return "numpy-%s-%s%s" % (FULLVERSION, arch, ext)
def wininst_name(pyver, ismsi=False):
"""Return the name of the installer built by wininst command."""
# Yeah, the name logic is harcoded in distutils. We have to reproduce it
# here
if ismsi:
ext = '.msi'
else:
ext = '.exe'
name = "numpy-%s.win32-py%s%s" % (FULLVERSION, pyver, ext)
return name
def bdist_wininst_arch(pyver, arch, scratch=True):
"""Arch specific wininst build."""
if scratch:
paver.path.path('build').rmtree()
if not os.path.exists(SUPERPACK_BINDIR):
os.makedirs(SUPERPACK_BINDIR)
_bdist_wininst(pyver, SITECFG[arch])
source = os.path.join('dist', wininst_name(pyver))
target = os.path.join(SUPERPACK_BINDIR, internal_wininst_name(arch))
if os.path.exists(target):
os.remove(target)
os.rename(source, target)
def superpack_name(pyver, numver):
"""Return the filename of the superpack installer."""
return 'numpy-%s-win32-superpack-python%s.exe' % (numver, pyver)
def prepare_nsis_script(pyver, numver):
if not os.path.exists(SUPERPACK_BUILD):
os.makedirs(SUPERPACK_BUILD)
tpl = os.path.join('tools/win32build/nsis_scripts', 'numpy-superinstaller.nsi.in')
source = open(tpl, 'r')
target = open(os.path.join(SUPERPACK_BUILD, 'numpy-superinstaller.nsi'), 'w')
installer_name = superpack_name(pyver, numver)
cnt = "".join(source.readlines())
cnt = cnt.replace('@NUMPY_INSTALLER_NAME@', installer_name)
for arch in ['nosse', 'sse2', 'sse3']:
cnt = cnt.replace('@%s_BINARY@' % arch.upper(),
internal_wininst_name(arch))
target.write(cnt)
@task
def bdist_wininst_nosse(options):
"""Build the nosse wininst installer."""
bdist_wininst_arch(options.wininst.pyver, 'nosse', scratch=options.wininst.scratch)
@task
def bdist_wininst_sse2(options):
"""Build the sse2 wininst installer."""
bdist_wininst_arch(options.wininst.pyver, 'sse2', scratch=options.wininst.scratch)
@task
def bdist_wininst_sse3(options):
"""Build the sse3 wininst installer."""
bdist_wininst_arch(options.wininst.pyver, 'sse3', scratch=options.wininst.scratch)
@task
@needs('bdist_wininst_nosse', 'bdist_wininst_sse2', 'bdist_wininst_sse3')
def bdist_superpack(options):
"""Build all arch specific wininst installers."""
prepare_nsis_script(options.wininst.pyver, FULLVERSION)
subprocess.check_call(['makensis', 'numpy-superinstaller.nsi'],
cwd=SUPERPACK_BUILD)
# Copy the superpack into installers dir
if not os.path.exists(INSTALLERS_DIR):
os.makedirs(INSTALLERS_DIR)
source = os.path.join(SUPERPACK_BUILD,
superpack_name(options.wininst.pyver, FULLVERSION))
target = os.path.join(INSTALLERS_DIR,
superpack_name(options.wininst.pyver, FULLVERSION))
shutil.copy(source, target)
@task
@needs('clean', 'bdist_wininst')
def bdist_wininst_simple():
"""Simple wininst-based installer."""
_bdist_wininst(pyver=options.wininst.pyver)
def _bdist_wininst(pyver, cfg_env=WINE_SITE_CFG):
subprocess.check_call([WINE_PYS[pyver], 'setup.py', 'build', '-c', 'mingw32', 'bdist_wininst'], env=cfg_env)
#-------------------
# Mac OS X installer
#-------------------
def macosx_version():
if not sys.platform == 'darwin':
raise ValueError("Not darwin ??")
st = subprocess.Popen(["sw_vers"], stdout=subprocess.PIPE)
out = st.stdout.readlines()
ver = re.compile("ProductVersion:\s+([0-9]+)\.([0-9]+)\.([0-9]+)")
for i in out:
m = ver.match(i)
if m:
return m.groups()
def mpkg_name():
maj, min = macosx_version()[:2]
pyver = ".".join([str(i) for i in sys.version_info[:2]])
return "numpy-%s-py%s-macosx%s.%s.mpkg" % \
(FULLVERSION, pyver, maj, min)
def dmg_name():
maj, min = macosx_version()[:2]
pyver = ".".join([str(i) for i in sys.version_info[:2]])
return "numpy-%s-py%s-macosx%s.%s.dmg" % \
(FULLVERSION, pyver, maj, min)
@task
def bdist_mpkg():
call_task("clean")
pyver = "".join([str(i) for i in sys.version_info[:2]])
sh("%s setupegg.py bdist_mpkg" % MPKG_PYTHON[pyver])
@task
@needs("bdist_mpkg", "pdf")
def dmg():
pyver = ".".join([str(i) for i in sys.version_info[:2]])
dmg_n = dmg_name()
dmg = paver.path.path('numpy-macosx-installer') / dmg_n
if dmg.exists():
dmg.remove()
# Clean the image source
content = DMG_CONTENT
content.rmtree()
content.mkdir()
# Copy mpkg into image source
mpkg_n = mpkg_name()
mpkg_tn = "numpy-%s-py%s.mpkg" % (FULLVERSION, pyver)
mpkg_source = paver.path.path("dist") / mpkg_n
mpkg_target = content / mpkg_tn
mpkg_source.copytree(content / mpkg_tn)
# Copy docs into image source
#html_docs = HTML_DESTDIR
#html_docs.copytree(content / "Documentation" / "html")
pdf_docs = DMG_CONTENT / "Documentation"
pdf_docs.rmtree()
pdf_docs.makedirs()
user = PDF_DESTDIR / "userguide.pdf"
user.copy(pdf_docs / "userguide.pdf")
ref = PDF_DESTDIR / "reference.pdf"
ref.copy(pdf_docs / "reference.pdf")
# Build the dmg
cmd = ["./create-dmg", "--window-size", "500", "500", "--background",
"art/dmgbackground.png", "--icon-size", "128", "--icon", mpkg_tn,
"125", "320", "--icon", "Documentation", "375", "320", "--volname", "numpy",
dmg_n, "./content"]
subprocess.check_call(cmd, cwd="numpy-macosx-installer")
@task
def simple_dmg():
# Build the dmg
image_name = "numpy-%s.dmg" % FULLVERSION
image = paver.path.path(image_name)
image.remove()
cmd = ["hdiutil", "create", image_name, "-srcdir", str(builddir)]
sh(" ".join(cmd))
@task
def write_note_changelog():
write_release_task(os.path.join(RELEASE_DIR, 'NOTES.txt'))
write_log_task(os.path.join(RELEASE_DIR, 'Changelog'))
| false | true |
f7f6174e028a850a6d5030423ba6a670970fd734 | 3,413 | py | Python | ask-smapi-model/ask_smapi_model/v1/skill/interaction_model/value_catalog.py | rivamarco/alexa-apis-for-python | 62e3a9057a26003e836fa09aa12a2e1c8b62d6e0 | [
"Apache-2.0"
] | 2 | 2021-10-30T06:52:48.000Z | 2021-11-16T12:34:16.000Z | ask-smapi-model/ask_smapi_model/v1/skill/interaction_model/value_catalog.py | Shreyas-vgr/alexa-apis-for-python | 74ea73b3b6a03fd9cb735fb8c1fb2bd961faab54 | [
"Apache-2.0"
] | null | null | null | ask-smapi-model/ask_smapi_model/v1/skill/interaction_model/value_catalog.py | Shreyas-vgr/alexa-apis-for-python | 74ea73b3b6a03fd9cb735fb8c1fb2bd961faab54 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
class ValueCatalog(object):
"""
Catalog reference to provide values.
:param id: CatalogId
:type id: (optional) str
:param version: Catalog version
:type version: (optional) str
"""
deserialized_types = {
'id': 'str',
'version': 'str'
} # type: Dict
attribute_map = {
'id': 'id',
'version': 'version'
} # type: Dict
supports_multiple_types = False
def __init__(self, id=None, version=None):
# type: (Optional[str], Optional[str]) -> None
"""Catalog reference to provide values.
:param id: CatalogId
:type id: (optional) str
:param version: Catalog version
:type version: (optional) str
"""
self.__discriminator_value = None # type: str
self.id = id
self.version = version
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, ValueCatalog):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
| 29.422414 | 96 | 0.570173 |
import pprint
import re
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
class ValueCatalog(object):
deserialized_types = {
'id': 'str',
'version': 'str'
}
attribute_map = {
'id': 'id',
'version': 'version'
}
supports_multiple_types = False
def __init__(self, id=None, version=None):
self.__discriminator_value = None
self.id = id
self.version = version
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, ValueCatalog):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f7f61882ca348a1ea42673283d7a4047a053d4fe | 2,501 | py | Python | src/wi/urls/user/public_ip.py | cc1-cloud/cc1 | 8113673fa13b6fe195cea99dedab9616aeca3ae8 | [
"Apache-2.0"
] | 11 | 2015-05-06T14:16:54.000Z | 2022-02-08T23:21:31.000Z | src/wi/urls/user/public_ip.py | fortress-shell/cc1 | 8113673fa13b6fe195cea99dedab9616aeca3ae8 | [
"Apache-2.0"
] | 1 | 2015-10-30T21:08:11.000Z | 2015-10-30T21:08:11.000Z | src/wi/urls/user/public_ip.py | fortress-shell/cc1 | 8113673fa13b6fe195cea99dedab9616aeca3ae8 | [
"Apache-2.0"
] | 5 | 2016-02-12T22:01:38.000Z | 2021-12-06T16:56:54.000Z | # -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
"""@package src.wi.urls.user.public_ip
@author Piotr Wójcik
@date 19.11.2010
"""
from django.conf.urls import patterns, url, include
from django.utils.translation import ugettext_lazy as _
from wi.utils.decorators import user_permission
from wi.utils.views import direct_to_template, get_list_generic, simple_generic, simple_generic_id
resources_patterns = patterns('wi.views.user.public_ip',
url(r'^$', user_permission(direct_to_template), {'template_name': 'resources/base.html'}, name='res_resources'),
url(r'^elastic_ip/$', user_permission(direct_to_template), {'template_name': 'resources/elastic_ip.html'}, name='res_elastic_ip'),
url(r'^ajax/ips_table/$', user_permission(get_list_generic), {'request_url': 'user/public_ip/get_list/'}, name='res_ajax_get_ips_table'),
url(r'^ajax/add_ip/$', user_permission(simple_generic),
{'template_name': 'generic/simple.html',
'success_msg': (lambda desc: _('New IP address has been added.') % {'desc': desc}),
'ask_msg': (lambda desc: _('Do you want to add an IP address?') % {'desc': desc}),
'request_url': 'user/public_ip/request/',
},
name='res_ajax_request_ip'),
url(r'^ajax/release_ip/(?P<id1>\d+)/$', user_permission(simple_generic_id),
{'template_name': 'generic/simple.html',
'success_msg': (lambda desc: _('IP address <b>%(desc)s</b> has been released.') % {'desc': desc}),
'ask_msg': (lambda desc: _('Do you want to release IP address <b>%(desc)s</b>?') % {'desc': desc}),
'request_url': 'user/public_ip/release/',
'id_key': 'public_ip_id',
},
name='res_ajax_release_ip'),
)
urlpatterns = patterns('',
url(r'^resources/', include(resources_patterns)),
)
| 43.12069 | 141 | 0.676529 |
from django.conf.urls import patterns, url, include
from django.utils.translation import ugettext_lazy as _
from wi.utils.decorators import user_permission
from wi.utils.views import direct_to_template, get_list_generic, simple_generic, simple_generic_id
resources_patterns = patterns('wi.views.user.public_ip',
url(r'^$', user_permission(direct_to_template), {'template_name': 'resources/base.html'}, name='res_resources'),
url(r'^elastic_ip/$', user_permission(direct_to_template), {'template_name': 'resources/elastic_ip.html'}, name='res_elastic_ip'),
url(r'^ajax/ips_table/$', user_permission(get_list_generic), {'request_url': 'user/public_ip/get_list/'}, name='res_ajax_get_ips_table'),
url(r'^ajax/add_ip/$', user_permission(simple_generic),
{'template_name': 'generic/simple.html',
'success_msg': (lambda desc: _('New IP address has been added.') % {'desc': desc}),
'ask_msg': (lambda desc: _('Do you want to add an IP address?') % {'desc': desc}),
'request_url': 'user/public_ip/request/',
},
name='res_ajax_request_ip'),
url(r'^ajax/release_ip/(?P<id1>\d+)/$', user_permission(simple_generic_id),
{'template_name': 'generic/simple.html',
'success_msg': (lambda desc: _('IP address <b>%(desc)s</b> has been released.') % {'desc': desc}),
'ask_msg': (lambda desc: _('Do you want to release IP address <b>%(desc)s</b>?') % {'desc': desc}),
'request_url': 'user/public_ip/release/',
'id_key': 'public_ip_id',
},
name='res_ajax_release_ip'),
)
urlpatterns = patterns('',
url(r'^resources/', include(resources_patterns)),
)
| true | true |
f7f61928737aa0f7253f968e5d6b948b5441f4c1 | 258 | py | Python | LeetCode/Palindrome Number/solution.py | anirbandey303/Hack-Codes | a75555e439529aa3e37a0fe59b1d4a4644625eec | [
"MIT"
] | 15 | 2021-10-06T07:34:15.000Z | 2022-01-25T06:58:59.000Z | LeetCode/Palindrome Number/solution.py | anirbandey303/Hack-Codes | a75555e439529aa3e37a0fe59b1d4a4644625eec | [
"MIT"
] | 55 | 2021-10-01T19:23:51.000Z | 2021-10-06T04:29:41.000Z | LeetCode/Palindrome Number/solution.py | anirbandey303/Hack-Codes | a75555e439529aa3e37a0fe59b1d4a4644625eec | [
"MIT"
] | 41 | 2021-10-01T19:30:51.000Z | 2021-10-05T19:36:15.000Z | # Author : thepmsquare
# Question Link : https://leetcode.com/problems/palindrome-number/
class Solution:
def isPalindrome(self, x: int) -> bool:
tempList = list(str(x))
tempList.reverse()
return str(x)=="".join(tempList)
| 32.25 | 66 | 0.627907 |
class Solution:
def isPalindrome(self, x: int) -> bool:
tempList = list(str(x))
tempList.reverse()
return str(x)=="".join(tempList)
| true | true |
f7f619864c8f96a2be2e0c7df6e83578cce34cf3 | 2,440 | py | Python | prototype/migrations/0001_initial.py | benthomasson/fsm-designer-svg | a1edbde3ac7e497e3700e38ed4741da75d376e6f | [
"BSD-3-Clause"
] | null | null | null | prototype/migrations/0001_initial.py | benthomasson/fsm-designer-svg | a1edbde3ac7e497e3700e38ed4741da75d376e6f | [
"BSD-3-Clause"
] | 7 | 2020-01-03T14:37:56.000Z | 2021-06-10T20:39:35.000Z | prototype/migrations/0001_initial.py | benthomasson/fsm-designer-svg | a1edbde3ac7e497e3700e38ed4741da75d376e6f | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Client',
fields=[
('client_id', models.AutoField(serialize=False, primary_key=True)),
],
),
migrations.CreateModel(
name='FiniteStateMachine',
fields=[
('finite_state_machine_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='History',
fields=[
('history_id', models.AutoField(serialize=False, primary_key=True)),
('message_id', models.IntegerField()),
('message_data', models.TextField()),
('undone', models.BooleanField(default=False)),
('client', models.ForeignKey(to='prototype.Client')),
('finite_state_machine', models.ForeignKey(to='prototype.FiniteStateMachine')),
],
),
migrations.CreateModel(
name='MessageType',
fields=[
('message_type_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='State',
fields=[
('state_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=200)),
('finite_state_machine', models.ForeignKey(to='prototype.FiniteStateMachine')),
],
),
migrations.CreateModel(
name='Transition',
fields=[
('transition_id', models.AutoField(serialize=False, primary_key=True)),
('label', models.CharField(max_length=200)),
('from_state', models.ForeignKey(related_name='from_transition', to='prototype.State')),
('to_state', models.ForeignKey(related_name='to_transition', to='prototype.State')),
],
),
migrations.AddField(
model_name='history',
name='message_type',
field=models.ForeignKey(to='prototype.MessageType'),
),
]
| 36.41791 | 104 | 0.547951 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Client',
fields=[
('client_id', models.AutoField(serialize=False, primary_key=True)),
],
),
migrations.CreateModel(
name='FiniteStateMachine',
fields=[
('finite_state_machine_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='History',
fields=[
('history_id', models.AutoField(serialize=False, primary_key=True)),
('message_id', models.IntegerField()),
('message_data', models.TextField()),
('undone', models.BooleanField(default=False)),
('client', models.ForeignKey(to='prototype.Client')),
('finite_state_machine', models.ForeignKey(to='prototype.FiniteStateMachine')),
],
),
migrations.CreateModel(
name='MessageType',
fields=[
('message_type_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='State',
fields=[
('state_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=200)),
('finite_state_machine', models.ForeignKey(to='prototype.FiniteStateMachine')),
],
),
migrations.CreateModel(
name='Transition',
fields=[
('transition_id', models.AutoField(serialize=False, primary_key=True)),
('label', models.CharField(max_length=200)),
('from_state', models.ForeignKey(related_name='from_transition', to='prototype.State')),
('to_state', models.ForeignKey(related_name='to_transition', to='prototype.State')),
],
),
migrations.AddField(
model_name='history',
name='message_type',
field=models.ForeignKey(to='prototype.MessageType'),
),
]
| true | true |
f7f619b71ac85a55e2e7dbdda5f259471ca00f73 | 6,500 | py | Python | tests/test_train_utils.py | lzamparo/SeqDemote | 3eaf18e88c9dc6a3d1a69444ecdba9f9b5d9682a | [
"MIT"
] | 1 | 2019-04-16T12:25:09.000Z | 2019-04-16T12:25:09.000Z | tests/test_train_utils.py | lzamparo/SeqDemote | 3eaf18e88c9dc6a3d1a69444ecdba9f9b5d9682a | [
"MIT"
] | null | null | null | tests/test_train_utils.py | lzamparo/SeqDemote | 3eaf18e88c9dc6a3d1a69444ecdba9f9b5d9682a | [
"MIT"
] | null | null | null | import os
import numpy as np
from nose.tools import eq_, ok_
import torch
import utils.train_utils as tr_utils
import utils.torch_model_construction_utils as tmu
def flip_random(data, num_labels):
    """Flip ``num_labels`` randomly chosen entries of a 0/1 label column in place.

    Positions are drawn with replacement, so a position drawn twice ends up
    back at its original value.  Returns the (mutated) input column.
    """
    column_length = data.shape[0]
    for position in np.random.randint(0, column_length, num_labels):
        data[position] = 1 if data[position] == 0 else 0
    return data
def make_fuzzy_predictions(preds, eps = 0.025, shape=0.1):
''' Add noise to 0-1 array to simulate predictions '''
zeros = preds[preds == 0]
ones = preds[preds == 1]
zero_noise = np.random.gamma(eps, shape, size=zeros.shape)
ones_noise = -1.0 * np.random.gamma(eps, shape, size=ones.shape)
noisy_zeros = zeros + zero_noise
noisy_ones = ones + ones_noise
preds[preds == 0] = noisy_zeros
preds[preds == 1] = noisy_ones
assert(np.alltrue(preds > 0))
assert(np.alltrue(preds <= 1))
return preds
def make_classification_labels_and_preds(shape=(128, 164), p=0.1,
                                         flips=10, noisy=False,
                                         eps=0.025, g_shape=0.1,
                                         as_list=False):
    """Build (labels, predictions) fixtures for mt_aupr / mt_auroc tests.

    :param shape: (examples, tasks) shape of the label matrix.
    :param p: probability of a positive (1) label per entry.
    :param flips: number of random label flips applied per task column, so
        predictions disagree with labels at roughly that many positions.
    :param noisy: if True, perturb the 0/1 predictions into soft scores.
    :param eps: gamma shape parameter for the prediction noise.
    :param g_shape: gamma scale parameter for the prediction noise.
    :param as_list: if True, return predictions as a list of per-task
        one-dimensional arrays instead of one 2-D array.
    :returns: tuple (labels, y_hat).
    """
    labels = np.random.binomial(1, p, size=shape)
    # np.float was removed in NumPy 1.24; builtin float (== float64) is the
    # supported replacement.
    preds = np.array(labels.copy(), dtype=float)
    # flip_random mutates each column view in place; no rebinding needed.
    for col in preds.transpose():
        flip_random(col, flips)
    if noisy:
        preds = make_fuzzy_predictions(preds, eps, g_shape)
    if as_list:
        return labels, [preds[:, i] for i in range(preds.shape[1])]
    return labels, preds
def make_presigmoid_activations(preds, confident=True, to_tensor=False):
''' fixture generator for pre-sigmoid activations from
network output. Makes more confident predictions or less
confident predictions'''
extended_activations = np.zeros_like(preds)
if confident:
noise = np.random.gamma(5, 1, size=extended_activations.shape)
else:
noise = np.random.gamma(1,0.5, size=extended_activations.shape)
# want to iterate elementwise here, maybe flatten / it / reshape?
for e, p, n in zip(np.nditer(extended_activations, op_flags=[['readwrite']]), preds.flat, noise.flat):
if p > 0.5:
e += n
else:
e -= n
if to_tensor:
return torch.tensor(extended_activations)
return extended_activations
def test_focal_loss():
''' make sure focal loss increases for uncertain predictions '''
### If I need to compare the weights, losses pre-activations for each fixture
### across all tasks, set reduce=False
labels, preds = make_classification_labels_and_preds(shape=(4,4), flips=1)
focal_loss = tmu.FocalLoss(reduce=True)
# generate certain predictions, calculate focal loss
certain_activations = make_presigmoid_activations(preds, confident=True, to_tensor=True)
certain_loss = tr_utils.per_task_loss(certain_activations, torch.tensor(labels, dtype=torch.double), focal_loss, do_sum=False)
# generate less-certain predictions, calculate focal loss
uncertain_activations = make_presigmoid_activations(preds, confident=False, to_tensor=True)
uncertain_loss = tr_utils.per_task_loss(uncertain_activations, torch.tensor(labels, dtype=torch.double), focal_loss, do_sum=False)
# Q: should less-certain losses have much greater loss?
# A: depends on the level of certainty (i.e p_t) and gamma.
ok_(sum(uncertain_loss) < sum(certain_loss))
def test_st_accuracy():
''' make sure ST accuracy works '''
test_labels, test_preds = make_classification_labels_and_preds()
test_labels = test_labels[:,0]
test_preds = test_preds[:,0]
test_accuracy = tr_utils.st_accuracy(test_labels, test_preds)
ok_(0.5 <= test_accuracy <= 1.0)
def test_mt_accuracy():
''' make sure MT accuracy works '''
test_labels, test_preds = make_classification_labels_and_preds()
test_accuracy = tr_utils.mt_accuracy(test_labels, test_preds)
ok_(0.5 <= test_accuracy <= 1.0)
def test_mt_precision():
''' make sure MT precision works '''
test_labels, test_preds = make_classification_labels_and_preds()
test_precision = tr_utils.mt_avg_precision(test_labels, test_preds)
ok_(0.0 <= test_precision <= 1.0)
def test_noisy_mt_precision():
''' make sure MT precision works '''
test_labels, test_preds = make_classification_labels_and_preds(noisy=True)
test_precision = tr_utils.mt_avg_precision(test_labels, test_preds)
ok_(0.0 <= test_precision <= 1.0)
def test_mt_precision_at_recall():
''' make sure MT precision at recall works '''
test_labels, test_preds = make_classification_labels_and_preds(noisy=True)
test_precision = tr_utils.mt_precision_at_recall(test_labels, test_preds)
ok_(0.0 <= test_precision <= 1.0)
def test_mt_precision_at_recall_list():
''' make sure MT precision at recall works '''
test_labels, test_preds = make_classification_labels_and_preds(noisy=True, as_list=True)
test_precision = tr_utils.mt_precision_at_recall(test_labels, test_preds)
ok_(0.0 <= test_precision <= 1.0)
def test_mt_f1():
''' make sure MT f1 works '''
test_labels, test_preds = make_classification_labels_and_preds(noisy=True)
test_f1 = tr_utils.mt_avg_f1_score(test_labels, test_preds)
ok_(0.0 <= test_f1 <= 1.0)
def test_mt_f1_list():
''' make sure MT f1 with predictions as list works '''
test_labels, test_preds = make_classification_labels_and_preds(noisy=True, as_list=True)
test_f1 = tr_utils.mt_avg_f1_score(test_labels, test_preds)
ok_(0.0 <= test_f1 <= 1.0)
def test_mt_mcc():
''' make sure MT MCC works '''
test_labels, test_preds = make_classification_labels_and_preds(noisy=True)
test_mcc = tr_utils.mt_avg_mcc(test_labels, test_preds)
ok_(-1.0 <= test_mcc <= 1.0)
def test_mt_mcc_list():
''' make sure MT MCC works '''
test_labels, test_preds = make_classification_labels_and_preds(noisy=True, as_list=True)
test_mcc = tr_utils.mt_avg_mcc(test_labels, test_preds)
ok_(-1.0 <= test_mcc <= 1.0)
| 37.572254 | 134 | 0.680615 | import os
import numpy as np
from nose.tools import eq_, ok_
import torch
import utils.train_utils as tr_utils
import utils.torch_model_construction_utils as tmu
def flip_random(data, num_labels):
length = data.shape[0]
flip_positions = np.random.randint(0,length,num_labels)
for position in flip_positions:
if data[position] == 0:
data[position] = 1
else:
data[position] = 0
return data
def make_fuzzy_predictions(preds, eps = 0.025, shape=0.1):
zeros = preds[preds == 0]
ones = preds[preds == 1]
zero_noise = np.random.gamma(eps, shape, size=zeros.shape)
ones_noise = -1.0 * np.random.gamma(eps, shape, size=ones.shape)
noisy_zeros = zeros + zero_noise
noisy_ones = ones + ones_noise
preds[preds == 0] = noisy_zeros
preds[preds == 1] = noisy_ones
assert(np.alltrue(preds > 0))
assert(np.alltrue(preds <= 1))
return preds
def make_classification_labels_and_preds(shape=(128,164), p=0.1,
flips=10, noisy=False,
eps=0.025, g_shape=0.1,
as_list=False):
labels = np.random.binomial(1,p,size=shape)
preds = np.array(labels.copy(), dtype=np.float)
for col in preds.transpose():
col = flip_random(col, flips)
if noisy:
preds = make_fuzzy_predictions(preds, eps, g_shape)
if as_list:
preds_list = [preds[:,i] for i in range(preds.shape[1])]
return labels, preds_list
return labels, preds
def make_presigmoid_activations(preds, confident=True, to_tensor=False):
extended_activations = np.zeros_like(preds)
if confident:
noise = np.random.gamma(5, 1, size=extended_activations.shape)
else:
noise = np.random.gamma(1,0.5, size=extended_activations.shape)
for e, p, n in zip(np.nditer(extended_activations, op_flags=[['readwrite']]), preds.flat, noise.flat):
if p > 0.5:
e += n
else:
e -= n
if to_tensor:
return torch.tensor(extended_activations)
return extended_activations
def test_focal_loss():
o_tensor=True)
certain_loss = tr_utils.per_task_loss(certain_activations, torch.tensor(labels, dtype=torch.double), focal_loss, do_sum=False)
uncertain_activations = make_presigmoid_activations(preds, confident=False, to_tensor=True)
uncertain_loss = tr_utils.per_task_loss(uncertain_activations, torch.tensor(labels, dtype=torch.double), focal_loss, do_sum=False)
ok_(sum(uncertain_loss) < sum(certain_loss))
def test_st_accuracy():
test_labels, test_preds = make_classification_labels_and_preds()
test_labels = test_labels[:,0]
test_preds = test_preds[:,0]
test_accuracy = tr_utils.st_accuracy(test_labels, test_preds)
ok_(0.5 <= test_accuracy <= 1.0)
def test_mt_accuracy():
test_labels, test_preds = make_classification_labels_and_preds()
test_accuracy = tr_utils.mt_accuracy(test_labels, test_preds)
ok_(0.5 <= test_accuracy <= 1.0)
def test_mt_precision():
test_labels, test_preds = make_classification_labels_and_preds()
test_precision = tr_utils.mt_avg_precision(test_labels, test_preds)
ok_(0.0 <= test_precision <= 1.0)
def test_noisy_mt_precision():
test_labels, test_preds = make_classification_labels_and_preds(noisy=True)
test_precision = tr_utils.mt_avg_precision(test_labels, test_preds)
ok_(0.0 <= test_precision <= 1.0)
def test_mt_precision_at_recall():
test_labels, test_preds = make_classification_labels_and_preds(noisy=True)
test_precision = tr_utils.mt_precision_at_recall(test_labels, test_preds)
ok_(0.0 <= test_precision <= 1.0)
def test_mt_precision_at_recall_list():
test_labels, test_preds = make_classification_labels_and_preds(noisy=True, as_list=True)
test_precision = tr_utils.mt_precision_at_recall(test_labels, test_preds)
ok_(0.0 <= test_precision <= 1.0)
def test_mt_f1():
test_labels, test_preds = make_classification_labels_and_preds(noisy=True)
test_f1 = tr_utils.mt_avg_f1_score(test_labels, test_preds)
ok_(0.0 <= test_f1 <= 1.0)
def test_mt_f1_list():
test_labels, test_preds = make_classification_labels_and_preds(noisy=True, as_list=True)
test_f1 = tr_utils.mt_avg_f1_score(test_labels, test_preds)
ok_(0.0 <= test_f1 <= 1.0)
def test_mt_mcc():
test_labels, test_preds = make_classification_labels_and_preds(noisy=True)
test_mcc = tr_utils.mt_avg_mcc(test_labels, test_preds)
ok_(-1.0 <= test_mcc <= 1.0)
def test_mt_mcc_list():
test_labels, test_preds = make_classification_labels_and_preds(noisy=True, as_list=True)
test_mcc = tr_utils.mt_avg_mcc(test_labels, test_preds)
ok_(-1.0 <= test_mcc <= 1.0)
| true | true |
f7f61ac219d2200b7e65d0eae4084e04ebd39cfb | 24 | py | Python | code/abc011_1_01.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | 3 | 2019-08-16T16:55:48.000Z | 2021-04-11T10:21:40.000Z | code/abc011_1_01.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | null | null | null | code/abc011_1_01.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | null | null | null | print(int(input())%12+1) | 24 | 24 | 0.666667 | print(int(input())%12+1) | true | true |
f7f61af76a6a9b609a1164d84c74f64ac50a6624 | 2,502 | py | Python | src/experiment/model_management.py | jp2011/spatial-poisson-mixtures | 9e535a636e710a9fa146cbbd4613ece70ec90791 | [
"MIT"
] | 3 | 2020-06-18T10:57:47.000Z | 2022-03-07T12:13:04.000Z | src/experiment/model_management.py | jp2011/spatial-poisson-mixtures | 9e535a636e710a9fa146cbbd4613ece70ec90791 | [
"MIT"
] | null | null | null | src/experiment/model_management.py | jp2011/spatial-poisson-mixtures | 9e535a636e710a9fa146cbbd4613ece70ec90791 | [
"MIT"
] | null | null | null | def build_lgcp_uid(*, prefix='LGCP-MATERN',
chain_no=None,
c_type='burglary',
t_period='12015-122015',
cov_interpolation='weighted',
model_spec=None,
resolution=400):
"""Create a unique identifier for a model with context specified through the parameters"""
if chain_no:
return f"{prefix}-CHAIN-{chain_no}--{c_type}--{model_spec}--{t_period}--{cov_interpolation}--{resolution}"
else:
return f"{prefix}--{c_type}--{model_spec}--{t_period}--{cov_interpolation}--{resolution}"
def build_block_mixture_flat_uid(*, prefix='BLOCK-MIXTURE-FLAT',
chain_no=None,
block_scheme='msoa',
c_type='burglary',
t_period='12015-122015',
model_spec=None,
cov_interpolation='weighted',
resolution=400,
K=None):
"""Create a unique identifier for a model with context specified through the parameters"""
if chain_no:
return f"{prefix}-CHAIN-{chain_no}--{block_scheme}--{c_type}--{t_period}--{model_spec}--{cov_interpolation}--{resolution}--{K}"
else:
return f"{prefix}--{block_scheme}--{c_type}--{t_period}--{model_spec}--{cov_interpolation}--{resolution}--{K}"
def build_block_mixture_gp_softmax_uid(*, prefix='BLOCK-MIXTURE-GP',
chain_no=None,
block_scheme='msoa',
c_type='burglary',
t_period='12015-122015',
model_spec=None,
resolution=400,
lengthscale=None,
K=None):
"""Create a unique identifier for a model with context specified through the parameters"""
if chain_no:
return f"{prefix}-CHAIN-{chain_no}--{block_scheme}--{c_type}--{t_period}--{model_spec}--weighted--{resolution}--{t_period}--{K}--{lengthscale}--0_0"
else:
return f"{prefix}--{block_scheme}--{c_type}--{t_period}--{model_spec}--weighted--{resolution}--{t_period}--{K}--{lengthscale}--0_0"
| 55.6 | 156 | 0.489608 | def build_lgcp_uid(*, prefix='LGCP-MATERN',
chain_no=None,
c_type='burglary',
t_period='12015-122015',
cov_interpolation='weighted',
model_spec=None,
resolution=400):
if chain_no:
return f"{prefix}-CHAIN-{chain_no}--{c_type}--{model_spec}--{t_period}--{cov_interpolation}--{resolution}"
else:
return f"{prefix}--{c_type}--{model_spec}--{t_period}--{cov_interpolation}--{resolution}"
def build_block_mixture_flat_uid(*, prefix='BLOCK-MIXTURE-FLAT',
chain_no=None,
block_scheme='msoa',
c_type='burglary',
t_period='12015-122015',
model_spec=None,
cov_interpolation='weighted',
resolution=400,
K=None):
if chain_no:
return f"{prefix}-CHAIN-{chain_no}--{block_scheme}--{c_type}--{t_period}--{model_spec}--{cov_interpolation}--{resolution}--{K}"
else:
return f"{prefix}--{block_scheme}--{c_type}--{t_period}--{model_spec}--{cov_interpolation}--{resolution}--{K}"
def build_block_mixture_gp_softmax_uid(*, prefix='BLOCK-MIXTURE-GP',
chain_no=None,
block_scheme='msoa',
c_type='burglary',
t_period='12015-122015',
model_spec=None,
resolution=400,
lengthscale=None,
K=None):
if chain_no:
return f"{prefix}-CHAIN-{chain_no}--{block_scheme}--{c_type}--{t_period}--{model_spec}--weighted--{resolution}--{t_period}--{K}--{lengthscale}--0_0"
else:
return f"{prefix}--{block_scheme}--{c_type}--{t_period}--{model_spec}--weighted--{resolution}--{t_period}--{K}--{lengthscale}--0_0"
| true | true |
f7f61bdf68268541e60328e74350d25de8671d0f | 3,230 | py | Python | example_new_surface.py | gerph/cairo-vnc | 505d12e4404954d59c879a34c5f2c427aa4fe354 | [
"MIT"
] | 3 | 2020-12-29T20:53:03.000Z | 2020-12-30T19:50:49.000Z | example_new_surface.py | gerph/cairo-vnc | 505d12e4404954d59c879a34c5f2c427aa4fe354 | [
"MIT"
] | null | null | null | example_new_surface.py | gerph/cairo-vnc | 505d12e4404954d59c879a34c5f2c427aa4fe354 | [
"MIT"
] | null | null | null | """
Demonstrate that we can change the surface, and its size, as we go.
Run a simple animation in the Cairo surface, on a thread.
Then run a server on localhost:5902 / localhost:2 which should display the animation.
Every 20 frames, the surface is recreated at a different size.
Each time it is recreated, we must notify the VNC server of the
new surface object.
"""
import math
import threading
import time
import cairo
import cairovnc
class Screen(object):
surface_change_func = None
def __init__(self):
self.width = 200
self.height = 200
self.seq = 0
self.setup_surface()
def setup_surface(self):
self.surface = cairo.ImageSurface(cairo.Format.ARGB32, self.width, self.height)
self.context = cairo.Context(self.surface)
if self.surface_change_func:
self.surface_change_func(self.surface)
def draw(self):
self.context.set_source_rgb(0.5, 0.5, 0.5)
self.context.rectangle(0, 0, self.width, self.height)
self.context.fill()
self.context.set_source_rgb(1, 1, 1)
delta = math.cos(self.seq * math.pi / 10)
x, y, x1, y1 = 0.1, 0.5, 0.4, 0.5 + delta * 0.4
x2, y2, x3, y3 = 0.6, 0.1, 0.9, 0.5
self.context.save()
self.context.scale(self.width, self.height)
# Bezier curve
self.context.set_line_width(0.04)
self.context.move_to(x, y)
self.context.curve_to(x1, y1, x2, y2, x3, y3)
self.context.stroke()
# Control points
self.context.set_source_rgba(1, 0.2, 0.2, 0.6)
self.context.set_line_width(0.02)
self.context.move_to(x, y)
self.context.line_to(x1, y1)
self.context.move_to(x2, y2)
self.context.line_to(x3, y3)
self.context.stroke()
# Red square
self.context.set_source_rgb(1, 0, 0)
self.context.rectangle(0.1, 0 + delta * 0.05, 0.1, 0.1)
self.context.fill()
# Green square
self.context.set_source_rgb(0, 1, 0)
self.context.rectangle(0.3, 0, 0.1, 0.1)
self.context.fill()
# Blue square
self.context.set_source_rgb(0, 0, 1)
self.context.rectangle(0.5, 0, 0.1, 0.1)
self.context.fill()
self.context.restore()
#self.surface.write_to_png('image.png')
def animate(self):
while True:
self.seq += 1
time.sleep(0.1)
# Once every 20 calls we resize the surface
if self.seq % 20 == 0:
if int(self.seq / 20) % 2:
self.width = self.width + 20
else:
self.width = self.width - 20
self.setup_surface()
self.draw()
screen = Screen()
animate_thread = threading.Thread(target=screen.animate)
animate_thread.daemon = True
animate_thread.start()
if __name__ == "__main__":
# Create the server with options
options = cairovnc.CairoVNCOptions(port=5902)
server = cairovnc.CairoVNCServer(surface=screen.surface, options=options)
# We require a change function to update the clients with the new size
screen.surface_change_func = server.change_surface
server.serve_forever()
| 28.584071 | 87 | 0.614241 |
import math
import threading
import time
import cairo
import cairovnc
class Screen(object):
surface_change_func = None
def __init__(self):
self.width = 200
self.height = 200
self.seq = 0
self.setup_surface()
def setup_surface(self):
self.surface = cairo.ImageSurface(cairo.Format.ARGB32, self.width, self.height)
self.context = cairo.Context(self.surface)
if self.surface_change_func:
self.surface_change_func(self.surface)
def draw(self):
self.context.set_source_rgb(0.5, 0.5, 0.5)
self.context.rectangle(0, 0, self.width, self.height)
self.context.fill()
self.context.set_source_rgb(1, 1, 1)
delta = math.cos(self.seq * math.pi / 10)
x, y, x1, y1 = 0.1, 0.5, 0.4, 0.5 + delta * 0.4
x2, y2, x3, y3 = 0.6, 0.1, 0.9, 0.5
self.context.save()
self.context.scale(self.width, self.height)
self.context.set_line_width(0.04)
self.context.move_to(x, y)
self.context.curve_to(x1, y1, x2, y2, x3, y3)
self.context.stroke()
self.context.set_source_rgba(1, 0.2, 0.2, 0.6)
self.context.set_line_width(0.02)
self.context.move_to(x, y)
self.context.line_to(x1, y1)
self.context.move_to(x2, y2)
self.context.line_to(x3, y3)
self.context.stroke()
self.context.set_source_rgb(1, 0, 0)
self.context.rectangle(0.1, 0 + delta * 0.05, 0.1, 0.1)
self.context.fill()
self.context.set_source_rgb(0, 1, 0)
self.context.rectangle(0.3, 0, 0.1, 0.1)
self.context.fill()
self.context.set_source_rgb(0, 0, 1)
self.context.rectangle(0.5, 0, 0.1, 0.1)
self.context.fill()
self.context.restore()
def animate(self):
while True:
self.seq += 1
time.sleep(0.1)
if self.seq % 20 == 0:
if int(self.seq / 20) % 2:
self.width = self.width + 20
else:
self.width = self.width - 20
self.setup_surface()
self.draw()
screen = Screen()
animate_thread = threading.Thread(target=screen.animate)
animate_thread.daemon = True
animate_thread.start()
if __name__ == "__main__":
options = cairovnc.CairoVNCOptions(port=5902)
server = cairovnc.CairoVNCServer(surface=screen.surface, options=options)
screen.surface_change_func = server.change_surface
server.serve_forever()
| true | true |
f7f61be8f8402c2241c0bd4880a84e01f0b1528c | 27,622 | py | Python | main_dasalc.py | albpurpura/PLTR | e21d3eb24fb0d269abd68ba23677501c30bb08eb | [
"CC0-1.0"
] | 1 | 2022-02-16T02:00:17.000Z | 2022-02-16T02:00:17.000Z | main_dasalc.py | albpurpura/PLTR | e21d3eb24fb0d269abd68ba23677501c30bb08eb | [
"CC0-1.0"
] | null | null | null | main_dasalc.py | albpurpura/PLTR | e21d3eb24fb0d269abd68ba23677501c30bb08eb | [
"CC0-1.0"
] | null | null | null | import argparse
import logging
import os
import time
import uuid
import numpy as np
import pyltr
import tensorflow as tf
from evaluation import compute_mean_ndcg, compute_perf_metrics, create_trec_eval_format_run_qrels
from lambdamart import compute_lambdamart_preds
# from model import ReRanker
from dasalc_model import ReRanker
from simulate_unsupervised_rj import compare_artif_rj_with_real_ones, sample_labels
from utils import load_model, pad_list, save_model
# TF-1.x global flag container. NOTE(review): the flag *definitions* here appear
# to come from the argparse-style add_arguments() below — confirm how FLAGS is
# populated in the entry point (not shown in this chunk).
flags = tf.app.flags
FLAGS = flags.FLAGS
def add_arguments(parser):
    """Register all command-line arguments on *parser* (mutated in place).

    Boolean flags use the custom "bool" type registered below so that
    ``--flag False`` actually parses to ``False``.  With the previous
    ``type=str`` / ``type=bool`` declarations any non-empty string value,
    including "False", was truthy.

    Args:
        parser: an ``argparse.ArgumentParser`` instance.
    """
    # Custom converter: only the (case-insensitive) string "true" maps to True.
    parser.register("type", "bool", lambda v: v.lower() == "true")
    parser.add_argument("--coll_name", type=str, default='MQ2007', help="Collection name")
    parser.add_argument("--data_folder", type=str, default='../LETOR_data/MQ2007/', help="Data folder.")
    # Boolean switches (fixed: formerly declared with type=str, which made any
    # non-empty value truthy; defaults are unchanged).
    parser.add_argument("--simulate_labels", type="bool", default=False,
                        help="Whether to train with simulated labels or not.")
    parser.add_argument("--expand_training_data", type="bool", default=False,
                        help="Whether to expand training data or not.")
    parser.add_argument("--det_model", type="bool", default=True, help="Whether to use probabilistic layers or not.")
    parser.add_argument("--rerank_lambdamart", type="bool", default=False,
                        help="Whether to rerank lambdamart preds or from scratch.")
    parser.add_argument("--lambdamart_preds_path", type=str, default='../LETOR_data/MQ2007/lambdamart_runs',
                        help="LM data folder.")
    # model parameters
    parser.add_argument("--seed", type=float, default=0, help="The random seed to use.")
    parser.add_argument("--n_binomial_samples", type=float, default=32,
                        help="The number of binomial samples to simulate.")
    # Supported losses: Hinge, KL_B, KL_G, KL_G_H, KL_B_H, ApproxNDCG,
    # ApproxNDCG_G, MSE.
    parser.add_argument("--loss", type=str, default='ApproxNDCG_G', help="The loss to use to train the model.")
    # Fixed: formerly type=bool, for which argparse coerces every non-empty
    # string (even "False") to True.
    parser.add_argument("--norm_labels", type="bool", default=False,
                        help="Whether to normalize within [0,1] the relevance labels.")
    parser.add_argument("--num_features", type=int, default=46, help="Number of features per document.")
    parser.add_argument("--num_epochs", type=int, default=100, help="The number of epochs for training.")
    parser.add_argument("--n_heads", type=int, default=2, help="Num heads.")
    # batch_size / list_size_test defaults are tuned for MQ2007
    # (the original comments suggest 2 / 120 for MQ2008).
    parser.add_argument("--batch_size", type=int, default=4, help="The batch size for training.")
    parser.add_argument("--list_size_test", type=int, default=147, help="List size.")
    parser.add_argument("--learning_rate", type=float, default=1e-3, help="Learning rate for optimizer.")
    parser.add_argument("--model_ckpt_path", type=str, default='./output/chkpts/',
                        help="Output path for checkpoint saving.")
def remove_queries_without_rel_docs(rj, docs, rl_lengths, dids):
    """Drop every ranking list whose relevance judgments are all zero.

    All four parallel lists are filtered with the same keep-mask so they stay
    aligned.  (Previous version tested ``i not in indices_to_remove`` against a
    list for every element, which is O(n*m); this is a single linear pass.)

    Args:
        rj: list of per-query relevance-label lists (non-negative values).
        docs: list of per-query document feature lists, parallel to ``rj``.
        rl_lengths: true (unpadded) length of each ranking list.
        dids: list of per-query document-id lists.

    Returns:
        The filtered ``(rj, docs, rl_lengths, dids)`` tuple, original order
        preserved.
    """
    keep = [i for i, labels in enumerate(rj) if max(labels) != 0]
    rj = [rj[i] for i in keep]
    docs = [docs[i] for i in keep]
    rl_lengths = [rl_lengths[i] for i in keep]
    dids = [dids[i] for i in keep]
    return rj, docs, rl_lengths, dids
def group_docs_with_lambdamart_preds(preds, qids, docs, labels, max_list_size):
    """Group per-document data by query and rank each group by LambdaMART score.

    Each query's documents are sorted by descending LambdaMART prediction, then
    padded with zero feature vectors / 0.0 labels (or trimmed) to exactly
    ``max_list_size`` entries.

    Args:
        preds: LambdaMART score per document.
        qids: query id per document (parallel to ``preds``).
        docs: feature vector per document.
        labels: relevance label per document.
        max_list_size: fixed output length for every ranking list.

    Returns:
        ``(grouped_docs, grouped_labels, rl_lengths)`` where ``rl_lengths`` is
        the pre-padding list length capped at ``max_list_size``.
    """
    # Bucket (score, feature_vector, label) triples by query id; dict insertion
    # order preserves the order in which queries are first encountered.
    grouped = {}
    for i in range(len(qids)):
        if qids[i] in grouped.keys():
            grouped[qids[i]].append((preds[i], docs[i], labels[i]))
        else:
            grouped[qids[i]] = [(preds[i], docs[i], labels[i])]
    grouped_docs = []
    grouped_labels = []
    rl_lengths = []
    for group in grouped.values():
        # NOTE(review): np.array over (score, vector, label) triples presumably
        # yields an object array; column 0 holds the scores — confirm the
        # feature vectors survive the round-trip unchanged.
        g = np.array(group)
        lmp = g[:, 0]
        # Descending score order. np.argsort is not guaranteed stable, so
        # tie-breaking among equal scores is arbitrary.
        indices = np.argsort(-lmp)
        ranked_list = list(g[:, 1][indices])
        ranked_labels = list(g[:, 2][indices])
        # Pad short lists with all-zero feature vectors and zero labels...
        while len(ranked_list) < max_list_size:
            ranked_list.append(np.zeros(FLAGS.num_features))
            ranked_labels.append(0.0)
        # ...then trim long lists to the fixed size.
        ranked_list = ranked_list[:max_list_size]
        ranked_labels = ranked_labels[:max_list_size]
        grouped_docs.append(ranked_list)
        grouped_labels.append(ranked_labels)
        rl_lengths.append(min(max_list_size, len(lmp)))
    return grouped_docs, grouped_labels, rl_lengths
def read_data(data_folder, fold_f):
    """Load one LETOR cross-validation fold and group it into ranking lists.

    Reads the train/vali/test splits from ``<data_folder>/<fold_f>/``,
    normalizes relevance labels to [0, 1] by the max *training* label, groups
    documents into fixed-length ranking lists (FLAGS.list_size_test), drops
    training queries that have no relevant document, and — depending on
    FLAGS.simulate_labels / FLAGS.expand_training_data — replaces training
    labels with sampled ones and/or bootstrap-augments the training lists.

    Side effect: when FLAGS.expand_training_data is False this sets
    FLAGS.list_size_train = FLAGS.list_size_test.

    Returns:
        An 18-tuple: (docs, labels, lengths, dids) for train; (docs, labels,
        lengths, dids, qids) for val and for test; and the ungrouped
        (lab_val, lab_test, qids_val, qids_test).
    """
    # Disk caching of the preprocessed fold is currently disabled (see the
    # commented save_model/load_model calls below).
    # data_fpath = './data_proc/{}_{}_listSize={}_rerank_lambdamart={}.hkl'.format(FLAGS.coll_name, fold_f,
    #                                                                             FLAGS.list_size_test,
    #                                                                             FLAGS.rerank_lambdamart)
    # if not os.path.isfile(data_fpath) or not FLAGS.load_proc_data:
    training_file_path = os.path.join(os.path.join(data_folder, fold_f), 'train.txt')
    valid_file_path = os.path.join(os.path.join(data_folder, fold_f), 'vali.txt')
    test_file_path = os.path.join(os.path.join(data_folder, fold_f), 'test.txt')
    docs_train, lab_train, qids_train, _ = pyltr.data.letor.read_dataset(open(training_file_path))
    docs_val, lab_val, qids_val, _ = pyltr.data.letor.read_dataset(open(valid_file_path))
    docs_test, lab_test, qids_test, _ = pyltr.data.letor.read_dataset(open(test_file_path))
    # The LETOR dumps carry no document ids, so synthetic ones are generated.
    dids_train = ['fake_did_{}'.format(i) for i in range(len(docs_train))]
    dids_test = ['fake_did_{}'.format(i) for i in range(len(docs_test))]
    dids_val = ['fake_did_{}'.format(i) for i in range(len(docs_val))]
    # Normalize all three splits by the maximum training label.
    max_l = np.max(lab_train)
    print('max label: {}'.format(max_l))
    lab_train = np.array(lab_train) / max_l
    lab_val = np.array(lab_val) / max_l
    lab_test = np.array(lab_test) / max_l
    assert 0 <= max(lab_test) <= 1
    assert 0 <= max(lab_train) <= 1
    assert 0 <= max(lab_val) <= 1
    # without lambdamart
    ranking_lists_train, all_labels_train, rl_lengths_train, resp_qids_train, resp_dids_train = \
        group_data_in_ranking_lists(docs_train, lab_train, qids_train, dids_train, FLAGS.list_size_test)
    ranking_lists_val, all_labels_val, rl_lengths_val, resp_qids_val, resp_dids_val = \
        group_data_in_ranking_lists(docs_val, lab_val, qids_val, dids_val, FLAGS.list_size_test)
    ranking_lists_test, all_labels_test, rl_lengths_test, resp_qids_test, resp_dids_test = \
        group_data_in_ranking_lists(docs_test, lab_test, qids_test, dids_test, FLAGS.list_size_test)
    # if FLAGS.load_proc_data:
    #     print('dumping data')
    #     save_model(((ranking_lists_train, all_labels_train, rl_lengths_train, resp_dids_train),
    #                 (ranking_lists_test, all_labels_test, rl_lengths_test, resp_dids_test, resp_qids_test),
    #                 (ranking_lists_val, all_labels_val, rl_lengths_val, resp_dids_val, resp_qids_val),
    #                 (np.array(lab_val, dtype=np.float32), np.array(lab_test, dtype=np.float32),
    #                  np.array(qids_val, dtype=np.float32), np.array(qids_test, dtype=np.float32))),
    #                data_fpath)
    # else:
    #     print('loading data')
    #     (ranking_lists_train, all_labels_train, rl_lengths_train, resp_dids_train), \
    #     (ranking_lists_test, all_labels_test, rl_lengths_test, resp_dids_test, resp_qids_test), \
    #     (ranking_lists_val, all_labels_val, rl_lengths_val, resp_dids_val, resp_qids_val), \
    #     (lab_val, lab_test, qids_val, qids_test) = load_model(data_fpath)
    # Training lists with no relevant document carry no ranking signal.
    all_labels_train, ranking_lists_train, rl_lengths_train, resp_dids_train = remove_queries_without_rel_docs(
        all_labels_train, ranking_lists_train, rl_lengths_train, resp_dids_train)
    if FLAGS.simulate_labels:
        # Replace the real training labels with binomially sampled ones and
        # re-filter, since sampling can zero out a whole list.
        # artif_labels = compute_simulated_labels(ranking_lists_train, rl_lengths_train, all_labels_train)
        artif_labels = sample_labels(all_labels_train, rl_lengths_train, FLAGS.n_binomial_samples)
        compare_artif_rj_with_real_ones(artif_labels, all_labels_train, rl_lengths_train)
        all_labels_train = artif_labels
        all_labels_train, ranking_lists_train, rl_lengths_train, resp_dids_train = remove_queries_without_rel_docs(
            all_labels_train, ranking_lists_train, rl_lengths_train, resp_dids_train)
    # avg_n_rel_docs = np.mean([np.sum([1 for rj in rl if rj > 0]) for rl in all_labels_train])
    # print('avg number of relevant documents per ranked list in training data: {}'.format(avg_n_rel_docs))
    if FLAGS.expand_training_data:
        # NOTE(review): resp_dids_train is NOT regenerated after augmentation,
        # so it no longer aligns with the resampled lists — verify it is unused
        # downstream in this configuration.
        ranking_lists_train, all_labels_train, rl_lengths_train = augment_training_data(ranking_lists_train,
                                                                                        all_labels_train,
                                                                                        rl_lengths_train)
        all_labels_train, ranking_lists_train, rl_lengths_train, resp_dids_train = remove_queries_without_rel_docs(
            all_labels_train, ranking_lists_train, rl_lengths_train, resp_dids_train)
    else:
        FLAGS.list_size_train = FLAGS.list_size_test
    return ranking_lists_train, all_labels_train, rl_lengths_train, resp_dids_train, \
           ranking_lists_val, all_labels_val, rl_lengths_val, resp_dids_val, resp_qids_val, \
           ranking_lists_test, all_labels_test, rl_lengths_test, resp_dids_test, resp_qids_test, \
           lab_val, lab_test, qids_val, qids_test
def augment_training_data(training_docs, training_rj, rl_lengths):
    """Augment the training set by bootstrap-resampling every ranking list.

    Each original list produces 5 new lists of FLAGS.list_size_train documents,
    drawn uniformly with replacement from its first ``rl_lengths[i]``
    (non-padding) entries; relevance labels follow the sampled indices.

    Returns:
        ``(docs, labels, lengths)`` for the augmented lists; every new list has
        length FLAGS.list_size_train.
    """
    rj_arr = np.array(training_rj)
    len_arr = np.array(rl_lengths)
    samples_per_list = 5
    aug_docs = []
    aug_rj = []
    aug_lens = []
    for list_idx, docs in enumerate(training_docs):
        # Only sample from the real (unpadded) part of the list.
        candidates = np.array(docs[:len_arr[list_idx]])
        for _ in range(samples_per_list):
            picked = np.random.choice([idx for idx in range(len(candidates))], size=FLAGS.list_size_train,
                                      replace=True)
            aug_docs.append(candidates[picked])
            aug_rj.append(rj_arr[list_idx][picked])
            aug_lens.append(FLAGS.list_size_train)
    return aug_docs, aug_rj, aug_lens
def load_lambdaMART_preds(fold_f, lambdamart_preds_path):
    """Load cached LambdaMART predictions for one cross-validation fold.

    Standard LETOR 5-fold rotation:

        Fold   Training.txt   Validation.txt   Test.txt
        Fold1  S1, S2, S3     S4               S5
        Fold2  S2, S3, S4     S5               S1
        Fold3  S3, S4, S5     S1               S2
        Fold4  S4, S5, S1     S2               S3
        Fold5  S5, S1, S2     S3               S4

    Returns:
        ``(training_preds, test_preds, val_preds)``.
    """
    test_preds_path = os.path.join(lambdamart_preds_path, FLAGS.coll_name + '_lightgbm_' + fold_f + '.hkl')
    if not os.path.isfile(test_preds_path):
        # Predictions are computed lazily on first use.
        compute_lambdamart_preds(FLAGS)
    test_preds = load_model(test_preds_path)
    # For each test fold: which folds supply the training predictions and
    # which single fold serves as validation.
    fold_splits = {
        'Fold1': (['Fold2', 'Fold3', 'Fold4'], ['Fold5']),
        'Fold2': (['Fold3', 'Fold4', 'Fold5'], ['Fold1']),
        'Fold3': (['Fold4', 'Fold5', 'Fold1'], ['Fold2']),
        'Fold4': (['Fold5', 'Fold1', 'Fold2'], ['Fold3']),
        'Fold5': (['Fold1', 'Fold2', 'Fold3'], ['Fold4']),
    }
    training_folds, validation_folds = fold_splits.get(fold_f, ([], []))
    training_preds = []
    for ff in training_folds:
        tmp_model_path = os.path.join(lambdamart_preds_path, FLAGS.coll_name + '_lightgbm_' + ff + '.hkl')
        training_preds.extend(load_model(tmp_model_path))
    val_preds_path = os.path.join(lambdamart_preds_path, FLAGS.coll_name + '_lightgbm_' + validation_folds[0] + '.hkl')
    val_preds = load_model(val_preds_path)
    return training_preds, test_preds, val_preds
def group_data_in_ranking_lists(vectors, labels, qids, dids, list_size):
    """Group per-document features/labels/ids into fixed-length ranking lists.

    Documents sharing a query id form one ranking list, padded (zero vectors,
    0.0 labels, synthetic 'padding_did_*' ids) or trimmed to ``list_size``.
    Prints summary statistics of the raw list lengths as a side effect.

    Returns:
        ``(ranking_lists, all_labels, rl_lengths, resp_qids, all_dids)`` where
        ``rl_lengths`` is the pre-padding length capped at ``list_size``.
    """
    assert len(qids) == len(labels)
    assert len(qids) == len(vectors)
    # Map each query id to the indices of its documents, in encounter order.
    by_qid = {}
    for idx, qid in enumerate(qids):
        by_qid.setdefault(qid, []).append(idx)
    raw_lengths = [len(group) for group in by_qid.values()]
    print('mean ranking list length: %2.4f' % np.mean(raw_lengths))
    print('max ranking list length: %2.4f' % np.max(raw_lengths))
    print('min ranking list length: %2.4f' % np.min(raw_lengths))
    ranking_lists = []
    all_labels = []
    rl_lengths = []
    resp_qids = []
    all_dids = []
    for qid, idx_group in by_qid.items():
        group_dids = [dids[idx] for idx in idx_group]
        group_vecs = [vectors[idx] for idx in idx_group]
        group_labels = [labels[idx] for idx in idx_group]
        true_len = len(group_labels)
        # Pad/trim vectors, labels and doc ids to exactly list_size entries.
        group_vecs = pad_list(group_vecs, list_size)
        group_labels = group_labels[:min(list_size, true_len)]
        group_labels = group_labels + [0.0] * (list_size - len(group_labels))
        resp_qids.append(qid)
        group_dids = group_dids[:min(list_size, len(group_dids))]
        group_dids.extend('padding_did_{}'.format(i) for i in range(list_size - len(group_dids)))
        all_labels.append(group_labels)
        ranking_lists.append(group_vecs)
        all_dids.append(group_dids)
        rl_lengths.append(min(list_size, true_len))
    return ranking_lists, all_labels, rl_lengths, resp_qids, all_dids
def group_rj_in_ranking_lists_no_pad_trim(qids, labs):
    """Group relevance labels into per-query lists, without padding/trimming.

    Labels sharing a query id are collected into one list, queries appearing in
    encounter order and labels in original order.  (Previous version also built
    a ``doc_scores`` list of empty sub-lists that was never populated or
    returned, and copied each group element by element; both removed.)

    Args:
        qids: query id per document.
        labs: relevance label per document, parallel to ``qids``.

    Returns:
        A list of label lists, one per distinct query id.
    """
    grouped = {}
    for qid, label in zip(qids, labs):
        grouped.setdefault(qid, []).append(label)
    return list(grouped.values())
def compute_ranking_lists_rl_length_masks(rl_lengths, list_size):
    """Build one 0/1 float mask of length ``list_size`` per ranking list.

    Entry j of mask i is 1.0 when position j holds a real (non-padding)
    document, i.e. j < rl_lengths[i]; padding positions stay 0.0.
    """
    masks = []
    for length in rl_lengths:
        mask = np.zeros(list_size)
        real = min(list_size, length)
        if real > 0:
            mask[:real] = 1
        masks.append(mask)
    return masks
def remove_training_rl_without_rel_docs(train_rj, train_docs, rl_lengths_train):
    """Drop training ranking lists that contain no relevant document.

    Mirrors ``remove_queries_without_rel_docs`` but without the doc-id list.
    All three parallel lists are filtered with one keep-mask so they stay
    aligned.  (Previous version tested ``i not in indices_to_remove_train``
    against a list per element — O(n*m); this is a single linear pass.)

    Args:
        train_rj: list of per-query relevance-label lists (non-negative).
        train_docs: list of per-query document feature lists.
        rl_lengths_train: true (unpadded) length of each ranking list.

    Returns:
        The filtered ``(train_rj, train_docs, rl_lengths_train)`` tuple.
    """
    keep = [i for i, labels in enumerate(train_rj) if max(labels) != 0]
    train_rj = [train_rj[i] for i in keep]
    train_docs = [train_docs[i] for i in keep]
    rl_lengths_train = [rl_lengths_train[i] for i in keep]
    return train_rj, train_docs, rl_lengths_train
def test_model(sess, model, model_path, test_rj, test_docs, rl_lengths, qids_test, labels_test_non_grouped,
               silent=False):
    """Restore the checkpoint at `model_path` into `sess` and evaluate it.

    For probabilistic models (FLAGS.det_model False) the forward pass is
    Monte-Carlo sampled 50 times; deterministic models use a single pass.

    Args:
        sess: live tf.Session holding the model's graph.
        model: ReRanker instance whose saver/placeholders are used.
        model_path: checkpoint path to restore.
        test_rj: grouped (padded) relevance judgments per ranking list.
        test_docs: grouped (padded) document feature vectors per ranking list.
        rl_lengths: true (unpadded) length of each ranking list.
        qids_test: flat, non-grouped query ids (for ideal-DCG computation).
        labels_test_non_grouped: flat, non-grouped relevance labels.
        silent: forwarded to compute_perf_metrics to suppress printing.

    Returns:
        Tuple (avg_preds, ndcg@1, var_preds, perf-metrics dict).
    """
    rl_test_masks = compute_ranking_lists_rl_length_masks(rl_lengths, FLAGS.list_size_test)
    # initialize graph and session
    # tf.reset_default_graph()
    # sess_config = tf.ConfigProto()
    # sess_config.gpu_options.allow_growth = True
    # sess = tf.Session(config=sess_config, graph=tf.get_default_graph())
    # initialize model
    # model = ReRanker(FLAGS.seed, FLAGS.learning_rate, det_model=FLAGS.det_model, n_heads=FLAGS.n_heads,
    #                  num_features=FLAGS.num_features, n=FLAGS.n_binomial_samples,
    #                  loss_fn=FLAGS.loss, list_size=FLAGS.list_size_train, max_label_value=4,
    #                  norm_labels=FLAGS.norm_labels)
    tf.set_random_seed(FLAGS.seed)
    # sess.run(model.init_op)
    model.saver.restore(sess, model_path)
    sess.graph.finalize()
    # compute_predictions: one sample for deterministic models, 50 MC samples
    # otherwise; samples are averaged below and their variance reported.
    msamples = 50
    if FLAGS.det_model:
        msamples = 1
    all_preds = np.zeros(shape=(msamples, len(test_docs), FLAGS.list_size_test))
    for k in range(msamples):
        scores = sess.run(model.logits,
                          {model.training: False, model.input_docs: test_docs, model.rl_lengths_mask: rl_test_masks})
        if FLAGS.loss == 'ML':
            # Multi-class loss: logits are per-label distributions; take argmax.
            all_preds[k] = np.argmax(scores, axis=-1)
        else:
            all_preds[k] = scores
    avg_preds = np.mean(all_preds, axis=0)
    var_preds = np.var(all_preds, axis=0)
    # NOTE(review): both branches below write 0, so this loop zeroes the whole
    # mask and the two additions that follow are no-ops. The "-np.inf" remnant
    # suggests padded slots were once pushed to -inf before ranking -- confirm
    # whether that masking should be restored.
    for i in range(len(rl_test_masks)):
        for j in range(len(rl_test_masks[i])):
            if rl_test_masks[i][j] == 0:
                rl_test_masks[i][j] = 0  # -np.inf
            else:
                rl_test_masks[i][j] = 0
    avg_preds = rl_test_masks + avg_preds
    var_preds = rl_test_masks + var_preds
    grouped_rj = group_rj_in_ranking_lists_no_pad_trim(qids_test, labels_test_non_grouped)
    # Ideal ranking per query: labels sorted in decreasing order (for nDCG).
    ideal_rel_j_lists = [np.array(rl)[np.argsort(-np.array(rl))] for rl in grouped_rj]
    ndcg_1, base_1 = compute_mean_ndcg(avg_preds, test_rj, ideal_rel_j_lists, 1)
    return avg_preds, ndcg_1, var_preds, compute_perf_metrics(avg_preds, test_rj, ideal_rel_j_lists, silent, rl_lengths)
def get_batches(all_docs, all_labels, rl_lengths_mask):
    """Yield (docs, labels, length-masks) batches of FLAGS.batch_size items.

    The three parallel sequences are consumed in lockstep by index; the last
    batch may be smaller when the data size is not a multiple of batch_size.
    """
    batch_size = FLAGS.batch_size
    docs_batch, labels_batch, masks_batch = [], [], []
    for idx in range(len(all_docs)):
        docs_batch.append(all_docs[idx])
        labels_batch.append(all_labels[idx])
        masks_batch.append(rl_lengths_mask[idx])
        if len(docs_batch) == batch_size:
            yield docs_batch, labels_batch, masks_batch
            docs_batch, labels_batch, masks_batch = [], [], []
    # Flush the final, possibly partial, batch.
    if docs_batch:
        yield docs_batch, labels_batch, masks_batch
def train_model(sess, model, train_docs, train_rj, rl_train_masks, test_rj, test_docs, rl_lengths_test,
                labels_test_non_grouped, qids_test, model_suffix):
    """Train `model` for up to FLAGS.num_epochs, checkpointing every epoch.

    After each epoch the checkpoint is evaluated via `test_model` and its
    nDCG@1 recorded, so the caller can pick the best checkpoint.

    Args:
        sess: initialized tf.Session.
        model: ReRanker instance providing train_op/loss/saver.
        train_docs, train_rj, rl_train_masks: grouped training data/masks.
        test_rj, test_docs, rl_lengths_test: grouped evaluation data.
        labels_test_non_grouped, qids_test: flat eval labels/query ids.
        model_suffix: unique suffix for checkpoint file names.

    Returns:
        Tuple (ckpt_paths, perfs): one checkpoint path and one nDCG@1 value
        per completed epoch, index-aligned.
    """
    ckpt_paths = []
    perfs = []
    max_patience = 20
    patience = 20
    ploss = None
    early_stopping = False
    for epoch in range(1, FLAGS.num_epochs + 1):
        if early_stopping:
            break
        print('*** EPOCH: %d/%d' % (epoch, FLAGS.num_epochs))
        start = time.time()
        for db, rjb, lenb in get_batches(train_docs, train_rj, rl_train_masks):
            _, step, loss = sess.run(
                [model.train_op, model.global_step, model.loss],
                feed_dict={model.input_docs: db,
                           model.relevance_judgments: rjb,
                           model.rl_lengths_mask: lenb,
                           model.training: True})
            if ploss is None:
                ploss = loss
            else:
                # NOTE(review): `ploss` is set once from the very first batch
                # and never updated, so patience decrements whenever a batch
                # loss is >= the first batch's loss -- confirm this is the
                # intended early-stopping criterion.
                if loss >= ploss:
                    patience -= 1
                    if patience == 0:
                        early_stopping = True
                        print('early stopping')
                        break
                else:
                    patience = max_patience
            if step % 50 == 0:
                end = time.time()
                print('step: %d, loss: %2.6f, time: %2.3fs' % (step, loss, (end - start)))
        step = sess.run(model.global_step)
        # save_path = model.saver.save(sess, os.path.join(FLAGS.model_ckpt_path, 'ckpt_' + model_suffix),
        #                              global_step=step)
        # Retry the checkpoint save up to 100 times (works around transient
        # filesystem errors). NOTE(review): the bare `except` hides the real
        # error, and if all attempts fail `save_path` is unbound below.
        for _ in range(100):
            try:
                save_path = model.saver.save(sess, os.path.join(FLAGS.model_ckpt_path, 'ckpt_' + model_suffix),
                                             global_step=step)
            except:
                print('exception, retrying')
                continue
            break
        print("Model saved in path: %s" % save_path)
        preds, ndcg_1, var_preds, _ = test_model(sess, model, save_path, test_rj, test_docs, rl_lengths_test, qids_test,
                                                 labels_test_non_grouped, silent=False)
        perfs.append(ndcg_1)
        ckpt_paths.append(save_path)
    return ckpt_paths, perfs
def train_eval_model(train_rj, train_docs, test_rj, test_docs, rl_lengths_train, rl_lengths_test,
                     labels_test_non_grouped, qids_test, model_suffix=str(uuid.uuid4())):
    """Build a fresh ReRanker, train it, and return the best checkpoint.

    NOTE(review): the `model_suffix` default is evaluated once at import
    time, so every call that omits it shares the same uuid-derived suffix
    (and hence the same checkpoint file names) -- confirm this is intended.

    Returns:
        Tuple (best_model_path, sess, model): the checkpoint with the best
        per-epoch nDCG@1, plus the live session and model for evaluation.
    """
    rl_train_masks = compute_ranking_lists_rl_length_masks(rl_lengths_train, FLAGS.list_size_train)
    print('max ranking list length in training data: %d' % max(rl_lengths_train))
    print('max ranking list length in test data: %d' % max(rl_lengths_test))
    # initialize graph and session
    tf.reset_default_graph()
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    sess = tf.Session(config=sess_config, graph=tf.get_default_graph())
    # initialize model
    model = ReRanker(FLAGS.seed, FLAGS.learning_rate, coll_name=FLAGS.coll_name, det_model=FLAGS.det_model,
                     n_heads=FLAGS.n_heads,
                     num_features=FLAGS.num_features, n=FLAGS.n_binomial_samples,
                     loss_fn=FLAGS.loss, list_size=FLAGS.list_size_train, max_label_value=4,
                     norm_labels=FLAGS.norm_labels)
    tf.set_random_seed(FLAGS.seed)
    sess.run(model.init_op)
    sess.graph.finalize()
    start_training = time.time()
    ckpt_paths, perfs = train_model(sess, model, train_docs, train_rj, rl_train_masks, test_rj, test_docs,
                                    rl_lengths_test, labels_test_non_grouped, qids_test, model_suffix)
    print('Model trained in: %2.4fs' % (time.time() - start_training))
    # load and evaluate best model
    best_model_path = ckpt_paths[np.argmax(perfs)]
    print('Best ckpt model path: %s' % best_model_path)
    return best_model_path, sess, model
def run():
    """Run 5-fold cross-validation: train per fold, evaluate, aggregate.

    For each fold a model is trained (model selection on the validation
    split), the best checkpoint is evaluated on the fold's test split, and
    predictions/labels are accumulated. Finally, metrics averaged across
    all folds are printed and a TREC-format run + qrels are written to
    ./output.
    """
    fold_folders = ['Fold1', 'Fold2', 'Fold3', 'Fold4', 'Fold5']
    # fold_folders = ['Fold1']
    all_preds = []
    all_rjs = []
    all_qids_test = []
    all_qids_test_non_g = []
    all_dids_test = []
    all_lab_test_non_grouped = []
    all_rl_lengths = []
    perfs_across_folds = {}
    for fold_f in fold_folders:
        ranking_lists_train, all_labels_train, rl_lengths_train, resp_dids_train, \
        ranking_lists_val, all_labels_val, rl_lengths_val, resp_dids_val, resp_qids_val, \
        ranking_lists_test, all_labels_test, rl_lengths_test, resp_dids_test, resp_qids_test, \
        lab_val_non_grouped, lab_test_non_grouped, qids_val, qids_test = read_data(data_folder=FLAGS.data_folder,
                                                                                  fold_f=fold_f)
        # print(qids_test)
        # Model selection happens on the validation split; the test split is
        # only touched once per fold, below.
        best_model_path, sess, model = train_eval_model(all_labels_train, ranking_lists_train, all_labels_val,
                                                        ranking_lists_val,
                                                        rl_lengths_train, rl_lengths_val, lab_val_non_grouped, qids_val)
        avg_preds, ndcg_1, var_preds, all_perf = test_model(sess, model, best_model_path, all_labels_test,
                                                            ranking_lists_test, rl_lengths_test, qids_test,
                                                            lab_test_non_grouped)
        all_preds.extend(avg_preds)
        all_rjs.extend(all_labels_test)
        all_qids_test.extend(resp_qids_test)
        all_qids_test_non_g.extend(qids_test)
        all_dids_test.extend(resp_dids_test)
        all_lab_test_non_grouped.extend(lab_test_non_grouped)
        all_rl_lengths.extend(rl_lengths_test)
        for k, v in all_perf.items():
            if k in perfs_across_folds.keys():
                perfs_across_folds[k].append(v)
            else:
                perfs_across_folds[k] = [v]
    for k, v in perfs_across_folds.items():
        print('{}: {}'.format(k, np.mean(v)))
    # save_model((all_preds, all_rjs, all_qids_test, all_dids_test, all_qids_test_non_g, all_lab_test_non_grouped),
    #            './output/final_preds_data_{}_{}_{}.hkl'.format(FLAGS.coll_name, FLAGS.loss, FLAGS.simulate_labels))
    grouped_rj = group_rj_in_ranking_lists_no_pad_trim(all_qids_test_non_g, all_lab_test_non_grouped)
    ideal_rel_j_lists = [np.array(rl)[np.argsort(-np.array(rl))] for rl in grouped_rj]
    # Rescale normalized labels back to integer grades using the smallest
    # non-zero label value (labels were divided by the max grade in read_data).
    all_rjs = np.array(all_rjs) * int(1.0 / sorted(set(all_lab_test_non_grouped))[1])
    ideal_rel_j_lists = np.array(ideal_rel_j_lists) * int(1.0 / sorted(set(all_lab_test_non_grouped))[1])
    print('\nFINAL PERF AVGD ACROSS FOLDS:')
    # import pdb
    # pdb.set_trace()
    compute_perf_metrics(all_preds, all_rjs, ideal_rel_j_lists, False, all_rl_lengths, max_rj=2.0)
    create_trec_eval_format_run_qrels(all_preds, all_dids_test, all_qids_test, all_rjs,
                                      'DASALC_{}_loss={}_simulate_labels={}_det_model={}'.format(FLAGS.coll_name,
                                                                                                 FLAGS.loss,
                                                                                                 FLAGS.simulate_labels,
                                                                                                 FLAGS.det_model),
                                      './output')
    return
def create_trec_format_run(qids, dids, preds, ofpath):
    """Write predictions to `ofpath` in TREC run format.

    Each line is `<qid> Q0 <did> <rank> <score> <run_tag>`, with the
    documents of each query ordered by descending predicted score and the
    rank column reflecting that order (1 = best).

    Args:
        qids: query id per ranking list.
        dids: document ids, one sequence per ranking list.
        preds: predicted scores, one sequence per ranking list.
        ofpath: output file path.
    """
    # `with` guarantees the file is closed even on error (original leaked
    # the handle on exceptions).
    with open(ofpath, 'w') as out:
        for rl_idx in range(len(preds)):
            # Fix: sort by *descending* score. The original ascending argsort
            # emitted documents worst-first, and reused the raw document index
            # as the rank column instead of the actual rank position.
            sorted_indices = np.argsort(-np.asarray(preds[rl_idx]))
            for rank, item_idx in enumerate(sorted_indices, start=1):
                run_line = '{} Q0 {} {} {} {}\n'.format(qids[rl_idx], dids[rl_idx][item_idx],
                                                        rank, preds[rl_idx][item_idx], 'PFusion')
                out.write(run_line)
def flatten_stuff_provide_fake_qids(all_preds, all_rjs):
    """Flatten grouped predictions/labels and synthesize one qid per group.

    Returns:
        Three aligned numpy arrays: flat predictions, flat relevance labels,
        and a fake query id (the group index) repeated for every item of
        the corresponding group.
    """
    flat_preds = [p for group in all_preds for p in group]
    flat_labels = [r for group in all_rjs for r in group]
    fake_qids = [gi for gi, group in enumerate(all_preds) for _ in group]
    return np.array(flat_preds), np.array(flat_labels), np.array(fake_qids)
if __name__ == '__main__':
    # Silence TensorFlow's C++ and Python-side logging before anything runs.
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
    logging.getLogger("tensorflow").setLevel(logging.CRITICAL)
    arg_parser = argparse.ArgumentParser()
    add_arguments(arg_parser)
    # parse_known_args tolerates extra CLI flags (e.g. injected by launchers).
    FLAGS, unparsed = arg_parser.parse_known_args()
    for arg in vars(FLAGS):
        print(arg, ":", getattr(FLAGS, arg))
    if not os.path.exists(FLAGS.model_ckpt_path):
        os.makedirs(FLAGS.model_ckpt_path)
    # Seed numpy and TF globally for reproducibility.
    np.random.seed(FLAGS.seed)
    tf.random.set_random_seed(FLAGS.seed)
    run()
    print(FLAGS.loss)
    print('DONE')
| 47.297945 | 126 | 0.644269 | import argparse
import logging
import os
import time
import uuid
import numpy as np
import pyltr
import tensorflow as tf
from evaluation import compute_mean_ndcg, compute_perf_metrics, create_trec_eval_format_run_qrels
from lambdamart import compute_lambdamart_preds
from dasalc_model import ReRanker
from simulate_unsupervised_rj import compare_artif_rj_with_real_ones, sample_labels
from utils import load_model, pad_list, save_model
flags = tf.app.flags
FLAGS = flags.FLAGS
def add_arguments(parser):
    """Register all command-line flags for the DASALC training script.

    Args:
        parser: argparse.ArgumentParser to populate in place.
    """
    parser.register("type", "bool", lambda v: v.lower() == "true")
    parser.add_argument("--coll_name", type=str, default='MQ2007', help="Collection name")
    parser.add_argument("--data_folder", type=str, default='../LETOR_data/MQ2007/', help="Data folder.")
    parser.add_argument("--simulate_labels", type=str, default=False,
                        help="Whether to train with simulated labels or not.")
    parser.add_argument("--expand_training_data", type=str, default=False,
                        help="Whether to expand training data or not.")
    parser.add_argument("--det_model", type=str, default=True, help="Whether to use probabilistic layers or not.")
    parser.add_argument("--rerank_lambdamart", type=str, default=False,
                        help="Whether to rerank lambdamart preds or from scratch.")
    parser.add_argument("--lambdamart_preds_path", type=str, default='../LETOR_data/MQ2007/lambdamart_runs',
                        help="LM data folder.")
    parser.add_argument("--seed", type=float, default=0, help="The random seed to use.")
    parser.add_argument("--n_binomial_samples", type=float, default=32,
                        help="The number of binomial samples to simulate.")
    parser.add_argument("--loss", type=str, default='ApproxNDCG_G', help="The loss to use to train the model.")
    parser.add_argument("--norm_labels", type=bool, default=False,
                        help="Whether to normalize within [0,1] the relevance labels.")
    parser.add_argument("--num_features", type=int, default=46, help="Number of features per document.")
    parser.add_argument("--num_epochs", type=int, default=100, help="The number of epochs for training.")
    parser.add_argument("--n_heads", type=int, default=2, help="Num heads.")
    parser.add_argument("--batch_size", type=int, default=4, help="The batch size for training.")
    # Fix: the original line was truncated to "ser.add_argument(...)" (a
    # NameError at call time). Also restore --list_size_train, which
    # FLAGS.list_size_train consumers need when --expand_training_data is
    # set (read_data only assigns it in the non-augmented branch).
    parser.add_argument("--list_size_train", type=int, default=147, help="List size.")
    parser.add_argument("--list_size_test", type=int, default=147, help="List size.")
    parser.add_argument("--learning_rate", type=float, default=1e-3, help="Learning rate for optimizer.")
    parser.add_argument("--model_ckpt_path", type=str, default='./output/chkpts/',
                        help="Output path for checkpoint saving.")
def remove_queries_without_rel_docs(rj, docs, rl_lengths, dids):
    """Drop queries whose ranking list has no relevant document.

    A query whose maximum relevance judgment is 0 is removed from all four
    parallel sequences.

    Args:
        rj: per-query lists of relevance judgments.
        docs: per-query lists of document feature vectors.
        rl_lengths: per-query true ranking-list lengths.
        dids: per-query lists of document ids.

    Returns:
        The four input sequences with all-zero-relevance queries removed.
    """
    # Set membership is O(1); the original scanned a list inside each
    # filtering comprehension (O(n) per element).
    to_remove = {i for i, judgments in enumerate(rj) if max(judgments) == 0}
    rj = [v for i, v in enumerate(rj) if i not in to_remove]
    docs = [v for i, v in enumerate(docs) if i not in to_remove]
    rl_lengths = [v for i, v in enumerate(rl_lengths) if i not in to_remove]
    dids = [v for i, v in enumerate(dids) if i not in to_remove]
    return rj, docs, rl_lengths, dids
def group_docs_with_lambdamart_preds(preds, qids, docs, labels, max_list_size):
    """Group documents by query and order each list by LambdaMART score.

    Each ranking list is sorted by descending LambdaMART prediction, then
    padded with zero feature vectors / 0.0 labels (or trimmed) to exactly
    `max_list_size` entries.

    Args:
        preds: flat LambdaMART score per document.
        qids: flat query id per document.
        docs: flat feature vector per document.
        labels: flat relevance label per document.
        max_list_size: fixed output ranking-list length.

    Returns:
        Tuple (grouped_docs, grouped_labels, rl_lengths) with one entry per
        distinct query; rl_lengths holds the pre-padding (clamped) lengths.
    """
    grouped = {}
    for i in range(len(qids)):
        if qids[i] in grouped.keys():
            grouped[qids[i]].append((preds[i], docs[i], labels[i]))
        else:
            grouped[qids[i]] = [(preds[i], docs[i], labels[i])]
    grouped_docs = []
    grouped_labels = []
    rl_lengths = []
    for group in grouped.values():
        # NOTE(review): np.array on (score, vector, label) tuples yields an
        # object-dtype 2-D array; column 0 = scores, 1 = docs, 2 = labels.
        # This relies on every element being treated as a scalar object.
        g = np.array(group)
        lmp = g[:, 0]
        indices = np.argsort(-lmp)
        ranked_list = list(g[:, 1][indices])
        ranked_labels = list(g[:, 2][indices])
        # Pad short lists with zero vectors/labels, then trim to size.
        while len(ranked_list) < max_list_size:
            ranked_list.append(np.zeros(FLAGS.num_features))
            ranked_labels.append(0.0)
        ranked_list = ranked_list[:max_list_size]
        ranked_labels = ranked_labels[:max_list_size]
        grouped_docs.append(ranked_list)
        grouped_labels.append(ranked_labels)
        rl_lengths.append(min(max_list_size, len(lmp)))
    return grouped_docs, grouped_labels, rl_lengths
def read_data(data_folder, fold_f):
    """Load one LETOR fold, normalize labels, and group into ranking lists.

    Reads train/vali/test splits with pyltr, rescales labels into [0, 1] by
    the training split's max grade, groups documents per query into padded
    ranking lists, and optionally replaces labels with simulated ones
    and/or augments the training data (per FLAGS).

    NOTE(review): mutates FLAGS.list_size_train when FLAGS.expand_training_data
    is falsy, and the open() handles passed to pyltr are never closed.

    Args:
        data_folder: root folder containing the fold subdirectories.
        fold_f: fold subdirectory name, e.g. 'Fold1'.

    Returns:
        An 18-tuple: grouped train data (4 items), grouped validation data
        (5 items), grouped test data (5 items), plus the flat validation and
        test labels and query ids.
    """
    training_file_path = os.path.join(os.path.join(data_folder, fold_f), 'train.txt')
    valid_file_path = os.path.join(os.path.join(data_folder, fold_f), 'vali.txt')
    test_file_path = os.path.join(os.path.join(data_folder, fold_f), 'test.txt')
    docs_train, lab_train, qids_train, _ = pyltr.data.letor.read_dataset(open(training_file_path))
    docs_val, lab_val, qids_val, _ = pyltr.data.letor.read_dataset(open(valid_file_path))
    docs_test, lab_test, qids_test, _ = pyltr.data.letor.read_dataset(open(test_file_path))
    # No real document ids in LETOR data: synthesize positional ones.
    dids_train = ['fake_did_{}'.format(i) for i in range(len(docs_train))]
    dids_test = ['fake_did_{}'.format(i) for i in range(len(docs_test))]
    dids_val = ['fake_did_{}'.format(i) for i in range(len(docs_val))]
    # Normalize all splits by the training split's maximum grade.
    max_l = np.max(lab_train)
    print('max label: {}'.format(max_l))
    lab_train = np.array(lab_train) / max_l
    lab_val = np.array(lab_val) / max_l
    lab_test = np.array(lab_test) / max_l
    assert 0 <= max(lab_test) <= 1
    assert 0 <= max(lab_train) <= 1
    assert 0 <= max(lab_val) <= 1
    ranking_lists_train, all_labels_train, rl_lengths_train, resp_qids_train, resp_dids_train = \
        group_data_in_ranking_lists(docs_train, lab_train, qids_train, dids_train, FLAGS.list_size_test)
    ranking_lists_val, all_labels_val, rl_lengths_val, resp_qids_val, resp_dids_val = \
        group_data_in_ranking_lists(docs_val, lab_val, qids_val, dids_val, FLAGS.list_size_test)
    ranking_lists_test, all_labels_test, rl_lengths_test, resp_qids_test, resp_dids_test = \
        group_data_in_ranking_lists(docs_test, lab_test, qids_test, dids_test, FLAGS.list_size_test)
    all_labels_train, ranking_lists_train, rl_lengths_train, resp_dids_train = remove_queries_without_rel_docs(
        all_labels_train, ranking_lists_train, rl_lengths_train, resp_dids_train)
    if FLAGS.simulate_labels:
        # Replace real training labels with simulated (sampled) judgments,
        # then re-filter queries that lost all relevant documents.
        artif_labels = sample_labels(all_labels_train, rl_lengths_train, FLAGS.n_binomial_samples)
        compare_artif_rj_with_real_ones(artif_labels, all_labels_train, rl_lengths_train)
        all_labels_train = artif_labels
        all_labels_train, ranking_lists_train, rl_lengths_train, resp_dids_train = remove_queries_without_rel_docs(
            all_labels_train, ranking_lists_train, rl_lengths_train, resp_dids_train)
    if FLAGS.expand_training_data:
        ranking_lists_train, all_labels_train, rl_lengths_train = augment_training_data(ranking_lists_train,
                                                                                        all_labels_train,
                                                                                        rl_lengths_train)
        all_labels_train, ranking_lists_train, rl_lengths_train, resp_dids_train = remove_queries_without_rel_docs(
            all_labels_train, ranking_lists_train, rl_lengths_train, resp_dids_train)
    else:
        FLAGS.list_size_train = FLAGS.list_size_test
    return ranking_lists_train, all_labels_train, rl_lengths_train, resp_dids_train, \
        ranking_lists_val, all_labels_val, rl_lengths_val, resp_dids_val, resp_qids_val, \
        ranking_lists_test, all_labels_test, rl_lengths_test, resp_dids_test, resp_qids_test, \
        lab_val, lab_test, qids_val, qids_test
def augment_training_data(training_docs, training_rj, rl_lengths):
    """Augment training data by resampling documents within each query.

    For every original ranking list, draws 5 bootstrap samples (with
    replacement) of exactly FLAGS.list_size_train documents from its
    unpadded portion, keeping labels aligned. Uses the global numpy RNG,
    so results depend on the seed set in __main__.

    Args:
        training_docs: per-query padded document feature lists.
        training_rj: per-query padded relevance judgment lists.
        rl_lengths: per-query true (unpadded) lengths.

    Returns:
        Tuple (new_ranked_lists, new_rj, new_lengths): 5x as many ranking
        lists, each of length FLAGS.list_size_train.
    """
    training_rj = np.array(training_rj)
    rl_lengths = np.array(rl_lengths)
    n_samples_per_rl = 5
    new_ranked_lists = []
    new_rj = []
    new_lengths = []
    for i in range(len(training_docs)):
        # Only sample from the real (unpadded) documents of this query.
        docs_to_sample = np.array(training_docs[i][:rl_lengths[i]])
        for _ in range(n_samples_per_rl):
            sel_indices = np.random.choice([idx for idx in range(len(docs_to_sample))], size=FLAGS.list_size_train,
                                           replace=True)
            new_ranked_lists.append(docs_to_sample[sel_indices])
            new_rj.append(training_rj[i][sel_indices])
            new_lengths.append(FLAGS.list_size_train)
    return new_ranked_lists, new_rj, new_lengths
def load_lambdaMART_preds(fold_f, lambdamart_preds_path):
    """Load cached LambdaMART predictions for one cross-validation fold.

    If the test-fold predictions are missing, they are (re)computed first.
    The train/validation folds follow the standard LETOR rotation: for test
    fold i, the next three folds (cyclically) are training and the fourth
    is validation.

    Args:
        fold_f: test fold name, one of 'Fold1'..'Fold5'.
        lambdamart_preds_path: directory holding the cached .hkl predictions.

    Returns:
        Tuple (training_preds, test_preds, val_preds).
    """
    test_preds_path = os.path.join(lambdamart_preds_path, FLAGS.coll_name + '_lightgbm_' + fold_f + '.hkl')
    if not os.path.isfile(test_preds_path):
        # Cache miss: run LightGBM/LambdaMART to produce all fold predictions.
        compute_lambdamart_preds(FLAGS)
    test_preds = load_model(test_preds_path)
    training_folds = []
    validation_folds = []
    if fold_f == 'Fold1':
        training_folds = ['Fold2', 'Fold3', 'Fold4']
        validation_folds = ['Fold5']
    elif fold_f == 'Fold2':
        training_folds = ['Fold3', 'Fold4', 'Fold5']
        validation_folds = ['Fold1']
    elif fold_f == 'Fold3':
        training_folds = ['Fold4', 'Fold5', 'Fold1']
        validation_folds = ['Fold2']
    elif fold_f == 'Fold4':
        training_folds = ['Fold5', 'Fold1', 'Fold2']
        validation_folds = ['Fold3']
    elif fold_f == 'Fold5':
        training_folds = ['Fold1', 'Fold2', 'Fold3']
        validation_folds = ['Fold4']
    training_preds = []
    for ff in training_folds:
        tmp_model_path = os.path.join(lambdamart_preds_path, FLAGS.coll_name + '_lightgbm_' + ff + '.hkl')
        training_preds.extend(load_model(tmp_model_path))
    val_preds_path = os.path.join(lambdamart_preds_path, FLAGS.coll_name + '_lightgbm_' + validation_folds[0] + '.hkl')
    val_preds = load_model(val_preds_path)
    return training_preds, test_preds, val_preds
def group_data_in_ranking_lists(vectors, labels, qids, dids, list_size):
    """Group flat per-document data by query into fixed-size ranking lists.

    Each query's documents are collected in input order, then padded (with
    zero labels / 'padding_did_*' ids / pad_list for vectors) or trimmed to
    exactly `list_size` items.

    Args:
        vectors: flat document feature vectors.
        labels: flat relevance labels, aligned with `vectors`.
        qids: flat query id per document.
        dids: flat document id per document.
        list_size: fixed output ranking-list length.

    Returns:
        Tuple (ranking_lists, all_labels, rl_lengths, resp_qids, all_dids),
        one entry per distinct query; rl_lengths holds the pre-padding
        (clamped) lengths.
    """
    assert len(qids) == len(labels)
    assert len(qids) == len(vectors)
    data_indices_grouped_by_qid = {}
    for i in range(len(qids)):
        curr_qid = qids[i]
        if curr_qid not in data_indices_grouped_by_qid.keys():
            data_indices_grouped_by_qid[curr_qid] = [i]
        else:
            data_indices_grouped_by_qid[curr_qid].append(i)
    print('mean ranking list length: %2.4f' % np.mean([len(item) for item in data_indices_grouped_by_qid.values()]))
    print('max ranking list length: %2.4f' % np.max([len(item) for item in data_indices_grouped_by_qid.values()]))
    print('min ranking list length: %2.4f' % np.min([len(item) for item in data_indices_grouped_by_qid.values()]))
    ranking_lists = []
    all_labels = []
    rl_lengths = []
    resp_qids = []
    all_dids = []
    for qid, indices_group in data_indices_grouped_by_qid.items():
        curr_dids = [dids[i] for i in indices_group]
        vecs = [vectors[i] for i in indices_group]
        curr_labels = [labels[i] for i in indices_group]
        original_rl_len = len(curr_labels)
        # Pad/trim everything to exactly `list_size` entries.
        vecs = pad_list(vecs, list_size)
        curr_labels = curr_labels[0: min(list_size, len(curr_labels))]
        curr_labels = curr_labels + [0.0] * (list_size - len(curr_labels))
        resp_qids.append(qid)
        curr_dids = curr_dids[0: min(list_size, len(curr_dids))]
        curr_dids.extend('padding_did_{}'.format(i) for i in range(list_size - len(curr_dids)))
        # Append the finished (fixed-size) ranking list for this query.
        all_labels.append(curr_labels)
        ranking_lists.append(vecs)
        all_dids.append(curr_dids)
        rl_lengths.append(min(list_size, original_rl_len))
    return ranking_lists, all_labels, rl_lengths, resp_qids, all_dids
def group_rj_in_ranking_lists_no_pad_trim(qids, labs):
    """Group flat per-document relevance labels into per-query lists.

    Args:
        qids: query id for each document, aligned with `labs`.
        labs: relevance label for each document, aligned with `qids`.

    Returns:
        A list of label lists, one per distinct query id, in order of first
        appearance of each qid. No padding and no trimming is applied.
    """
    grouped = {}
    for qid, label in zip(qids, labs):
        # dict preserves insertion order (py3.7+), so ranking lists come out
        # in the order each qid is first seen -- same as the original code.
        grouped.setdefault(qid, []).append(label)
    # Removed the dead `doc_scores`/`curr_scores` accumulators: they were
    # always empty and never returned. Return copies so callers cannot
    # mutate the grouping structure through the result.
    return [list(rj) for rj in grouped.values()]
def compute_ranking_lists_rl_length_masks(rl_lengths, list_size):
    """Build binary padding masks for fixed-size ranking lists.

    Args:
        rl_lengths: true (unpadded) length of each ranking list.
        list_size: padded length of every ranking list.

    Returns:
        A list of numpy float vectors of shape (list_size,), each with ones
        in the first min(list_size, length) positions and zeros elsewhere.
    """
    rl_masks = []
    for length in rl_lengths:
        mask = np.zeros(list_size)
        # Vectorized equivalent of setting the first `length` entries to 1.
        # max(0, ...) keeps the original "empty range" behavior if a length
        # were ever non-positive; min(...) clamps lists longer than list_size.
        mask[:max(0, min(list_size, length))] = 1
        rl_masks.append(mask)
    return rl_masks
def remove_training_rl_without_rel_docs(train_rj, train_docs, rl_lengths_train):
    """Drop training ranking lists that contain no relevant document.

    A ranking list whose maximum relevance judgment is 0 carries no signal
    for a ranking loss, so it is removed from all three parallel sequences.

    Args:
        train_rj: per-query lists of relevance judgments.
        train_docs: per-query lists of document feature vectors.
        rl_lengths_train: per-query true ranking-list lengths.

    Returns:
        The three input sequences with all-zero-relevance queries removed.
    """
    # Use a set for O(1) membership tests instead of the original O(n)
    # list scans inside each filtering comprehension.
    to_remove = {i for i, rj in enumerate(train_rj) if max(rj) == 0}
    train_rj = [rj for i, rj in enumerate(train_rj) if i not in to_remove]
    train_docs = [d for i, d in enumerate(train_docs) if i not in to_remove]
    rl_lengths_train = [
        length for i, length in enumerate(rl_lengths_train) if i not in to_remove
    ]
    return train_rj, train_docs, rl_lengths_train
def test_model(sess, model, model_path, test_rj, test_docs, rl_lengths, qids_test, labels_test_non_grouped,
               silent=False):
    """Restore the checkpoint at `model_path` into `sess` and evaluate it.

    For probabilistic models (FLAGS.det_model False) the forward pass is
    Monte-Carlo sampled 50 times; deterministic models use a single pass.

    Returns:
        Tuple (avg_preds, ndcg@1, var_preds, perf-metrics dict).
    """
    rl_test_masks = compute_ranking_lists_rl_length_masks(rl_lengths, FLAGS.list_size_test)
    tf.set_random_seed(FLAGS.seed)
    model.saver.restore(sess, model_path)
    sess.graph.finalize()
    # One sample for deterministic models, 50 MC samples otherwise.
    msamples = 50
    if FLAGS.det_model:
        msamples = 1
    all_preds = np.zeros(shape=(msamples, len(test_docs), FLAGS.list_size_test))
    for k in range(msamples):
        scores = sess.run(model.logits,
                          {model.training: False, model.input_docs: test_docs, model.rl_lengths_mask: rl_test_masks})
        if FLAGS.loss == 'ML':
            # Multi-class loss: logits are per-label distributions; take argmax.
            all_preds[k] = np.argmax(scores, axis=-1)
        else:
            all_preds[k] = scores
    avg_preds = np.mean(all_preds, axis=0)
    var_preds = np.var(all_preds, axis=0)
    # NOTE(review): both branches write 0, so this loop zeroes the whole mask
    # and the two additions below are no-ops -- confirm whether padded slots
    # were meant to be masked (e.g. with -inf) before ranking.
    for i in range(len(rl_test_masks)):
        for j in range(len(rl_test_masks[i])):
            if rl_test_masks[i][j] == 0:
                rl_test_masks[i][j] = 0
            else:
                rl_test_masks[i][j] = 0
    avg_preds = rl_test_masks + avg_preds
    var_preds = rl_test_masks + var_preds
    grouped_rj = group_rj_in_ranking_lists_no_pad_trim(qids_test, labels_test_non_grouped)
    # Ideal ranking per query: labels sorted in decreasing order (for nDCG).
    ideal_rel_j_lists = [np.array(rl)[np.argsort(-np.array(rl))] for rl in grouped_rj]
    ndcg_1, base_1 = compute_mean_ndcg(avg_preds, test_rj, ideal_rel_j_lists, 1)
    return avg_preds, ndcg_1, var_preds, compute_perf_metrics(avg_preds, test_rj, ideal_rel_j_lists, silent, rl_lengths)
def get_batches(all_docs, all_labels, rl_lengths_mask):
    """Yield (docs, labels, length-masks) batches of FLAGS.batch_size items.

    The three parallel sequences are consumed in lockstep by index; the last
    batch may be smaller when the data size is not a multiple of batch_size.
    """
    batch_size = FLAGS.batch_size
    docs_batch, labels_batch, masks_batch = [], [], []
    for idx in range(len(all_docs)):
        docs_batch.append(all_docs[idx])
        labels_batch.append(all_labels[idx])
        masks_batch.append(rl_lengths_mask[idx])
        if len(docs_batch) == batch_size:
            yield docs_batch, labels_batch, masks_batch
            docs_batch, labels_batch, masks_batch = [], [], []
    # Flush the final, possibly partial, batch.
    if docs_batch:
        yield docs_batch, labels_batch, masks_batch
def train_model(sess, model, train_docs, train_rj, rl_train_masks, test_rj, test_docs, rl_lengths_test,
                labels_test_non_grouped, qids_test, model_suffix):
    """Train `model` for up to FLAGS.num_epochs, checkpointing every epoch.

    After each epoch the checkpoint is evaluated via `test_model` and its
    nDCG@1 recorded, so the caller can pick the best checkpoint.

    Returns:
        Tuple (ckpt_paths, perfs): one checkpoint path and one nDCG@1 value
        per completed epoch, index-aligned.
    """
    ckpt_paths = []
    perfs = []
    max_patience = 20
    patience = 20
    ploss = None
    early_stopping = False
    for epoch in range(1, FLAGS.num_epochs + 1):
        if early_stopping:
            break
        print('*** EPOCH: %d/%d' % (epoch, FLAGS.num_epochs))
        start = time.time()
        for db, rjb, lenb in get_batches(train_docs, train_rj, rl_train_masks):
            _, step, loss = sess.run(
                [model.train_op, model.global_step, model.loss],
                feed_dict={model.input_docs: db,
                           model.relevance_judgments: rjb,
                           model.rl_lengths_mask: lenb,
                           model.training: True})
            if ploss is None:
                ploss = loss
            else:
                # NOTE(review): `ploss` is set once from the first batch and
                # never updated; patience compares every batch loss against
                # that first loss -- confirm this is intended.
                if loss >= ploss:
                    patience -= 1
                    if patience == 0:
                        early_stopping = True
                        print('early stopping')
                        break
                else:
                    patience = max_patience
            if step % 50 == 0:
                end = time.time()
                print('step: %d, loss: %2.6f, time: %2.3fs' % (step, loss, (end - start)))
        step = sess.run(model.global_step)
        # Retry the checkpoint save up to 100 times (transient FS errors).
        # NOTE(review): bare `except` hides the real error; if all attempts
        # fail, `save_path` is unbound below.
        for _ in range(100):
            try:
                save_path = model.saver.save(sess, os.path.join(FLAGS.model_ckpt_path, 'ckpt_' + model_suffix),
                                             global_step=step)
            except:
                print('exception, retrying')
                continue
            break
        print("Model saved in path: %s" % save_path)
        preds, ndcg_1, var_preds, _ = test_model(sess, model, save_path, test_rj, test_docs, rl_lengths_test, qids_test,
                                                 labels_test_non_grouped, silent=False)
        perfs.append(ndcg_1)
        ckpt_paths.append(save_path)
    return ckpt_paths, perfs
def train_eval_model(train_rj, train_docs, test_rj, test_docs, rl_lengths_train, rl_lengths_test,
                     labels_test_non_grouped, qids_test, model_suffix=str(uuid.uuid4())):
    """Build a fresh ReRanker, train it, and return the best checkpoint.

    NOTE(review): the `model_suffix` default is evaluated once at import
    time, so every call that omits it shares the same uuid-derived suffix.

    Returns:
        Tuple (best_model_path, sess, model): the checkpoint with the best
        per-epoch nDCG@1, plus the live session and model for evaluation.
    """
    rl_train_masks = compute_ranking_lists_rl_length_masks(rl_lengths_train, FLAGS.list_size_train)
    print('max ranking list length in training data: %d' % max(rl_lengths_train))
    print('max ranking list length in test data: %d' % max(rl_lengths_test))
    # Fresh graph + growth-enabled GPU session for this fold's model.
    tf.reset_default_graph()
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    sess = tf.Session(config=sess_config, graph=tf.get_default_graph())
    model = ReRanker(FLAGS.seed, FLAGS.learning_rate, coll_name=FLAGS.coll_name, det_model=FLAGS.det_model,
                     n_heads=FLAGS.n_heads,
                     num_features=FLAGS.num_features, n=FLAGS.n_binomial_samples,
                     loss_fn=FLAGS.loss, list_size=FLAGS.list_size_train, max_label_value=4,
                     norm_labels=FLAGS.norm_labels)
    tf.set_random_seed(FLAGS.seed)
    sess.run(model.init_op)
    sess.graph.finalize()
    start_training = time.time()
    ckpt_paths, perfs = train_model(sess, model, train_docs, train_rj, rl_train_masks, test_rj, test_docs,
                                    rl_lengths_test, labels_test_non_grouped, qids_test, model_suffix)
    print('Model trained in: %2.4fs' % (time.time() - start_training))
    # Pick the epoch checkpoint with the best recorded nDCG@1.
    best_model_path = ckpt_paths[np.argmax(perfs)]
    print('Best ckpt model path: %s' % best_model_path)
    return best_model_path, sess, model
def run():
    """Run 5-fold cross-validation: train per fold, evaluate, aggregate.

    For each fold a model is trained (model selection on the validation
    split), the best checkpoint is evaluated on the fold's test split, and
    predictions/labels are accumulated. Finally, metrics averaged across
    all folds are printed and a TREC-format run + qrels are written to
    ./output.
    """
    fold_folders = ['Fold1', 'Fold2', 'Fold3', 'Fold4', 'Fold5']
    all_preds = []
    all_rjs = []
    all_qids_test = []
    all_qids_test_non_g = []
    all_dids_test = []
    all_lab_test_non_grouped = []
    all_rl_lengths = []
    perfs_across_folds = {}
    for fold_f in fold_folders:
        ranking_lists_train, all_labels_train, rl_lengths_train, resp_dids_train, \
        ranking_lists_val, all_labels_val, rl_lengths_val, resp_dids_val, resp_qids_val, \
        ranking_lists_test, all_labels_test, rl_lengths_test, resp_dids_test, resp_qids_test, \
        lab_val_non_grouped, lab_test_non_grouped, qids_val, qids_test = read_data(data_folder=FLAGS.data_folder,
                                                                                  fold_f=fold_f)
        # Model selection on the validation split; test split only used below.
        best_model_path, sess, model = train_eval_model(all_labels_train, ranking_lists_train, all_labels_val,
                                                        ranking_lists_val,
                                                        rl_lengths_train, rl_lengths_val, lab_val_non_grouped, qids_val)
        avg_preds, ndcg_1, var_preds, all_perf = test_model(sess, model, best_model_path, all_labels_test,
                                                            ranking_lists_test, rl_lengths_test, qids_test,
                                                            lab_test_non_grouped)
        all_preds.extend(avg_preds)
        all_rjs.extend(all_labels_test)
        all_qids_test.extend(resp_qids_test)
        all_qids_test_non_g.extend(qids_test)
        all_dids_test.extend(resp_dids_test)
        all_lab_test_non_grouped.extend(lab_test_non_grouped)
        all_rl_lengths.extend(rl_lengths_test)
        for k, v in all_perf.items():
            if k in perfs_across_folds.keys():
                perfs_across_folds[k].append(v)
            else:
                perfs_across_folds[k] = [v]
    for k, v in perfs_across_folds.items():
        print('{}: {}'.format(k, np.mean(v)))
    grouped_rj = group_rj_in_ranking_lists_no_pad_trim(all_qids_test_non_g, all_lab_test_non_grouped)
    ideal_rel_j_lists = [np.array(rl)[np.argsort(-np.array(rl))] for rl in grouped_rj]
    # Rescale normalized labels back to integer grades using the smallest
    # non-zero label value (labels were divided by the max grade in read_data).
    all_rjs = np.array(all_rjs) * int(1.0 / sorted(set(all_lab_test_non_grouped))[1])
    ideal_rel_j_lists = np.array(ideal_rel_j_lists) * int(1.0 / sorted(set(all_lab_test_non_grouped))[1])
    print('\nFINAL PERF AVGD ACROSS FOLDS:')
    compute_perf_metrics(all_preds, all_rjs, ideal_rel_j_lists, False, all_rl_lengths, max_rj=2.0)
    create_trec_eval_format_run_qrels(all_preds, all_dids_test, all_qids_test, all_rjs,
                                      'DASALC_{}_loss={}_simulate_labels={}_det_model={}'.format(FLAGS.coll_name,
                                                                                                 FLAGS.loss,
                                                                                                 FLAGS.simulate_labels,
                                                                                                 FLAGS.det_model),
                                      './output')
    return
def create_trec_format_run(qids, dids, preds, ofpath):
    """Write predictions to `ofpath` in TREC run format.

    Each line is `<qid> Q0 <did> <rank> <score> <run_tag>`, with the
    documents of each query ordered by descending predicted score and the
    rank column reflecting that order (1 = best).

    Args:
        qids: query id per ranking list.
        dids: document ids, one sequence per ranking list.
        preds: predicted scores, one sequence per ranking list.
        ofpath: output file path.
    """
    # `with` guarantees the file is closed even on error (original leaked
    # the handle on exceptions).
    with open(ofpath, 'w') as out:
        for rl_idx in range(len(preds)):
            # Fix: sort by *descending* score. The original ascending argsort
            # emitted documents worst-first, and reused the raw document index
            # as the rank column instead of the actual rank position.
            sorted_indices = np.argsort(-np.asarray(preds[rl_idx]))
            for rank, item_idx in enumerate(sorted_indices, start=1):
                run_line = '{} Q0 {} {} {} {}\n'.format(qids[rl_idx], dids[rl_idx][item_idx],
                                                        rank, preds[rl_idx][item_idx], 'PFusion')
                out.write(run_line)
def flatten_stuff_provide_fake_qids(all_preds, all_rjs):
    """Flatten grouped predictions/labels and synthesize one qid per group.

    Returns:
        Three aligned numpy arrays: flat predictions, flat relevance labels,
        and a fake query id (the group index) repeated for every item of
        the corresponding group.
    """
    flat_preds = [p for group in all_preds for p in group]
    flat_labels = [r for group in all_rjs for r in group]
    fake_qids = [gi for gi, group in enumerate(all_preds) for _ in group]
    return np.array(flat_preds), np.array(flat_labels), np.array(fake_qids)
if __name__ == '__main__':
    # Silence TensorFlow's C++ and Python-side logging before anything runs.
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
    logging.getLogger("tensorflow").setLevel(logging.CRITICAL)
    arg_parser = argparse.ArgumentParser()
    add_arguments(arg_parser)
    # parse_known_args tolerates extra CLI flags (e.g. injected by launchers).
    FLAGS, unparsed = arg_parser.parse_known_args()
    for arg in vars(FLAGS):
        print(arg, ":", getattr(FLAGS, arg))
    if not os.path.exists(FLAGS.model_ckpt_path):
        os.makedirs(FLAGS.model_ckpt_path)
    # Seed numpy and TF globally for reproducibility.
    np.random.seed(FLAGS.seed)
    tf.random.set_random_seed(FLAGS.seed)
    run()
    print(FLAGS.loss)
    print('DONE')
| true | true |
f7f61d2a04987f6fe04d2820f1e8691e461e18dd | 101,179 | py | Python | tensorflow/python/keras/engine/training.py | tatatodd/tensorflow | e469ccf50312fef277eb0fe55b04992f7974a35e | [
"Apache-2.0"
] | null | null | null | tensorflow/python/keras/engine/training.py | tatatodd/tensorflow | e469ccf50312fef277eb0fe55b04992f7974a35e | [
"Apache-2.0"
] | null | null | null | tensorflow/python/keras/engine/training.py | tatatodd/tensorflow | e469ccf50312fef277eb0fe55b04992f7974a35e | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training-related part of the Keras engine.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import weakref
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import losses
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import distributed_training_utils
from tensorflow.python.keras.engine import training_arrays
from tensorflow.python.keras.engine import training_distributed
from tensorflow.python.keras.engine import training_eager
from tensorflow.python.keras.engine import training_generator
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.engine.network import Network
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils.generic_utils import slice_arrays
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import optimizer as tf_optimizer_module
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.models.Model', 'keras.Model')
class Model(Network):
"""`Model` groups layers into an object with training and inference features.
There are two ways to instantiate a `Model`:
1 - With the "functional API", where you start from `Input`,
you chain layer calls to specify the model's forward pass,
and finally you create your model from inputs and outputs:
```python
import tensorflow as tf
inputs = tf.keras.Input(shape=(3,))
x = tf.keras.layers.Dense(4, activation=tf.nn.relu)(inputs)
outputs = tf.keras.layers.Dense(5, activation=tf.nn.softmax)(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
```
2 - By subclassing the `Model` class: in that case, you should define your
layers in `__init__` and you should implement the model's forward pass
in `call`.
```python
import tensorflow as tf
class MyModel(tf.keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
def call(self, inputs):
x = self.dense1(inputs)
return self.dense2(x)
model = MyModel()
```
If you subclass `Model`, you can optionally have
a `training` argument (boolean) in `call`, which you can use to specify
a different behavior in training and inference:
```python
import tensorflow as tf
class MyModel(tf.keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
self.dropout = tf.keras.layers.Dropout(0.5)
def call(self, inputs, training=False):
x = self.dense1(inputs)
if training:
x = self.dropout(x, training=training)
return self.dense2(x)
model = MyModel()
```
"""
def __init__(self, *args, **kwargs):
super(Model, self).__init__(*args, **kwargs)
# Create a cache for iterator get_next op.
self._iterator_get_next = weakref.WeakKeyDictionary()
# Create a cache for dataset - uninitialized iterators
self._dataset_iterator_cache = weakref.WeakKeyDictionary()
# initializing _distribution_strategy here since it is possible to call
# predict on a model without compiling it.
self._distribution_strategy = None
# This flag must be disabled upon model mutation, such as changing the model
# layers or recompiling the model to use a different optimizer. New function
# definitions are generated whenever this flag is disabled, ensuring that
# internal graph functions are always using the current model structure.
self._built_graph_functions = False
def _set_sample_weight_attributes(self, sample_weight_mode,
skip_target_weighing_indices):
"""Sets sample weight related attributes on the model."""
sample_weights, sample_weight_modes = training_utils.prepare_sample_weights(
self.output_names, sample_weight_mode, skip_target_weighing_indices)
self.sample_weights = sample_weights
self.sample_weight_modes = sample_weight_modes
self._feed_sample_weight_modes = [
sample_weight_modes[i]
for i in range(len(self.outputs))
if i not in skip_target_weighing_indices
]
self._feed_sample_weights = [
sample_weights[i]
for i in range(len(sample_weights))
if i not in skip_target_weighing_indices
]
def _cache_output_metric_attributes(self, metrics, weighted_metrics):
"""Caches metric name and function attributes for every model output."""
output_shapes = [
None if output is None else output.get_shape().as_list()
for output in self.outputs
]
self._per_output_metrics = training_utils.collect_per_output_metric_info(
metrics, self.output_names, output_shapes, self.loss_functions)
self._per_output_weighted_metrics = \
training_utils.collect_per_output_metric_info(
weighted_metrics, self.output_names, output_shapes,
self.loss_functions, self.sample_weights)
def _add_unique_metric_name(self, metric_name, output_index):
"""Makes the metric name unique and adds it to the model's metric name list.
If there are multiple outputs for which the metrics are calculated, the
metric names have to be made unique by appending an integer.
Arguments:
metric_name: Metric name that corresponds to the metric specified by the
user. For example: 'acc'.
output_index: The index of the model output for which the metric name is
being added.
Returns:
string, name of the model's unique metric name
"""
if len(self.output_names) > 1:
metric_name = '%s_%s' % (self.output_names[output_index], metric_name)
j = 1
base_metric_name = metric_name
while metric_name in self.metrics_names:
metric_name = '%s_%d' % (base_metric_name, j)
j += 1
return metric_name
def _init_metric_attributes(self):
"""Initialized model metric attributes."""
self.metrics_names = ['loss']
self.metrics_tensors = []
self.metrics_updates = []
self.stateful_metric_names = []
self.stateful_metric_functions = []
def _set_per_output_metric_attributes(self, metrics_dict, output_index):
"""Sets the metric attributes on the model for the given output.
Arguments:
metrics_dict: A dict with metric names as keys and metric fns as values.
output_index: The index of the model output for which the metric
attributes are added.
"""
for metric_name, metric_fn in metrics_dict.items():
metric_name = self._add_unique_metric_name(metric_name, output_index)
# Keep track of metric name.
self.metrics_names.append(metric_name)
# Keep track of stateful metric attributes (name and metric function).
if isinstance(metric_fn, base_layer.Layer) and metric_fn.stateful:
self.stateful_metric_names.append(metric_name)
self.stateful_metric_functions.append(metric_fn)
def _set_metric_attributes(self, outputs, skip_target_indices=None):
"""Sets the metric attributes on the model for all the model outputs."""
skip_target_indices = skip_target_indices or []
for i in range(len(outputs)):
if i in skip_target_indices:
continue
self._set_per_output_metric_attributes(self._per_output_metrics[i], i)
self._set_per_output_metric_attributes(
self._per_output_weighted_metrics[i], i)
  def _handle_per_output_metrics(self,
                                 metrics_dict,
                                 y_true,
                                 y_pred,
                                 mask,
                                 weights=None):
    """Calls metric functions for a single output.

    Stateful metrics (`metrics_module.Metric` instances) are invoked directly
    with a combined mask/weight tensor; stateless metric fns are wrapped in a
    weighted-masked objective. In graph mode, result tensors and stateful
    update ops are also recorded on the model.

    Arguments:
      metrics_dict: A dict with metric names as keys and metric fns as values.
      y_true: Target output.
      y_pred: Predicted output.
      mask: Computed mask value for the current output.
      weights: Weights to be applied on the current output.

    Returns:
      A list of metric result tensors.
    """
    metric_results = []
    for metric_name, metric_fn in metrics_dict.items():
      with K.name_scope(metric_name):
        if isinstance(metric_fn, metrics_module.Metric):
          # Call the stateful metric function.
          if mask is not None:
            mask = math_ops.cast(mask, y_pred.dtype)
            # Update weights with mask: the mask is folded into the weights
            # so that masked timesteps contribute zero to the metric.
            if weights is None:
              weights = mask
            else:
              # Update shape of weights if possible before adding mask.
              # Update dimensions of weights to match with mask if possible.
              mask, _, weights = metrics_module.squeeze_or_expand_dimensions(
                  mask, None, weights)
              try:
                # Broadcast weights if possible.
                weights = weights_broadcast_ops.broadcast_weights(weights, mask)
              except ValueError:
                # Broadcasting failed; fall through and multiply anyway.
                pass
                # TODO(psv): Handle case when mask and weight shapes are not
                # compatible.
              weights *= mask
          metric_result = metric_fn(y_true, y_pred, weights)
        else:
          # Call the stateless metric function.
          weighted_metric_fn = training_utils.weighted_masked_objective(
              metric_fn)
          metric_result = weighted_metric_fn(
              y_true, y_pred, weights=weights, mask=mask)
        if not context.executing_eagerly():
          # Keep track of metric result tensor.
          self.metrics_tensors.append(metric_result)
      metric_results.append(metric_result)
      is_stateful = isinstance(metric_fn,
                               base_layer.Layer) and metric_fn.stateful
      if is_stateful and not context.executing_eagerly():
        # Keep track of updates created by stateful metrics.
        self.metrics_updates += metric_fn.updates
    return metric_results
def _handle_metrics(self,
outputs,
skip_target_indices=None,
targets=None,
sample_weights=None,
masks=None):
"""Handles calling metric functions.
Arguments:
outputs: List of outputs (predictions).
skip_target_indices: Optional. List of target ids to skip.
targets: List of targets.
sample_weights: Optional list of sample weight arrays.
masks: List of computed output mask values.
Returns:
A list of metric result tensors.
"""
skip_target_indices = skip_target_indices or []
metric_results = []
with K.name_scope('metrics'):
for i in range(len(outputs)):
if i in skip_target_indices:
continue
output = outputs[i] if outputs else None
target = targets[i] if targets else None
output_mask = masks[i] if masks else None
metric_results.extend(
self._handle_per_output_metrics(self._per_output_metrics[i], target,
output, output_mask))
metric_results.extend(
self._handle_per_output_metrics(
self._per_output_weighted_metrics[i],
target,
output,
output_mask,
weights=sample_weights[i]))
return metric_results
  @checkpointable.no_automatic_dependency_tracking
  def compile(self,
              optimizer,
              loss=None,
              metrics=None,
              loss_weights=None,
              sample_weight_mode=None,
              weighted_metrics=None,
              target_tensors=None,
              distribute=None,
              **kwargs):
    """Configures the model for training.

    Arguments:
        optimizer: String (name of optimizer) or optimizer instance.
            See [optimizers](/api_docs/python/tf/keras/optimizers).
        loss: String (name of objective function) or objective function.
            See [losses](/api_docs/python/tf/losses).
            If the model has multiple outputs, you can use a different loss
            on each output by passing a dictionary or a list of losses.
            The loss value that will be minimized by the model
            will then be the sum of all individual losses.
        metrics: List of metrics to be evaluated by the model
            during training and testing.
            Typically you will use `metrics=['accuracy']`.
            To specify different metrics for different outputs of a
            multi-output model, you could also pass a dictionary,
            such as `metrics={'output_a': 'accuracy'}`.
        loss_weights: Optional list or dictionary specifying scalar
            coefficients (Python floats) to weight the loss contributions
            of different model outputs.
            The loss value that will be minimized by the model
            will then be the *weighted sum* of all individual losses,
            weighted by the `loss_weights` coefficients.
            If a list, it is expected to have a 1:1 mapping
            to the model's outputs. If a tensor, it is expected to map
            output names (strings) to scalar coefficients.
        sample_weight_mode: If you need to do timestep-wise
            sample weighting (2D weights), set this to `"temporal"`.
            `None` defaults to sample-wise weights (1D).
            If the model has multiple outputs, you can use a different
            `sample_weight_mode` on each output by passing a
            dictionary or a list of modes.
        weighted_metrics: List of metrics to be evaluated and weighted
            by sample_weight or class_weight during training and testing.
        target_tensors: By default, Keras will create placeholders for the
            model's target, which will be fed with the target data during
            training. If instead you would like to use your own
            target tensors (in turn, Keras will not expect external
            Numpy data for these targets at training time), you
            can specify them via the `target_tensors` argument. It can be
            a single tensor (for a single-output model), a list of tensors,
            or a dict mapping output names to target tensors.
        distribute: The DistributionStrategy instance that we want to use to
            distribute the training of the model.
        **kwargs: These arguments are passed to `tf.Session.run`.

    Raises:
        ValueError: In case of invalid arguments for
            `optimizer`, `loss`, `metrics` or `sample_weight_mode`.
    """
    # The correct graph function may have changed,
    # already-built ones must be updated
    self._built_graph_functions = False
    # Validate that arguments passed by the user to `compile` are supported by
    # DistributionStrategy.
    if distribute:
      if not isinstance(
          optimizer, (tf_optimizer_module.Optimizer, optimizers.TFOptimizer)):
        raise NotImplementedError(
            'optimizer must be an instance of '
            'tf.train.Optimizer, not a %s' % type(optimizer))
      if context.executing_eagerly():
        raise NotImplementedError('DistributionStrategy is not supported '
                                  'when eager execution is enabled.')
      if sample_weight_mode:
        raise NotImplementedError('sample_weight_mode is not supported with '
                                  'DistributionStrategy.')
      if weighted_metrics:
        raise NotImplementedError('weighted_metrics is not supported with '
                                  'DistributionStrategy.')
      if target_tensors:
        raise ValueError('target_tensors is not supported with '
                         'DistributionStrategy.')
    loss = loss or {}
    if context.executing_eagerly() and not isinstance(
        optimizer, (tf_optimizer_module.Optimizer, optimizers.TFOptimizer)):
      raise ValueError(
          'optimizer must be an instance of tf.train.Optimizer, not '
          'a %s' % type(optimizer))
    self.optimizer = optimizers.get(optimizer)
    # We've disabled automatic dependency tracking for this method, but do want
    # to add a checkpoint dependency on the optimizer if it's checkpointable.
    if isinstance(self.optimizer, checkpointable.CheckpointableBase):
      self._track_checkpointable(
          self.optimizer, name='optimizer', overwrite=True)
    self.loss = loss
    self.metrics = metrics or []
    self.loss_weights = loss_weights
    self.sample_weight_mode = sample_weight_mode
    self.weighted_metrics = weighted_metrics
    if context.executing_eagerly() and target_tensors is not None:
      raise ValueError('target_tensors is not supported in Eager mode.')
    self.target_tensors = target_tensors
    # Set DistributionStrategy specific parameters.
    self._distribution_strategy = distribute
    # Reset the value of grouped_model
    self._grouped_model = None
    if self._distribution_strategy is not None:
      distributed_training_utils.configure_and_create_session(
          self._distribution_strategy)
    if not self.built:
      # Model is not compilable because it does not know its number of inputs
      # and outputs, nor their shapes and names. We will compile after the first
      # time the model gets called on training data.
      return
    self._is_compiled = True
    # Prepare loss functions: normalize the user-supplied `loss` (dict, list,
    # or single value) into one loss fn per model output.
    if isinstance(loss, dict):
      for name in loss:
        if name not in self.output_names:
          raise ValueError(
              'Unknown entry in loss '
              'dictionary: "' + name + '". '
              'Only expected the following keys: ' + str(self.output_names))
      loss_functions = []
      for name in self.output_names:
        if name not in loss:
          logging.warning(
              'Output "' + name +
              '" missing from loss dictionary. We assume '
              'this was done on purpose. The fit and evaluate APIs will not be '
              'expecting any data to be passed to "' + name + '".')
        loss_functions.append(losses.get(loss.get(name)))
    elif isinstance(loss, list):
      if len(loss) != len(self.outputs):
        raise ValueError('When passing a list as loss, '
                         'it should have one entry per model outputs. '
                         'The model has ' + str(len(self.outputs)) +
                         ' outputs, but you passed loss=' + str(loss))
      loss_functions = [losses.get(l) for l in loss]
    else:
      # Single loss applied to every output.
      loss_function = losses.get(loss)
      loss_functions = [loss_function for _ in range(len(self.outputs))]
    self.loss_functions = loss_functions
    weighted_losses = [training_utils.weighted_masked_objective(fn)
                       for fn in loss_functions]
    # Outputs with no loss fn contribute no target/weight placeholders.
    skip_target_indices = []
    skip_target_weighing_indices = []
    self._feed_outputs = []
    self._feed_output_names = []
    self._feed_output_shapes = []
    self._feed_loss_fns = []
    for i in range(len(weighted_losses)):
      if weighted_losses[i] is None:
        skip_target_indices.append(i)
        skip_target_weighing_indices.append(i)
    # Prepare output masks.
    if not context.executing_eagerly():
      masks = [getattr(x, '_keras_mask', None) for x in self.outputs]
      if not isinstance(masks, list):
        masks = [masks]
    # Prepare loss weights: normalize to one scalar per output (default 1.).
    if loss_weights is None:
      loss_weights_list = [1. for _ in range(len(self.outputs))]
    elif isinstance(loss_weights, dict):
      for name in loss_weights:
        if name not in self.output_names:
          raise ValueError(
              'Unknown entry in loss_weights '
              'dictionary: "' + name + '". '
              'Only expected the following keys: ' + str(self.output_names))
      loss_weights_list = []
      for name in self.output_names:
        loss_weights_list.append(loss_weights.get(name, 1.))
    elif isinstance(loss_weights, list):
      if len(loss_weights) != len(self.outputs):
        raise ValueError(
            'When passing a list as loss_weights, '
            'it should have one entry per model output. '
            'The model has ' + str(len(self.outputs)) +
            ' outputs, but you passed loss_weights=' + str(loss_weights))
      loss_weights_list = loss_weights
    else:
      raise TypeError('Could not interpret loss_weights argument: ' +
                      str(loss_weights) + ' - expected a list of dicts.')
    self.loss_weights_list = loss_weights_list
    # Initialize model metric attributes.
    self._init_metric_attributes()
    # Initialization for Eager mode execution: no placeholders or symbolic
    # total loss are built; compilation ends early here.
    if context.executing_eagerly():
      # Prepare sample weights.
      self._set_sample_weight_attributes(sample_weight_mode,
                                         skip_target_weighing_indices)
      # Save all metric attributes per output of the model.
      self._cache_output_metric_attributes(metrics, weighted_metrics)
      if target_tensors is not None:
        raise ValueError('target_tensors are not currently supported in Eager '
                         'mode.')
      self.total_loss = None
      for i in range(len(self.outputs)):
        if len(self.outputs) > 1:
          self.metrics_names.append(self.output_names[i] + '_loss')
      # Set metric attributes on model.
      self._set_metric_attributes(
          self.outputs,
          skip_target_indices=skip_target_indices,
      )
      self.targets = []
      for i in range(len(self.outputs)):
        self._feed_output_names.append(self.output_names[i])
      self._collected_trainable_weights = self.trainable_weights
      return
    # Graph mode from here on.
    # Prepare targets of model: normalize user-supplied `target_tensors`
    # (list/dict/single tensor) into one entry per output.
    self.targets = []
    self._feed_targets = []
    if target_tensors not in (None, []):
      if isinstance(target_tensors, list):
        if len(target_tensors) != len(self.outputs):
          raise ValueError(
              'When passing a list as `target_tensors`, '
              'it should have one entry per model output. '
              'The model has ' + str(len(self.outputs)) +
              ' outputs, but you passed target_tensors=' + str(target_tensors))
      elif isinstance(target_tensors, dict):
        for name in target_tensors:
          if name not in self.output_names:
            raise ValueError(
                'Unknown entry in `target_tensors` '
                'dictionary: "' + name + '". '
                'Only expected the following keys: ' + str(self.output_names))
        tmp_target_tensors = []
        for name in self.output_names:
          tmp_target_tensors.append(target_tensors.get(name, None))
        target_tensors = tmp_target_tensors
      elif tensor_util.is_tensor(target_tensors):
        target_tensors = [target_tensors]
      else:
        raise TypeError('Expected `target_tensors` to be a list or tuple or '
                        'dict or a single tensor, but got:', target_tensors)
    for i in range(len(self.outputs)):
      if i in skip_target_indices:
        self.targets.append(None)
      else:
        shape = K.int_shape(self.outputs[i])
        name = self.output_names[i]
        if target_tensors not in (None, []):
          target = target_tensors[i]
        else:
          target = None
        if target is None or K.is_placeholder(target):
          if target is None:
            # Create a placeholder to be fed with Numpy target data.
            target = K.placeholder(
                ndim=len(shape),
                name=name + '_target',
                sparse=K.is_sparse(self.outputs[i]),
                dtype=K.dtype(self.outputs[i]))
          self._feed_targets.append(target)
          self._feed_outputs.append(self.outputs[i])
          self._feed_output_names.append(name)
          self._feed_output_shapes.append(shape)
          self._feed_loss_fns.append(self.loss_functions[i])
        else:
          # Non-placeholder target tensor: no external data will be fed,
          # so sample weighting for this output is skipped too.
          skip_target_weighing_indices.append(i)
        self.targets.append(target)
    # Prepare sample weights.
    self._set_sample_weight_attributes(sample_weight_mode,
                                       skip_target_weighing_indices)
    # Save all metric attributes per output of the model.
    self._cache_output_metric_attributes(metrics, weighted_metrics)
    # Compute total loss: weighted sum of the per-output losses plus any
    # regularization/layer-specific loss terms.
    total_loss = None
    with K.name_scope('loss'):
      for i in range(len(self.outputs)):
        if i in skip_target_indices:
          continue
        y_true = self.targets[i]
        y_pred = self.outputs[i]
        weighted_loss = weighted_losses[i]
        sample_weight = self.sample_weights[i]
        mask = masks[i]
        loss_weight = loss_weights_list[i]
        with K.name_scope(self.output_names[i] + '_loss'):
          output_loss = weighted_loss(y_true, y_pred, sample_weight, mask)
        if len(self.outputs) > 1:
          self.metrics_tensors.append(output_loss)
          self.metrics_names.append(self.output_names[i] + '_loss')
        if total_loss is None:
          total_loss = loss_weight * output_loss
        else:
          total_loss += loss_weight * output_loss
      if total_loss is None:
        if not self.losses:
          raise ValueError('The model cannot be compiled '
                           'because it has no loss to optimize.')
        else:
          total_loss = 0.
      # Add regularization penalties
      # and other layer-specific losses.
      for loss_tensor in self.losses:
        total_loss += loss_tensor
    # Set metric attributes on model.
    self._set_metric_attributes(
        self.outputs,
        skip_target_indices=skip_target_indices,
    )
    # Invoke metric functions for all the outputs.
    self._handle_metrics(
        self.outputs,
        masks=masks,
        targets=self.targets,
        skip_target_indices=skip_target_indices,
        sample_weights=self.sample_weights)
    # Prepare gradient updates and state updates.
    self.total_loss = total_loss
    # Functions for train, test and predict will
    # be compiled lazily when required.
    # This saves time when the user is not using all functions.
    self._function_kwargs = kwargs
    self.train_function = None
    self.test_function = None
    self.predict_function = None
    # Collected trainable weights, sorted in topological order.
    trainable_weights = self.trainable_weights
    self._collected_trainable_weights = trainable_weights
def _check_trainable_weights_consistency(self):
"""Check trainable weights count consistency.
This will raise a warning if `trainable_weights` and
`_collected_trainable_weights` are inconsistent (i.e. have different
number of parameters).
Inconsistency will typically arise when one modifies `model.trainable`
without calling `model.compile` again.
"""
if not hasattr(self, '_collected_trainable_weights'):
return
if len(self.trainable_weights) != len(self._collected_trainable_weights):
logging.log_first_n(
logging.WARN, 'Discrepancy between trainable weights and collected'
' trainable weights, did you set `model.trainable`'
' without calling `model.compile` after ?', 1)
  def _make_train_function(self):
    """Lazily builds the backend function that performs one training step.

    The function takes (inputs + targets + sample weights [+ learning phase])
    and returns [total_loss] + metric tensors, applying optimizer, model and
    stateful-metric updates on every call.
    """
    if not hasattr(self, 'train_function'):
      raise RuntimeError('You must compile your model before using it.')
    self._check_trainable_weights_consistency()
    if self.train_function is None:
      inputs = (self._feed_inputs +
                self._feed_targets +
                self._feed_sample_weights)
      # Feed the learning phase only if it is a symbolic tensor (not fixed
      # to an int for the whole session).
      if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
        inputs += [K.learning_phase()]
      with K.name_scope('training'):
        with K.name_scope(self.optimizer.__class__.__name__):
          # Training updates
          updates = self.optimizer.get_updates(
              params=self._collected_trainable_weights, loss=self.total_loss)
        # Unconditional updates
        updates += self.get_updates_for(None)
        # Conditional updates relevant to this model
        updates += self.get_updates_for(self.inputs)
        # Stateful metrics updates
        updates += self.metrics_updates
        # Gets loss and metrics. Updates weights at each call.
        self.train_function = K.function(
            inputs, [self.total_loss] + self.metrics_tensors,
            updates=updates,
            name='train_function',
            **self._function_kwargs)
def _make_test_function(self):
if not hasattr(self, 'test_function'):
raise RuntimeError('You must compile your model before using it.')
if self.test_function is None:
inputs = (self._feed_inputs +
self._feed_targets +
self._feed_sample_weights)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
# Return loss and metrics, no gradient updates.
# Does update the network states.
self.test_function = K.function(
inputs, [self.total_loss] + self.metrics_tensors,
updates=self.state_updates + self.metrics_updates,
name='test_function',
**self._function_kwargs)
def _make_predict_function(self):
if not hasattr(self, 'predict_function'):
self.predict_function = None
if self.predict_function is None:
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs = self._feed_inputs + [K.learning_phase()]
else:
inputs = self._feed_inputs
# Gets network outputs. Does not update weights.
# Does update the network states.
kwargs = getattr(self, '_function_kwargs', {})
self.predict_function = K.function(
inputs,
self.outputs,
updates=self.state_updates,
name='predict_function',
**kwargs)
def _get_iterator_get_next_tensors(self, iterator):
get_next_op = self._iterator_get_next.get(iterator, None)
if get_next_op is None:
get_next_op = iterator.get_next()
self._iterator_get_next[iterator] = get_next_op
return get_next_op
def _distribution_standardize_user_data(self,
x,
y=None,
sample_weight=None,
class_weight=None,
batch_size=None,
check_steps=False,
steps_name='steps',
steps=None,
validation_split=0,
shuffle=False):
"""Runs validation checks on input and target data passed by the user.
This is called when using DistributionStrategy to train, evaluate or serve
the model.
Args:
x: Input data. A numpy array or `tf.data` dataset.
y: Target data. A numpy array or None if x is a `tf.data` dataset.
sample_weight: An optional sample-weight array passed by the user to
weight the importance of each sample in `x`.
class_weight: An optional class-weight array by the user to
weight the importance of samples in `x` based on the class they belong
to, as conveyed by `y`.
batch_size: Integer batch size. If provided, it is used to run additional
validation checks on stateful models.
check_steps: boolean, True if we want to check for validity of `steps` and
False, otherwise.
steps_name: The public API's parameter name for `steps`.
steps: Integer or `None`. Total number of steps (batches of samples) to
execute.
validation_split: Float between 0 and 1.
Fraction of the training data to be used as validation data.
shuffle: Boolean whether to shuffle the training data before each epoch.
Returns:
Iterator for reading the dataset `x`.
Raises:
ValueError: In case of invalid user-provided data.
RuntimeError: If the model was never compiled.
"""
if class_weight:
raise NotImplementedError('`class_weight` is currently not supported '
'when using DistributionStrategy.')
if (sample_weight is not None and sample_weight.all() and
self._distribution_strategy.__class__.__name__ == 'TPUStrategy'):
raise NotImplementedError('`sample_weight` is currently not supported '
'when using TPUStrategy.')
# Validates `steps` argument right at the beginning since we use it to
# construct the dataset object.
# TODO(anjalisridhar): Remove this check once we refactor the
# _standardize_user_data code path. This check is already present elsewhere
# in the codebase.
if check_steps and isinstance(x, dataset_ops.Dataset) and steps is None:
raise ValueError('When using Datasets as input, '
'you should specify the `{steps_name}` argument.'
.format(steps_name=steps_name))
first_x_value = nest.flatten(x)[0]
if isinstance(first_x_value, np.ndarray):
assert steps is not None
x_shape = first_x_value.shape
if batch_size is None:
batch_size = distributed_training_utils.get_batch_size(
self._distribution_strategy.num_replicas, x_shape[0], steps)
# We need to use the drop_remainder argument to allow for a static
# input shape which is required for TPUs.
drop_remainder = self._distribution_strategy.require_static_shapes
if y is not None:
var_x = distributed_training_utils.get_var_for_numpy(
self._distribution_strategy, x)
var_y = distributed_training_utils.get_var_for_numpy(
self._distribution_strategy, y)
if sample_weight is not None:
var_sample_weights = distributed_training_utils.get_var_for_numpy(
self._distribution_strategy, sample_weight)
x = dataset_ops.Dataset.from_tensor_slices((var_x, var_y,
var_sample_weights))
else:
x = dataset_ops.Dataset.from_tensor_slices((var_x, var_y))
x = dataset_ops.Dataset.from_tensor_slices((var_x, var_y))
if shuffle:
# 1024 is a good buffer size since it is much larger than the average
# batch size provided by the user and provides sufficient randomness.
# One thing to keep in mind is the memory usage based on the size of
# each sample.
x = x.shuffle(1024)
x = x.repeat()
x = x.batch(batch_size, drop_remainder=drop_remainder)
y = None
sample_weight = None
else:
# This case is for the predict call where the dataset only contains
# inputs and no targets, i.e. it does not return a tuple
var_x = distributed_training_utils.get_var_for_numpy(
self._distribution_strategy, x)
x = dataset_ops.Dataset.from_tensor_slices(var_x)
x = x.repeat()
x = x.batch(batch_size, drop_remainder=drop_remainder)
assert isinstance(x, dataset_ops.Dataset)
# TODO(anjalisridhar): We want distribute_dataset() to accept a Dataset or a
# function which returns a Dataset. Currently distribute_dataset() only
# accepts a function that returns a Dataset. Once we add support for being
# able to clone a Dataset on multiple workers we can remove this lambda.
result = self._distribution_strategy.distribute_dataset(lambda: x)
iterator = result.make_initializable_iterator()
with self._distribution_strategy.scope():
K.get_session().run(iterator.initializer)
training_utils.validate_iterator_input(x, y, sample_weight,
validation_split)
return iterator
  def _standardize_user_data(self,
                             x,
                             y=None,
                             sample_weight=None,
                             class_weight=None,
                             batch_size=None,
                             check_steps=False,
                             steps_name='steps',
                             steps=None,
                             validation_split=0,
                             shuffle=False):
    """Runs validation checks on input and target data passed by the user.

    Also standardizes the data to lists of arrays, in order.

    Also builds and compiles the model on the fly if it is a subclassed model
    that has never been called before (and thus has no inputs/outputs).

    This is a purely internal method, subject to refactoring at any time.

    Args:
      x: Input data. It could be:
        - A Numpy array (or array-like), or a list of arrays
          (in case the model has multiple inputs).
        - A TensorFlow tensor, or a list of tensors
          (in case the model has multiple inputs).
        - A dict mapping input names to the corresponding array/tensors,
          if the model has named inputs.
        - A `tf.data` dataset or a dataset iterator.
      y: Target data. Like the input data `x`,
        it could be either Numpy array(s) or TensorFlow tensor(s).
        It should be consistent with `x` (you cannot have Numpy inputs and
        tensor targets, or inversely). If `x` is a dataset or a
        dataset iterator, `y` should not be specified
        (since targets will be obtained from the iterator).
      sample_weight: An optional sample-weight array passed by the user to
        weight the importance of each sample in `x`.
      class_weight: An optional class-weight array by the user to
        weight the importance of samples in `x` based on the class they belong
        to, as conveyed by `y`.
      batch_size: Integer batch size. If provided, it is used to run additional
        validation checks on stateful models.
      check_steps: boolean, True if we want to check for validity of `steps`
        and False, otherwise. For example, when we are standardizing one batch
        of data for train_on_batch/predict_on_batch/test_on_batch APIs, `steps`
        value is not required and we should not check for its validity in these
        cases.
      steps_name: The public API's parameter name for `steps`.
      steps: Integer or `None`. Total number of steps (batches of samples) to
        execute.
      validation_split: Float between 0 and 1.
        Fraction of the training data to be used as validation data.
      shuffle: Boolean whether to shuffle the training data before each epoch.

    Returns:
      A tuple of 3: inputs (arrays or dicts, depending on whether `x` was a
      dict or not), target arrays, sample-weight arrays.
      If the model's input and targets are symbolic, these lists are empty
      (since the model takes no user-provided data, instead the data comes
      from the symbolic inputs/targets).

    Raises:
      ValueError: In case of invalid user-provided data.
      RuntimeError: If the model was never compiled.
    """
    # DistributionStrategy path: delegate entirely; callers receive an
    # iterator in the `x` slot and no separate targets/weights.
    if self._distribution_strategy:
      iterator = self._distribution_standardize_user_data(
          x,
          y,
          sample_weight=sample_weight,
          class_weight=class_weight,
          batch_size=batch_size,
          check_steps=check_steps,
          steps_name=steps_name,
          steps=steps,
          validation_split=validation_split,
          shuffle=shuffle)
      return iterator, None, None
    if isinstance(x, dataset_ops.Dataset):
      if context.executing_eagerly():
        x = x.make_one_shot_iterator()
      else:
        # Graph mode: reuse the cached iterator for this dataset if one
        # exists, otherwise create and cache a new initializable iterator.
        if x in self._dataset_iterator_cache:
          x = self._dataset_iterator_cache[x]
        else:
          iterator = x.make_initializable_iterator()
          self._dataset_iterator_cache[x] = iterator
          x = iterator
        K.get_session().run(x.initializer)
    # Validates `steps` argument based on x's type.
    if check_steps:
      training_utils.check_steps_argument(x, steps, steps_name)
    is_x_eager_iterator = isinstance(x, iterator_ops.EagerIterator)
    is_x_iterator = isinstance(x, iterator_ops.Iterator)
    # Validate user inputs when data is given as a dataset or dataset iterator.
    if is_x_iterator or is_x_eager_iterator:
      training_utils.validate_iterator_input(x, y, sample_weight,
                                             validation_split)
    # For eager iterators, when we have to process multiple batches of samples,
    # we will standardize the data when we actually loop over iterator and get
    # the batches. For now, we just return the iterator as is.
    if is_x_eager_iterator and steps is not None:
      return x, y, sample_weight
    # If input data is a dataset iterator in graph mode or if it is an eager
    # iterator and only one batch of samples is required, we fetch the data
    # tensors from the iterator and then standardize them.
    if is_x_iterator or is_x_eager_iterator:
      try:
        if is_x_iterator:
          next_element = self._get_iterator_get_next_tensors(x)
        else:
          next_element = x.get_next()
      except errors.OutOfRangeError:
        raise RuntimeError('Your dataset iterator ran out of data; '
                           'Make sure that your dataset can generate '
                           'required number of samples.')
      if isinstance(next_element, (list, tuple)):
        if len(next_element) not in [2, 3]:
          raise ValueError(
              'Please provide model inputs as a list or tuple of 2 or 3'
              'elements: (input, target) or (input, target, sample_weights)'
              'Received %s' % next_element)
        if len(next_element) == 2:
          x, y = next_element
        else:
          x, y, sample_weight = next_element
      else:
        x = next_element
    x, y, sample_weights = self._standardize_weights(x, y, sample_weight,
                                                     class_weight, batch_size)
    return x, y, sample_weights
  def _standardize_weights(self, x, y, sample_weight=None, class_weight=None,
                           batch_size=None,):
    """Standardize `x`/`y` into lists of arrays and build per-sample weights.

    Also builds and/or compiles the model on the fly when it has no inputs
    yet (e.g. subclassed models), inferring input/target specs from `x`/`y`.

    Args:
      x: Input data: array(s), dict of arrays, or symbolic tensor(s).
      y: Target data, or None when no targets are needed.
      sample_weight: Optional per-sample weight array(s). If `class_weight`
        is also passed, `class_weight` is ignored (a warning is logged).
      class_weight: Optional dict mapping class indices to loss weights.
      batch_size: Optional int; only used here to validate that stateful
        networks receive a sample count divisible by the batch size.

    Returns:
      Tuple `(x, y, sample_weights)` where `x`/`y` are lists of arrays (or
      `x` is a dict when dict inputs were provided) and `sample_weights` is
      a list aligned with the model outputs. Returns `([], [], [])` when
      symbolic inputs/targets were just wired directly into the graph.

    Raises:
      RuntimeError: If targets are given but the model was never compiled.
      ValueError: On mixed Numpy/tensor inputs, dict targets, or stateful
        networks whose sample count is not a multiple of `batch_size`.
    """
    # TODO(sourabhbajaj): Split input validation from weight standardization.
    if sample_weight is not None and class_weight is not None:
      logging.warning(
          'Received both a `sample_weight` and `class_weight` argument. '
          'The `class_weight` argument will be ignored.')
    # First, we build/compile the model on the fly if necessary.
    all_inputs = []
    is_build_called = False
    is_compile_called = False
    dict_inputs = False
    if not self.inputs:
      # We need to use `x` to set the model inputs.
      # We type-check that `x` and `y` are either single arrays
      # or lists of arrays.
      if isinstance(x, (list, tuple)):
        if not all(isinstance(v, np.ndarray) or
                   tensor_util.is_tensor(v) for v in x):
          raise ValueError('Please provide as model inputs either a single '
                           'array or a list of arrays. You passed: x=' + str(x))
        all_inputs += list(x)
      elif isinstance(x, dict):
        # Dict inputs are flattened in sorted-key order; remembered so the
        # return value can be re-packed as a dict at the end.
        dict_inputs = True
        keys = sorted(x.keys())
        all_inputs = [x[k] for k in keys]
      else:
        if not isinstance(x, np.ndarray) and not tensor_util.is_tensor(x):
          raise ValueError('Please provide as model inputs either a single '
                           'array or a list of arrays. You passed: x=' + str(x))
        all_inputs.append(x)
      # Build the model using the retrieved inputs (value or symbolic).
      # If values, then in symbolic-mode placeholders will be created
      # to match the value shapes.
      if not self.inputs:
        is_build_called = True
        self._set_inputs(x)
    else:
      dict_inputs = isinstance(self.inputs, dict)
    if y is not None:
      if not self.optimizer:
        raise RuntimeError('You must compile a model before '
                           'training/testing. '
                           'Use `model.compile(optimizer, loss)`.')
      if not self._is_compiled:
        # On-the-fly compilation of the model.
        # We need to use `y` to set the model targets.
        if isinstance(y, (list, tuple)):
          if not all(isinstance(v, np.ndarray) or
                     tensor_util.is_tensor(v) for v in y):
            raise ValueError('Please provide as model targets either a single '
                             'array or a list of arrays. '
                             'You passed: y=' + str(y))
          all_inputs += list(y)
        elif isinstance(y, dict):
          raise ValueError('Please do not pass a dictionary as model targets.')
        else:
          if not isinstance(y, np.ndarray) and not tensor_util.is_tensor(y):
            raise ValueError('Please provide as model targets either a single '
                             'array or a list of arrays. '
                             'You passed: y=' + str(y))
          all_inputs.append(y)
        # Typecheck that all inputs are *either* value *or* symbolic.
        # TODO(fchollet): this check could be removed in Eager mode?
        if any(tensor_util.is_tensor(v) for v in all_inputs):
          if not all(tensor_util.is_tensor(v) for v in all_inputs):
            raise ValueError('Do not pass inputs that mix Numpy arrays and '
                             'TensorFlow tensors. '
                             'You passed: x=' + str(x) + '; y=' + str(y))
        if context.executing_eagerly():
          target_tensors = None
        else:
          # Handle target tensors if any passed.
          if not isinstance(y, (list, tuple)):
            y = [y]
          target_tensors = [v for v in y if tensor_util.is_tensor(v)]
        is_compile_called = True
        self.compile(optimizer=self.optimizer,
                     loss=self.loss,
                     metrics=self.metrics,
                     loss_weights=self.loss_weights,
                     target_tensors=target_tensors)
    # In graph mode, if we had just set inputs and targets as symbolic tensors
    # by invoking build and compile on the model respectively, we do not have to
    # feed anything to the model. Model already has input and target data as
    # part of the graph.
    # Note: in this case, `any` and `all` are equivalent since we disallow
    # mixed symbolic/value inputs.
    if (not context.executing_eagerly() and is_build_called and
        is_compile_called and
        any(tensor_util.is_tensor(v) for v in all_inputs)):
      return [], [], []
    # What follows is input validation and standardization to list format,
    # in the case where all inputs are value arrays.
    if context.executing_eagerly():
      # In eager mode, do not do shape validation
      # since the network has no input nodes (placeholders) to be fed.
      feed_input_names = self.input_names
      feed_input_shapes = None
    elif not self._is_graph_network:
      # Case: symbolic-mode subclassed network. Do not do shape validation.
      feed_input_names = self._feed_input_names
      feed_input_shapes = None
    else:
      # Case: symbolic-mode graph network.
      # In this case, we run extensive shape validation checks.
      feed_input_names = self._feed_input_names
      feed_input_shapes = self._feed_input_shapes
    # Standardize the inputs.
    x = training_utils.standardize_input_data(
        x,
        feed_input_names,
        feed_input_shapes,
        check_batch_axis=False,  # Don't enforce the batch size.
        exception_prefix='input')
    if y is not None:
      if not self._is_graph_network:
        feed_output_names = self._feed_output_names
        feed_output_shapes = None
        # Sample weighting not supported in this case.
        # TODO(fchollet): consider supporting it.
        feed_sample_weight_modes = [None for _ in self.outputs]
      else:
        feed_output_names = self._feed_output_names
        feed_sample_weight_modes = self._feed_sample_weight_modes
        feed_output_shapes = []
        for output_shape, loss_fn in zip(self._feed_output_shapes,
                                         self._feed_loss_fns):
          if loss_fn is losses.sparse_categorical_crossentropy:
            # Sparse targets have a single class index in the channel axis,
            # so the expected feed shape differs from the output shape.
            if K.image_data_format() == 'channels_first':
              feed_output_shapes.append(
                  (output_shape[0], 1) + output_shape[2:])
            else:
              feed_output_shapes.append(output_shape[:-1] + (1,))
          elif (not hasattr(loss_fn, '__name__') or
                getattr(losses, loss_fn.__name__, None) is None):
            # If `loss_fn` is not a function (e.g. callable class)
            # or if it not in the `losses` module, then
            # it is a user-defined loss and we make no assumptions
            # about it.
            feed_output_shapes.append(None)
          else:
            feed_output_shapes.append(output_shape)
      # Standardize the outputs.
      y = training_utils.standardize_input_data(
          y,
          feed_output_names,
          feed_output_shapes,
          check_batch_axis=False,  # Don't enforce the batch size.
          exception_prefix='target')
      # Generate sample-wise weight values given the `sample_weight` and
      # `class_weight` arguments.
      sample_weights = training_utils.standardize_sample_weights(
          sample_weight, feed_output_names)
      class_weights = training_utils.standardize_class_weights(
          class_weight, feed_output_names)
      sample_weights = [
          training_utils.standardize_weights(ref, sw, cw, mode)
          for (ref, sw, cw, mode) in zip(y, sample_weights, class_weights,
                                         feed_sample_weight_modes)
      ]
      # Check that all arrays have the same length.
      if not self._distribution_strategy:
        training_utils.check_array_lengths(x, y, sample_weights)
        if self._is_graph_network and not context.executing_eagerly():
          # Additional checks to avoid users mistakenly using improper loss fns.
          training_utils.check_loss_and_target_compatibility(
              y, self._feed_loss_fns, feed_output_shapes)
    else:
      y = []
      sample_weights = []
    if self.stateful and batch_size:
      # Check that for stateful networks, number of samples is a multiple
      # of the static batch size.
      if x[0].shape[0] % batch_size != 0:
        raise ValueError('In a stateful network, '
                         'you should only pass inputs with '
                         'a number of samples that can be '
                         'divided by the batch size. Found: ' +
                         str(x[0].shape[0]) + ' samples')
    # If dictionary inputs were provided, we return a dictionary as well.
    if dict_inputs:
      x = dict(zip(feed_input_names, x))
    return x, y, sample_weights
  @checkpointable.no_automatic_dependency_tracking
  def _set_inputs(self, inputs, outputs=None, training=None):
    """Set model's input and output specs based on the input data received.

    This is to be used for Model subclasses, which do not know at instantiation
    time what their inputs look like.

    Args:
      inputs: Single array, or list of arrays. The arrays could be placeholders,
        Numpy arrays, or data tensors.
        - if placeholders: the model is built on top of these placeholders,
          and we expect Numpy data to be fed for them when calling `fit`/etc.
        - if Numpy data: we create placeholders matching the shape of the Numpy
          arrays. We expect Numpy data to be fed for these placeholders
          when calling `fit`/etc.
        - if data tensors: the model is built on top of these tensors.
          We do not expect any Numpy data to be provided when calling `fit`/etc.
      outputs: None, a data tensor, or a list of tensors. If None, the
        outputs will be determined by invoking `self.call()`, otherwise the
        provided value will be used.
      training: Boolean or None. Only relevant in symbolic mode. Specifies
        whether to build the model's graph in inference mode (False), training
        mode (True), or using the Keras learning phase (None).

    Raises:
      ValueError: If dict inputs are passed to a Sequential Model where the
        first layer isn't FeatureLayer, or if inputs were already set.
    """
    if self.inputs:
      raise ValueError('Model inputs are already set.')
    # Sequential models are built eagerly here from the inferred input shape;
    # the string comparison avoids importing Sequential (circular import).
    if self.__class__.__name__ == 'Sequential' and not self.built:
      if tensor_util.is_tensor(inputs):
        input_shape = (None,) + tuple(inputs.get_shape().as_list()[1:])
        self.build(input_shape=input_shape)
      elif isinstance(inputs, dict):
        # We assert that the first layer is a FeatureLayer.
        if not training_utils.is_feature_layer(self.layers[0]):
          raise ValueError('Passing a dictionary input to a Sequential Model '
                           'which doesnt have FeatureLayer as the first layer '
                           'is an error')
        input_shape = (None,)
        self.build(input_shape=input_shape)
      else:
        input_shape = (None,) + inputs.shape[1:]
        self.build(input_shape=input_shape)
    # On-the-fly setting of symbolic model inputs (either by using the tensor
    # provided, or by creating a placeholder if Numpy data was provided).
    model_inputs = training_utils.ModelInputs(inputs)
    inputs = model_inputs.get_symbolic_inputs()
    self.inputs = model_inputs.get_symbolic_inputs(return_single_as_list=True)
    self.input_names = model_inputs.get_input_names()
    self._feed_inputs = []
    self._feed_input_names = []
    self._feed_input_shapes = []
    # Only placeholder inputs are feedable; data tensors are already bound.
    # NOTE(review): assumes `ModelInputs.as_dict()` yields (name, value)
    # pairs — confirm against training_utils.ModelInputs.
    for k, v in model_inputs.as_dict():
      if K.is_placeholder(v):
        self._feed_inputs.append(v)
        self._feed_input_names.append(k)
        self._feed_input_shapes.append(K.int_shape(v))
    if outputs is None:
      # Obtain symbolic outputs by calling the model.
      graph = K.get_graph()
      with graph.as_default():
        if self._expects_training_arg:
          outputs = self.call(inputs, training=training)
        else:
          outputs = self.call(inputs)
      outputs = nest.flatten(outputs)
    self.outputs = outputs
    self.output_names = [
        'output_%d' % (i + 1) for i in range(len(self.outputs))]
    self.built = True
def fit(self,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose=1,
callbacks=None,
validation_split=0.,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
**kwargs):
"""Trains the model for a fixed number of epochs (iterations on a dataset).
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset or a dataset iterator. Should return a tuple
of either `(inputs, targets)` or
`(inputs, targets, sample_weights)`.
- A generator or `keras.utils.Sequence` returning `(inputs, targets)`
or `(inputs, targets, sample weights)`.
y: Target data. Like the input data `x`,
it could be either Numpy array(s) or TensorFlow tensor(s).
It should be consistent with `x` (you cannot have Numpy inputs and
tensor targets, or inversely). If `x` is a dataset, dataset
iterator, generator, or `keras.utils.Sequence` instance, `y` should
not be specified (since targets will be obtained from `x`).
batch_size: Integer or `None`.
Number of samples per gradient update.
If unspecified, `batch_size` will default to 32.
Do not specify the `batch_size` if your data is in the
form of symbolic tensors, dataset, dataset iterators,
generators, or `keras.utils.Sequence` instances (since they generate
batches).
epochs: Integer. Number of epochs to train the model.
An epoch is an iteration over the entire `x` and `y`
data provided.
Note that in conjunction with `initial_epoch`,
`epochs` is to be understood as "final epoch".
The model is not trained for a number of iterations
given by `epochs`, but merely until the epoch
of index `epochs` is reached.
verbose: Integer. 0, 1, or 2. Verbosity mode.
0 = silent, 1 = progress bar, 2 = one line per epoch.
callbacks: List of `keras.callbacks.Callback` instances.
List of callbacks to apply during training.
See [callbacks](/api_docs/python/tf/keras/callbacks).
validation_split: Float between 0 and 1.
Fraction of the training data to be used as validation data.
The model will set apart this fraction of the training data,
will not train on it, and will evaluate
the loss and any model metrics
on this data at the end of each epoch.
The validation data is selected from the last samples
in the `x` and `y` data provided, before shuffling. This argument is
not supported when `x` is a dataset, dataset iterator, generator or
`keras.utils.Sequence` instance.
validation_data: Data on which to evaluate
the loss and any model metrics at the end of each epoch.
The model will not be trained on this data.
`validation_data` will override `validation_split`.
`validation_data` could be:
- tuple `(x_val, y_val)` of Numpy arrays or tensors
- tuple `(x_val, y_val, val_sample_weights)` of Numpy arrays
- dataset or a dataset iterator
For the first two cases, `batch_size` must be provided.
For the last case, `validation_steps` must be provided.
shuffle: Boolean (whether to shuffle the training data
before each epoch) or str (for 'batch').
'batch' is a special option for dealing with the
limitations of HDF5 data; it shuffles in batch-sized chunks.
Has no effect when `steps_per_epoch` is not `None`.
class_weight: Optional dictionary mapping class indices (integers)
to a weight (float) value, used for weighting the loss function
(during training only).
This can be useful to tell the model to
"pay more attention" to samples from
an under-represented class.
sample_weight: Optional Numpy array of weights for
the training samples, used for weighting the loss function
(during training only). You can either pass a flat (1D)
Numpy array with the same length as the input samples
(1:1 mapping between weights and samples),
or in the case of temporal data,
you can pass a 2D array with shape
`(samples, sequence_length)`,
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
`sample_weight_mode="temporal"` in `compile()`. This argument is not
supported when `x` is a dataset, dataset iterator, generator, or
`keras.utils.Sequence` instance, instead provide the sample_weights
as the third element of `x`.
initial_epoch: Integer.
Epoch at which to start training
(useful for resuming a previous training run).
steps_per_epoch: Integer or `None`.
Total number of steps (batches of samples)
before declaring one epoch finished and starting the
next epoch. When training with input tensors such as
TensorFlow data tensors, the default `None` is equal to
the number of samples in your dataset divided by
the batch size, or 1 if that cannot be determined.
validation_steps: Only relevant if `validation_data` is provided and
is a dataset or dataset iterator. Total number of steps (batches of
samples) to draw before stopping when performing validation
at the end of every epoch.
max_queue_size: Integer. Used for generator or `keras.utils.Sequence`
input only. Maximum size for the generator queue.
If unspecified, `max_queue_size` will default to 10.
workers: Integer. Used for generator or `keras.utils.Sequence` input
only. Maximum number of processes to spin up
when using process-based threading. If unspecified, `workers`
will default to 1. If 0, will execute the generator on the main
thread.
use_multiprocessing: Boolean. Used for generator or
`keras.utils.Sequence` input only. If `True`, use process-based
threading. If unspecified, `use_multiprocessing` will default to
`False`. Note that because this implementation relies on
multiprocessing, you should not pass non-picklable arguments to
the generator as they can't be passed easily to children processes.
**kwargs: Used for backwards compatibility.
Returns:
A `History` object. Its `History.history` attribute is
a record of training loss values and metrics values
at successive epochs, as well as validation loss values
and validation metrics values (if applicable).
Raises:
RuntimeError: If the model was never compiled.
ValueError: In case of mismatch between the provided input data
and what the model expects.
"""
# TODO(fchollet): this method may be creating reference cycles, which would
# lead to accumulating garbage in memory when called in a loop. Investigate.
if data_utils.is_generator_or_sequence(x):
training_utils.check_generator_arguments(y, sample_weight)
return self.fit_generator(
x,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
validation_data=validation_data,
validation_steps=validation_steps,
class_weight=class_weight,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
shuffle=shuffle,
initial_epoch=initial_epoch)
# Backwards compatibility
if batch_size is None and steps_per_epoch is None:
batch_size = 32
# Legacy support
if 'nb_epoch' in kwargs:
logging.warning(
'The `nb_epoch` argument in `fit` '
'has been renamed `epochs`.')
epochs = kwargs.pop('nb_epoch')
if kwargs:
raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))
# Validate and standardize user data.
if self._distribution_strategy:
distributed_training_utils.validate_callbacks(callbacks)
distributed_training_utils.validate_inputs(
x, y, self._distribution_strategy)
first_x_value = nest.flatten(x)[0]
if not steps_per_epoch and isinstance(first_x_value, np.ndarray):
steps_per_epoch = distributed_training_utils.get_input_batch_params(
first_x_value, batch_size, self._distribution_strategy)
x, y, sample_weights = self._standardize_user_data(
x,
y,
sample_weight=sample_weight,
class_weight=class_weight,
batch_size=batch_size,
check_steps=True,
steps_name='steps_per_epoch',
steps=steps_per_epoch,
validation_split=validation_split,
shuffle=shuffle)
# Prepare validation data.
if validation_data:
if (isinstance(validation_data, iterator_ops.Iterator) or
isinstance(validation_data, iterator_ops.EagerIterator) or
isinstance(validation_data, dataset_ops.Dataset)):
val_x = validation_data
val_y = None
val_sample_weight = None
elif len(validation_data) == 2:
val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence
else:
raise ValueError(
'When passing a `validation_data` argument, '
'it must contain either 2 items (x_val, y_val), '
'or 3 items (x_val, y_val, val_sample_weights), '
'or alternatively it could be a dataset or a '
'dataset or a dataset iterator. '
'However we received `validation_data=%s`' % validation_data)
# Validate and standardize validation data.
if self._distribution_strategy:
distributed_training_utils.validate_inputs(
val_x, val_y, self._distribution_strategy)
first_valx_value = nest.flatten(val_x)[0]
if not validation_steps and isinstance(first_valx_value, np.ndarray):
validation_steps = distributed_training_utils.get_input_batch_params(
first_valx_value, batch_size, self._distribution_strategy)
val_x, val_y, val_sample_weights = self._standardize_user_data(
val_x,
val_y,
sample_weight=val_sample_weight,
batch_size=batch_size,
steps=validation_steps)
elif validation_split and 0. < validation_split < 1.:
if training_utils.has_symbolic_tensors(x):
raise ValueError('If your data is in the form of symbolic tensors, '
'you cannot use `validation_split`.')
if hasattr(x[0], 'shape'):
split_at = int(x[0].shape[0] * (1. - validation_split))
else:
split_at = int(len(x[0]) * (1. - validation_split))
x, val_x = (slice_arrays(x, 0, split_at), slice_arrays(x, split_at))
y, val_y = (slice_arrays(y, 0, split_at), slice_arrays(y, split_at))
sample_weights, val_sample_weights = (slice_arrays(
sample_weights, 0, split_at), slice_arrays(sample_weights, split_at))
elif validation_steps:
val_x = []
val_y = []
val_sample_weights = []
else:
val_x = None
val_y = None
val_sample_weights = None
if context.executing_eagerly():
return training_eager.fit_loop(
self,
inputs=x,
targets=y,
sample_weights=sample_weights,
class_weight=class_weight,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
val_inputs=val_x,
val_targets=val_y,
val_sample_weights=val_sample_weights,
shuffle=shuffle,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps)
elif self._distribution_strategy:
return training_distributed.fit_loop(
self, x,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
val_iterator=val_x,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps)
else:
return training_arrays.fit_loop(
self, x, y,
sample_weights=sample_weights,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
val_inputs=val_x,
val_targets=val_y,
val_sample_weights=val_sample_weights,
shuffle=shuffle,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps)
  def evaluate(self,
               x=None,
               y=None,
               batch_size=None,
               verbose=1,
               sample_weight=None,
               steps=None,
               max_queue_size=10,
               workers=1,
               use_multiprocessing=False):
    """Returns the loss value & metrics values for the model in test mode.

    Computation is done in batches.

    Arguments:
        x: Input data. It could be:
          - A Numpy array (or array-like), or a list of arrays
            (in case the model has multiple inputs).
          - A TensorFlow tensor, or a list of tensors
            (in case the model has multiple inputs).
          - A dict mapping input names to the corresponding array/tensors,
            if the model has named inputs.
          - A `tf.data` dataset or a dataset iterator.
          - A generator or `keras.utils.Sequence` instance.
        y: Target data. Like the input data `x`,
          it could be either Numpy array(s) or TensorFlow tensor(s).
          It should be consistent with `x` (you cannot have Numpy inputs and
          tensor targets, or inversely).
          If `x` is a dataset, dataset iterator, generator or
          `keras.utils.Sequence` instance, `y` should not be specified (since
          targets will be obtained from the iterator/dataset).
        batch_size: Integer or `None`.
          Number of samples per gradient update.
          If unspecified, `batch_size` will default to 32.
          Do not specify the `batch_size` if your data is in the
          form of symbolic tensors, dataset, dataset iterators,
          generators, or `keras.utils.Sequence` instances (since they generate
          batches).
        verbose: 0 or 1. Verbosity mode.
          0 = silent, 1 = progress bar.
        sample_weight: Optional Numpy array of weights for
          the test samples, used for weighting the loss function.
          You can either pass a flat (1D)
          Numpy array with the same length as the input samples
          (1:1 mapping between weights and samples),
          or in the case of temporal data,
          you can pass a 2D array with shape
          `(samples, sequence_length)`,
          to apply a different weight to every timestep of every sample.
          In this case you should make sure to specify
          `sample_weight_mode="temporal"` in `compile()`. This argument is not
          supported when `x` is a dataset or a dataset iterator, instead pass
          sample weights as the third element of `x`.
        steps: Integer or `None`.
          Total number of steps (batches of samples)
          before declaring the evaluation round finished.
          Ignored with the default value of `None`.
        max_queue_size: Integer. Used for generator or `keras.utils.Sequence`
          input only. Maximum size for the generator queue.
          If unspecified, `max_queue_size` will default to 10.
        workers: Integer. Used for generator or `keras.utils.Sequence` input
          only. Maximum number of processes to spin up when using
          process-based threading. If unspecified, `workers` will default
          to 1. If 0, will execute the generator on the main thread.
        use_multiprocessing: Boolean. Used for generator or
          `keras.utils.Sequence` input only. If `True`, use process-based
          threading. If unspecified, `use_multiprocessing` will default to
          `False`. Note that because this implementation relies on
          multiprocessing, you should not pass non-picklable arguments to
          the generator as they can't be passed easily to children processes.

    Returns:
        Scalar test loss (if the model has a single output and no metrics)
        or list of scalars (if the model has multiple outputs
        and/or metrics). The attribute `model.metrics_names` will give you
        the display labels for the scalar outputs.

    Raises:
        ValueError: in case of invalid arguments.
    """
    # Generators/Sequences are delegated wholesale to `evaluate_generator`.
    if data_utils.is_generator_or_sequence(x):
      training_utils.check_generator_arguments(y, sample_weight)
      return self.evaluate_generator(
          x,
          steps=steps,
          verbose=verbose,
          max_queue_size=max_queue_size,
          workers=workers,
          use_multiprocessing=use_multiprocessing)
    # Backwards compatibility.
    if batch_size is None and steps is None:
      batch_size = 32
    # Validate and standardize user data.
    if self._distribution_strategy:
      distributed_training_utils.validate_inputs(
          x, y, self._distribution_strategy)
      first_x_value = nest.flatten(x)[0]
      if isinstance(first_x_value, np.ndarray) and not steps:
        steps = distributed_training_utils.get_input_batch_params(
            first_x_value, batch_size, self._distribution_strategy)
    x, y, sample_weights = self._standardize_user_data(
        x,
        y,
        sample_weight=sample_weight,
        batch_size=batch_size,
        check_steps=True,
        steps_name='steps',
        steps=steps)
    # Dispatch to the appropriate test loop.
    if context.executing_eagerly():
      return training_eager.test_loop(
          self,
          inputs=x,
          targets=y,
          sample_weights=sample_weights,
          batch_size=batch_size,
          verbose=verbose,
          steps=steps)
    elif self._distribution_strategy:
      return training_distributed.test_loop(
          self,
          iterator=x,
          verbose=verbose,
          steps=steps)
    else:
      return training_arrays.test_loop(
          self,
          inputs=x,
          targets=y,
          sample_weights=sample_weights,
          batch_size=batch_size,
          verbose=verbose,
          steps=steps)
  def predict(self,
              x,
              batch_size=None,
              verbose=0,
              steps=None,
              max_queue_size=10,
              workers=1,
              use_multiprocessing=False):
    """Generates output predictions for the input samples.

    Computation is done in batches.

    Arguments:
        x: Input samples. It could be:
          - A Numpy array (or array-like), or a list of arrays
            (in case the model has multiple inputs).
          - A TensorFlow tensor, or a list of tensors
            (in case the model has multiple inputs).
          - A `tf.data` dataset or a dataset iterator.
          - A generator or `keras.utils.Sequence` instance.
        batch_size: Integer or `None`.
          Number of samples per gradient update.
          If unspecified, `batch_size` will default to 32.
          Do not specify the `batch_size` if your data is in the
          form of symbolic tensors, dataset, dataset iterators,
          generators, or `keras.utils.Sequence` instances (since they generate
          batches).
        verbose: Verbosity mode, 0 or 1.
        steps: Total number of steps (batches of samples)
          before declaring the prediction round finished.
          Ignored with the default value of `None`.
        max_queue_size: Integer. Used for generator or `keras.utils.Sequence`
          input only. Maximum size for the generator queue.
          If unspecified, `max_queue_size` will default to 10.
        workers: Integer. Used for generator or `keras.utils.Sequence` input
          only. Maximum number of processes to spin up when using
          process-based threading. If unspecified, `workers` will default
          to 1. If 0, will execute the generator on the main thread.
        use_multiprocessing: Boolean. Used for generator or
          `keras.utils.Sequence` input only. If `True`, use process-based
          threading. If unspecified, `use_multiprocessing` will default to
          `False`. Note that because this implementation relies on
          multiprocessing, you should not pass non-picklable arguments to
          the generator as they can't be passed easily to children processes.

    Returns:
        Numpy array(s) of predictions.

    Raises:
        ValueError: In case of mismatch between the provided
            input data and the model's expectations,
            or in case a stateful model receives a number of samples
            that is not a multiple of the batch size.
    """
    # Generators/Sequences are delegated wholesale to `predict_generator`.
    if data_utils.is_generator_or_sequence(x):
      return self.predict_generator(
          x,
          steps=steps,
          verbose=verbose,
          max_queue_size=max_queue_size,
          workers=workers,
          use_multiprocessing=use_multiprocessing)
    # Backwards compatibility.
    if batch_size is None and steps is None:
      batch_size = 32
    if self._distribution_strategy:
      # Turn off prefetching since this is currently not deterministic. Once
      # b/112498930 is fixed we can turn it back on.
      # `_prefetch_on_device` is currently a property of only
      # `MirroredStrategy`.
      if hasattr(self._distribution_strategy, '_prefetch_on_device'):
        self._distribution_strategy._prefetch_on_device = False  # pylint: disable=protected-access
      distributed_training_utils.validate_inputs(
          x, None, self._distribution_strategy)
      first_x_value = nest.flatten(x)[0]
      if isinstance(first_x_value, np.ndarray) and not steps:
        steps = distributed_training_utils.get_input_batch_params(
            first_x_value, batch_size, self._distribution_strategy)
    # Validate and standardize user data.
    # TODO(anjalisridhar): We don't pass batch_size here for some reason. This
    # means that we end up calculating it twice which we should avoid.
    x, _, _ = self._standardize_user_data(
        x, check_steps=True, steps_name='steps', steps=steps)
    # Dispatch to the appropriate predict loop.
    if context.executing_eagerly():
      return training_eager.predict_loop(
          self, x, batch_size=batch_size, verbose=verbose, steps=steps)
    elif self._distribution_strategy:
      results = training_distributed.predict_loop(
          self, x, verbose=verbose, steps=steps)
      # Turn prefetching back on since we turned it off previously.
      if hasattr(self._distribution_strategy, '_prefetch_on_device'):
        self._distribution_strategy._prefetch_on_device = True  # pylint: disable=protected-access
      return results
    else:
      return training_arrays.predict_loop(
          self, x, batch_size=batch_size, verbose=verbose, steps=steps)
def train_on_batch(self, x, y=None, sample_weight=None, class_weight=None):
"""Runs a single gradient update on a single batch of data.
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset or a dataset iterator.
y: Target data. Like the input data `x`,
it could be either Numpy array(s) or TensorFlow tensor(s).
It should be consistent with `x` (you cannot have Numpy inputs and
tensor targets, or inversely). If `x` is a dataset or a
dataset iterator, `y` should not be specified
(since targets will be obtained from the iterator).
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile(). This argument is not
supported when `x` is a dataset or a dataset iterator.
class_weight: Optional dictionary mapping
class indices (integers) to
a weight (float) to apply to the model's loss for the samples
from this class during training.
This can be useful to tell the model to "pay more attention" to
samples from an under-represented class.
Returns:
Scalar training loss
(if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: In case of invalid user-provided arguments.
"""
if self._distribution_strategy:
raise NotImplementedError('`train_on_batch` is not supported for models '
'compiled with DistributionStrategy.')
# Validate and standardize user data.
x, y, sample_weights = self._standardize_user_data(
x, y, sample_weight=sample_weight, class_weight=class_weight)
if context.executing_eagerly():
outputs = training_eager.train_on_batch(
self, x, y, sample_weights=sample_weights)
else:
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [1]
else:
ins = x + y + sample_weights
self._make_train_function()
outputs = self.train_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def test_on_batch(self, x, y=None, sample_weight=None):
"""Test the model on a single batch of samples.
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset or a dataset iterator.
y: Target data. Like the input data `x`,
it could be either Numpy array(s) or TensorFlow tensor(s).
It should be consistent with `x` (you cannot have Numpy inputs and
tensor targets, or inversely). If `x` is a dataset or a
dataset iterator, `y` should not be specified
(since targets will be obtained from the iterator).
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile(). This argument is not
supported when `x` is a dataset or a dataset iterator.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: In case of invalid user-provided arguments.
"""
if self._distribution_strategy:
raise NotImplementedError('`test_on_batch` is not supported for models '
'compiled with DistributionStrategy.')
# Validate and standardize user data.
x, y, sample_weights = self._standardize_user_data(
x, y, sample_weight=sample_weight)
if context.executing_eagerly():
outputs = training_eager.test_on_batch(
self, x, y, sample_weights=sample_weights)
else:
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [0]
else:
ins = x + y + sample_weights
self._make_test_function()
outputs = self.test_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def predict_on_batch(self, x):
"""Returns predictions for a single batch of samples.
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A `tf.data` dataset or a dataset iterator.
Returns:
Numpy array(s) of predictions.
Raises:
ValueError: In case of mismatch between given number of inputs and
expectations of the model.
"""
if self._distribution_strategy:
raise NotImplementedError('`predict_on_batch` is not supported for '
'models compiled with DistributionStrategy.')
# Validate and standardize user data.
inputs, _, _ = self._standardize_user_data(x)
if context.executing_eagerly():
if (isinstance(x, iterator_ops.EagerIterator) or
(isinstance(x, dataset_ops.Dataset) and context.executing_eagerly())):
inputs = training_utils.cast_if_floating_dtype(inputs)
else:
inputs = [
ops.convert_to_tensor(val, dtype=K.floatx()) for val in inputs
]
return self(inputs) # pylint: disable=not-callable
if not context.executing_eagerly():
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = inputs + [0]
else:
ins = inputs
self._make_predict_function()
outputs = self.predict_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def fit_generator(self,
generator,
steps_per_epoch=None,
epochs=1,
verbose=1,
callbacks=None,
validation_data=None,
validation_steps=None,
class_weight=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
shuffle=True,
initial_epoch=0):
"""Fits the model on data yielded batch-by-batch by a Python generator.
The generator is run in parallel to the model, for efficiency.
For instance, this allows you to do real-time data augmentation
on images on CPU in parallel to training your model on GPU.
The use of `keras.utils.Sequence` guarantees the ordering
and guarantees the single use of every input per epoch when
using `use_multiprocessing=True`.
Arguments:
generator: A generator or an instance of `Sequence`
(`keras.utils.Sequence`)
object in order to avoid duplicate data
when using multiprocessing.
The output of the generator must be either
- a tuple `(inputs, targets)`
- a tuple `(inputs, targets, sample_weights)`.
This tuple (a single output of the generator) makes a single batch.
Therefore, all arrays in this tuple must have the same length (equal
to the size of this batch). Different batches may have different
sizes.
For example, the last batch of the epoch is commonly smaller than
the
others, if the size of the dataset is not divisible by the batch
size.
The generator is expected to loop over its data
indefinitely. An epoch finishes when `steps_per_epoch`
batches have been seen by the model.
steps_per_epoch: Total number of steps (batches of samples)
to yield from `generator` before declaring one epoch
finished and starting the next epoch. It should typically
be equal to the number of samples of your dataset
divided by the batch size.
Optional for `Sequence`: if unspecified, will use
the `len(generator)` as a number of steps.
epochs: Integer, total number of iterations on the data.
verbose: Verbosity mode, 0, 1, or 2.
callbacks: List of callbacks to be called during training.
validation_data: This can be either
- a generator for the validation data
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
validation_steps: Only relevant if `validation_data`
is a generator. Total number of steps (batches of samples)
to yield from `generator` before stopping.
Optional for `Sequence`: if unspecified, will use
the `len(validation_data)` as a number of steps.
class_weight: Dictionary mapping class indices to a weight
for the class.
max_queue_size: Integer. Maximum size for the generator queue.
If unspecified, `max_queue_size` will default to 10.
workers: Integer. Maximum number of processes to spin up
when using process-based threading.
If unspecified, `workers` will default to 1. If 0, will
execute the generator on the main thread.
use_multiprocessing: Boolean.
If `True`, use process-based threading.
If unspecified, `use_multiprocessing` will default to `False`.
Note that because this implementation relies on multiprocessing,
you should not pass non-picklable arguments to the generator
as they can't be passed easily to children processes.
shuffle: Boolean. Whether to shuffle the order of the batches at
the beginning of each epoch. Only used with instances
of `Sequence` (`keras.utils.Sequence`).
Has no effect when `steps_per_epoch` is not `None`.
initial_epoch: Epoch at which to start training
(useful for resuming a previous training run)
Returns:
A `History` object.
Example:
```python
def generate_arrays_from_file(path):
while 1:
f = open(path)
for line in f:
# create numpy arrays of input data
# and labels, from each line in the file
x1, x2, y = process_line(line)
yield ({'input_1': x1, 'input_2': x2}, {'output': y})
f.close()
model.fit_generator(generate_arrays_from_file('/my_file.txt'),
steps_per_epoch=10000, epochs=10)
```
Raises:
ValueError: In case the generator yields data in an invalid format.
"""
if self._distribution_strategy:
raise NotImplementedError('`fit_generator` is not supported for '
'models compiled with DistributionStrategy.')
if not self.built and not self._is_graph_network:
raise NotImplementedError(
'`fit_generator` is not yet enabled for unbuilt Model subclasses')
return training_generator.fit_generator(
self,
generator,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
validation_data=validation_data,
validation_steps=validation_steps,
class_weight=class_weight,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
shuffle=shuffle,
initial_epoch=initial_epoch)
def evaluate_generator(self,
generator,
steps=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0):
"""Evaluates the model on a data generator.
The generator should return the same kind of data
as accepted by `test_on_batch`.
Arguments:
generator: Generator yielding tuples (inputs, targets)
or (inputs, targets, sample_weights)
or an instance of `keras.utils.Sequence`
object in order to avoid duplicate data
when using multiprocessing.
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
Optional for `Sequence`: if unspecified, will use
the `len(generator)` as a number of steps.
max_queue_size: maximum size for the generator queue
workers: Integer. Maximum number of processes to spin up
when using process-based threading.
If unspecified, `workers` will default to 1. If 0, will
execute the generator on the main thread.
use_multiprocessing: Boolean.
If `True`, use process-based threading.
If unspecified, `use_multiprocessing` will default to `False`.
Note that because this implementation relies on multiprocessing,
you should not pass non-picklable arguments to the generator
as they can't be passed easily to children processes.
verbose: Verbosity mode, 0 or 1.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: in case of invalid arguments.
Raises:
ValueError: In case the generator yields data in an invalid format.
"""
if self._distribution_strategy:
raise NotImplementedError('`evaluate_generator` is not supported for '
'models compiled with DistributionStrategy.')
if not self.built and not self._is_graph_network:
raise NotImplementedError(
'`evaluate_generator` is not yet enabled for '
'unbuilt Model subclasses')
return training_generator.evaluate_generator(
self,
generator,
steps=steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
verbose=verbose)
def predict_generator(self,
generator,
steps=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0):
"""Generates predictions for the input samples from a data generator.
The generator should return the same kind of data as accepted by
`predict_on_batch`.
Arguments:
generator: Generator yielding batches of input samples
or an instance of `keras.utils.Sequence` object in order to
avoid duplicate data when using multiprocessing.
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
Optional for `Sequence`: if unspecified, will use
the `len(generator)` as a number of steps.
max_queue_size: Maximum size for the generator queue.
workers: Integer. Maximum number of processes to spin up
when using process-based threading.
If unspecified, `workers` will default to 1. If 0, will
execute the generator on the main thread.
use_multiprocessing: Boolean.
If `True`, use process-based threading.
If unspecified, `use_multiprocessing` will default to `False`.
Note that because this implementation relies on multiprocessing,
you should not pass non-picklable arguments to the generator
as they can't be passed easily to children processes.
verbose: verbosity mode, 0 or 1.
Returns:
Numpy array(s) of predictions.
Raises:
ValueError: In case the generator yields data in an invalid format.
"""
if self._distribution_strategy:
raise NotImplementedError('`predict_generator` is not supported for '
'models compiled with DistributionStrategy.')
if not self.built and not self._is_graph_network:
raise NotImplementedError(
'`predict_generator` is not yet enabled for unbuilt Model subclasses')
return training_generator.predict_generator(
self,
generator,
steps=steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
verbose=verbose)
def _get_callback_model(self):
"""Returns the Callback Model for this Model."""
if hasattr(self, '_replicated_model') and self._replicated_model:
# When using training_distributed, we set the callback model
# to an instance of the `DistributedModel` that we create in
# the `compile` call. The `DistributedModel` is initialized
# with the first replicated model. We need to set the callback
# model to a DistributedModel to allow us to override saving
# and loading weights when we checkpoint the model during training.
return self._replicated_model
if hasattr(self, 'callback_model') and self.callback_model:
return self.callback_model
return self
def _make_callback_model(self, grouped_model):
first_replicated_model = self._distribution_strategy.unwrap(
grouped_model)[0]
# We initialize the callback model with the first replicated model.
self._replicated_model = DistributedCallbackModel(first_replicated_model)
self._replicated_model.set_original_model(self)
class DistributedCallbackModel(Model):
"""Model that is used for callbacks with DistributionStrategy."""
  def __init__(self, model):
    """Initializes the callback wrapper.

    Arguments:
        model: The first replicated model produced by the distribution
          strategy. NOTE(review): currently unused in this constructor —
          confirm whether any of its state should be copied over here.
    """
    super(DistributedCallbackModel, self).__init__()
    # TODO(anjalisridhar): Right now the only attributes set are the layer and
    # weights. We may need to set additional attributes as needed since we have
    # not called compile on this model.
  def set_original_model(self, orig_model):
    """Stores a back-reference to the user's (non-replicated) model.

    Used by `save`/`load_weights` to sync weights with the original model.
    """
    self._original_model = orig_model
  def save_weights(self, filepath, overwrite=True, save_format=None):
    """Saves the replicated model's weights, forwarding all arguments.

    NOTE(review): delegates to `self._replicated_model`, which does not
    appear to be set anywhere on this class in the visible code — confirm
    it is provided externally before relying on this method.
    """
    self._replicated_model.save_weights(filepath, overwrite=overwrite,
                                        save_format=save_format)
def save(self, filepath, overwrite=True, include_optimizer=True):
# save weights from the distributed model to the original model
distributed_model_weights = self.get_weights()
self._original_model.set_weights(distributed_model_weights)
# TODO(anjalisridhar): Do we need to save the original model here?
# Saving the first replicated model works as well.
self._original_model.save(filepath, overwrite=True, include_optimizer=False)
def load_weights(self, filepath, by_name=False):
self._original_model.load_weights(filepath, by_name=False)
# Copy the weights from the original model to each of the replicated models.
orig_model_weights = self._original_model.get_weights()
distributed_training_utils.set_weights(
self._original_model._distribution_strategy, self, # pylint: disable=protected-access
orig_model_weights)
  def __getattr__(self, item):
    # Whitelisted atttributes of the model that can be accessed by the user
    # during a callback.
    # NOTE(review): this implicitly returns None for any missing attribute
    # instead of raising AttributeError, which breaks `hasattr` checks on
    # this object — confirm this leniency is intentional.
    if item not in ['_setattr_tracking']:
      logging.warning('You are accessing attribute ' + item + ' of the '
                      'DistributedCallbackModel that may not have been set '
                      'correctly.')
| 43.517849 | 99 | 0.646379 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import weakref
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import losses
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import distributed_training_utils
from tensorflow.python.keras.engine import training_arrays
from tensorflow.python.keras.engine import training_distributed
from tensorflow.python.keras.engine import training_eager
from tensorflow.python.keras.engine import training_generator
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.engine.network import Network
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils.generic_utils import slice_arrays
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import optimizer as tf_optimizer_module
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.models.Model', 'keras.Model')
class Model(Network):
  def __init__(self, *args, **kwargs):
    """Initializes the model; all arguments are forwarded to `Network`."""
    super(Model, self).__init__(*args, **kwargs)
    # Per-iterator cache of `get_next()` ops; weak keys let entries die
    # with their iterators.
    self._iterator_get_next = weakref.WeakKeyDictionary()
    # Create a cache for dataset - uninitialized iterators
    self._dataset_iterator_cache = weakref.WeakKeyDictionary()
    # Set by `compile(..., distribute=...)`; None means no distribution.
    self._distribution_strategy = None
    # Tracks whether train/test/predict functions have been built for the
    # current compile state.
    self._built_graph_functions = False
  def _set_sample_weight_attributes(self, sample_weight_mode,
                                    skip_target_weighing_indices):
    """Sets sample weight related attributes on the model.

    Builds the per-output sample-weight placeholders/modes and caches the
    subset that is actually fed (outputs whose index is not in
    `skip_target_weighing_indices`).
    """
    sample_weights, sample_weight_modes = training_utils.prepare_sample_weights(
        self.output_names, sample_weight_mode, skip_target_weighing_indices)
    self.sample_weights = sample_weights
    self.sample_weight_modes = sample_weight_modes
    # Only non-skipped outputs contribute feedable modes/weights.
    self._feed_sample_weight_modes = [
        sample_weight_modes[i]
        for i in range(len(self.outputs))
        if i not in skip_target_weighing_indices
    ]
    self._feed_sample_weights = [
        sample_weights[i]
        for i in range(len(sample_weights))
        if i not in skip_target_weighing_indices
    ]
  def _cache_output_metric_attributes(self, metrics, weighted_metrics):
    """Caches per-output metric info for `metrics` and `weighted_metrics`.

    Stores the results in `self._per_output_metrics` and
    `self._per_output_weighted_metrics` (one dict of metric-name -> fn per
    model output).
    """
    output_shapes = [
        None if output is None else output.get_shape().as_list()
        for output in self.outputs
    ]
    self._per_output_metrics = training_utils.collect_per_output_metric_info(
        metrics, self.output_names, output_shapes, self.loss_functions)
    # Weighted metrics additionally receive the sample-weight placeholders.
    self._per_output_weighted_metrics = \
        training_utils.collect_per_output_metric_info(
            weighted_metrics, self.output_names, output_shapes,
            self.loss_functions, self.sample_weights)
def _add_unique_metric_name(self, metric_name, output_index):
if len(self.output_names) > 1:
metric_name = '%s_%s' % (self.output_names[output_index], metric_name)
j = 1
base_metric_name = metric_name
while metric_name in self.metrics_names:
metric_name = '%s_%d' % (base_metric_name, j)
j += 1
return metric_name
  def _init_metric_attributes(self):
    """Resets the attributes that track per-output metric bookkeeping."""
    # 'loss' is always reported first; remaining names are appended as
    # metrics get attached to each output.
    self.metrics_names = ['loss']
    self.metrics_tensors = []
    self.metrics_updates = []
    # Stateful metrics (Layer-based) are tracked separately so their state
    # can be reset between epochs.
    self.stateful_metric_names = []
    self.stateful_metric_functions = []
def _set_per_output_metric_attributes(self, metrics_dict, output_index):
for metric_name, metric_fn in metrics_dict.items():
metric_name = self._add_unique_metric_name(metric_name, output_index)
self.metrics_names.append(metric_name)
if isinstance(metric_fn, base_layer.Layer) and metric_fn.stateful:
self.stateful_metric_names.append(metric_name)
self.stateful_metric_functions.append(metric_fn)
def _set_metric_attributes(self, outputs, skip_target_indices=None):
skip_target_indices = skip_target_indices or []
for i in range(len(outputs)):
if i in skip_target_indices:
continue
self._set_per_output_metric_attributes(self._per_output_metrics[i], i)
self._set_per_output_metric_attributes(
self._per_output_weighted_metrics[i], i)
  def _handle_per_output_metrics(self,
                                 metrics_dict,
                                 y_true,
                                 y_pred,
                                 mask,
                                 weights=None):
    """Calls the metric functions for a single model output.

    Arguments:
        metrics_dict: Dict mapping metric display name to metric function
          for this output.
        y_true: Target tensor for this output.
        y_pred: Prediction tensor for this output.
        mask: Computed output mask value (or None).
        weights: Optional sample-weight tensor for this output.

    Returns:
        The list of metric result tensors/values, in `metrics_dict` order.
    """
    metric_results = []
    for metric_name, metric_fn in metrics_dict.items():
      with K.name_scope(metric_name):
        if isinstance(metric_fn, metrics_module.Metric):
          # `Metric` objects take a single combined weight argument, so the
          # mask has to be folded into `weights` here.
          if mask is not None:
            mask = math_ops.cast(mask, y_pred.dtype)
            if weights is None:
              # Use the mask directly as the weights.
              weights = mask
            else:
              # Update dimensions of weights to match with mask.
              mask, _, weights = metrics_module.squeeze_or_expand_dimensions(
                  mask, None, weights)
              # Broadcast when possible; silently fall back to the raw
              # elementwise product otherwise.
              try:
                weights = weights_broadcast_ops.broadcast_weights(weights, mask)
              except ValueError:
                pass
              weights *= mask
          metric_result = metric_fn(y_true, y_pred, weights)
        else:
          # Plain callables are wrapped so that masking/weighting is
          # applied uniformly.
          weighted_metric_fn = training_utils.weighted_masked_objective(
              metric_fn)
          metric_result = weighted_metric_fn(
              y_true, y_pred, weights=weights, mask=mask)

        if not context.executing_eagerly():
          # Keep track of metric result tensor for graph-mode fetching.
          self.metrics_tensors.append(metric_result)
      metric_results.append(metric_result)
      is_stateful = isinstance(metric_fn,
                               base_layer.Layer) and metric_fn.stateful
      if is_stateful and not context.executing_eagerly():
        # Stateful metrics carry update ops that must run each step.
        self.metrics_updates += metric_fn.updates
    return metric_results
  def _handle_metrics(self,
                      outputs,
                      skip_target_indices=None,
                      targets=None,
                      sample_weights=None,
                      masks=None):
    """Handles calling metric functions for all model outputs.

    Arguments:
        outputs: Model output tensors/values.
        skip_target_indices: Output indices to skip (no loss/metrics).
        targets: Target tensors/values, aligned with `outputs`.
        sample_weights: Per-output sample-weight tensors.
        masks: Per-output mask values.

    Returns:
        The flat list of all metric results, in output order (plain metrics
        of an output before its weighted metrics).
    """
    skip_target_indices = skip_target_indices or []
    metric_results = []
    with K.name_scope('metrics'):
      for i in range(len(outputs)):
        if i in skip_target_indices:
          continue
        output = outputs[i] if outputs else None
        target = targets[i] if targets else None
        output_mask = masks[i] if masks else None
        # Unweighted metrics first...
        metric_results.extend(
            self._handle_per_output_metrics(self._per_output_metrics[i], target,
                                            output, output_mask))
        # ...then the metrics that receive this output's sample weights.
        metric_results.extend(
            self._handle_per_output_metrics(
                self._per_output_weighted_metrics[i],
                target,
                output,
                output_mask,
                weights=sample_weights[i]))
    return metric_results
  @checkpointable.no_automatic_dependency_tracking
  def compile(self,
              optimizer,
              loss=None,
              metrics=None,
              loss_weights=None,
              sample_weight_mode=None,
              weighted_metrics=None,
              target_tensors=None,
              distribute=None,
              **kwargs):
    """Configures the model for training.

    Arguments:
        optimizer: Optimizer instance (or name). Must be a
          `tf.train.Optimizer` or `TFOptimizer` when eager execution or
          `distribute` is used.
        loss: Loss name/function, or a list (one per output) or a dict
          mapping output names to losses.
        metrics: List of metrics to be evaluated during training/testing.
        loss_weights: Optional list or dict of scalar coefficients to
          weight the per-output losses (defaults to 1. per output).
        sample_weight_mode: `"temporal"` for timestep-wise sample weights;
          not supported with `distribute`.
        weighted_metrics: Metrics evaluated with sample weighting; not
          supported with `distribute`.
        target_tensors: Optional tensor(s) to use as targets instead of
          feed placeholders; graph mode only, not supported with
          `distribute`.
        distribute: Optional DistributionStrategy used to distribute
          training.
        **kwargs: Forwarded to `K.function` when the train/test/predict
          functions are built.

    Raises:
        ValueError: In case of invalid arguments for `optimizer`, `loss`,
          `loss_weights` or `target_tensors`.
        NotImplementedError: For argument combinations unsupported with
          eager execution or DistributionStrategy.
    """
    # Any previously built graph functions are stale after re-compiling.
    self._built_graph_functions = False
    # Validate that arguments passed by the user to `compile` are supported
    # by DistributionStrategy.
    if distribute:
      if not isinstance(
          optimizer, (tf_optimizer_module.Optimizer, optimizers.TFOptimizer)):
        raise NotImplementedError(
            'optimizer must be an instance of '
            'tf.train.Optimizer, not a %s' % type(optimizer))
      if context.executing_eagerly():
        raise NotImplementedError('DistributionStrategy is not supported '
                                  'when eager execution is enabled.')
      if sample_weight_mode:
        raise NotImplementedError('sample_weight_mode is not supported with '
                                  'DistributionStrategy.')
      if weighted_metrics:
        raise NotImplementedError('weighted_metrics is not supported with '
                                  'DistributionStrategy.')
      if target_tensors:
        raise ValueError('target_tensors is not supported with '
                         'DistributionStrategy.')
    loss = loss or {}
    if context.executing_eagerly() and not isinstance(
        optimizer, (tf_optimizer_module.Optimizer, optimizers.TFOptimizer)):
      raise ValueError(
          'optimizer must be an instance of tf.train.Optimizer, not '
          'a %s' % type(optimizer))
    self.optimizer = optimizers.get(optimizer)
    # to add a checkpoint dependency on the optimizer if it's checkpointable.
    if isinstance(self.optimizer, checkpointable.CheckpointableBase):
      self._track_checkpointable(
          self.optimizer, name='optimizer', overwrite=True)
    self.loss = loss
    self.metrics = metrics or []
    self.loss_weights = loss_weights
    self.sample_weight_mode = sample_weight_mode
    self.weighted_metrics = weighted_metrics
    if context.executing_eagerly() and target_tensors is not None:
      raise ValueError('target_tensors is not supported in Eager mode.')
    self.target_tensors = target_tensors
    # Set DistributionStrategy specific parameters.
    self._distribution_strategy = distribute
    self._grouped_model = None
    if self._distribution_strategy is not None:
      distributed_training_utils.configure_and_create_session(
          self._distribution_strategy)
    if not self.built:
      # Model is not compilable until its inputs/outputs are known; the
      # rest of compilation happens lazily on first call with data.
      return
    self._is_compiled = True
    # Prepare the list of loss functions, one per model output.
    if isinstance(loss, dict):
      for name in loss:
        if name not in self.output_names:
          raise ValueError(
              'Unknown entry in loss '
              'dictionary: "' + name + '". '
              'Only expected the following keys: ' + str(self.output_names))
      loss_functions = []
      for name in self.output_names:
        if name not in loss:
          # Outputs missing from the dict get no loss on purpose.
          logging.warning(
              'Output "' + name +
              '" missing from loss dictionary. We assume '
              'this was done on purpose. The fit and evaluate APIs will not be '
              'expecting any data to be passed to "' + name + '".')
        loss_functions.append(losses.get(loss.get(name)))
    elif isinstance(loss, list):
      if len(loss) != len(self.outputs):
        raise ValueError('When passing a list as loss, '
                         'it should have one entry per model outputs. '
                         'The model has ' + str(len(self.outputs)) +
                         ' outputs, but you passed loss=' + str(loss))
      loss_functions = [losses.get(l) for l in loss]
    else:
      # A single loss is replicated across all outputs.
      loss_function = losses.get(loss)
      loss_functions = [loss_function for _ in range(len(self.outputs))]
    self.loss_functions = loss_functions
    # Wrap losses so masking and sample weighting are applied uniformly;
    # a None entry means that output is skipped entirely.
    weighted_losses = [training_utils.weighted_masked_objective(fn)
                       for fn in loss_functions]
    skip_target_indices = []
    skip_target_weighing_indices = []
    self._feed_outputs = []
    self._feed_output_names = []
    self._feed_output_shapes = []
    self._feed_loss_fns = []
    for i in range(len(weighted_losses)):
      if weighted_losses[i] is None:
        skip_target_indices.append(i)
        skip_target_weighing_indices.append(i)
    # Prepare output masks (graph mode only).
    if not context.executing_eagerly():
      masks = [getattr(x, '_keras_mask', None) for x in self.outputs]
      if not isinstance(masks, list):
        masks = [masks]
    # Prepare the per-output loss weights (default 1. for each output).
    if loss_weights is None:
      loss_weights_list = [1. for _ in range(len(self.outputs))]
    elif isinstance(loss_weights, dict):
      for name in loss_weights:
        if name not in self.output_names:
          raise ValueError(
              'Unknown entry in loss_weights '
              'dictionary: "' + name + '". '
              'Only expected the following keys: ' + str(self.output_names))
      loss_weights_list = []
      for name in self.output_names:
        loss_weights_list.append(loss_weights.get(name, 1.))
    elif isinstance(loss_weights, list):
      if len(loss_weights) != len(self.outputs):
        raise ValueError(
            'When passing a list as loss_weights, '
            'it should have one entry per model output. '
            'The model has ' + str(len(self.outputs)) +
            ' outputs, but you passed loss_weights=' + str(loss_weights))
      loss_weights_list = loss_weights
    else:
      raise TypeError('Could not interpret loss_weights argument: ' +
                      str(loss_weights) + ' - expected a list of dicts.')
    self.loss_weights_list = loss_weights_list
    # Initialize model metric attributes.
    self._init_metric_attributes()
    # Eager mode: only attribute bookkeeping happens here; loss/metric
    # tensors are built per-step by the training_eager helpers.
    if context.executing_eagerly():
      self._set_sample_weight_attributes(sample_weight_mode,
                                         skip_target_weighing_indices)
      self._cache_output_metric_attributes(metrics, weighted_metrics)
      if target_tensors is not None:
        raise ValueError('target_tensors are not currently supported in Eager '
                         'mode.')
      self.total_loss = None
      for i in range(len(self.outputs)):
        if len(self.outputs) > 1:
          self.metrics_names.append(self.output_names[i] + '_loss')
      self._set_metric_attributes(
          self.outputs,
          skip_target_indices=skip_target_indices,
      )
      self.targets = []
      for i in range(len(self.outputs)):
        self._feed_output_names.append(self.output_names[i])
      self._collected_trainable_weights = self.trainable_weights
      return
    # Graph mode: prepare/validate the model targets.
    self.targets = []
    self._feed_targets = []
    if target_tensors not in (None, []):
      # Canonicalize `target_tensors` into a list aligned with outputs.
      if isinstance(target_tensors, list):
        if len(target_tensors) != len(self.outputs):
          raise ValueError(
              'When passing a list as `target_tensors`, '
              'it should have one entry per model output. '
              'The model has ' + str(len(self.outputs)) +
              ' outputs, but you passed target_tensors=' + str(target_tensors))
      elif isinstance(target_tensors, dict):
        for name in target_tensors:
          if name not in self.output_names:
            raise ValueError(
                'Unknown entry in `target_tensors` '
                'dictionary: "' + name + '". '
                'Only expected the following keys: ' + str(self.output_names))
        tmp_target_tensors = []
        for name in self.output_names:
          tmp_target_tensors.append(target_tensors.get(name, None))
        target_tensors = tmp_target_tensors
      elif tensor_util.is_tensor(target_tensors):
        target_tensors = [target_tensors]
      else:
        raise TypeError('Expected `target_tensors` to be a list or tuple or '
                        'dict or a single tensor, but got:', target_tensors)
    for i in range(len(self.outputs)):
      if i in skip_target_indices:
        self.targets.append(None)
      else:
        shape = K.int_shape(self.outputs[i])
        name = self.output_names[i]
        if target_tensors not in (None, []):
          target = target_tensors[i]
        else:
          target = None
        if target is None or K.is_placeholder(target):
          # Placeholder targets are fed at train time; create one when the
          # user supplied none.
          if target is None:
            target = K.placeholder(
                ndim=len(shape),
                name=name + '_target',
                sparse=K.is_sparse(self.outputs[i]),
                dtype=K.dtype(self.outputs[i]))
          self._feed_targets.append(target)
          self._feed_outputs.append(self.outputs[i])
          self._feed_output_names.append(name)
          self._feed_output_shapes.append(shape)
          self._feed_loss_fns.append(self.loss_functions[i])
        else:
          # Concrete target tensors are never fed, so they are not weighed.
          skip_target_weighing_indices.append(i)
        self.targets.append(target)
    # Prepare sample weights.
    self._set_sample_weight_attributes(sample_weight_mode,
                                       skip_target_weighing_indices)
    # Save all metric attributes per output of the model.
    self._cache_output_metric_attributes(metrics, weighted_metrics)
    # Compute total loss as the loss-weighted sum of per-output losses.
    total_loss = None
    with K.name_scope('loss'):
      for i in range(len(self.outputs)):
        if i in skip_target_indices:
          continue
        y_true = self.targets[i]
        y_pred = self.outputs[i]
        weighted_loss = weighted_losses[i]
        sample_weight = self.sample_weights[i]
        mask = masks[i]
        loss_weight = loss_weights_list[i]
        with K.name_scope(self.output_names[i] + '_loss'):
          output_loss = weighted_loss(y_true, y_pred, sample_weight, mask)
        if len(self.outputs) > 1:
          # Multi-output models also report each per-output loss.
          self.metrics_tensors.append(output_loss)
          self.metrics_names.append(self.output_names[i] + '_loss')
        if total_loss is None:
          total_loss = loss_weight * output_loss
        else:
          total_loss += loss_weight * output_loss
      if total_loss is None:
        if not self.losses:
          raise ValueError('The model cannot be compiled '
                           'because it has no loss to optimize.')
        else:
          total_loss = 0.
      # Add regularization penalties and other layer-specific losses.
      for loss_tensor in self.losses:
        total_loss += loss_tensor
    # Invoke metric functions for all the outputs.
    self._set_metric_attributes(
        self.outputs,
        skip_target_indices=skip_target_indices,
    )
    self._handle_metrics(
        self.outputs,
        masks=masks,
        targets=self.targets,
        skip_target_indices=skip_target_indices,
        sample_weights=self.sample_weights)
    self.total_loss = total_loss
    # Forwarded to `K.function` when the train/test/predict functions are
    # built lazily below.
    self._function_kwargs = kwargs
    # Functions are compiled lazily when first required, which saves time
    # when the user does not use all of them.
    self.train_function = None
    self.test_function = None
    self.predict_function = None
    # Collected trainable weights, sorted in topological order.
    trainable_weights = self.trainable_weights
    self._collected_trainable_weights = trainable_weights
  def _check_trainable_weights_consistency(self):
    """Warns if trainable weights changed after `compile` collected them.

    Emits a one-time warning (no exception) when the count of
    `trainable_weights` differs from the snapshot taken at compile time —
    a symptom of toggling `model.trainable` without recompiling.
    """
    if not hasattr(self, '_collected_trainable_weights'):
      # Model was never compiled; nothing to compare against.
      return
    if len(self.trainable_weights) != len(self._collected_trainable_weights):
      logging.log_first_n(
          logging.WARN, 'Discrepancy between trainable weights and collected'
          ' trainable weights, did you set `model.trainable`'
          ' without calling `model.compile` after ?', 1)
  def _make_train_function(self):
    """Lazily builds `self.train_function` (a `K.function`) if needed.

    Raises:
        RuntimeError: If the model has never been compiled.
    """
    if not hasattr(self, 'train_function'):
      raise RuntimeError('You must compile your model before using it.')
    self._check_trainable_weights_consistency()
    if self.train_function is None:
      inputs = (self._feed_inputs +
                self._feed_targets +
                self._feed_sample_weights)
      # Append the dynamic learning-phase input when the backend needs one.
      if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
        inputs += [K.learning_phase()]

      with K.name_scope('training'):
        with K.name_scope(self.optimizer.__class__.__name__):
          # Training updates for the collected (compile-time) trainable
          # weights.
          updates = self.optimizer.get_updates(
              params=self._collected_trainable_weights, loss=self.total_loss)
        # Unconditional updates, conditional (input-dependent) updates, and
        # stateful-metric updates all run in the same step.
        updates += self.get_updates_for(None)
        updates += self.get_updates_for(self.inputs)
        updates += self.metrics_updates

        # Gets loss and metrics. Updates weights at each call.
        self.train_function = K.function(
            inputs, [self.total_loss] + self.metrics_tensors,
            updates=updates,
            name='train_function',
            **self._function_kwargs)
  def _make_test_function(self):
    """Lazily builds `self.test_function` (a `K.function`) if needed.

    Raises:
        RuntimeError: If the model has never been compiled.
    """
    if not hasattr(self, 'test_function'):
      raise RuntimeError('You must compile your model before using it.')
    if self.test_function is None:
      inputs = (self._feed_inputs +
                self._feed_targets +
                self._feed_sample_weights)
      # Append the dynamic learning-phase input when the backend needs one.
      if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
        inputs += [K.learning_phase()]
      # Return loss and metrics, no gradient updates.
      # Does update the network states (and stateful metrics).
      self.test_function = K.function(
          inputs, [self.total_loss] + self.metrics_tensors,
          updates=self.state_updates + self.metrics_updates,
          name='test_function',
          **self._function_kwargs)
  def _make_predict_function(self):
    """Lazily builds `self.predict_function` (a `K.function`) if needed.

    Unlike train/test, prediction does not require `compile`, so a missing
    attribute is initialized rather than raising.
    """
    if not hasattr(self, 'predict_function'):
      self.predict_function = None
    if self.predict_function is None:
      if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
        inputs = self._feed_inputs + [K.learning_phase()]
      else:
        inputs = self._feed_inputs
      # `_function_kwargs` only exists after `compile`; default to none.
      kwargs = getattr(self, '_function_kwargs', {})
      # Gets network outputs. Does not update weights.
      # Does update the network states.
      self.predict_function = K.function(
          inputs,
          self.outputs,
          updates=self.state_updates,
          name='predict_function',
          **kwargs)
def _get_iterator_get_next_tensors(self, iterator):
get_next_op = self._iterator_get_next.get(iterator, None)
if get_next_op is None:
get_next_op = iterator.get_next()
self._iterator_get_next[iterator] = get_next_op
return get_next_op
def _distribution_standardize_user_data(self,
x,
y=None,
sample_weight=None,
class_weight=None,
batch_size=None,
check_steps=False,
steps_name='steps',
steps=None,
validation_split=0,
shuffle=False):
if class_weight:
raise NotImplementedError('`class_weight` is currently not supported '
'when using DistributionStrategy.')
if (sample_weight is not None and sample_weight.all() and
self._distribution_strategy.__class__.__name__ == 'TPUStrategy'):
raise NotImplementedError('`sample_weight` is currently not supported '
'when using TPUStrategy.')
if check_steps and isinstance(x, dataset_ops.Dataset) and steps is None:
raise ValueError('When using Datasets as input, '
'you should specify the `{steps_name}` argument.'
.format(steps_name=steps_name))
first_x_value = nest.flatten(x)[0]
if isinstance(first_x_value, np.ndarray):
assert steps is not None
x_shape = first_x_value.shape
if batch_size is None:
batch_size = distributed_training_utils.get_batch_size(
self._distribution_strategy.num_replicas, x_shape[0], steps)
drop_remainder = self._distribution_strategy.require_static_shapes
if y is not None:
var_x = distributed_training_utils.get_var_for_numpy(
self._distribution_strategy, x)
var_y = distributed_training_utils.get_var_for_numpy(
self._distribution_strategy, y)
if sample_weight is not None:
var_sample_weights = distributed_training_utils.get_var_for_numpy(
self._distribution_strategy, sample_weight)
x = dataset_ops.Dataset.from_tensor_slices((var_x, var_y,
var_sample_weights))
else:
x = dataset_ops.Dataset.from_tensor_slices((var_x, var_y))
x = dataset_ops.Dataset.from_tensor_slices((var_x, var_y))
if shuffle:
x = x.shuffle(1024)
x = x.repeat()
x = x.batch(batch_size, drop_remainder=drop_remainder)
y = None
sample_weight = None
else:
var_x = distributed_training_utils.get_var_for_numpy(
self._distribution_strategy, x)
x = dataset_ops.Dataset.from_tensor_slices(var_x)
x = x.repeat()
x = x.batch(batch_size, drop_remainder=drop_remainder)
assert isinstance(x, dataset_ops.Dataset)
result = self._distribution_strategy.distribute_dataset(lambda: x)
iterator = result.make_initializable_iterator()
with self._distribution_strategy.scope():
K.get_session().run(iterator.initializer)
training_utils.validate_iterator_input(x, y, sample_weight,
validation_split)
return iterator
  def _standardize_user_data(self,
                             x,
                             y=None,
                             sample_weight=None,
                             class_weight=None,
                             batch_size=None,
                             check_steps=False,
                             steps_name='steps',
                             steps=None,
                             validation_split=0,
                             shuffle=False):
    """Normalizes user-provided data to `(x, y, sample_weights)`.

    Under DistributionStrategy, returns `(iterator, None, None)` instead.
    Datasets and iterators are unwrapped into one batch of tensors which is
    then standardized, except eager iterators with `steps` set, which are
    passed through untouched (batches are standardized later in the loop).
    """
    if self._distribution_strategy:
      iterator = self._distribution_standardize_user_data(
          x,
          y,
          sample_weight=sample_weight,
          class_weight=class_weight,
          batch_size=batch_size,
          check_steps=check_steps,
          steps_name=steps_name,
          steps=steps,
          validation_split=validation_split,
          shuffle=shuffle)
      return iterator, None, None
    if isinstance(x, dataset_ops.Dataset):
      if context.executing_eagerly():
        x = x.make_one_shot_iterator()
      else:
        # Cache graph-mode initializable iterators so a Dataset reused across
        # calls is initialized only once.
        if x in self._dataset_iterator_cache:
          x = self._dataset_iterator_cache[x]
        else:
          iterator = x.make_initializable_iterator()
          self._dataset_iterator_cache[x] = iterator
          x = iterator
          K.get_session().run(x.initializer)
    if check_steps:
      training_utils.check_steps_argument(x, steps, steps_name)
    is_x_eager_iterator = isinstance(x, iterator_ops.EagerIterator)
    is_x_iterator = isinstance(x, iterator_ops.Iterator)
    # Validate user inputs when data is given as a dataset or dataset iterator.
    if is_x_iterator or is_x_eager_iterator:
      training_utils.validate_iterator_input(x, y, sample_weight,
                                             validation_split)
    # For eager iterators, when we have to process multiple batches of samples,
    # we will standardize the data when we actually loop over iterator and get
    # the batches. For now, we just return the iterator as is.
    if is_x_eager_iterator and steps is not None:
      return x, y, sample_weight
    # If input data is a dataset iterator in graph mode or if it is an eager
    # iterator and only one batch of samples is required, we fetch the data
    # tensors from the iterator and then standardize them.
    if is_x_iterator or is_x_eager_iterator:
      try:
        if is_x_iterator:
          next_element = self._get_iterator_get_next_tensors(x)
        else:
          next_element = x.get_next()
      except errors.OutOfRangeError:
        raise RuntimeError('Your dataset iterator ran out of data; '
                           'Make sure that your dataset can generate '
                           'required number of samples.')
      # The iterator may yield (x,), (x, y) or (x, y, sample_weight).
      if isinstance(next_element, (list, tuple)):
        if len(next_element) not in [2, 3]:
          raise ValueError(
              'Please provide model inputs as a list or tuple of 2 or 3'
              'elements: (input, target) or (input, target, sample_weights)'
              'Received %s' % next_element)
        if len(next_element) == 2:
          x, y = next_element
        else:
          x, y, sample_weight = next_element
      else:
        x = next_element
    x, y, sample_weights = self._standardize_weights(x, y, sample_weight,
                                                     class_weight, batch_size)
    return x, y, sample_weights
  def _standardize_weights(self, x, y, sample_weight=None, class_weight=None,
                           batch_size=None,):
    """Standardizes `x`/`y`/weights into the list format fed to the backend.

    Also builds and/or compiles the model on the fly when needed (the
    deferred-build path for subclassed/Sequential models).

    Returns:
      `(x, y, sample_weights)` as lists (or a dict for `x` when the caller
      passed dict inputs), or `([], [], [])` when symbolic tensors were just
      wired in as inputs/targets and nothing needs feeding.
    """
    # TODO(sourabhbajaj): Split input validation from weight standardization.
    if sample_weight is not None and class_weight is not None:
      logging.warning(
          'Received both a `sample_weight` and `class_weight` argument. '
          'The `class_weight` argument will be ignored.')
    # First, we build/compile the model on the fly if necessary.
    all_inputs = []
    is_build_called = False
    is_compile_called = False
    dict_inputs = False
    if not self.inputs:
      # We need to use `x` to set the model inputs.
      # We type-check that `x` and `y` are either single arrays
      # or lists of arrays.
      if isinstance(x, (list, tuple)):
        if not all(isinstance(v, np.ndarray) or
                   tensor_util.is_tensor(v) for v in x):
          raise ValueError('Please provide as model inputs either a single '
                           'array or a list of arrays. You passed: x=' + str(x))
        all_inputs += list(x)
      elif isinstance(x, dict):
        dict_inputs = True
        # Sort keys so input order is deterministic for dict inputs.
        keys = sorted(x.keys())
        all_inputs = [x[k] for k in keys]
      else:
        if not isinstance(x, np.ndarray) and not tensor_util.is_tensor(x):
          raise ValueError('Please provide as model inputs either a single '
                           'array or a list of arrays. You passed: x=' + str(x))
        all_inputs.append(x)
      # Build the model using the retrieved inputs (value or symbolic).
      # If values, then in symbolic-mode placeholders will be created
      # to match the value shapes.
      if not self.inputs:
        is_build_called = True
        self._set_inputs(x)
    else:
      dict_inputs = isinstance(self.inputs, dict)
    if y is not None:
      if not self.optimizer:
        raise RuntimeError('You must compile a model before '
                           'training/testing. '
                           'Use `model.compile(optimizer, loss)`.')
      if not self._is_compiled:
        # On-the-fly compilation of the model.
        # We need to use `y` to set the model targets.
        if isinstance(y, (list, tuple)):
          if not all(isinstance(v, np.ndarray) or
                     tensor_util.is_tensor(v) for v in y):
            raise ValueError('Please provide as model targets either a single '
                             'array or a list of arrays. '
                             'You passed: y=' + str(y))
          all_inputs += list(y)
        elif isinstance(y, dict):
          raise ValueError('Please do not pass a dictionary as model targets.')
        else:
          if not isinstance(y, np.ndarray) and not tensor_util.is_tensor(y):
            raise ValueError('Please provide as model targets either a single '
                             'array or a list of arrays. '
                             'You passed: y=' + str(y))
          all_inputs.append(y)
        # Typecheck that all inputs are *either* value *or* symbolic.
        # TODO(fchollet): this check could be removed in Eager mode?
        if any(tensor_util.is_tensor(v) for v in all_inputs):
          if not all(tensor_util.is_tensor(v) for v in all_inputs):
            raise ValueError('Do not pass inputs that mix Numpy arrays and '
                             'TensorFlow tensors. '
                             'You passed: x=' + str(x) + '; y=' + str(y))
        if context.executing_eagerly():
          target_tensors = None
        else:
          # Handle target tensors if any passed.
          if not isinstance(y, (list, tuple)):
            y = [y]
          target_tensors = [v for v in y if tensor_util.is_tensor(v)]
        is_compile_called = True
        self.compile(optimizer=self.optimizer,
                     loss=self.loss,
                     metrics=self.metrics,
                     loss_weights=self.loss_weights,
                     target_tensors=target_tensors)
    # In graph mode, if we had just set inputs and targets as symbolic tensors
    # by invoking build and compile on the model respectively, we do not have to
    # feed anything to the model. Model already has input and target data as
    # part of the graph.
    # Note: in this case, `any` and `all` are equivalent since we disallow
    # mixed symbolic/value inputs.
    if (not context.executing_eagerly() and is_build_called and
        is_compile_called and
        any(tensor_util.is_tensor(v) for v in all_inputs)):
      return [], [], []
    # What follows is input validation and standardization to list format,
    # in the case where all inputs are value arrays.
    if context.executing_eagerly():
      # In eager mode, do not do shape validation
      # since the network has no input nodes (placeholders) to be fed.
      feed_input_names = self.input_names
      feed_input_shapes = None
    elif not self._is_graph_network:
      # Case: symbolic-mode subclassed network. Do not do shape validation.
      feed_input_names = self._feed_input_names
      feed_input_shapes = None
    else:
      # Case: symbolic-mode graph network.
      # In this case, we run extensive shape validation checks.
      feed_input_names = self._feed_input_names
      feed_input_shapes = self._feed_input_shapes
    # Standardize the inputs.
    x = training_utils.standardize_input_data(
        x,
        feed_input_names,
        feed_input_shapes,
        check_batch_axis=False,  # Don't enforce the batch size.
        exception_prefix='input')
    if y is not None:
      if not self._is_graph_network:
        feed_output_names = self._feed_output_names
        feed_output_shapes = None
        feed_sample_weight_modes = [None for _ in self.outputs]
      else:
        feed_output_names = self._feed_output_names
        feed_sample_weight_modes = self._feed_sample_weight_modes
        feed_output_shapes = []
        # Derive the per-output shape expected for the targets; sparse
        # categorical crossentropy expects integer labels (last dim 1).
        for output_shape, loss_fn in zip(self._feed_output_shapes,
                                         self._feed_loss_fns):
          if loss_fn is losses.sparse_categorical_crossentropy:
            if K.image_data_format() == 'channels_first':
              feed_output_shapes.append(
                  (output_shape[0], 1) + output_shape[2:])
            else:
              feed_output_shapes.append(output_shape[:-1] + (1,))
          elif (not hasattr(loss_fn, '__name__') or
                getattr(losses, loss_fn.__name__, None) is None):
            # Custom loss function: skip target shape validation.
            feed_output_shapes.append(None)
          else:
            feed_output_shapes.append(output_shape)
      y = training_utils.standardize_input_data(
          y,
          feed_output_names,
          feed_output_shapes,
          check_batch_axis=False,
          exception_prefix='target')
      # Generate sample-wise weight values given the `sample_weight` and
      # `class_weight` arguments.
      sample_weights = training_utils.standardize_sample_weights(
          sample_weight, feed_output_names)
      class_weights = training_utils.standardize_class_weights(
          class_weight, feed_output_names)
      sample_weights = [
          training_utils.standardize_weights(ref, sw, cw, mode)
          for (ref, sw, cw, mode) in zip(y, sample_weights, class_weights,
                                         feed_sample_weight_modes)
      ]
      # Check that all arrays have the same length.
      if not self._distribution_strategy:
        training_utils.check_array_lengths(x, y, sample_weights)
        if self._is_graph_network and not context.executing_eagerly():
          # Additional checks to avoid users mistakenly using improper loss fns.
          training_utils.check_loss_and_target_compatibility(
              y, self._feed_loss_fns, feed_output_shapes)
    else:
      y = []
      sample_weights = []
    if self.stateful and batch_size:
      # Check that for stateful networks, number of samples is a multiple
      # of the static batch size.
      if x[0].shape[0] % batch_size != 0:
        raise ValueError('In a stateful network, '
                         'you should only pass inputs with '
                         'a number of samples that can be '
                         'divided by the batch size. Found: ' +
                         str(x[0].shape[0]) + ' samples')
    # If dictionary inputs were provided, we return a dictionary as well.
    if dict_inputs:
      x = dict(zip(feed_input_names, x))
    return x, y, sample_weights
  @checkpointable.no_automatic_dependency_tracking
  def _set_inputs(self, inputs, outputs=None, training=None):
    """Sets model inputs from user data and (optionally) computes outputs.

    Symbolic inputs are used as-is; numpy values get placeholders created for
    them. When `outputs` is None the model is called inside the backend graph
    to obtain symbolic outputs. Marks the model as built.

    Args:
      inputs: value or symbolic input(s); a dict is only accepted for
        Sequential models whose first layer is a FeatureLayer.
      outputs: optional precomputed symbolic outputs.
      training: optional training-phase value forwarded to `call` when the
        model expects a `training` argument.

    Raises:
      ValueError: if inputs were already set, or on an invalid dict input.
    """
    if self.inputs:
      raise ValueError('Model inputs are already set.')
    if self.__class__.__name__ == 'Sequential' and not self.built:
      if tensor_util.is_tensor(inputs):
        input_shape = (None,) + tuple(inputs.get_shape().as_list()[1:])
        self.build(input_shape=input_shape)
      elif isinstance(inputs, dict):
        # We assert that the first layer is a FeatureLayer.
        if not training_utils.is_feature_layer(self.layers[0]):
          raise ValueError('Passing a dictionary input to a Sequential Model '
                           'which doesnt have FeatureLayer as the first layer '
                           'is an error')
        input_shape = (None,)
        self.build(input_shape=input_shape)
      else:
        input_shape = (None,) + inputs.shape[1:]
        self.build(input_shape=input_shape)
    # On-the-fly setting of symbolic model inputs (either by using the tensor
    # provided, or by creating a placeholder if Numpy data was provided).
    model_inputs = training_utils.ModelInputs(inputs)
    inputs = model_inputs.get_symbolic_inputs()
    self.inputs = model_inputs.get_symbolic_inputs(return_single_as_list=True)
    self.input_names = model_inputs.get_input_names()
    # Only placeholders can be fed; record those separately.
    self._feed_inputs = []
    self._feed_input_names = []
    self._feed_input_shapes = []
    for k, v in model_inputs.as_dict():
      if K.is_placeholder(v):
        self._feed_inputs.append(v)
        self._feed_input_names.append(k)
        self._feed_input_shapes.append(K.int_shape(v))
    if outputs is None:
      # Obtain symbolic outputs by calling the model.
      graph = K.get_graph()
      with graph.as_default():
        if self._expects_training_arg:
          outputs = self.call(inputs, training=training)
        else:
          outputs = self.call(inputs)
    outputs = nest.flatten(outputs)
    self.outputs = outputs
    self.output_names = [
        'output_%d' % (i + 1) for i in range(len(self.outputs))]
    self.built = True
  def fit(self,
          x=None,
          y=None,
          batch_size=None,
          epochs=1,
          verbose=1,
          callbacks=None,
          validation_split=0.,
          validation_data=None,
          shuffle=True,
          class_weight=None,
          sample_weight=None,
          initial_epoch=0,
          steps_per_epoch=None,
          validation_steps=None,
          max_queue_size=10,
          workers=1,
          use_multiprocessing=False,
          **kwargs):
    """Trains the model for a fixed number of epochs.

    Generator/`Sequence` input is delegated to `fit_generator`; otherwise the
    data is standardized, validation data is prepared, and one of the eager,
    distributed, or array (graph) fit loops is invoked.

    Returns:
      Whatever the selected fit loop returns (a `History` object).

    Raises:
      TypeError: on unrecognized keyword arguments.
      ValueError: on malformed `validation_data` or invalid inputs.
    """
    # TODO(fchollet): this method may be creating reference cycles, which would
    # lead to accumulating garbage in memory when called in a loop. Investigate.
    if data_utils.is_generator_or_sequence(x):
      training_utils.check_generator_arguments(y, sample_weight)
      return self.fit_generator(
          x,
          steps_per_epoch=steps_per_epoch,
          epochs=epochs,
          verbose=verbose,
          callbacks=callbacks,
          validation_data=validation_data,
          validation_steps=validation_steps,
          class_weight=class_weight,
          max_queue_size=max_queue_size,
          workers=workers,
          use_multiprocessing=use_multiprocessing,
          shuffle=shuffle,
          initial_epoch=initial_epoch)
    # Backwards compatibility
    if batch_size is None and steps_per_epoch is None:
      batch_size = 32
    # Legacy support
    if 'nb_epoch' in kwargs:
      logging.warning(
          'The `nb_epoch` argument in `fit` '
          'has been renamed `epochs`.')
      epochs = kwargs.pop('nb_epoch')
    if kwargs:
      raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))
    # Validate and standardize user data.
    if self._distribution_strategy:
      distributed_training_utils.validate_callbacks(callbacks)
      distributed_training_utils.validate_inputs(
          x, y, self._distribution_strategy)
      # Infer steps_per_epoch from numpy input when not given.
      first_x_value = nest.flatten(x)[0]
      if not steps_per_epoch and isinstance(first_x_value, np.ndarray):
        steps_per_epoch = distributed_training_utils.get_input_batch_params(
            first_x_value, batch_size, self._distribution_strategy)
    x, y, sample_weights = self._standardize_user_data(
        x,
        y,
        sample_weight=sample_weight,
        class_weight=class_weight,
        batch_size=batch_size,
        check_steps=True,
        steps_name='steps_per_epoch',
        steps=steps_per_epoch,
        validation_split=validation_split,
        shuffle=shuffle)
    # Prepare validation data.
    if validation_data:
      # Accepted forms: dataset/iterator, (x, y), or (x, y, sample_weights).
      if (isinstance(validation_data, iterator_ops.Iterator) or
          isinstance(validation_data, iterator_ops.EagerIterator) or
          isinstance(validation_data, dataset_ops.Dataset)):
        val_x = validation_data
        val_y = None
        val_sample_weight = None
      elif len(validation_data) == 2:
        val_x, val_y = validation_data  # pylint: disable=unpacking-non-sequence
        val_sample_weight = None
      elif len(validation_data) == 3:
        val_x, val_y, val_sample_weight = validation_data  # pylint: disable=unpacking-non-sequence
      else:
        raise ValueError(
            'When passing a `validation_data` argument, '
            'it must contain either 2 items (x_val, y_val), '
            'or 3 items (x_val, y_val, val_sample_weights), '
            'or alternatively it could be a dataset or a '
            'dataset or a dataset iterator. '
            'However we received `validation_data=%s`' % validation_data)
      # Validate and standardize validation data.
      if self._distribution_strategy:
        distributed_training_utils.validate_inputs(
            val_x, val_y, self._distribution_strategy)
        first_valx_value = nest.flatten(val_x)[0]
        if not validation_steps and isinstance(first_valx_value, np.ndarray):
          validation_steps = distributed_training_utils.get_input_batch_params(
              first_valx_value, batch_size, self._distribution_strategy)
      val_x, val_y, val_sample_weights = self._standardize_user_data(
          val_x,
          val_y,
          sample_weight=val_sample_weight,
          batch_size=batch_size,
          steps=validation_steps)
    elif validation_split and 0. < validation_split < 1.:
      # Carve the validation set off the tail of the training arrays.
      if training_utils.has_symbolic_tensors(x):
        raise ValueError('If your data is in the form of symbolic tensors, '
                         'you cannot use `validation_split`.')
      if hasattr(x[0], 'shape'):
        split_at = int(x[0].shape[0] * (1. - validation_split))
      else:
        split_at = int(len(x[0]) * (1. - validation_split))
      x, val_x = (slice_arrays(x, 0, split_at), slice_arrays(x, split_at))
      y, val_y = (slice_arrays(y, 0, split_at), slice_arrays(y, split_at))
      sample_weights, val_sample_weights = (slice_arrays(
          sample_weights, 0, split_at), slice_arrays(sample_weights, split_at))
    elif validation_steps:
      val_x = []
      val_y = []
      val_sample_weights = []
    else:
      val_x = None
      val_y = None
      val_sample_weights = None
    # Dispatch to the appropriate training loop.
    if context.executing_eagerly():
      return training_eager.fit_loop(
          self,
          inputs=x,
          targets=y,
          sample_weights=sample_weights,
          class_weight=class_weight,
          batch_size=batch_size,
          epochs=epochs,
          verbose=verbose,
          callbacks=callbacks,
          val_inputs=val_x,
          val_targets=val_y,
          val_sample_weights=val_sample_weights,
          shuffle=shuffle,
          initial_epoch=initial_epoch,
          steps_per_epoch=steps_per_epoch,
          validation_steps=validation_steps)
    elif self._distribution_strategy:
      return training_distributed.fit_loop(
          self, x,
          epochs=epochs,
          verbose=verbose,
          callbacks=callbacks,
          val_iterator=val_x,
          initial_epoch=initial_epoch,
          steps_per_epoch=steps_per_epoch,
          validation_steps=validation_steps)
    else:
      return training_arrays.fit_loop(
          self, x, y,
          sample_weights=sample_weights,
          batch_size=batch_size,
          epochs=epochs,
          verbose=verbose,
          callbacks=callbacks,
          val_inputs=val_x,
          val_targets=val_y,
          val_sample_weights=val_sample_weights,
          shuffle=shuffle,
          initial_epoch=initial_epoch,
          steps_per_epoch=steps_per_epoch,
          validation_steps=validation_steps)
  def evaluate(self,
               x=None,
               y=None,
               batch_size=None,
               verbose=1,
               sample_weight=None,
               steps=None,
               max_queue_size=10,
               workers=1,
               use_multiprocessing=False):
    """Returns the loss value & metrics values for the model in test mode.

    Generator/`Sequence` input is delegated to `evaluate_generator`;
    otherwise data is standardized and one of the eager, distributed, or
    array (graph) test loops is invoked.

    Returns:
      Whatever the selected test loop returns (scalar loss, or a list of
      loss + metric values).
    """
    if data_utils.is_generator_or_sequence(x):
      training_utils.check_generator_arguments(y, sample_weight)
      return self.evaluate_generator(
          x,
          steps=steps,
          verbose=verbose,
          max_queue_size=max_queue_size,
          workers=workers,
          use_multiprocessing=use_multiprocessing)
    # Backwards compatibility.
    if batch_size is None and steps is None:
      batch_size = 32
    # Validate and standardize user data.
    if self._distribution_strategy:
      distributed_training_utils.validate_inputs(
          x, y, self._distribution_strategy)
      # Infer `steps` from numpy input when not given.
      first_x_value = nest.flatten(x)[0]
      if isinstance(first_x_value, np.ndarray) and not steps:
        steps = distributed_training_utils.get_input_batch_params(
            first_x_value, batch_size, self._distribution_strategy)
    x, y, sample_weights = self._standardize_user_data(
        x,
        y,
        sample_weight=sample_weight,
        batch_size=batch_size,
        check_steps=True,
        steps_name='steps',
        steps=steps)
    # Dispatch to the appropriate evaluation loop.
    if context.executing_eagerly():
      return training_eager.test_loop(
          self,
          inputs=x,
          targets=y,
          sample_weights=sample_weights,
          batch_size=batch_size,
          verbose=verbose,
          steps=steps)
    elif self._distribution_strategy:
      return training_distributed.test_loop(
          self,
          iterator=x,
          verbose=verbose,
          steps=steps)
    else:
      return training_arrays.test_loop(
          self,
          inputs=x,
          targets=y,
          sample_weights=sample_weights,
          batch_size=batch_size,
          verbose=verbose,
          steps=steps)
  def predict(self,
              x,
              batch_size=None,
              verbose=0,
              steps=None,
              max_queue_size=10,
              workers=1,
              use_multiprocessing=False):
    """Generates output predictions for the input samples.

    Generator/`Sequence` input is delegated to `predict_generator`; otherwise
    data is standardized (targets ignored) and one of the eager, distributed,
    or array (graph) predict loops is invoked.
    """
    if data_utils.is_generator_or_sequence(x):
      return self.predict_generator(
          x,
          steps=steps,
          verbose=verbose,
          max_queue_size=max_queue_size,
          workers=workers,
          use_multiprocessing=use_multiprocessing)
    # Backwards compatibility.
    if batch_size is None and steps is None:
      batch_size = 32
    if self._distribution_strategy:
      # Turn off prefetching since this is currently not deterministic. Once
      # b/112498930 is fixed we can turn it back on.
      # `_prefetch_on_device` is currently a property of only
      # `MirroredStrategy`.
      if hasattr(self._distribution_strategy, '_prefetch_on_device'):
        self._distribution_strategy._prefetch_on_device = False  # pylint: disable=protected-access
      distributed_training_utils.validate_inputs(
          x, None, self._distribution_strategy)
      # Infer `steps` from numpy input when not given.
      first_x_value = nest.flatten(x)[0]
      if isinstance(first_x_value, np.ndarray) and not steps:
        steps = distributed_training_utils.get_input_batch_params(
            first_x_value, batch_size, self._distribution_strategy)
    # Validate and standardize user data.
    # TODO(anjalisridhar): We don't pass batch_size here for some reason. This
    # means `_standardize_user_data` cannot use it; confirm whether it should.
    x, _, _ = self._standardize_user_data(
        x, check_steps=True, steps_name='steps', steps=steps)
    if context.executing_eagerly():
      return training_eager.predict_loop(
          self, x, batch_size=batch_size, verbose=verbose, steps=steps)
    elif self._distribution_strategy:
      results = training_distributed.predict_loop(
          self, x, verbose=verbose, steps=steps)
      # Re-enable the prefetching that was disabled above.
      if hasattr(self._distribution_strategy, '_prefetch_on_device'):
        self._distribution_strategy._prefetch_on_device = True  # pylint: disable=protected-access
      return results
    else:
      return training_arrays.predict_loop(
          self, x, batch_size=batch_size, verbose=verbose, steps=steps)
def train_on_batch(self, x, y=None, sample_weight=None, class_weight=None):
if self._distribution_strategy:
raise NotImplementedError('`train_on_batch` is not supported for models '
'compiled with DistributionStrategy.')
x, y, sample_weights = self._standardize_user_data(
x, y, sample_weight=sample_weight, class_weight=class_weight)
if context.executing_eagerly():
outputs = training_eager.train_on_batch(
self, x, y, sample_weights=sample_weights)
else:
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [1]
else:
ins = x + y + sample_weights
self._make_train_function()
outputs = self.train_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def test_on_batch(self, x, y=None, sample_weight=None):
if self._distribution_strategy:
raise NotImplementedError('`test_on_batch` is not supported for models '
'compiled with DistributionStrategy.')
x, y, sample_weights = self._standardize_user_data(
x, y, sample_weight=sample_weight)
if context.executing_eagerly():
outputs = training_eager.test_on_batch(
self, x, y, sample_weights=sample_weights)
else:
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [0]
else:
ins = x + y + sample_weights
self._make_test_function()
outputs = self.test_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def predict_on_batch(self, x):
if self._distribution_strategy:
raise NotImplementedError('`predict_on_batch` is not supported for '
'models compiled with DistributionStrategy.')
inputs, _, _ = self._standardize_user_data(x)
if context.executing_eagerly():
if (isinstance(x, iterator_ops.EagerIterator) or
(isinstance(x, dataset_ops.Dataset) and context.executing_eagerly())):
inputs = training_utils.cast_if_floating_dtype(inputs)
else:
inputs = [
ops.convert_to_tensor(val, dtype=K.floatx()) for val in inputs
]
return self(inputs)
if not context.executing_eagerly():
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = inputs + [0]
else:
ins = inputs
self._make_predict_function()
outputs = self.predict_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def fit_generator(self,
generator,
steps_per_epoch=None,
epochs=1,
verbose=1,
callbacks=None,
validation_data=None,
validation_steps=None,
class_weight=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
shuffle=True,
initial_epoch=0):
if self._distribution_strategy:
raise NotImplementedError('`fit_generator` is not supported for '
'models compiled with DistributionStrategy.')
if not self.built and not self._is_graph_network:
raise NotImplementedError(
'`fit_generator` is not yet enabled for unbuilt Model subclasses')
return training_generator.fit_generator(
self,
generator,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
validation_data=validation_data,
validation_steps=validation_steps,
class_weight=class_weight,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
shuffle=shuffle,
initial_epoch=initial_epoch)
def evaluate_generator(self,
generator,
steps=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0):
if self._distribution_strategy:
raise NotImplementedError('`evaluate_generator` is not supported for '
'models compiled with DistributionStrategy.')
if not self.built and not self._is_graph_network:
raise NotImplementedError(
'`evaluate_generator` is not yet enabled for '
'unbuilt Model subclasses')
return training_generator.evaluate_generator(
self,
generator,
steps=steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
verbose=verbose)
def predict_generator(self,
generator,
steps=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0):
if self._distribution_strategy:
raise NotImplementedError('`predict_generator` is not supported for '
'models compiled with DistributionStrategy.')
if not self.built and not self._is_graph_network:
raise NotImplementedError(
'`predict_generator` is not yet enabled for unbuilt Model subclasses')
return training_generator.predict_generator(
self,
generator,
steps=steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
verbose=verbose)
def _get_callback_model(self):
if hasattr(self, '_replicated_model') and self._replicated_model:
return self._replicated_model
if hasattr(self, 'callback_model') and self.callback_model:
return self.callback_model
return self
def _make_callback_model(self, grouped_model):
first_replicated_model = self._distribution_strategy.unwrap(
grouped_model)[0]
self._replicated_model = DistributedCallbackModel(first_replicated_model)
self._replicated_model.set_original_model(self)
class DistributedCallbackModel(Model):
  """Model wrapper passed to Keras callbacks during distributed training.

  Save/load operations are routed through the original (user-facing) model
  or the replicated model so callbacks see consistent weights.
  """
  def __init__(self, model):
    # NOTE(review): `model` is accepted but unused here; confirm how the
    # wrapped replica is tracked by callers.
    super(DistributedCallbackModel, self).__init__()
  def set_original_model(self, orig_model):
    # Remember the user-facing model so weights can be copied back to it.
    self._original_model = orig_model
  def save_weights(self, filepath, overwrite=True, save_format=None):
    # Delegates to `_replicated_model`; assumes the surrounding training
    # code attached that attribute -- confirm against callers.
    self._replicated_model.save_weights(filepath, overwrite=overwrite,
                                        save_format=save_format)
  def save(self, filepath, overwrite=True, include_optimizer=True):
    # Copy distributed weights onto the original model, then save it
    # (optimizer state is deliberately not saved here).
    distributed_model_weights = self.get_weights()
    self._original_model.set_weights(distributed_model_weights)
    self._original_model.save(filepath, overwrite=True, include_optimizer=False)
  def load_weights(self, filepath, by_name=False):
    # Load into the original model, then mirror onto every replica.
    # NOTE(review): the `by_name` parameter is ignored (False is always
    # passed through) -- confirm whether that is intentional.
    self._original_model.load_weights(filepath, by_name=False)
    orig_model_weights = self._original_model.get_weights()
    distributed_training_utils.set_weights(
        self._original_model._distribution_strategy, self,
        orig_model_weights)
  def __getattr__(self, item):
    # Warn on access to attributes that may not be mirrored on this wrapper.
    # NOTE(review): this implicitly returns None for missing attributes; the
    # upstream version falls back to super().__getattr__ -- this chunk may be
    # truncated here. Confirm before relying on attribute fallback.
    if item not in ['_setattr_tracking']:
      logging.warning('You are accessing attribute ' + item + ' of the '
                      'DistributedCallbackModel that may not have been set '
                      'correctly.')
| true | true |
f7f61f880dcad8ecfd1738ddd2e109a7c334617b | 4,606 | py | Python | py3plex/algorithms/multilayer_algorithms/entanglement.py | awesome-archive/Py3plex | a099acb992441c1630208ba13694acb8e2a38895 | [
"BSD-3-Clause"
] | null | null | null | py3plex/algorithms/multilayer_algorithms/entanglement.py | awesome-archive/Py3plex | a099acb992441c1630208ba13694acb8e2a38895 | [
"BSD-3-Clause"
] | null | null | null | py3plex/algorithms/multilayer_algorithms/entanglement.py | awesome-archive/Py3plex | a099acb992441c1630208ba13694acb8e2a38895 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Authors: Benjamin Renoust (github.com/renoust)
Date: 2018/02/13
Description: Loads a Detangler JSON format graph and compute unweighted entanglement analysis with Py3Plex
"""
import sys
print(sys.version)
import networkx as nx
from scipy.sparse.csgraph import csgraph_from_dense, connected_components
from scipy import spatial
import numpy as np
import itertools
import math
# Build the R and C matrix
def build_occurrence_matrix(network):
    """Computes the layer co-occurrence matrix C of a multilayer network.

    Only intra-layer edges are considered. R counts, per flattened node pair,
    how often each layer appears (diagonal) and how often ordered layer pairs
    co-occur on the same pair (off-diagonal). C is R normalized: C[i][i] is
    layer i's share of flattened pairs, and C[i][j] = R[i][j] / R[j][j].

    Args:
        network: object exposing get_edges() yielding
            ((node1, layer1), (node2, layer2)) tuples.

    Returns:
        (c_matrix, layers): the normalized co-occurrence numpy matrix and the
        list of layer identifiers indexing its rows/columns.
    """
    layers = []
    intra_edges = []
    for (n1, l1), (n2, l2) in network.get_edges():
        if l1 != l2:
            # Inter-layer (coupling) edges do not contribute.
            continue
        if l1 not in layers:
            layers.append(l1)
        intra_edges.append([n1, n2, l1])
    # Sort so all parallel edges of one flattened node pair are adjacent.
    intra_edges.sort(key=lambda e: [e[0], e[1]])
    nb_layers = len(layers)
    r_matrix = np.zeros((nb_layers, nb_layers))
    flat_pairs = 0.0
    for _, group in itertools.groupby(intra_edges, key=lambda e: [e[0], e[1]]):
        flat_pairs += 1.0
        seen_layers = []
        for _, _, layer in group:
            layer_idx = layers.index(layer)
            r_matrix[layer_idx, layer_idx] += 1.0
            for prev_idx in seen_layers:
                r_matrix[prev_idx, layer_idx] += 1.0
                r_matrix[layer_idx, prev_idx] += 1.0
            seen_layers.append(layer_idx)
    # Kept from the original implementation: the flattened pair count is
    # incremented once more after the loop.
    flat_pairs += 1
    c_matrix = r_matrix.copy()
    for i in range(nb_layers):
        c_matrix[i, i] /= flat_pairs
    for i, j in itertools.combinations(range(nb_layers), 2):
        c_matrix[i, j] /= r_matrix[j][j]
        c_matrix[j, i] /= r_matrix[i][i]
    return c_matrix, layers
# proceeds with block decomposition
def compute_blocks(c_matrix):
    """Decomposes a coupling matrix into connected-component sub-matrices.

    Args:
        c_matrix: square numpy array; zero entries mean "no coupling".

    Returns:
        (indices, blocks) where indices[k] lists the row/column positions of
        component k and blocks[k] is the corresponding square sub-matrix.
    """
    sparse_graph = csgraph_from_dense(c_matrix)
    _, labels = connected_components(sparse_graph, directed=False,
                                     return_labels=True)
    members_by_component = {}
    for row, component in enumerate(labels):
        members_by_component.setdefault(component, []).append(row)
    indices = []
    blocks = []
    for members in members_by_component.values():
        indices.append(members)
        blocks.append(c_matrix[np.ix_(members, members)])
    return indices, blocks
# computes entanglement for one block
def compute_entanglement(block_matrix):
    """Computes the entanglement indices of one block of layers.

    Args:
        block_matrix: square coupling sub-matrix of a connected component.

    Returns:
        ([intensity, homogeneity, normalized_homogeneity], gamma_layers)
        where gamma_layers holds each layer's (absolute, real) component of
        the leading eigenvector.
    """
    nb_layers = len(block_matrix)
    eigenvalues, eigenvectors = np.linalg.eig(block_matrix)
    # Intensity: leading eigenvalue normalized by the number of layers.
    entanglement_intensity = max(eigenvalues.real) / nb_layers
    leading_index = np.argmax(eigenvalues)
    # abs(...real) guards against numerical sign/complex approximation.
    gamma_layers = [abs(eigenvectors[row][leading_index].real)
                    for row in range(nb_layers)]
    # Homogeneity: cosine similarity with the uniform [1, ..., 1] vector.
    entanglement_homogeneity = 1 - spatial.distance.cosine(
        gamma_layers, np.ones(nb_layers))
    # Normalize within the top-right quadrant to flatten the [0-1] range.
    normalized_entanglement_homogeneity = (
        1 - math.acos(entanglement_homogeneity) / (math.pi / 2))
    return ([entanglement_intensity, entanglement_homogeneity,
             normalized_entanglement_homogeneity], gamma_layers)
def compute_entanglement_analysis(network):
    """Runs the full entanglement pipeline on a multilayer network.

    Builds the layer co-occurrence matrix, splits it into connected blocks
    of layers, and computes the entanglement indices of every block.

    Returns:
        A list with one dict per block holding 'Entanglement intensity',
        'Layer entanglement' (per-layer gamma), 'Entanglement homogeneity'
        and 'Normalized homogeneity'.
    """
    c_matrix, layer_names = build_occurrence_matrix(network)
    block_indices, block_matrices = compute_blocks(c_matrix)
    analysis = []
    for members, block in zip(block_indices, block_matrices):
        labels = [layer_names[idx] for idx in members]
        [intensity, homogeneity, homogeneity_norm], gamma = \
            compute_entanglement(block)
        analysis.append({
            'Entanglement intensity': intensity,
            'Layer entanglement': dict(zip(labels, gamma)),
            'Entanglement homogeneity': homogeneity,
            'Normalized homogeneity': homogeneity_norm,
        })
    return analysis
if __name__ == '__main__':
	# NOTE(review): `net` is never defined in this module, so running it as a
	# script raises NameError. Presumably a multilayer network should be
	# loaded here (e.g. via a py3plex parser); confirm and construct `net`.
	analysis = compute_entanglement_analysis(net)
	print ("%d connected components of layers"%len(analysis))
	for i,b in enumerate(analysis):
		print ('--- block %d'%i)
		layer_labels = b['Layer entanglement'].keys()
		print ('Covering layers: %s'%layer_labels)
		print ('Entanglement intensity: %f'%b['Entanglement intensity'])
		print ('Layer entanglement: %s'%b['Layer entanglement'])
		print ('Entanglement homogeneity: %f'%b['Entanglement homogeneity'])
		print ('Normalized homogeneity: %f'%b['Normalized homogeneity'])
| 30.104575 | 112 | 0.649587 |
import sys
print(sys.version)
import networkx as nx
from scipy.sparse.csgraph import csgraph_from_dense, connected_components
from scipy import spatial
import numpy as np
import itertools
import math
def build_occurrence_matrix(network):
multiedges = network.get_edges()
layers = []
edge_list = []
for e in multiedges:
(n1,l1),(n2,l2) = e
if l1 == l2:
if l1 not in layers:
layers += [l1]
edge_list.append([n1,n2,l1])
edge_list = sorted(edge_list, key=lambda x: [x[0],x[1]])
nb_layers = len(layers)
r_matrix = np.zeros((nb_layers,nb_layers))
def count_overlap(overlap):
prev_layers = []
for e in overlap:
layer = e[2]
layer_index = layers.index(layer)
r_matrix[layer_index,layer_index] += 1.0
for l in prev_layers:
r_matrix[l,layer_index] += 1.0
r_matrix[layer_index,l] += 1.0
prev_layers.append(layer_index)
current_edge = None
flat_pairs = 0.0
overlap = []
for e in edge_list:
node_pair = [e[0],e[1]]
if current_edge != node_pair:
flat_pairs += 1.0
current_edge = node_pair
count_overlap(overlap)
overlap = []
overlap.append(e)
count_overlap(overlap)
flat_pairs += 1
c_matrix = r_matrix.copy()
for i in range(nb_layers):
c_matrix[i,i] /= flat_pairs
for i,j in itertools.combinations(range(nb_layers),2):
c_matrix[i,j] /= r_matrix[j][j]
c_matrix[j,i] /= r_matrix[i][i]
return c_matrix, layers
def compute_blocks(c_matrix):
c_sparse = csgraph_from_dense(c_matrix)
nb_components, labels = connected_components(c_sparse, directed=False, return_labels=True)
v2i = {}
for i,v in enumerate(labels):
v2i[v] = v2i.get(v, []) + [i]
blocks = []
indices = []
for v,i in v2i.items():
indices.append(i)
blocks.append(c_matrix[np.ix_(i,i)])
return indices, blocks
def compute_entanglement(block_matrix):
eigenvals, eigenvects = np.linalg.eig(block_matrix)
max_eigenval = max(eigenvals.real)
index_first_eigenvect = np.argmax(eigenvals)
nb_layers = len(block_matrix)
entanglement_intensity = max_eigenval/nb_layers
gamma_layers = []
for i in range(nb_layers):
gamma_layers.append(abs(eigenvects[i][index_first_eigenvect].real))
entanglement_homogeneity = 1 - spatial.distance.cosine(gamma_layers, np.ones(nb_layers))
normalized_entanglement_homogeneity = 1 - math.acos(entanglement_homogeneity)/(math.pi/2)
return [entanglement_intensity, entanglement_homogeneity, normalized_entanglement_homogeneity], gamma_layers
def compute_entanglement_analysis(network):
matrix, layers = build_occurrence_matrix(network)
indices, blocks = compute_blocks(matrix)
analysis = []
for i,b in enumerate(blocks):
layer_labels = [layers[x] for x in indices[i]]
[I,H,H_norm],gamma=compute_entanglement(b)
block_analysis = {
'Entanglement intensity':I,
'Layer entanglement': {layer_labels[x]:gamma[x] for x in range(len(gamma))},
'Entanglement homogeneity': H,
'Normalized homogeneity': H_norm
}
analysis.append(block_analysis)
return analysis
if __name__ == '__main__':
analysis = compute_entanglement_analysis(net)
print ("%d connected components of layers"%len(analysis))
for i,b in enumerate(analysis):
print ('--- block %d'%i)
layer_labels = b['Layer entanglement'].keys()
print ('Covering layers: %s'%layer_labels)
print ('Entanglement intensity: %f'%b['Entanglement intensity'])
print ('Layer entanglement: %s'%b['Layer entanglement'])
print ('Entanglement homogeneity: %f'%b['Entanglement homogeneity'])
print ('Normalized homogeneity: %f'%b['Normalized homogeneity'])
| true | true |
f7f61f99b14ff05744c7eb403d860339bcd27eae | 3,970 | py | Python | auth/decorators.py | dongboyan77/quay | 8018e5bd80f17e6d855b58b7d5f2792d92675905 | [
"Apache-2.0"
] | null | null | null | auth/decorators.py | dongboyan77/quay | 8018e5bd80f17e6d855b58b7d5f2792d92675905 | [
"Apache-2.0"
] | null | null | null | auth/decorators.py | dongboyan77/quay | 8018e5bd80f17e6d855b58b7d5f2792d92675905 | [
"Apache-2.0"
] | null | null | null | import logging
from functools import wraps
from flask import request, session
from prometheus_client import Counter
from auth.basic import validate_basic_auth
from auth.oauth import validate_bearer_auth
from auth.cookie import validate_session_cookie
from auth.signedgrant import validate_signed_grant
from util.http import abort
logger = logging.getLogger(__name__)
authentication_count = Counter(
"quay_authentication_attempts_total",
"number of authentication attempts accross the registry and API",
labelnames=["auth_kind", "success"],
)
def _auth_decorator(pass_result=False, handlers=None):
""" Builds an auth decorator that runs the given handlers and, if any return successfully,
sets up the auth context. The wrapped function will be invoked *regardless of success or
failure of the auth handler(s)*
"""
def processor(func):
@wraps(func)
def wrapper(*args, **kwargs):
auth_header = request.headers.get("authorization", "")
result = None
for handler in handlers:
result = handler(auth_header)
# If the handler was missing the necessary information, skip it and try the next one.
if result.missing:
continue
# Check for a valid result.
if result.auth_valid:
logger.debug("Found valid auth result: %s", result.tuple())
# Set the various pieces of the auth context.
result.apply_to_context()
# Log the metric.
authentication_count.labels(result.kind, True).inc()
break
# Otherwise, report the error.
if result.error_message is not None:
# Log the failure.
authentication_count.labels(result.kind, False).inc()
break
if pass_result:
kwargs["auth_result"] = result
return func(*args, **kwargs)
return wrapper
return processor
process_oauth = _auth_decorator(handlers=[validate_bearer_auth, validate_session_cookie])
process_auth = _auth_decorator(handlers=[validate_signed_grant, validate_basic_auth])
process_auth_or_cookie = _auth_decorator(handlers=[validate_basic_auth, validate_session_cookie])
process_basic_auth = _auth_decorator(handlers=[validate_basic_auth], pass_result=True)
process_basic_auth_no_pass = _auth_decorator(handlers=[validate_basic_auth])
def require_session_login(func):
""" Decorates a function and ensures that a valid session cookie exists or a 401 is raised. If
a valid session cookie does exist, the authenticated user and identity are also set.
"""
@wraps(func)
def wrapper(*args, **kwargs):
result = validate_session_cookie()
if result.has_nonrobot_user:
result.apply_to_context()
authentication_count.labels(result.kind, True).inc()
return func(*args, **kwargs)
elif not result.missing:
authentication_count.labels(result.kind, False).inc()
abort(401, message="Method requires login and no valid login could be loaded.")
return wrapper
def extract_namespace_repo_from_session(func):
""" Extracts the namespace and repository name from the current session (which must exist)
and passes them into the decorated function as the first and second arguments. If the
session doesn't exist or does not contain these arugments, a 400 error is raised.
"""
@wraps(func)
def wrapper(*args, **kwargs):
if "namespace" not in session or "repository" not in session:
logger.error("Unable to load namespace or repository from session: %s", session)
abort(400, message="Missing namespace in request")
return func(session["namespace"], session["repository"], *args, **kwargs)
return wrapper
| 35.446429 | 101 | 0.668766 | import logging
from functools import wraps
from flask import request, session
from prometheus_client import Counter
from auth.basic import validate_basic_auth
from auth.oauth import validate_bearer_auth
from auth.cookie import validate_session_cookie
from auth.signedgrant import validate_signed_grant
from util.http import abort
logger = logging.getLogger(__name__)
authentication_count = Counter(
"quay_authentication_attempts_total",
"number of authentication attempts accross the registry and API",
labelnames=["auth_kind", "success"],
)
def _auth_decorator(pass_result=False, handlers=None):
def processor(func):
@wraps(func)
def wrapper(*args, **kwargs):
auth_header = request.headers.get("authorization", "")
result = None
for handler in handlers:
result = handler(auth_header)
if result.missing:
continue
if result.auth_valid:
logger.debug("Found valid auth result: %s", result.tuple())
result.apply_to_context()
authentication_count.labels(result.kind, True).inc()
break
if result.error_message is not None:
authentication_count.labels(result.kind, False).inc()
break
if pass_result:
kwargs["auth_result"] = result
return func(*args, **kwargs)
return wrapper
return processor
process_oauth = _auth_decorator(handlers=[validate_bearer_auth, validate_session_cookie])
process_auth = _auth_decorator(handlers=[validate_signed_grant, validate_basic_auth])
process_auth_or_cookie = _auth_decorator(handlers=[validate_basic_auth, validate_session_cookie])
process_basic_auth = _auth_decorator(handlers=[validate_basic_auth], pass_result=True)
process_basic_auth_no_pass = _auth_decorator(handlers=[validate_basic_auth])
def require_session_login(func):
@wraps(func)
def wrapper(*args, **kwargs):
result = validate_session_cookie()
if result.has_nonrobot_user:
result.apply_to_context()
authentication_count.labels(result.kind, True).inc()
return func(*args, **kwargs)
elif not result.missing:
authentication_count.labels(result.kind, False).inc()
abort(401, message="Method requires login and no valid login could be loaded.")
return wrapper
def extract_namespace_repo_from_session(func):
@wraps(func)
def wrapper(*args, **kwargs):
if "namespace" not in session or "repository" not in session:
logger.error("Unable to load namespace or repository from session: %s", session)
abort(400, message="Missing namespace in request")
return func(session["namespace"], session["repository"], *args, **kwargs)
return wrapper
| true | true |
f7f61fb1356bf9e8bfac36908160f8966f6e6289 | 2,026 | py | Python | research/mobilenet/mobilenet_v2.py | luotigerlsx/models_archive | c6578521ae61df7298003b42526b03e78d2d0d4b | [
"Apache-2.0"
] | null | null | null | research/mobilenet/mobilenet_v2.py | luotigerlsx/models_archive | c6578521ae61df7298003b42526b03e78d2d0d4b | [
"Apache-2.0"
] | null | null | null | research/mobilenet/mobilenet_v2.py | luotigerlsx/models_archive | c6578521ae61df7298003b42526b03e78d2d0d4b | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""MobileNet v2.
Adapted from tf.keras.applications.mobilenet_v2.MobileNetV2().
Architecture: https://arxiv.org/abs/1801.04381
The base model gives 72.2% accuracy on ImageNet, with 300MMadds,
3.4 M parameters.
"""
import logging
import tensorflow as tf
from research.mobilenet import common_modules
from research.mobilenet.configs import archs
layers = tf.keras.layers
MobileNetV2Config = archs.MobileNetV2Config
def mobilenet_v2(config: MobileNetV2Config = MobileNetV2Config()
) -> tf.keras.models.Model:
"""Instantiates the MobileNet Model."""
model_name = config.name
input_shape = config.input_shape
img_input = layers.Input(shape=input_shape, name='Input')
# build network base
x = common_modules.mobilenet_base(img_input, config)
# build classification head
x = common_modules.mobilenet_head(x, config)
return tf.keras.models.Model(inputs=img_input,
outputs=x,
name=model_name)
if __name__ == '__main__':
logging.basicConfig(
format='%(asctime)-15s:%(levelname)s:%(module)s:%(message)s',
level=logging.INFO)
model = mobilenet_v2()
model.compile(
optimizer='adam',
loss=tf.keras.losses.categorical_crossentropy,
metrics=[tf.keras.metrics.categorical_crossentropy])
logging.info(model.summary())
| 30.238806 | 79 | 0.701382 |
import logging
import tensorflow as tf
from research.mobilenet import common_modules
from research.mobilenet.configs import archs
layers = tf.keras.layers
MobileNetV2Config = archs.MobileNetV2Config
def mobilenet_v2(config: MobileNetV2Config = MobileNetV2Config()
) -> tf.keras.models.Model:
model_name = config.name
input_shape = config.input_shape
img_input = layers.Input(shape=input_shape, name='Input')
x = common_modules.mobilenet_base(img_input, config)
x = common_modules.mobilenet_head(x, config)
return tf.keras.models.Model(inputs=img_input,
outputs=x,
name=model_name)
if __name__ == '__main__':
logging.basicConfig(
format='%(asctime)-15s:%(levelname)s:%(module)s:%(message)s',
level=logging.INFO)
model = mobilenet_v2()
model.compile(
optimizer='adam',
loss=tf.keras.losses.categorical_crossentropy,
metrics=[tf.keras.metrics.categorical_crossentropy])
logging.info(model.summary())
| true | true |
f7f61fb47b9b9567919108098fa7b08c5a6188c7 | 1,419 | py | Python | test/programytest/clients/restful/test_client.py | whackur/chatbot | bb4b4dace89f1f8aae2b6377bf7d2601e66af7a7 | [
"MIT"
] | 2 | 2018-06-16T09:32:22.000Z | 2019-07-21T13:16:00.000Z | test/programytest/clients/restful/test_client.py | whackur/chatbot | bb4b4dace89f1f8aae2b6377bf7d2601e66af7a7 | [
"MIT"
] | 3 | 2020-07-16T04:00:42.000Z | 2021-03-31T18:52:22.000Z | test/programytest/clients/restful/test_client.py | whackur/chatbot | bb4b4dace89f1f8aae2b6377bf7d2601e66af7a7 | [
"MIT"
] | 4 | 2018-06-29T23:50:44.000Z | 2020-11-05T08:13:47.000Z | import unittest
import unittest.mock
import os
from programy.clients.restful.client import RestBotClient
from programy.clients.restful.config import RestConfiguration
from programytest.clients.arguments import MockArgumentParser
class RestBotClientTests(unittest.TestCase):
def test_init(self):
arguments = MockArgumentParser()
client = RestBotClient("testrest", arguments)
self.assertIsNotNone(client)
self.assertIsNotNone(client.get_client_configuration())
self.assertIsInstance(client.get_client_configuration(), RestConfiguration)
self.assertEquals([], client.api_keys)
request = unittest.mock.Mock()
response, code = client.process_request(request)
def test_api_keys(self):
arguments = MockArgumentParser()
client = RestBotClient("testrest", arguments)
self.assertIsNotNone(client)
client.configuration.client_configuration._use_api_keys = True
client.configuration.client_configuration._api_key_file = os.path.dirname(__file__) + os.sep + ".." + os.sep + "api_keys.txt"
client.load_api_keys()
self.assertEquals(3, len(client.api_keys))
self.assertTrue(client.is_apikey_valid("11111111"))
self.assertTrue(client.is_apikey_valid("22222222"))
self.assertTrue(client.is_apikey_valid("33333333"))
self.assertFalse(client.is_apikey_valid("99999999")) | 38.351351 | 133 | 0.731501 | import unittest
import unittest.mock
import os
from programy.clients.restful.client import RestBotClient
from programy.clients.restful.config import RestConfiguration
from programytest.clients.arguments import MockArgumentParser
class RestBotClientTests(unittest.TestCase):
def test_init(self):
arguments = MockArgumentParser()
client = RestBotClient("testrest", arguments)
self.assertIsNotNone(client)
self.assertIsNotNone(client.get_client_configuration())
self.assertIsInstance(client.get_client_configuration(), RestConfiguration)
self.assertEquals([], client.api_keys)
request = unittest.mock.Mock()
response, code = client.process_request(request)
def test_api_keys(self):
arguments = MockArgumentParser()
client = RestBotClient("testrest", arguments)
self.assertIsNotNone(client)
client.configuration.client_configuration._use_api_keys = True
client.configuration.client_configuration._api_key_file = os.path.dirname(__file__) + os.sep + ".." + os.sep + "api_keys.txt"
client.load_api_keys()
self.assertEquals(3, len(client.api_keys))
self.assertTrue(client.is_apikey_valid("11111111"))
self.assertTrue(client.is_apikey_valid("22222222"))
self.assertTrue(client.is_apikey_valid("33333333"))
self.assertFalse(client.is_apikey_valid("99999999")) | true | true |
f7f61fda483a69cbbb1c7b5f71d1d2b4aed05efb | 5,420 | py | Python | configtree/formatter.py | Cottonwood-Technology/ConfigTree | ce7d92a4e536ba0104b92a9ce871819279f5b63a | [
"BSD-2-Clause"
] | null | null | null | configtree/formatter.py | Cottonwood-Technology/ConfigTree | ce7d92a4e536ba0104b92a9ce871819279f5b63a | [
"BSD-2-Clause"
] | null | null | null | configtree/formatter.py | Cottonwood-Technology/ConfigTree | ce7d92a4e536ba0104b92a9ce871819279f5b63a | [
"BSD-2-Clause"
] | null | null | null | """
The module provides formatters of :class:`configtree.tree.Tree` objects
.. data:: map
Dictionary that stores map of formatters. It is filled using
`entry points`_ named ``configtree.formatter``. But can be also modified
within ``loaderconf.py`` module to add ad hoc formatter.
See :mod:`configtree.loader`.
The map is used by script :func:`configtree.script.ctdump` to load
available formatters and print result.
.. _entry points: https://pythonhosted.org/setuptools/setuptools.html
#dynamic-discovery-of-services-and-plugins
"""
import json
from os import linesep
from pkg_resources import iter_entry_points
from numbers import Number
from .tree import rarefy
from .compat.types import string, chars
from .compat.colabc import Mapping, Sequence
def option(name, **kw):
"""
Decorator that adds ``__options__`` list to formatter and puts passed
parameters into the list as a tuple ``(name, kw)``.
The ``__options__`` list is used by script :func:`configtree.script.ctdump`
to include options into its argument parser. See :mod:`argparse`.
:param str name: Option name
:param dict kw: Option parameters that are passed into
:meth:`argparse.ArgumentParser.add_argument`
"""
def decorator(f):
if not hasattr(f, "__options__"):
f.__options__ = []
f.__options__.append((name, kw))
return f
return decorator
@option("rare", action="store_true", help="rarefy tree (default: %(default)s)")
@option("sort", action="store_true", help="sort keys (default: %(default)s)")
@option(
"indent",
type=int,
default=None,
metavar="<indent>",
help="indent size (default: %(default)s)",
)
def to_json(tree, rare=False, indent=None, sort=False):
"""
Format ``tree`` into JSON
:param Tree tree: Tree object to format
:param bool rare: Use :func:`configtree.tree.rarefy` on tree before format
:param int indent: Indent size
:param bool sort: Sort keys
Examples:
.. code-block:: pycon
>>> from configtree import Tree
>>> tree = Tree({'a.x': "Foo", 'a.y': "Bar"})
>>> result = to_json(tree, indent=4, sort=True)
>>> print(result) # doctest: +NORMALIZE_WHITESPACE
{
"a.x": "Foo",
"a.y": "Bar"
}
>>> result = to_json(tree, rare=True, indent=4, sort=True)
>>> print(result) # doctest: +NORMALIZE_WHITESPACE
{
"a": {
"x": "Foo",
"y": "Bar"
}
}
"""
if isinstance(tree, Mapping):
if rare:
tree = rarefy(tree)
else:
tree = dict(tree)
return json.dumps(tree, indent=indent, sort_keys=sort)
@option(
"prefix", default="", metavar="<prefix>", help="key prefix (default: empty string)"
)
@option(
"seq_sep",
default=" ",
metavar="<sep>",
help="sequence items separator (default: space char)",
)
@option("sort", action="store_true", help="sort keys (default: %(default)s)")
@option(
"capitalize", action="store_true", help="capitalize keys (default: %(default)s)"
)
def to_shell(tree, prefix="", seq_sep=" ", sort=False, capitalize=False):
"""
Format ``tree`` into shell (Bash) expression format
:param Tree tree: Tree object to format
:param bool prefix: Key prefix
:param str seq_sep: Sequence items separator
:param bool sort: Sort keys
:param bool capitalize: Capitalize keys
Examples:
.. code-block:: pycon
>>> from configtree import Tree
>>> tree = Tree({'a.x': "Foo", 'a.y': "Bar"})
>>> result = to_shell(tree, prefix='local ', sort=True)
>>> print(result) # doctest: +NORMALIZE_WHITESPACE
local a_x='Foo'
local a_y='Bar'
>>> result = to_shell(tree, sort=True, capitalize=True)
>>> print(result) # doctest: +NORMALIZE_WHITESPACE
A_X='Foo'
A_Y='Bar'
>>> tree = Tree({'list': [1, 2, 3]})
>>> result = to_shell(tree)
>>> print(result) # doctest: +NORMALIZE_WHITESPACE
list='1 2 3'
>>> result = to_shell(tree, seq_sep=':')
>>> print(result) # doctest: +NORMALIZE_WHITESPACE
list='1:2:3'
"""
def convert(value):
if value is None:
return "''"
if isinstance(value, bool):
return string(value).lower()
if isinstance(value, Number):
return string(value)
if isinstance(value, Sequence) and not isinstance(value, chars):
return u"'%s'" % seq_sep.join(
string(item).replace("'", "\\'") for item in value
)
return u"'%s'" % string(value).replace("'", "\\'")
result = []
if isinstance(tree, Mapping):
keys = tree.keys()
if sort:
keys = sorted(keys)
for key in keys:
value = convert(tree[key])
key = key.replace(tree._key_sep, "_")
if capitalize:
key = key.upper()
result.append(u"%s%s=%s" % (prefix, key, value))
else:
value = convert(tree)
result.append(u"%s%s" % (prefix, value))
return linesep.join(result)
map = {}
for entry_point in iter_entry_points("configtree.formatter"):
map[entry_point.name] = entry_point.load()
| 29.139785 | 87 | 0.586347 |
import json
from os import linesep
from pkg_resources import iter_entry_points
from numbers import Number
from .tree import rarefy
from .compat.types import string, chars
from .compat.colabc import Mapping, Sequence
def option(name, **kw):
def decorator(f):
if not hasattr(f, "__options__"):
f.__options__ = []
f.__options__.append((name, kw))
return f
return decorator
@option("rare", action="store_true", help="rarefy tree (default: %(default)s)")
@option("sort", action="store_true", help="sort keys (default: %(default)s)")
@option(
"indent",
type=int,
default=None,
metavar="<indent>",
help="indent size (default: %(default)s)",
)
def to_json(tree, rare=False, indent=None, sort=False):
if isinstance(tree, Mapping):
if rare:
tree = rarefy(tree)
else:
tree = dict(tree)
return json.dumps(tree, indent=indent, sort_keys=sort)
@option(
"prefix", default="", metavar="<prefix>", help="key prefix (default: empty string)"
)
@option(
"seq_sep",
default=" ",
metavar="<sep>",
help="sequence items separator (default: space char)",
)
@option("sort", action="store_true", help="sort keys (default: %(default)s)")
@option(
"capitalize", action="store_true", help="capitalize keys (default: %(default)s)"
)
def to_shell(tree, prefix="", seq_sep=" ", sort=False, capitalize=False):
def convert(value):
if value is None:
return "''"
if isinstance(value, bool):
return string(value).lower()
if isinstance(value, Number):
return string(value)
if isinstance(value, Sequence) and not isinstance(value, chars):
return u"'%s'" % seq_sep.join(
string(item).replace("'", "\\'") for item in value
)
return u"'%s'" % string(value).replace("'", "\\'")
result = []
if isinstance(tree, Mapping):
keys = tree.keys()
if sort:
keys = sorted(keys)
for key in keys:
value = convert(tree[key])
key = key.replace(tree._key_sep, "_")
if capitalize:
key = key.upper()
result.append(u"%s%s=%s" % (prefix, key, value))
else:
value = convert(tree)
result.append(u"%s%s" % (prefix, value))
return linesep.join(result)
map = {}
for entry_point in iter_entry_points("configtree.formatter"):
map[entry_point.name] = entry_point.load()
| true | true |
f7f6205dc7b35a9a40fd2de82c28be05ca417e97 | 398 | py | Python | backend/personal_area/logout.py | DuMoH112/cloud_strorage | f995f9a71c9317493e3092a7ec31108afbdc8e05 | [
"MIT"
] | null | null | null | backend/personal_area/logout.py | DuMoH112/cloud_strorage | f995f9a71c9317493e3092a7ec31108afbdc8e05 | [
"MIT"
] | null | null | null | backend/personal_area/logout.py | DuMoH112/cloud_strorage | f995f9a71c9317493e3092a7ec31108afbdc8e05 | [
"MIT"
] | null | null | null | from flask import Blueprint, request, jsonify
from Database.redis import Redis_db
from app.auth_utils import auth_user
logout_bp = Blueprint('logout', __name__)
@logout_bp.route('/back/logout', methods=['GET'])
@auth_user(name_func='logout')
def logout(user):
"""Logout Page"""
r = Redis_db()
r.del_user(user.get_token())
return jsonify({'message': 'Пользователь вышел'}), 401
| 23.411765 | 58 | 0.71608 | from flask import Blueprint, request, jsonify
from Database.redis import Redis_db
from app.auth_utils import auth_user
logout_bp = Blueprint('logout', __name__)
@logout_bp.route('/back/logout', methods=['GET'])
@auth_user(name_func='logout')
def logout(user):
r = Redis_db()
r.del_user(user.get_token())
return jsonify({'message': 'Пользователь вышел'}), 401
| true | true |
f7f6211b797e00065bc1601dcbb284e1adc5b5af | 2,631 | py | Python | setup.py | zyfra/ebonite | b01b662c43709d152940f488574d78ff25f89ecf | [
"Apache-2.0"
] | 270 | 2019-11-14T15:46:08.000Z | 2021-09-17T16:43:03.000Z | setup.py | leepand/ebonite | b01b662c43709d152940f488574d78ff25f89ecf | [
"Apache-2.0"
] | 14 | 2019-11-29T11:49:39.000Z | 2022-02-10T00:23:59.000Z | setup.py | leepand/ebonite | b01b662c43709d152940f488574d78ff25f89ecf | [
"Apache-2.0"
] | 18 | 2019-11-22T13:15:14.000Z | 2021-09-01T13:36:12.000Z | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, print_function
import io
import os
import re
from glob import glob
from os.path import basename, dirname, join, splitext
from setuptools import find_packages, setup
# allow setup.py to run from another directory
here = os.path.dirname(__file__)
here and os.chdir(here)
def read(*names, **kwargs):
with io.open(
join(dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
) as fh:
return fh.read()
def get_requirements(file_name):
return [r for r in read(file_name).split('\n') if r and not r.startswith('#')]
setup_args = dict(
name='ebonite',
version='0.7.0',
license='Apache-2.0',
description='Machine Learning Lifecycle Framework',
long_description='%s\n%s' % (
re.compile('^.. start-badges.*^.. end-badges', re.M | re.S).sub('', read('README.rst')),
re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))
),
author='Mikhail Sveshnikov',
author_email='mike0sv@gmail.com',
url='https://github.com/zyfra/ebonite',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=False,
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Utilities',
],
project_urls={
'Changelog': 'https://github.com/zyfra/ebonite/blob/master/CHANGELOG.rst',
'Issue Tracker': 'https://github.com/zyfra/ebonite/issues',
},
keywords=[
# eg: 'keyword1', 'keyword2', 'keyword3',
],
python_requires='>=3.6',
install_requires=get_requirements('requirements.txt'),
extras_require={
# eg:
# 'rst': ['docutils>=0.11'],
# ':python_version=="2.6"': ['argparse'],
'testing': get_requirements('test.requirements.txt')
},
setup_requires=[
'pytest-runner',
],
entry_points={
'console_scripts': [
'ebnt = ebonite.cli:main',
]
},
)
if __name__ == '__main__':
setup(**setup_args)
| 30.241379 | 96 | 0.607754 |
from __future__ import absolute_import, print_function
import io
import os
import re
from glob import glob
from os.path import basename, dirname, join, splitext
from setuptools import find_packages, setup
here = os.path.dirname(__file__)
here and os.chdir(here)
def read(*names, **kwargs):
with io.open(
join(dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
) as fh:
return fh.read()
def get_requirements(file_name):
return [r for r in read(file_name).split('\n') if r and not r.startswith('#')]
setup_args = dict(
name='ebonite',
version='0.7.0',
license='Apache-2.0',
description='Machine Learning Lifecycle Framework',
long_description='%s\n%s' % (
re.compile('^.. start-badges.*^.. end-badges', re.M | re.S).sub('', read('README.rst')),
re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))
),
author='Mikhail Sveshnikov',
author_email='mike0sv@gmail.com',
url='https://github.com/zyfra/ebonite',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Utilities',
],
project_urls={
'Changelog': 'https://github.com/zyfra/ebonite/blob/master/CHANGELOG.rst',
'Issue Tracker': 'https://github.com/zyfra/ebonite/issues',
},
keywords=[
],
python_requires='>=3.6',
install_requires=get_requirements('requirements.txt'),
extras_require={
'testing': get_requirements('test.requirements.txt')
},
setup_requires=[
'pytest-runner',
],
entry_points={
'console_scripts': [
'ebnt = ebonite.cli:main',
]
},
)
if __name__ == '__main__':
setup(**setup_args)
| true | true |
f7f6216882e151130b9369db9dec41cc28f56169 | 258 | py | Python | publicdata/chis/__init__.py | CivicKnowledge/publicdata | 37210e3c3b89cf8068feb79a2f12923b3cb5c336 | [
"MIT"
] | 2 | 2017-10-10T18:53:40.000Z | 2020-05-28T21:49:01.000Z | publicdata/chis/__init__.py | CivicKnowledge/publicdata | 37210e3c3b89cf8068feb79a2f12923b3cb5c336 | [
"MIT"
] | 7 | 2018-10-02T15:53:22.000Z | 2019-01-27T23:06:32.000Z | publicdata/chis/__init__.py | CivicKnowledge/publicdata | 37210e3c3b89cf8068feb79a2f12923b3cb5c336 | [
"MIT"
] | 2 | 2018-08-31T15:46:52.000Z | 2019-09-18T05:31:28.000Z | # Copyright (c) 2017 Civic Knowledge. This file is licensed under the terms of the
# MIT License, included in this distribution as LICENSE
"""
Functions for preparing a chis dataset
"""
from .prepare import *
from .estimate import *
from .recode import *
| 21.5 | 82 | 0.748062 |
from .prepare import *
from .estimate import *
from .recode import *
| true | true |
f7f621887740e725fb50e66b26597cdd9e419226 | 25,090 | py | Python | espnet/tts/pytorch_backend/tts.py | kokeshing/espnet | 9e2bfc5cdecbb8846f5c6cb26d22010b06e98c40 | [
"Apache-2.0"
] | null | null | null | espnet/tts/pytorch_backend/tts.py | kokeshing/espnet | 9e2bfc5cdecbb8846f5c6cb26d22010b06e98c40 | [
"Apache-2.0"
] | null | null | null | espnet/tts/pytorch_backend/tts.py | kokeshing/espnet | 9e2bfc5cdecbb8846f5c6cb26d22010b06e98c40 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""E2E-TTS training / decoding functions."""
import copy
import json
import logging
import math
import os
import time
import chainer
import kaldiio
import numpy as np
import torch
from chainer import training
from chainer.training import extensions
from espnet.asr.asr_utils import get_model_conf
from espnet.asr.asr_utils import snapshot_object
from espnet.asr.asr_utils import torch_load
from espnet.asr.asr_utils import torch_resume
from espnet.asr.asr_utils import torch_snapshot
from espnet.asr.pytorch_backend.asr_init import load_trained_modules
from espnet.nets.pytorch_backend.nets_utils import pad_list
from espnet.nets.tts_interface import TTSInterface
from espnet.utils.dataset import ChainerDataLoader
from espnet.utils.dataset import TransformDataset
from espnet.utils.dynamic_import import dynamic_import
from espnet.utils.io_utils import LoadInputsAndTargets
from espnet.utils.training.batchfy import make_batchset
from espnet.utils.training.evaluator import BaseEvaluator
from espnet.utils.deterministic_utils import set_deterministic_pytorch
from espnet.utils.training.train_utils import check_early_stop
from espnet.utils.training.train_utils import set_early_stop
from espnet.utils.training.iterators import ShufflingEnabler
import matplotlib
from espnet.utils.training.tensorboard_logger import TensorboardLogger
from tensorboardX import SummaryWriter
matplotlib.use('Agg')
class CustomEvaluator(BaseEvaluator):
    """Custom evaluator that runs a pytorch model under chainer's reporter."""
    def __init__(self, model, iterator, target, device):
        """Initialize module.
        Args:
            model (torch.nn.Module): Pytorch model instance.
            iterator (chainer.dataset.Iterator): Iterator for validation.
            target (chainer.Chain): Dummy chain instance.
            device (torch.device): The device to be used in evaluation.
        """
        super(CustomEvaluator, self).__init__(iterator, target)
        self.model = model
        self.device = device
    # The core part of the evaluation routine can be customized by overriding.
    def evaluate(self):
        """Evaluate over validation iterator and return the averaged observations."""
        iterator = self._iterators['main']
        if self.eval_hook:
            self.eval_hook(self)
        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            # iterate over a shallow copy when the iterator cannot be reset
            it = copy.copy(iterator)
        summary = chainer.reporter.DictSummary()
        self.model.eval()
        with torch.no_grad():
            for batch in it:
                # move the minibatch (tuple of tensors or dict of tensors) to device
                if isinstance(batch, tuple):
                    x = tuple(arr.to(self.device) for arr in batch)
                else:
                    x = batch
                    for key in x.keys():
                        x[key] = x[key].to(self.device)
                observation = {}
                with chainer.reporter.report_scope(observation):
                    # forward only; values reported during the forward pass
                    # are collected into `observation` via report_scope
                    if isinstance(x, tuple):
                        self.model(*x)
                    else:
                        self.model(**x)
                summary.add(observation)
        # restore training mode before returning to the trainer loop
        self.model.train()
        return summary.compute_mean()
class CustomUpdater(training.StandardUpdater):
    """Custom updater with gradient accumulation and gradient clipping."""
    def __init__(self, model, grad_clip, iterator, optimizer, device, accum_grad=1):
        """Initialize module.
        Args:
            model (torch.nn.Module): Pytorch model instance.
            grad_clip (float): The gradient clipping value.
            iterator (chainer.dataset.Iterator): Iterator for training.
            optimizer (torch.optim.Optimizer): Pytorch optimizer instance.
            device (torch.device): The device to be used in training.
            accum_grad (int): Number of forward passes whose gradients are
                accumulated before each optimizer step.
        """
        super(CustomUpdater, self).__init__(iterator, optimizer)
        self.model = model
        self.grad_clip = grad_clip
        self.device = device
        self.clip_grad_norm = torch.nn.utils.clip_grad_norm_
        self.accum_grad = accum_grad
        self.forward_count = 0
    # The core part of the update routine can be customized by overriding.
    def update_core(self):
        """Update model one step."""
        # When we pass one iterator and optimizer to StandardUpdater.__init__,
        # they are automatically named 'main'.
        train_iter = self.get_iterator('main')
        optimizer = self.get_optimizer('main')
        # Get the next minibatch and move it to the target device
        batch = train_iter.next()
        if isinstance(batch, tuple):
            x = tuple(arr.to(self.device) for arr in batch)
        else:
            x = batch
            for key in x.keys():
                x[key] = x[key].to(self.device)
        # compute loss and gradient; the loss is scaled by 1/accum_grad so
        # accumulated gradients average over the accumulation window
        if isinstance(x, tuple):
            loss = self.model(*x).mean() / self.accum_grad
        else:
            loss = self.model(**x).mean() / self.accum_grad
        loss.backward()
        # update parameters only every accum_grad forward passes
        self.forward_count += 1
        if self.forward_count != self.accum_grad:
            return
        self.forward_count = 0
        # compute the gradient norm to check if it is normal or not
        grad_norm = self.clip_grad_norm(self.model.parameters(), self.grad_clip)
        logging.debug('grad norm={}'.format(grad_norm))
        if math.isnan(grad_norm):
            # skip the step on a NaN gradient but still clear gradients below
            logging.warning('grad norm is nan. Do not update model.')
        else:
            optimizer.step()
        optimizer.zero_grad()
    def update(self):
        """Run update_core and advance the iteration counter after each optimizer step."""
        self.update_core()
        if self.forward_count == 0:
            self.iteration += 1
class CustomConverter(object):
    """Custom converter turning a loaded minibatch into padded torch tensors."""
    def __init__(self):
        """Initialize module."""
        # NOTE: kept as a class so options can be added later without
        # changing the call sites
        pass
    def __call__(self, batch, device=torch.device('cpu')):
        """Convert a given batch.
        Args:
            batch (list): One-element list holding a tuple of ndarray lists
                (input token sequences, target feature sequences, optional
                speaker embeddings, optional extra targets).
            device (torch.device): The device the tensors are sent to.
        Returns:
            dict: Converted tensors with keys "xs", "ilens", "ys", "labels",
                "olens" and, when present in the batch, "spembs" / "extras".
        """
        # the dataloader wraps the actual minibatch in a one-element list
        assert len(batch) == 1
        xs, ys, spembs, extras = batch[0]
        # sequence lengths must be tensors so DataParallel can scatter them
        ilens = torch.from_numpy(np.array([len(x) for x in xs])).long().to(device)
        olens = torch.from_numpy(np.array([len(y) for y in ys])).long().to(device)
        # zero-pad inputs / targets and move them to the target device
        xs = pad_list([torch.from_numpy(x).long() for x in xs], 0).to(device)
        ys = pad_list([torch.from_numpy(y).float() for y in ys], 0).to(device)
        # stop-token labels: 1.0 from the last valid frame of each utterance on
        labels = ys.new_zeros(ys.size(0), ys.size(1))
        for idx, olen in enumerate(olens):
            labels[idx, olen - 1:] = 1.0
        new_batch = dict(xs=xs, ilens=ilens, ys=ys, labels=labels, olens=olens)
        # optional speaker embeddings
        if spembs is not None:
            new_batch["spembs"] = torch.from_numpy(np.array(spembs)).float().to(device)
        # optional extra targets, padded the same way as ys
        if extras is not None:
            padded_extras = pad_list(
                [torch.from_numpy(extra).float() for extra in extras], 0)
            new_batch["extras"] = padded_extras.to(device)
        return new_batch
def train(args):
    """Train E2E-TTS model.

    Args:
        args (Namespace): Training options (data jsons, model module,
            optimizer settings, batching and trainer configuration).

    """
    set_deterministic_pytorch(args)
    # check cuda availability
    if not torch.cuda.is_available():
        logging.warning('cuda is not available')
    # get input and output dimension info
    with open(args.valid_json, 'rb') as f:
        valid_json = json.load(f)['utts']
    utts = list(valid_json.keys())
    # reverse input and output dimension (TTS consumes text, emits features)
    idim = int(valid_json[utts[0]]['output'][0]['shape'][1])
    odim = int(valid_json[utts[0]]['input'][0]['shape'][1])
    logging.info('#input dims : ' + str(idim))
    logging.info('#output dims: ' + str(odim))
    # get extra input and output dimension
    if args.use_speaker_embedding:
        args.spk_embed_dim = int(valid_json[utts[0]]['input'][1]['shape'][0])
    else:
        args.spk_embed_dim = None
    if args.use_second_target:
        args.spc_dim = int(valid_json[utts[0]]['input'][1]['shape'][1])
    else:
        args.spc_dim = None
    # write model config
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    model_conf = args.outdir + '/model.json'
    with open(model_conf, 'wb') as f:
        # FIX: the original message lacked a space before the path
        logging.info('writing a model config file to ' + model_conf)
        f.write(json.dumps((idim, odim, vars(args)),
                           indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8'))
    for key in sorted(vars(args).keys()):
        logging.info('ARGS: ' + key + ': ' + str(vars(args)[key]))
    # specify model architecture
    if args.enc_init is not None or args.dec_init is not None:
        model = load_trained_modules(idim, odim, args, TTSInterface)
    else:
        model_class = dynamic_import(args.model_module)
        model = model_class(idim, odim, args)
    assert isinstance(model, TTSInterface)
    logging.info(model)
    reporter = model.reporter
    # check the use of multi-gpu
    if args.ngpu > 1:
        model = torch.nn.DataParallel(model, device_ids=list(range(args.ngpu)))
        if args.batch_size != 0:
            logging.warning('batch size is automatically increased (%d -> %d)' % (
                args.batch_size, args.batch_size * args.ngpu))
            args.batch_size *= args.ngpu
    # set torch device
    device = torch.device("cuda" if args.ngpu > 0 else "cpu")
    model = model.to(device)
    # Setup an optimizer
    if args.opt == 'adam':
        optimizer = torch.optim.Adam(
            model.parameters(), args.lr, eps=args.eps,
            weight_decay=args.weight_decay)
    elif args.opt == 'noam':
        from espnet.nets.pytorch_backend.transformer.optimizer import get_std_opt
        optimizer = get_std_opt(model, args.adim, args.transformer_warmup_steps, args.transformer_lr)
    else:
        raise NotImplementedError("unknown optimizer: " + args.opt)
    # FIXME: TOO DIRTY HACK — make the torch optimizer look like a chainer one
    setattr(optimizer, 'target', reporter)
    setattr(optimizer, 'serialize', lambda s: reporter.serialize(s))
    # read json data
    with open(args.train_json, 'rb') as f:
        train_json = json.load(f)['utts']
    with open(args.valid_json, 'rb') as f:
        valid_json = json.load(f)['utts']
    use_sortagrad = args.sortagrad == -1 or args.sortagrad > 0
    if use_sortagrad:
        args.batch_sort_key = "input"
    # make minibatch list (variable length)
    train_batchset = make_batchset(train_json, args.batch_size,
                                   args.maxlen_in, args.maxlen_out, args.minibatches,
                                   batch_sort_key=args.batch_sort_key,
                                   min_batch_size=args.ngpu if args.ngpu > 1 else 1,
                                   shortest_first=use_sortagrad,
                                   count=args.batch_count,
                                   batch_bins=args.batch_bins,
                                   batch_frames_in=args.batch_frames_in,
                                   batch_frames_out=args.batch_frames_out,
                                   batch_frames_inout=args.batch_frames_inout,
                                   swap_io=True, iaxis=0, oaxis=0)
    valid_batchset = make_batchset(valid_json, args.batch_size,
                                   args.maxlen_in, args.maxlen_out, args.minibatches,
                                   batch_sort_key=args.batch_sort_key,
                                   min_batch_size=args.ngpu if args.ngpu > 1 else 1,
                                   count=args.batch_count,
                                   batch_bins=args.batch_bins,
                                   batch_frames_in=args.batch_frames_in,
                                   batch_frames_out=args.batch_frames_out,
                                   batch_frames_inout=args.batch_frames_inout,
                                   swap_io=True, iaxis=0, oaxis=0)
    load_tr = LoadInputsAndTargets(
        mode='tts',
        use_speaker_embedding=args.use_speaker_embedding,
        use_second_target=args.use_second_target,
        preprocess_conf=args.preprocess_conf,
        preprocess_args={'train': True},  # Switch the mode of preprocessing
        keep_all_data_on_mem=args.keep_all_data_on_mem,
    )
    load_cv = LoadInputsAndTargets(
        mode='tts',
        use_speaker_embedding=args.use_speaker_embedding,
        use_second_target=args.use_second_target,
        preprocess_conf=args.preprocess_conf,
        preprocess_args={'train': False},  # Switch the mode of preprocessing
        keep_all_data_on_mem=args.keep_all_data_on_mem,
    )
    converter = CustomConverter()
    # hack to make batchsize argument as 1
    # actual batchsize is included in a list
    train_iter = {'main': ChainerDataLoader(
        dataset=TransformDataset(train_batchset, lambda data: converter([load_tr(data)])),
        batch_size=1, num_workers=args.num_iter_processes,
        shuffle=not use_sortagrad, collate_fn=lambda x: x[0])}
    valid_iter = {'main': ChainerDataLoader(
        dataset=TransformDataset(valid_batchset, lambda data: converter([load_cv(data)])),
        batch_size=1, shuffle=False, collate_fn=lambda x: x[0],
        num_workers=args.num_iter_processes)}
    # Set up a trainer
    updater = CustomUpdater(model, args.grad_clip, train_iter, optimizer, device, args.accum_grad)
    trainer = training.Trainer(updater, (args.epochs, 'epoch'), out=args.outdir)
    # Resume from a snapshot
    if args.resume:
        logging.info('resumed from %s' % args.resume)
        torch_resume(args.resume, trainer)
    # set intervals
    eval_interval = (args.eval_interval_epochs, 'epoch')
    save_interval = (args.save_interval_epochs, 'epoch')
    report_interval = (args.report_interval_iters, 'iteration')
    # Evaluate the model with the validation dataset for each epoch
    trainer.extend(CustomEvaluator(
        model, valid_iter, reporter, device), trigger=eval_interval)
    # Save snapshot for each epoch
    trainer.extend(torch_snapshot(), trigger=save_interval)
    # Save best models
    trainer.extend(snapshot_object(model, 'model.loss.best'),
                   trigger=training.triggers.MinValueTrigger(
                       'validation/main/loss', trigger=eval_interval))
    # Save attention figure for each epoch
    if args.num_save_attention > 0:
        data = sorted(list(valid_json.items())[:args.num_save_attention],
                      key=lambda x: int(x[1]['input'][0]['shape'][1]), reverse=True)
        if hasattr(model, "module"):
            att_vis_fn = model.module.calculate_all_attentions
            plot_class = model.module.attention_plot_class
        else:
            att_vis_fn = model.calculate_all_attentions
            plot_class = model.attention_plot_class
        att_reporter = plot_class(
            att_vis_fn, data, args.outdir + '/att_ws',
            converter=converter,
            transform=load_cv,
            device=device, reverse=True)
        trainer.extend(att_reporter, trigger=eval_interval)
    else:
        att_reporter = None
    # Make a plot for training and validation values
    if hasattr(model, "module"):
        base_plot_keys = model.module.base_plot_keys
    else:
        base_plot_keys = model.base_plot_keys
    plot_keys = []
    for key in base_plot_keys:
        plot_key = ['main/' + key, 'validation/main/' + key]
        trainer.extend(extensions.PlotReport(
            plot_key, 'epoch', file_name=key + '.png'), trigger=eval_interval)
        plot_keys += plot_key
    trainer.extend(extensions.PlotReport(
        plot_keys, 'epoch', file_name='all_loss.png'), trigger=eval_interval)
    # Write a log of evaluation statistics for each epoch
    trainer.extend(extensions.LogReport(trigger=report_interval))
    report_keys = ['epoch', 'iteration', 'elapsed_time'] + plot_keys
    trainer.extend(extensions.PrintReport(report_keys), trigger=report_interval)
    trainer.extend(extensions.ProgressBar(), trigger=report_interval)
    set_early_stop(trainer, args)
    if args.tensorboard_dir is not None and args.tensorboard_dir != "":
        writer = SummaryWriter(args.tensorboard_dir)
        trainer.extend(TensorboardLogger(writer, att_reporter), trigger=report_interval)
    if use_sortagrad:
        trainer.extend(ShufflingEnabler([train_iter]),
                       trigger=(args.sortagrad if args.sortagrad != -1 else args.epochs, 'epoch'))
    # Run the training
    trainer.run()
    check_early_stop(trainer, args.epochs)
@torch.no_grad()
def decode(args):
    """Decode with E2E-TTS model and write features (and optional extras) to ark/scp."""
    set_deterministic_pytorch(args)
    # read training config
    idim, odim, train_args = get_model_conf(args.model, args.model_conf)
    # show arguments
    for key in sorted(vars(args).keys()):
        logging.info('args: ' + key + ': ' + str(vars(args)[key]))
    # define model
    model_class = dynamic_import(train_args.model_module)
    model = model_class(idim, odim, train_args)
    assert isinstance(model, TTSInterface)
    logging.info(model)
    # load trained model parameters
    logging.info('reading model parameters from ' + args.model)
    torch_load(args.model, model)
    model.eval()
    # set torch device
    device = torch.device("cuda" if args.ngpu > 0 else "cpu")
    model = model.to(device)
    # read json data
    with open(args.json, 'rb') as f:
        js = json.load(f)['utts']
    # check directory
    outdir = os.path.dirname(args.out)
    if len(outdir) != 0 and not os.path.exists(outdir):
        os.makedirs(outdir)
    load_inputs_and_targets = LoadInputsAndTargets(
        mode='tts', load_input=False, sort_in_input_length=False,
        use_speaker_embedding=train_args.use_speaker_embedding,
        preprocess_conf=train_args.preprocess_conf
        if args.preprocess_conf is None else args.preprocess_conf,
        preprocess_args={'train': False}  # Switch the mode of preprocessing
    )
    # define function for plotting prob and att_ws
    def _plot_and_save(array, figname, figsize=(6, 4), dpi=150):
        import matplotlib.pyplot as plt
        shape = array.shape
        if len(shape) == 1:
            # for eos probability
            plt.figure(figsize=figsize, dpi=dpi)
            plt.plot(array)
            plt.xlabel("Frame")
            plt.ylabel("Probability")
            plt.ylim([0, 1])
        elif len(shape) == 2:
            # for tacotron 2 attention weights, whose shape is (out_length, in_length)
            plt.figure(figsize=figsize, dpi=dpi)
            plt.imshow(array, aspect="auto")
            plt.xlabel("Input")
            plt.ylabel("Output")
        elif len(shape) == 4:
            # for transformer attention weights, whose shape is (#layers, #heads, out_length, in_length)
            plt.figure(figsize=(figsize[0] * shape[0], figsize[1] * shape[1]), dpi=dpi)
            for idx1, xs in enumerate(array):
                for idx2, x in enumerate(xs, 1):
                    plt.subplot(shape[0], shape[1], idx1 * shape[1] + idx2)
                    plt.imshow(x, aspect="auto")
                    plt.xlabel("Input")
                    plt.ylabel("Output")
        else:
            raise NotImplementedError("Support only from 1D to 4D array.")
        plt.tight_layout()
        if not os.path.exists(os.path.dirname(figname)):
            # NOTE: exist_ok = True is needed for parallel process decoding
            os.makedirs(os.path.dirname(figname), exist_ok=True)
        plt.savefig(figname)
        plt.close()
    # define function to calculate focus rate (see section 3.3 in https://arxiv.org/abs/1905.09263)
    # NOTE(review): "rete" is a typo for "rate"; kept as-is to avoid churn
    def _calculate_focus_rete(att_ws):
        if att_ws is None:
            # fastspeech case -> None
            return 1.0
        elif len(att_ws.shape) == 2:
            # tacotron 2 case -> (L, T)
            return float(att_ws.max(dim=-1)[0].mean())
        elif len(att_ws.shape) == 4:
            # transformer case -> (#layers, #heads, L, T)
            return float(att_ws.max(dim=-1)[0].mean(dim=-1).max())
        else:
            raise ValueError("att_ws should be 2 or 4 dimensional tensor.")
    # define function to convert attention to duration
    def _convert_att_to_duration(att_ws):
        if len(att_ws.shape) == 2:
            # tacotron 2 case -> (L, T)
            pass
        elif len(att_ws.shape) == 4:
            # transformer case -> (#layers, #heads, L, T)
            # get the most diagonal head according to focus rate
            att_ws = torch.cat([att_w for att_w in att_ws], dim=0)  # (#heads * #layers, L, T)
            diagonal_scores = att_ws.max(dim=-1)[0].mean(dim=-1)  # (#heads * #layers,)
            diagonal_head_idx = diagonal_scores.argmax()
            att_ws = att_ws[diagonal_head_idx]  # (L, T)
        else:
            raise ValueError("att_ws should be 2 or 4 dimensional tensor.")
        # calculate duration from 2d attention weight: count frames whose argmax hits each input index
        durations = torch.stack([att_ws.argmax(-1).eq(i).sum() for i in range(att_ws.shape[1])])
        return durations.view(-1, 1).float()
    # define writer instances
    feat_writer = kaldiio.WriteHelper(
        'ark,scp:{o}.ark,{o}.scp'.format(o=args.out))
    if args.save_durations:
        dur_writer = kaldiio.WriteHelper(
            'ark,scp:{o}.ark,{o}.scp'.format(
                o=args.out.replace("feats", "durations")))
    if args.save_focus_rates:
        fr_writer = kaldiio.WriteHelper(
            'ark,scp:{o}.ark,{o}.scp'.format(
                o=args.out.replace("feats", "focus_rates")))
    # start decoding
    for idx, utt_id in enumerate(js.keys()):
        # setup inputs
        batch = [(utt_id, js[utt_id])]
        data = load_inputs_and_targets(batch)
        x = torch.LongTensor(data[0][0]).to(device)
        spemb = None
        if train_args.use_speaker_embedding:
            spemb = torch.FloatTensor(data[1][0]).to(device)
        # decode and write
        start_time = time.time()
        outs, probs, att_ws = model.inference(x, args, spemb=spemb)
        logging.info("inference speed = %.1f frames / sec." % (
            int(outs.size(0)) / (time.time() - start_time)))
        if outs.size(0) == x.size(0) * args.maxlenratio:
            logging.warning("output length reaches maximum length (%s)." % utt_id)
        focus_rate = _calculate_focus_rete(att_ws)
        logging.info('(%d/%d) %s (size: %d->%d, focus rate: %.3f)' % (
            idx + 1, len(js.keys()), utt_id, x.size(0), outs.size(0), focus_rate))
        feat_writer[utt_id] = outs.cpu().numpy()
        if args.save_durations:
            ds = _convert_att_to_duration(att_ws)
            dur_writer[utt_id] = ds.cpu().numpy()
        if args.save_focus_rates:
            fr_writer[utt_id] = np.array(focus_rate).reshape(1, 1)
        # plot and save prob and att_ws
        if probs is not None:
            _plot_and_save(probs.cpu().numpy(), os.path.dirname(args.out) + "/probs/%s_prob.png" % utt_id)
        if att_ws is not None:
            _plot_and_save(att_ws.cpu().numpy(), os.path.dirname(args.out) + "/att_ws/%s_att_ws.png" % utt_id)
    # close file object
    feat_writer.close()
    if args.save_durations:
        dur_writer.close()
    if args.save_focus_rates:
        fr_writer.close()
| 39.080997 | 110 | 0.60004 |
import copy
import json
import logging
import math
import os
import time
import chainer
import kaldiio
import numpy as np
import torch
from chainer import training
from chainer.training import extensions
from espnet.asr.asr_utils import get_model_conf
from espnet.asr.asr_utils import snapshot_object
from espnet.asr.asr_utils import torch_load
from espnet.asr.asr_utils import torch_resume
from espnet.asr.asr_utils import torch_snapshot
from espnet.asr.pytorch_backend.asr_init import load_trained_modules
from espnet.nets.pytorch_backend.nets_utils import pad_list
from espnet.nets.tts_interface import TTSInterface
from espnet.utils.dataset import ChainerDataLoader
from espnet.utils.dataset import TransformDataset
from espnet.utils.dynamic_import import dynamic_import
from espnet.utils.io_utils import LoadInputsAndTargets
from espnet.utils.training.batchfy import make_batchset
from espnet.utils.training.evaluator import BaseEvaluator
from espnet.utils.deterministic_utils import set_deterministic_pytorch
from espnet.utils.training.train_utils import check_early_stop
from espnet.utils.training.train_utils import set_early_stop
from espnet.utils.training.iterators import ShufflingEnabler
import matplotlib
from espnet.utils.training.tensorboard_logger import TensorboardLogger
from tensorboardX import SummaryWriter
matplotlib.use('Agg')
class CustomEvaluator(BaseEvaluator):
    """Custom evaluator that runs a pytorch model under chainer's reporter."""
    def __init__(self, model, iterator, target, device):
        """Initialize module.
        Args:
            model (torch.nn.Module): Pytorch model instance.
            iterator (chainer.dataset.Iterator): Iterator for validation.
            target (chainer.Chain): Dummy chain instance.
            device (torch.device): The device to be used in evaluation.
        """
        super(CustomEvaluator, self).__init__(iterator, target)
        self.model = model
        self.device = device
    def evaluate(self):
        """Evaluate over validation iterator and return the averaged observations."""
        iterator = self._iterators['main']
        if self.eval_hook:
            self.eval_hook(self)
        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            # iterate over a shallow copy when the iterator cannot be reset
            it = copy.copy(iterator)
        summary = chainer.reporter.DictSummary()
        self.model.eval()
        with torch.no_grad():
            for batch in it:
                # move the minibatch (tuple of tensors or dict of tensors) to device
                if isinstance(batch, tuple):
                    x = tuple(arr.to(self.device) for arr in batch)
                else:
                    x = batch
                    for key in x.keys():
                        x[key] = x[key].to(self.device)
                observation = {}
                with chainer.reporter.report_scope(observation):
                    # forward only; values reported during the forward pass
                    # are collected into `observation` via report_scope
                    if isinstance(x, tuple):
                        self.model(*x)
                    else:
                        self.model(**x)
                summary.add(observation)
        # restore training mode before returning to the trainer loop
        self.model.train()
        return summary.compute_mean()
class CustomUpdater(training.StandardUpdater):
    """Custom updater with gradient accumulation and gradient clipping."""
    def __init__(self, model, grad_clip, iterator, optimizer, device, accum_grad=1):
        """Initialize module.
        Args:
            model (torch.nn.Module): Pytorch model instance.
            grad_clip (float): The gradient clipping value.
            iterator (chainer.dataset.Iterator): Iterator for training.
            optimizer (torch.optim.Optimizer): Pytorch optimizer instance.
            device (torch.device): The device to be used in training.
            accum_grad (int): Number of forward passes whose gradients are
                accumulated before each optimizer step.
        """
        super(CustomUpdater, self).__init__(iterator, optimizer)
        self.model = model
        self.grad_clip = grad_clip
        self.device = device
        self.clip_grad_norm = torch.nn.utils.clip_grad_norm_
        self.accum_grad = accum_grad
        self.forward_count = 0
    def update_core(self):
        """Update model one step."""
        # single iterator and optimizer are registered under the name 'main'
        train_iter = self.get_iterator('main')
        optimizer = self.get_optimizer('main')
        # get the next minibatch and move it to the target device
        batch = train_iter.next()
        if isinstance(batch, tuple):
            x = tuple(arr.to(self.device) for arr in batch)
        else:
            x = batch
            for key in x.keys():
                x[key] = x[key].to(self.device)
        # compute loss; scaled by 1/accum_grad so accumulated gradients
        # average over the accumulation window
        if isinstance(x, tuple):
            loss = self.model(*x).mean() / self.accum_grad
        else:
            loss = self.model(**x).mean() / self.accum_grad
        loss.backward()
        # update parameters only every accum_grad forward passes
        self.forward_count += 1
        if self.forward_count != self.accum_grad:
            return
        self.forward_count = 0
        # clip gradients and check the resulting norm for NaN
        grad_norm = self.clip_grad_norm(self.model.parameters(), self.grad_clip)
        logging.debug('grad norm={}'.format(grad_norm))
        if math.isnan(grad_norm):
            # skip the step on a NaN gradient but still clear gradients below
            logging.warning('grad norm is nan. Do not update model.')
        else:
            optimizer.step()
        optimizer.zero_grad()
    def update(self):
        """Run update_core and advance the iteration counter after each optimizer step."""
        self.update_core()
        if self.forward_count == 0:
            self.iteration += 1
class CustomConverter(object):
    """Custom converter building padded torch tensors from a loaded minibatch."""
    def __init__(self):
        """Initialize module."""
        # NOTE: keep as class for future development
        pass
    def __call__(self, batch, device=torch.device('cpu')):
        """Convert a given batch.
        Args:
            batch (list): One-element list holding a tuple of ndarray lists
                (input token sequences, target feature sequences, optional
                speaker embeddings, optional extra targets).
            device (torch.device): The device the tensors are sent to.
        Returns:
            dict: Converted tensors with keys "xs", "ilens", "ys", "labels",
                "olens" and, when present in the batch, "spembs" / "extras".
        """
        # batch should be located in a one-element list
        assert len(batch) == 1
        xs, ys, spembs, extras = batch[0]
        # sequence lengths must be tensors so DataParallel can scatter them
        ilens = torch.from_numpy(np.array([x.shape[0] for x in xs])).long().to(device)
        olens = torch.from_numpy(np.array([y.shape[0] for y in ys])).long().to(device)
        # zero-pad inputs / targets and move them to the target device
        xs = pad_list([torch.from_numpy(x).long() for x in xs], 0).to(device)
        ys = pad_list([torch.from_numpy(y).float() for y in ys], 0).to(device)
        # stop-token labels: 1.0 from the last valid frame of each utterance on
        labels = ys.new_zeros(ys.size(0), ys.size(1))
        for i, l in enumerate(olens):
            labels[i, l - 1:] = 1.0
        new_batch = {
            "xs": xs,
            "ilens": ilens,
            "ys": ys,
            "labels": labels,
            "olens": olens,
        }
        # optional speaker embeddings
        if spembs is not None:
            spembs = torch.from_numpy(np.array(spembs)).float()
            new_batch["spembs"] = spembs.to(device)
        # optional extra targets, padded the same way as ys
        if extras is not None:
            extras = pad_list([torch.from_numpy(extra).float() for extra in extras], 0)
            new_batch["extras"] = extras.to(device)
        return new_batch
def train(args):
    """Train E2E-TTS model.

    Args:
        args (Namespace): Training options (data jsons, model module,
            optimizer settings, batching and trainer configuration).

    """
    set_deterministic_pytorch(args)
    # check cuda availability
    if not torch.cuda.is_available():
        logging.warning('cuda is not available')
    # get input and output dimension info
    with open(args.valid_json, 'rb') as f:
        valid_json = json.load(f)['utts']
    utts = list(valid_json.keys())
    # reverse input and output dimension (TTS consumes text, emits features)
    idim = int(valid_json[utts[0]]['output'][0]['shape'][1])
    odim = int(valid_json[utts[0]]['input'][0]['shape'][1])
    logging.info('#input dims : ' + str(idim))
    logging.info('#output dims: ' + str(odim))
    # get extra input and output dimension
    if args.use_speaker_embedding:
        args.spk_embed_dim = int(valid_json[utts[0]]['input'][1]['shape'][0])
    else:
        args.spk_embed_dim = None
    if args.use_second_target:
        args.spc_dim = int(valid_json[utts[0]]['input'][1]['shape'][1])
    else:
        args.spc_dim = None
    # write model config
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    model_conf = args.outdir + '/model.json'
    with open(model_conf, 'wb') as f:
        # FIX: the original message lacked a space before the path
        logging.info('writing a model config file to ' + model_conf)
        f.write(json.dumps((idim, odim, vars(args)),
                           indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8'))
    for key in sorted(vars(args).keys()):
        logging.info('ARGS: ' + key + ': ' + str(vars(args)[key]))
    # specify model architecture
    if args.enc_init is not None or args.dec_init is not None:
        model = load_trained_modules(idim, odim, args, TTSInterface)
    else:
        model_class = dynamic_import(args.model_module)
        model = model_class(idim, odim, args)
    assert isinstance(model, TTSInterface)
    logging.info(model)
    reporter = model.reporter
    # check the use of multi-gpu
    if args.ngpu > 1:
        model = torch.nn.DataParallel(model, device_ids=list(range(args.ngpu)))
        if args.batch_size != 0:
            logging.warning('batch size is automatically increased (%d -> %d)' % (
                args.batch_size, args.batch_size * args.ngpu))
            args.batch_size *= args.ngpu
    # set torch device
    device = torch.device("cuda" if args.ngpu > 0 else "cpu")
    model = model.to(device)
    # Setup an optimizer
    if args.opt == 'adam':
        optimizer = torch.optim.Adam(
            model.parameters(), args.lr, eps=args.eps,
            weight_decay=args.weight_decay)
    elif args.opt == 'noam':
        from espnet.nets.pytorch_backend.transformer.optimizer import get_std_opt
        optimizer = get_std_opt(model, args.adim, args.transformer_warmup_steps, args.transformer_lr)
    else:
        raise NotImplementedError("unknown optimizer: " + args.opt)
    # FIXME: TOO DIRTY HACK — make the torch optimizer look like a chainer one
    setattr(optimizer, 'target', reporter)
    setattr(optimizer, 'serialize', lambda s: reporter.serialize(s))
    # read json data
    with open(args.train_json, 'rb') as f:
        train_json = json.load(f)['utts']
    with open(args.valid_json, 'rb') as f:
        valid_json = json.load(f)['utts']
    use_sortagrad = args.sortagrad == -1 or args.sortagrad > 0
    if use_sortagrad:
        args.batch_sort_key = "input"
    # make minibatch list (variable length)
    train_batchset = make_batchset(train_json, args.batch_size,
                                   args.maxlen_in, args.maxlen_out, args.minibatches,
                                   batch_sort_key=args.batch_sort_key,
                                   min_batch_size=args.ngpu if args.ngpu > 1 else 1,
                                   shortest_first=use_sortagrad,
                                   count=args.batch_count,
                                   batch_bins=args.batch_bins,
                                   batch_frames_in=args.batch_frames_in,
                                   batch_frames_out=args.batch_frames_out,
                                   batch_frames_inout=args.batch_frames_inout,
                                   swap_io=True, iaxis=0, oaxis=0)
    valid_batchset = make_batchset(valid_json, args.batch_size,
                                   args.maxlen_in, args.maxlen_out, args.minibatches,
                                   batch_sort_key=args.batch_sort_key,
                                   min_batch_size=args.ngpu if args.ngpu > 1 else 1,
                                   count=args.batch_count,
                                   batch_bins=args.batch_bins,
                                   batch_frames_in=args.batch_frames_in,
                                   batch_frames_out=args.batch_frames_out,
                                   batch_frames_inout=args.batch_frames_inout,
                                   swap_io=True, iaxis=0, oaxis=0)
    load_tr = LoadInputsAndTargets(
        mode='tts',
        use_speaker_embedding=args.use_speaker_embedding,
        use_second_target=args.use_second_target,
        preprocess_conf=args.preprocess_conf,
        preprocess_args={'train': True},  # Switch the mode of preprocessing
        keep_all_data_on_mem=args.keep_all_data_on_mem,
    )
    load_cv = LoadInputsAndTargets(
        mode='tts',
        use_speaker_embedding=args.use_speaker_embedding,
        use_second_target=args.use_second_target,
        preprocess_conf=args.preprocess_conf,
        preprocess_args={'train': False},  # Switch the mode of preprocessing
        keep_all_data_on_mem=args.keep_all_data_on_mem,
    )
    converter = CustomConverter()
    # hack to make batchsize argument as 1
    # actual batchsize is included in a list
    train_iter = {'main': ChainerDataLoader(
        dataset=TransformDataset(train_batchset, lambda data: converter([load_tr(data)])),
        batch_size=1, num_workers=args.num_iter_processes,
        shuffle=not use_sortagrad, collate_fn=lambda x: x[0])}
    valid_iter = {'main': ChainerDataLoader(
        dataset=TransformDataset(valid_batchset, lambda data: converter([load_cv(data)])),
        batch_size=1, shuffle=False, collate_fn=lambda x: x[0],
        num_workers=args.num_iter_processes)}
    # Set up a trainer
    updater = CustomUpdater(model, args.grad_clip, train_iter, optimizer, device, args.accum_grad)
    trainer = training.Trainer(updater, (args.epochs, 'epoch'), out=args.outdir)
    # Resume from a snapshot
    if args.resume:
        logging.info('resumed from %s' % args.resume)
        torch_resume(args.resume, trainer)
    # set intervals
    eval_interval = (args.eval_interval_epochs, 'epoch')
    save_interval = (args.save_interval_epochs, 'epoch')
    report_interval = (args.report_interval_iters, 'iteration')
    # Evaluate the model with the validation dataset for each epoch
    trainer.extend(CustomEvaluator(
        model, valid_iter, reporter, device), trigger=eval_interval)
    # Save snapshot for each epoch
    trainer.extend(torch_snapshot(), trigger=save_interval)
    # Save best models
    trainer.extend(snapshot_object(model, 'model.loss.best'),
                   trigger=training.triggers.MinValueTrigger(
                       'validation/main/loss', trigger=eval_interval))
    # Save attention figure for each epoch
    if args.num_save_attention > 0:
        data = sorted(list(valid_json.items())[:args.num_save_attention],
                      key=lambda x: int(x[1]['input'][0]['shape'][1]), reverse=True)
        if hasattr(model, "module"):
            att_vis_fn = model.module.calculate_all_attentions
            plot_class = model.module.attention_plot_class
        else:
            att_vis_fn = model.calculate_all_attentions
            plot_class = model.attention_plot_class
        att_reporter = plot_class(
            att_vis_fn, data, args.outdir + '/att_ws',
            converter=converter,
            transform=load_cv,
            device=device, reverse=True)
        trainer.extend(att_reporter, trigger=eval_interval)
    else:
        att_reporter = None
    # Make a plot for training and validation values
    if hasattr(model, "module"):
        base_plot_keys = model.module.base_plot_keys
    else:
        base_plot_keys = model.base_plot_keys
    plot_keys = []
    for key in base_plot_keys:
        plot_key = ['main/' + key, 'validation/main/' + key]
        trainer.extend(extensions.PlotReport(
            plot_key, 'epoch', file_name=key + '.png'), trigger=eval_interval)
        plot_keys += plot_key
    trainer.extend(extensions.PlotReport(
        plot_keys, 'epoch', file_name='all_loss.png'), trigger=eval_interval)
    # Write a log of evaluation statistics for each epoch
    trainer.extend(extensions.LogReport(trigger=report_interval))
    report_keys = ['epoch', 'iteration', 'elapsed_time'] + plot_keys
    trainer.extend(extensions.PrintReport(report_keys), trigger=report_interval)
    trainer.extend(extensions.ProgressBar(), trigger=report_interval)
    set_early_stop(trainer, args)
    if args.tensorboard_dir is not None and args.tensorboard_dir != "":
        writer = SummaryWriter(args.tensorboard_dir)
        trainer.extend(TensorboardLogger(writer, att_reporter), trigger=report_interval)
    if use_sortagrad:
        trainer.extend(ShufflingEnabler([train_iter]),
                       trigger=(args.sortagrad if args.sortagrad != -1 else args.epochs, 'epoch'))
    # Run the training
    trainer.run()
    check_early_stop(trainer, args.epochs)
@torch.no_grad()
def decode(args):
    """Decode with E2E-TTS model and write features (and optional extras) to ark/scp.

    Args:
        args (Namespace): Decoding options (model path, data json, output
            path and inference-related flags).

    """
    set_deterministic_pytorch(args)
    # read training config
    idim, odim, train_args = get_model_conf(args.model, args.model_conf)
    # show arguments
    for key in sorted(vars(args).keys()):
        logging.info('args: ' + key + ': ' + str(vars(args)[key]))
    # define model
    model_class = dynamic_import(train_args.model_module)
    model = model_class(idim, odim, train_args)
    assert isinstance(model, TTSInterface)
    logging.info(model)
    # load trained model parameters
    logging.info('reading model parameters from ' + args.model)
    torch_load(args.model, model)
    model.eval()
    # set torch device
    device = torch.device("cuda" if args.ngpu > 0 else "cpu")
    model = model.to(device)
    # read json data
    with open(args.json, 'rb') as f:
        js = json.load(f)['utts']
    # check directory
    outdir = os.path.dirname(args.out)
    if len(outdir) != 0 and not os.path.exists(outdir):
        os.makedirs(outdir)
    load_inputs_and_targets = LoadInputsAndTargets(
        mode='tts', load_input=False, sort_in_input_length=False,
        use_speaker_embedding=train_args.use_speaker_embedding,
        preprocess_conf=train_args.preprocess_conf
        if args.preprocess_conf is None else args.preprocess_conf,
        preprocess_args={'train': False}
    )
    # helper to plot stop probabilities / attention weights and save as png
    def _plot_and_save(array, figname, figsize=(6, 4), dpi=150):
        import matplotlib.pyplot as plt
        shape = array.shape
        if len(shape) == 1:
            # for eos probability
            plt.figure(figsize=figsize, dpi=dpi)
            plt.plot(array)
            plt.xlabel("Frame")
            plt.ylabel("Probability")
            plt.ylim([0, 1])
        elif len(shape) == 2:
            # for tacotron 2 attention weights, whose shape is (out_length, in_length)
            plt.figure(figsize=figsize, dpi=dpi)
            plt.imshow(array, aspect="auto")
            plt.xlabel("Input")
            plt.ylabel("Output")
        elif len(shape) == 4:
            # for transformer attention weights,
            # whose shape is (#layers, #heads, out_length, in_length)
            plt.figure(figsize=(figsize[0] * shape[0], figsize[1] * shape[1]), dpi=dpi)
            for idx1, xs in enumerate(array):
                for idx2, x in enumerate(xs, 1):
                    plt.subplot(shape[0], shape[1], idx1 * shape[1] + idx2)
                    plt.imshow(x, aspect="auto")
                    plt.xlabel("Input")
                    plt.ylabel("Output")
        else:
            raise NotImplementedError("Support only from 1D to 4D array.")
        plt.tight_layout()
        if not os.path.exists(os.path.dirname(figname)):
            # NOTE: exist_ok = True is needed for parallel process decoding
            os.makedirs(os.path.dirname(figname), exist_ok=True)
        plt.savefig(figname)
        plt.close()
    # helper to calculate focus rate (see section 3.3 in https://arxiv.org/abs/1905.09263)
    def _calculate_focus_rete(att_ws):
        if att_ws is None:
            # fastspeech case -> None
            return 1.0
        elif len(att_ws.shape) == 2:
            # tacotron 2 case -> (L, T)
            return float(att_ws.max(dim=-1)[0].mean())
        elif len(att_ws.shape) == 4:
            # transformer case -> (#layers, #heads, L, T)
            return float(att_ws.max(dim=-1)[0].mean(dim=-1).max())
        else:
            raise ValueError("att_ws should be 2 or 4 dimensional tensor.")
    # helper to convert attention weights to per-input durations
    def _convert_att_to_duration(att_ws):
        if len(att_ws.shape) == 2:
            # tacotron 2 case -> (L, T)
            pass
        elif len(att_ws.shape) == 4:
            # transformer case -> (#layers, #heads, L, T)
            # get the most diagonal head according to focus rate
            att_ws = torch.cat([att_w for att_w in att_ws], dim=0)  # (#heads * #layers, L, T)
            diagonal_scores = att_ws.max(dim=-1)[0].mean(dim=-1)  # (#heads * #layers,)
            diagonal_head_idx = diagonal_scores.argmax()
            att_ws = att_ws[diagonal_head_idx]  # (L, T)
        else:
            raise ValueError("att_ws should be 2 or 4 dimensional tensor.")
        # duration of input i = number of output frames whose argmax attends to i
        durations = torch.stack([att_ws.argmax(-1).eq(i).sum() for i in range(att_ws.shape[1])])
        return durations.view(-1, 1).float()
    # define writer instances
    feat_writer = kaldiio.WriteHelper(
        'ark,scp:{o}.ark,{o}.scp'.format(o=args.out))
    if args.save_durations:
        dur_writer = kaldiio.WriteHelper(
            'ark,scp:{o}.ark,{o}.scp'.format(
                o=args.out.replace("feats", "durations")))
    if args.save_focus_rates:
        fr_writer = kaldiio.WriteHelper(
            'ark,scp:{o}.ark,{o}.scp'.format(
                o=args.out.replace("feats", "focus_rates")))
    # start decoding
    for idx, utt_id in enumerate(js.keys()):
        # setup inputs
        batch = [(utt_id, js[utt_id])]
        data = load_inputs_and_targets(batch)
        x = torch.LongTensor(data[0][0]).to(device)
        spemb = None
        if train_args.use_speaker_embedding:
            spemb = torch.FloatTensor(data[1][0]).to(device)
        # decode and write
        start_time = time.time()
        outs, probs, att_ws = model.inference(x, args, spemb=spemb)
        logging.info("inference speed = %.1f frames / sec." % (
            int(outs.size(0)) / (time.time() - start_time)))
        if outs.size(0) == x.size(0) * args.maxlenratio:
            logging.warning("output length reaches maximum length (%s)." % utt_id)
        focus_rate = _calculate_focus_rete(att_ws)
        logging.info('(%d/%d) %s (size: %d->%d, focus rate: %.3f)' % (
            idx + 1, len(js.keys()), utt_id, x.size(0), outs.size(0), focus_rate))
        feat_writer[utt_id] = outs.cpu().numpy()
        if args.save_durations:
            ds = _convert_att_to_duration(att_ws)
            dur_writer[utt_id] = ds.cpu().numpy()
        if args.save_focus_rates:
            fr_writer[utt_id] = np.array(focus_rate).reshape(1, 1)
        # plot and save prob and att_ws
        if probs is not None:
            _plot_and_save(probs.cpu().numpy(), os.path.dirname(args.out) + "/probs/%s_prob.png" % utt_id)
        if att_ws is not None:
            _plot_and_save(att_ws.cpu().numpy(), os.path.dirname(args.out) + "/att_ws/%s_att_ws.png" % utt_id)
    # close the main feature writer
    feat_writer.close()
if args.save_durations:
dur_writer.close()
if args.save_focus_rates:
fr_writer.close()
| true | true |
f7f6220819e6f4f96c45892c8c216ab4d391c4d5 | 9,326 | py | Python | scripts/2d_recon.py | JulianKnodt/nerf_atlas | 6866713c498cea026cb215260a779a2c6c13246c | [
"Apache-2.0"
] | 57 | 2021-05-25T12:57:16.000Z | 2022-03-30T06:27:44.000Z | scripts/2d_recon.py | JulianKnodt/nerf_atlas | 6866713c498cea026cb215260a779a2c6c13246c | [
"Apache-2.0"
] | 9 | 2021-07-26T22:28:40.000Z | 2021-11-29T20:51:59.000Z | scripts/2d_recon.py | JulianKnodt/nerf_atlas | 6866713c498cea026cb215260a779a2c6c13246c | [
"Apache-2.0"
] | 12 | 2021-05-25T12:36:45.000Z | 2022-01-28T04:20:12.000Z | import sys
sys.path[0] = sys.path[0][:-len("scripts/")] # hacky way to treat it as root directory
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision as tv
from src.neural_blocks import ( SkipConnMLP, FourierEncoder )
from src.utils import ( fat_sigmoid )
from tqdm import trange
ts = 100
epochs = 20_000
SCALE = 1
class LearnedImage(nn.Module):
  """Implicit image: a SIREN-style MLP mapping 2D coordinates to intensity."""
  def __init__(self):
    super().__init__()
    # Coordinate-conditioned MLP with sinusoidal activations.
    self.query = SkipConnMLP(
      in_size=2,
      out=1,
      latent_size=0,
      activation=torch.sin,
      num_layers=5,
      hidden_size=256,
      init="siren",
    )
  def forward(self, x):
    # Squash the raw MLP output into a (slightly widened) [0, 1] range.
    raw = self.query(x)
    return fat_sigmoid(raw)
class PixelImage(nn.Module):
  """Explicit image stored as a raw pixel grid, sampled bilinearly.

  Args:
    frame: (H, W, C) tensor — a frame from the training data, chosen so that
      every scene element is present in the canvas.
  """
  def __init__(self, frame):
    super().__init__()
    assert(len(frame.shape) == 3)
    # just use a frame from the training data, ensures it has all elements
    # Stored channel-first so grid_sample can consume it.
    self.data = frame.permute(2,1,0)#nn.Parameter(torch.randn(1, 1, 256, 256))
  def forward(self, x):
    B = x.shape[0]
    vals = F.grid_sample(
      self.data.expand(B,-1,-1,-1),
      x,
      # BUGFIX: grid_sample only accepts "zeros" | "border" | "reflection";
      # the previous value "zero" raised a ValueError at runtime.
      padding_mode="zeros",
      align_corners=False,
    ).permute(0,2,3,1)
    return vals
class LIIF(nn.Module):
  """Local implicit image function: a learned latent grid decoded by an MLP.

  A low-resolution grid of embeddings is bilinearly interpolated at query
  coordinates; the interpolated latent (plus the grid cell size) is decoded
  to a single intensity value.
  """
  def __init__(
    self,
    reso:int=16,
    emb_size:int=128
  ):
    super().__init__()
    # Learnable (1, emb_size, reso, reso) latent grid.
    self.grid = nn.Parameter(torch.randn(1, emb_size, reso, reso))
    self.query = SkipConnMLP(
      in_size=emb_size, out=1, latent_size=2,
      activation=torch.sin, num_layers=5, hidden_size=256, init="siren",
    )
  def forward(self, x):
    B = x.shape[0]
    # Bilinearly sample the latent grid at the query coordinates.
    latent = F.grid_sample(
      self.grid.expand(B, -1,-1,-1), x,
      padding_mode="reflection",
      align_corners=False,
    ).permute(0,2,3,1)
    sz = latent.shape[1:3]
    # Per-query cell size, fed to the decoder as a 2D latent conditioning.
    cell_size = torch.tensor([1/sz[0], 1/sz[1]]).to(x.device)
    cell_size = cell_size[None,None,None].expand(B,sz[0],sz[1],-1)
    return fat_sigmoid(self.query(latent, cell_size))
class DistanceImage(nn.Module):
  """Image parameterized by n learnable 2D "rig" points.

  Each query pixel is described by its inverse squared distance to every
  point; an MLP decodes that n-vector to an intensity. Moving the points
  (see LongAnimatorPts) deforms the rendered image.
  """
  def __init__(self, n=32):
    super().__init__()
    self.n = n
    # Learnable point cloud of n 2D positions.
    self.points = nn.Parameter(torch.randn(n, 2, requires_grad=True), requires_grad=True)
    self.query = SkipConnMLP(
      in_size=n, out=1, num_layers=5, hidden_size=512, init="xavier",
    )
  def forward(self, x): return self.from_pts(self.points, x)
  def from_pts(self, pts, x):
    """Render intensities at coordinates x using an explicit point set pts."""
    pairwise_disp = pts[:, None, None, ...] - x[...,None,:]
    pairwise_dist = pairwise_disp.square().sum(dim=-1)
    # 1/dist features (epsilon avoids division by zero at the points).
    return fat_sigmoid(self.query(1/(1e-5+pairwise_dist)))
#torch.autograd.set_detect_anomaly(True); print("DEBUG")
class LongAnimator(nn.Module):
  """Animates a static image over a long horizon via per-segment Bezier warps.

  Time is split into integer segments; each segment boundary gets a learned
  embedding. For every pixel, anchor positions at the two surrounding
  boundaries plus MLP-predicted intermediate control points define a Bezier
  displacement curve, evaluated at the fractional time with De Casteljau.
  """
  def __init__(
    self, img,
    segments:int,
    spline:int=16,
    seg_emb_size:int=128,
    anchor_interim:int=128,
  ):
    super().__init__()
    self.img = img
    self.spline_n = spline
    self.ses = ses = seg_emb_size
    segments = int(segments)
    self.segments = segments
    self.midsize = anchor_interim
    # One embedding per segment boundary (+2 for the final boundary/overflow).
    self.seg_emb = nn.Embedding(segments+2, ses, max_norm=1)
    # Predicts a 2D anchor position plus a latent per (pixel, boundary).
    self.anchors = SkipConnMLP(
      in_size=2, out=2+anchor_interim, latent_size=ses,
      num_layers=5, hidden_size=512, init="xavier",
    )
    # Predicts the (spline-2) interior Bezier control points per pixel.
    self.point_estim=SkipConnMLP(
      in_size=2, out=(spline-2)*2,
      num_layers=5, hidden_size=512,
      latent_size=ses+2*anchor_interim, init="xavier",
    )
  def forward(self, x, t):
    B = t.shape[0]
    t = t[:, None, None, None]
    # Integer part selects the segment; clamp guards negative times.
    seg = t.floor().int().clamp(min=0)
    # Embeddings for the segment's start and end boundaries, broadcast to grid.
    emb = self.seg_emb(torch.cat([seg,seg+1], dim=-1)).expand(-1, *x.shape[1:3], -1, -1)
    anchors, anchor_latent = self.anchors(
      x[..., None, :].expand(B,-1,-1,2,-1), emb,
    ).split([2, self.midsize], dim=-1)
    start, end = [a[None].squeeze(-2) for a in anchors.split([1,1], dim=-2)]
    point_estim_latent = torch.cat([emb[..., 0, :], anchor_latent.flatten(-2)], dim=-1)
    # Interior control points, stacked along a new leading "control" dim.
    midpts = torch.stack(
      self.point_estim(x.expand(B,-1,-1,-1), point_estim_latent).split(2, dim=-1), dim=0
    )
    # Interior points are predicted relative to the start anchor.
    ctrl_pts = torch.cat([start, midpts-start, end], dim=0)
    # Bound pts within some space
    ctrl_pts = 2*ctrl_pts.tanh()
    # Evaluate the Bezier displacement at the fractional time within segment.
    dx = de_casteljau(ctrl_pts, t.frac(), self.spline_n)
    return self.img(x+dx)
class LongAnimatorPts(nn.Module):
  """Like LongAnimator, but warps the rig points of a DistanceImage.

  Instead of displacing every query pixel, Bezier curves are predicted for
  the image's n learned points only; the deformed points are then rendered
  with DistanceImage.from_pts.
  """
  def __init__(
    self,
    img: DistanceImage,
    segments:int,
    spline:int=5,
    seg_emb_size:int=128,
    anchor_interim:int=128,
  ):
    super().__init__()
    self.img = img
    self.spline_n = spline
    self.ses = ses = seg_emb_size
    segments = int(segments)
    self.segments = segments
    self.midsize = anchor_interim
    # One embedding per segment boundary (+2 for the final boundary/overflow).
    self.seg_emb = nn.Embedding(segments+2, ses)
    self.anchors = SkipConnMLP(
      in_size=2, out=2+anchor_interim, latent_size=ses,
      num_layers=5, hidden_size=512, init="xavier",
    )
    self.point_estim=SkipConnMLP(
      in_size=2, out=(spline-2)*2,
      num_layers=5, hidden_size=512,
      latent_size=ses+2*anchor_interim, init="xavier",
    )
  def forward(self, x, t):
    B = t.shape[0]
    N = self.img.n
    t = t[:, None]
    seg = t.floor().int().clamp(min=0)
    # Boundary embeddings broadcast to each of the N rig points.
    emb = self.seg_emb(torch.cat([seg,seg+1], dim=-1))[:, None, ...].expand(B,N,2,-1)
    rigs = self.img.points[None].expand(B,-1,-1) # [N:2]
    anchors, anchor_latent = self.anchors(
      rigs[...,None,:].expand(B,N,2,2), emb,
    ).split([2, self.midsize], dim=-1)
    start, end = [a[None].squeeze(-2) for a in anchors.split([1,1], dim=-2)]
    point_estim_latent = torch.cat([emb[..., 0, :], anchor_latent.flatten(-2)], dim=-1)
    midpts = torch.stack(self.point_estim(rigs, point_estim_latent).split(2, dim=-1), dim=0)
    # Interior control points are relative to the start anchor.
    ctrl_pts = torch.cat([start, midpts-start, end], dim=0)
    # Displace the rig points and render the image from the moved points.
    dx = de_casteljau(ctrl_pts, t[:,None].frac(), self.spline_n)
    return self.img.from_pts(rigs+dx, x)
# A single Skip Connected MLP
class SimpleAnimator(nn.Module):
  """Baseline animator: one MLP maps time directly to a global 2D offset."""
  def __init__(self, img, *args, **kwargs):
    super().__init__()
    self.img = img
    # Scalar time in, 2D displacement out; applied uniformly to every pixel.
    self.pred = SkipConnMLP(
      in_size=1,
      out=2,
      num_layers=7,
      hidden_size=512,
      init="xavier",
    )
  def forward(self, x, t):
    # One displacement per timestep, broadcast across the spatial grid.
    displacement = self.pred(t[..., None])[:, None, None]
    shifted = x + displacement
    return self.img(shifted)
def de_casteljau(coeffs, t, N: int):
  """Evaluate a Bezier curve at parameter t via De Casteljau's algorithm.

  coeffs stacks N control points along dim 0; repeated pairwise linear
  interpolation collapses them to a single point, returned with the leading
  dimension squeezed away.
  """
  pts = coeffs
  one_minus_t = 1 - t
  for _ in range(N - 1):
    pts = one_minus_t * pts[:-1] + t * pts[1:]
  return pts.squeeze(0)
def fft_loss(x, ref):
  """Mean absolute spectral difference between x and ref.

  Both inputs go through an orthonormal 2D real FFT over the spatial dims
  (-3, -2); the loss is the mean magnitude of the complex difference.
  """
  spec_x = torch.fft.rfft2(x, dim=(-3, -2), norm="ortho")
  spec_ref = torch.fft.rfft2(ref, dim=(-3, -2), norm="ortho")
  diff = spec_x - spec_ref
  return diff.abs().mean()
def train(model, ref, times):
  """Fit `model` to reproduce frames `ref` at timestamps `times`.

  Writes side-by-side progress images every 250 epochs and checkpoints the
  whole model every 1000 epochs and at the end.
  """
  t = trange(epochs)
  bs=min(12, times.shape[0])
  # Shared normalized pixel-coordinate grid in [-SCALE, SCALE]^2.
  grid = torch.stack(torch.meshgrid(
    torch.linspace(-SCALE, SCALE, ts),
    torch.linspace(-SCALE, SCALE, ts),
    indexing="ij",
  ),dim=-1).unsqueeze(0).to(device)
  opt = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-6)
  for i in t:
    opt.zero_grad()
    # NOTE(review): zero_grad runs once per epoch while step() runs once per
    # batch, so gradients accumulate across batches; the extra step() after
    # the clip below re-applies the last (now stale) gradients. This looks
    # unintentional — confirm before relying on the training dynamics.
    for rs, ats in zip(ref.split(bs, dim=0), times.split(bs, dim=0)):
      got = model(grid, ats)
      exp = rs.to(device)
      #loss = fft_loss(got, exp)
      loss = F.mse_loss(got, exp) #loss = F.l1_loss(got, exp)
      loss.backward()
      opt.step()
      t.set_postfix(L2=f"{loss.item():.02e}")
      if i % 250 == 0:
        # Save predicted vs. reference montage for visual inspection.
        with torch.no_grad():
          pred_img = tv.utils.make_grid(got.permute(0,3,1,2)).cpu()
          exp_img = tv.utils.make_grid(rs.permute(0,3,1,2))
          result = torch.cat([pred_img, exp_img], dim=1)
          tv.utils.save_image(result, f"outputs/animate_{i:05}.png")
      if i % 1000 == 0 and i != 0:
        torch.save(model, "models/animate_long.pt")
    torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
    opt.step()
  torch.save(model, "models/animate_long.pt")
def test(ref, model, num_secs, n:int=1800):
  """Render n frames over num_secs seconds, report MSE vs. `ref`, and write
  a summary image plus an mp4 of the reconstruction."""
  model = model.eval()
  times = torch.linspace(0,num_secs,n,device=device)
  grid = torch.stack(torch.meshgrid(
    torch.linspace(-SCALE, SCALE, ts),
    torch.linspace(-SCALE, SCALE, ts),
    indexing="ij",
  ),dim=-1).unsqueeze(0).to(device)
  batch_size = 12
  out = []
  # NOTE(review): inference here runs with autograd enabled; wrapping this
  # loop in torch.no_grad() would reduce memory use.
  for batch in times.split(batch_size, dim=0):
    out.append(model(grid, batch))
  out = torch.cat(out, dim=0).cpu()
  loss = F.mse_loss(ref, out)
  print("Final Loss", f"{loss.item():.03e}")
  pred_img = tv.utils.make_grid(out.permute(0,3,1,2), num_secs)
  tv.utils.save_image(pred_img, f"outputs/final.png")
  # Grayscale -> RGB via channel broadcast; fps chosen so the clip spans
  # num_secs seconds.
  tv.io.write_video("outputs/animation.mp4", out.expand(-1, -1, -1, 3)*255, int(n/num_secs))
device="cuda:0"
def main():
  """Load the training video, fine-tune the animator on its first 24
  subsampled frames, then evaluate against the full-rate original."""
  with torch.no_grad():
    frames, _, info = tv.io.read_video("data/heider/animation.mp4", pts_unit="sec")
    og_frames = frames
    fps = info["video_fps"]
    og_num_frames = frames.shape[0]
    num_secs = int(frames.shape[0]//fps)
    # Subsample to roughly 8 frames per second for training.
    frames = frames[::int(fps//8)]
    num_frames = frames.shape[0]
    # uint8 RGB in [0,255] -> grayscale in [0,1], then resize to (ts, ts).
    frames = (frames/255).mean(dim=-1, keepdim=True)
    frames = tv.transforms.functional.resize(frames.permute(0,3,1,2), (ts, ts)).permute(0,2,3,1)
    times = torch.linspace(0, num_secs, num_frames).to(device)
  #model = LongAnimatorPts(DistanceImage(), segments=num_secs).to(device)
  # frames[80] has all components
  #model = LongAnimator(LIIF(), segments=num_secs).to(device)
  #model = SimpleAnimator(LIIF()).to(device)
  # Resume from the last checkpoint rather than training from scratch.
  model = torch.load("models/animate_long.pt")
  train(model, frames[:24], times[:24])
  with torch.no_grad():
    ref = tv.transforms.functional.resize(og_frames.permute(0,3,1,2), (ts, ts)).permute(0,2,3,1)
    ref = (ref/255).mean(dim=-1, keepdim=True)
    test(ref, model, num_secs, og_num_frames)
if __name__ == "__main__": main()
| 33.188612 | 96 | 0.63768 | import sys
sys.path[0] = sys.path[0][:-len("scripts/")]
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision as tv
from src.neural_blocks import ( SkipConnMLP, FourierEncoder )
from src.utils import ( fat_sigmoid )
from tqdm import trange
ts = 100
epochs = 20_000
SCALE = 1
class LearnedImage(nn.Module):
def __init__(self):
super().__init__()
self.query = SkipConnMLP(
in_size=2, out=1, latent_size=0,
activation=torch.sin, num_layers=5, hidden_size=256, init="siren",
)
def forward(self, x):
return fat_sigmoid(self.query(x))
class PixelImage(nn.Module):
  """Explicit image stored as a raw (C, W, H) pixel grid, sampled bilinearly.

  frame: (H, W, C) tensor used directly as the canvas.
  """
  def __init__(self, frame):
    super().__init__()
    assert(len(frame.shape) == 3)
    self.data = frame.permute(2,1,0)
  def forward(self, x):
    B = x.shape[0]
    vals = F.grid_sample(
      self.data.expand(B,-1,-1,-1),
      x,
      # BUGFIX: grid_sample only accepts "zeros" | "border" | "reflection";
      # "zero" raised a ValueError at runtime.
      padding_mode="zeros",
      align_corners=False,
    ).permute(0,2,3,1)
    return vals
class LIIF(nn.Module):
def __init__(
self,
reso:int=16,
emb_size:int=128
):
super().__init__()
self.grid = nn.Parameter(torch.randn(1, emb_size, reso, reso))
self.query = SkipConnMLP(
in_size=emb_size, out=1, latent_size=2,
activation=torch.sin, num_layers=5, hidden_size=256, init="siren",
)
def forward(self, x):
B = x.shape[0]
latent = F.grid_sample(
self.grid.expand(B, -1,-1,-1), x,
padding_mode="reflection",
align_corners=False,
).permute(0,2,3,1)
sz = latent.shape[1:3]
cell_size = torch.tensor([1/sz[0], 1/sz[1]]).to(x.device)
cell_size = cell_size[None,None,None].expand(B,sz[0],sz[1],-1)
return fat_sigmoid(self.query(latent, cell_size))
class DistanceImage(nn.Module):
def __init__(self, n=32):
super().__init__()
self.n = n
self.points = nn.Parameter(torch.randn(n, 2, requires_grad=True), requires_grad=True)
self.query = SkipConnMLP(
in_size=n, out=1, num_layers=5, hidden_size=512, init="xavier",
)
def forward(self, x): return self.from_pts(self.points, x)
def from_pts(self, pts, x):
pairwise_disp = pts[:, None, None, ...] - x[...,None,:]
pairwise_dist = pairwise_disp.square().sum(dim=-1)
return fat_sigmoid(self.query(1/(1e-5+pairwise_dist)))
class LongAnimator(nn.Module):
def __init__(
self, img,
segments:int,
spline:int=16,
seg_emb_size:int=128,
anchor_interim:int=128,
):
super().__init__()
self.img = img
self.spline_n = spline
self.ses = ses = seg_emb_size
segments = int(segments)
self.segments = segments
self.midsize = anchor_interim
self.seg_emb = nn.Embedding(segments+2, ses, max_norm=1)
self.anchors = SkipConnMLP(
in_size=2, out=2+anchor_interim, latent_size=ses,
num_layers=5, hidden_size=512, init="xavier",
)
self.point_estim=SkipConnMLP(
in_size=2, out=(spline-2)*2,
num_layers=5, hidden_size=512,
latent_size=ses+2*anchor_interim, init="xavier",
)
def forward(self, x, t):
B = t.shape[0]
t = t[:, None, None, None]
seg = t.floor().int().clamp(min=0)
emb = self.seg_emb(torch.cat([seg,seg+1], dim=-1)).expand(-1, *x.shape[1:3], -1, -1)
anchors, anchor_latent = self.anchors(
x[..., None, :].expand(B,-1,-1,2,-1), emb,
).split([2, self.midsize], dim=-1)
start, end = [a[None].squeeze(-2) for a in anchors.split([1,1], dim=-2)]
point_estim_latent = torch.cat([emb[..., 0, :], anchor_latent.flatten(-2)], dim=-1)
midpts = torch.stack(
self.point_estim(x.expand(B,-1,-1,-1), point_estim_latent).split(2, dim=-1), dim=0
)
ctrl_pts = torch.cat([start, midpts-start, end], dim=0)
ctrl_pts = 2*ctrl_pts.tanh()
dx = de_casteljau(ctrl_pts, t.frac(), self.spline_n)
return self.img(x+dx)
class LongAnimatorPts(nn.Module):
def __init__(
self,
img: DistanceImage,
segments:int,
spline:int=5,
seg_emb_size:int=128,
anchor_interim:int=128,
):
super().__init__()
self.img = img
self.spline_n = spline
self.ses = ses = seg_emb_size
segments = int(segments)
self.segments = segments
self.midsize = anchor_interim
self.seg_emb = nn.Embedding(segments+2, ses)
self.anchors = SkipConnMLP(
in_size=2, out=2+anchor_interim, latent_size=ses,
num_layers=5, hidden_size=512, init="xavier",
)
self.point_estim=SkipConnMLP(
in_size=2, out=(spline-2)*2,
num_layers=5, hidden_size=512,
latent_size=ses+2*anchor_interim, init="xavier",
)
def forward(self, x, t):
B = t.shape[0]
N = self.img.n
t = t[:, None]
seg = t.floor().int().clamp(min=0)
emb = self.seg_emb(torch.cat([seg,seg+1], dim=-1))[:, None, ...].expand(B,N,2,-1)
rigs = self.img.points[None].expand(B,-1,-1)
anchors, anchor_latent = self.anchors(
rigs[...,None,:].expand(B,N,2,2), emb,
).split([2, self.midsize], dim=-1)
start, end = [a[None].squeeze(-2) for a in anchors.split([1,1], dim=-2)]
point_estim_latent = torch.cat([emb[..., 0, :], anchor_latent.flatten(-2)], dim=-1)
midpts = torch.stack(self.point_estim(rigs, point_estim_latent).split(2, dim=-1), dim=0)
ctrl_pts = torch.cat([start, midpts-start, end], dim=0)
dx = de_casteljau(ctrl_pts, t[:,None].frac(), self.spline_n)
return self.img.from_pts(rigs+dx, x)
class SimpleAnimator(nn.Module):
def __init__(self, img, *args, **kwargs):
super().__init__()
self.img = img
self.pred = SkipConnMLP(
in_size=1, out=2,
num_layers=7, hidden_size=512,
init="xavier",
)
def forward(self, x, t):
B = t.shape[0]
dx = self.pred(t[..., None])[:, None, None]
return self.img(x + dx)
def de_casteljau(coeffs, t, N: int):
betas = coeffs
m1t = 1 - t
for i in range(1, N): betas = betas[:-1] * m1t + betas[1:] * t
return betas.squeeze(0)
def fft_loss(x, ref):
got = torch.fft.rfft2(x, dim=(-3, -2), norm="ortho")
exp = torch.fft.rfft2(ref, dim=(-3, -2), norm="ortho")
return (got - exp).abs().mean()
def train(model, ref, times):
t = trange(epochs)
bs=min(12, times.shape[0])
grid = torch.stack(torch.meshgrid(
torch.linspace(-SCALE, SCALE, ts),
torch.linspace(-SCALE, SCALE, ts),
indexing="ij",
),dim=-1).unsqueeze(0).to(device)
opt = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-6)
for i in t:
opt.zero_grad()
for rs, ats in zip(ref.split(bs, dim=0), times.split(bs, dim=0)):
got = model(grid, ats)
exp = rs.to(device)
loss = F.mse_loss(got, exp)
loss.backward()
opt.step()
t.set_postfix(L2=f"{loss.item():.02e}")
if i % 250 == 0:
with torch.no_grad():
pred_img = tv.utils.make_grid(got.permute(0,3,1,2)).cpu()
exp_img = tv.utils.make_grid(rs.permute(0,3,1,2))
result = torch.cat([pred_img, exp_img], dim=1)
tv.utils.save_image(result, f"outputs/animate_{i:05}.png")
if i % 1000 == 0 and i != 0:
torch.save(model, "models/animate_long.pt")
torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
opt.step()
torch.save(model, "models/animate_long.pt")
def test(ref, model, num_secs, n:int=1800):
model = model.eval()
times = torch.linspace(0,num_secs,n,device=device)
grid = torch.stack(torch.meshgrid(
torch.linspace(-SCALE, SCALE, ts),
torch.linspace(-SCALE, SCALE, ts),
indexing="ij",
),dim=-1).unsqueeze(0).to(device)
batch_size = 12
out = []
for batch in times.split(batch_size, dim=0):
out.append(model(grid, batch))
out = torch.cat(out, dim=0).cpu()
loss = F.mse_loss(ref, out)
print("Final Loss", f"{loss.item():.03e}")
pred_img = tv.utils.make_grid(out.permute(0,3,1,2), num_secs)
tv.utils.save_image(pred_img, f"outputs/final.png")
tv.io.write_video("outputs/animation.mp4", out.expand(-1, -1, -1, 3)*255, int(n/num_secs))
device="cuda:0"
def main():
with torch.no_grad():
frames, _, info = tv.io.read_video("data/heider/animation.mp4", pts_unit="sec")
og_frames = frames
fps = info["video_fps"]
og_num_frames = frames.shape[0]
num_secs = int(frames.shape[0]//fps)
frames = frames[::int(fps//8)]
num_frames = frames.shape[0]
frames = (frames/255).mean(dim=-1, keepdim=True)
frames = tv.transforms.functional.resize(frames.permute(0,3,1,2), (ts, ts)).permute(0,2,3,1)
times = torch.linspace(0, num_secs, num_frames).to(device)
model = torch.load("models/animate_long.pt")
train(model, frames[:24], times[:24])
with torch.no_grad():
ref = tv.transforms.functional.resize(og_frames.permute(0,3,1,2), (ts, ts)).permute(0,2,3,1)
ref = (ref/255).mean(dim=-1, keepdim=True)
test(ref, model, num_secs, og_num_frames)
if __name__ == "__main__": main()
| true | true |
f7f62309a2f4b85e6994a7539ff6dee29a8fb9ef | 2,576 | py | Python | finetune.py | ytian81/CarND-Behavioral-Cloning-P3 | df912ae149035330f4e6be8a6c76a3271d522611 | [
"MIT"
] | null | null | null | finetune.py | ytian81/CarND-Behavioral-Cloning-P3 | df912ae149035330f4e6be8a6c76a3271d522611 | [
"MIT"
] | null | null | null | finetune.py | ytian81/CarND-Behavioral-Cloning-P3 | df912ae149035330f4e6be8a6c76a3271d522611 | [
"MIT"
] | null | null | null | from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Activation, Conv2D, Cropping2D, Dense, Dropout, Flatten, Lambda, MaxPool2D
from keras.models import Sequential
from keras.regularizers import l2
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.models import load_model
from scipy import ndimage
from sklearn.utils import shuffle
from model import assemble_model
import csv
import matplotlib.pyplot as plt
import numpy as np
# data_folder = './data/'
# data_folder = './Track2/'
data_folder = './turn/'
def get_data():
    """Load steering samples from `data_folder`'s driving_log.csv.

    Uses the center, left, and right camera images with a +/-0.2 steering
    correction for the side cameras, mirrors every image (negating its angle)
    to double the data, and returns shuffled (images, steering_angles)
    numpy arrays.
    """
    # Read driving log data from csv file
    lines = []
    with open(data_folder+'/driving_log.csv') as f:
        reader = csv.reader(f)
        for line in reader:
            lines.append(line)
    # Modify image path and extract outputs
    images = []
    steering_angles = []
    delta = 0.2
    for line in lines:
        # Use center, left and right images
        angle_corrections = [0.0, delta, -delta]
        for idx in range(3):
            image_path = line[idx]
            # Rebase the logged absolute path onto the local IMG directory.
            image_path = data_folder + '/IMG/' + image_path.split('/')[-1]
            # NOTE(review): scipy.ndimage.imread was removed in modern SciPy;
            # confirm the pinned SciPy version still provides it.
            image = ndimage.imread(image_path)
            images.append(image)
            steering_angle = float(line[3]) + angle_corrections[idx]
            steering_angles.append(steering_angle)
            # Augment data (double the amount of data)
            images.append(np.fliplr(image))
            steering_angles.append(-steering_angle)
    images = np.array(images)
    steering_angles = np.array(steering_angles)
    # shuffle data before split validation set
    images, steering_angles = shuffle(images, steering_angles)
    return images, steering_angles
# Fine-tune an existing behavioral-cloning model on the freshly collected data.
X_train, y_train = get_data()
model = assemble_model()
# Warm-start from a previously trained checkpoint, then recompile for MSE.
model.load_weights('batch_128_model.h5')
model.compile(loss='mse', optimizer='adam')
# Train 15 epoches at most and save the best model, early stop if validation loss stops improving
checkpoint = ModelCheckpoint('model.h5', monitor='val_loss', save_best_only=True, mode='min', verbose=0)
earlystop = EarlyStopping(monitor='val_loss', patience=3, mode='min')
history_object = model.fit(X_train, y_train, validation_split=0.3, shuffle=True, epochs=15,
                           callbacks=[checkpoint, earlystop])
# Draw training statistics
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.savefig('training_stats.jpg')
| 35.287671 | 104 | 0.707686 | from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Activation, Conv2D, Cropping2D, Dense, Dropout, Flatten, Lambda, MaxPool2D
from keras.models import Sequential
from keras.regularizers import l2
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.models import load_model
from scipy import ndimage
from sklearn.utils import shuffle
from model import assemble_model
import csv
import matplotlib.pyplot as plt
import numpy as np
data_folder = './turn/'
def get_data():
lines = []
with open(data_folder+'/driving_log.csv') as f:
reader = csv.reader(f)
for line in reader:
lines.append(line)
images = []
steering_angles = []
delta = 0.2
for line in lines:
angle_corrections = [0.0, delta, -delta]
for idx in range(3):
image_path = line[idx]
image_path = data_folder + '/IMG/' + image_path.split('/')[-1]
image = ndimage.imread(image_path)
images.append(image)
steering_angle = float(line[3]) + angle_corrections[idx]
steering_angles.append(steering_angle)
images.append(np.fliplr(image))
steering_angles.append(-steering_angle)
images = np.array(images)
steering_angles = np.array(steering_angles)
images, steering_angles = shuffle(images, steering_angles)
return images, steering_angles
X_train, y_train = get_data()
model = assemble_model()
model.load_weights('batch_128_model.h5')
model.compile(loss='mse', optimizer='adam')
checkpoint = ModelCheckpoint('model.h5', monitor='val_loss', save_best_only=True, mode='min', verbose=0)
earlystop = EarlyStopping(monitor='val_loss', patience=3, mode='min')
history_object = model.fit(X_train, y_train, validation_split=0.3, shuffle=True, epochs=15,
callbacks=[checkpoint, earlystop])
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.savefig('training_stats.jpg')
| true | true |
f7f6233350d37c396d4bbf276e3b86cc1cc9dd2f | 11,352 | py | Python | ml/rl/training/sac_trainer.py | johncliu/Horizon | cfa7a873ada5de3bb01e78e2f237d9849b8270b2 | [
"BSD-3-Clause"
] | null | null | null | ml/rl/training/sac_trainer.py | johncliu/Horizon | cfa7a873ada5de3bb01e78e2f237d9849b8270b2 | [
"BSD-3-Clause"
] | 1 | 2021-08-25T16:13:32.000Z | 2021-08-25T16:13:32.000Z | ml/rl/training/sac_trainer.py | johncliu/Horizon | cfa7a873ada5de3bb01e78e2f237d9849b8270b2 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from typing import Optional
import ml.rl.types as rlt
import numpy as np
import torch
import torch.nn.functional as F
from ml.rl.tensorboardX import SummaryWriterContext
from ml.rl.thrift.core.ttypes import SACModelParameters
from ml.rl.training._parametric_dqn_predictor import _ParametricDQNPredictor
from ml.rl.training.actor_predictor import ActorPredictor
from ml.rl.training.rl_exporter import ActorExporter, ParametricDQNExporter
from ml.rl.training.rl_trainer_pytorch import RLTrainer, rescale_torch_tensor
logger = logging.getLogger(__name__)
class SACTrainer(RLTrainer):
"""
Soft Actor-Critic trainer as described in https://arxiv.org/pdf/1801.01290
The actor is assumed to implement reparameterization trick.
"""
    def __init__(
        self,
        q1_network,
        value_network,
        value_network_target,
        actor_network,
        parameters: SACModelParameters,
        q2_network=None,
        min_action_range_tensor_training=None,
        max_action_range_tensor_training=None,
        min_action_range_tensor_serving=None,
        max_action_range_tensor_serving=None,
    ) -> None:
        """Build the SAC trainer: Q network(s), state-value network with a
        soft-updated target copy, and a reparameterized actor, each with its
        own optimizer configured from `parameters.training`.

        Args:
            q2_network: optional second Q network for clipped double-Q
                (the min of both Q values is used as the target).
            The four args below are provided for integration with other
            environments (e.g., Gym):
            min_action_range_tensor_training / max_action_range_tensor_training:
                min / max value of actions at training time
            min_action_range_tensor_serving / max_action_range_tensor_serving:
                min / max value of actions at serving time
        """
        self.minibatch_size = parameters.training.minibatch_size
        super(SACTrainer, self).__init__(
            parameters,
            use_gpu=False,
            additional_feature_types=None,
            gradient_handler=None,
        )
        self.q1_network = q1_network
        self.q1_network_optimizer = self._get_optimizer(
            q1_network, parameters.training.q_network_optimizer
        )
        self.q2_network = q2_network
        if self.q2_network is not None:
            self.q2_network_optimizer = self._get_optimizer(
                q2_network, parameters.training.q_network_optimizer
            )
        self.value_network = value_network
        self.value_network_optimizer = self._get_optimizer(
            value_network, parameters.training.value_network_optimizer
        )
        # Target copy updated via soft (Polyak) updates in train().
        self.value_network_target = value_network_target
        self.actor_network = actor_network
        self.actor_network_optimizer = self._get_optimizer(
            actor_network, parameters.training.actor_network_optimizer
        )
        # Entropy bonus coefficient (alpha in the SAC paper).
        self.entropy_temperature = parameters.training.entropy_temperature
        # If True, treat logged action propensities as uniform (log-prob 0).
        self.logged_action_uniform_prior = (
            parameters.training.logged_action_uniform_prior
        )
        # These ranges are only for Gym tests
        self.min_action_range_tensor_training = min_action_range_tensor_training
        self.max_action_range_tensor_training = max_action_range_tensor_training
        self.min_action_range_tensor_serving = min_action_range_tensor_serving
        self.max_action_range_tensor_serving = max_action_range_tensor_serving
def warm_start_components(self):
components = [
"q1_network",
"q1_network_optimizer",
"value_network",
"value_network_optimizer",
"value_network_target",
"actor_network",
"actor_network_optimizer",
]
if self.q2_network:
components += ["q2_network", "q2_network_optimizer"]
return components
    def train(self, training_batch) -> None:
        """Run one SAC update: value network, Q network(s), then the actor,
        followed by a soft update of the value target network.

        IMPORTANT: the input action here is assumed to be preprocessed to match the
        range of the output of the actor.
        """
        if hasattr(training_batch, "as_parametric_sarsa_training_batch"):
            training_batch = training_batch.as_parametric_sarsa_training_batch()
        learning_input = training_batch.training_input
        self.minibatch += 1
        state = learning_input.state
        action = learning_input.action
        reward = learning_input.reward
        discount = torch.full_like(reward, self.gamma)
        not_done_mask = learning_input.not_terminal
        # Map logged actions from serving range into the actor's output range.
        if self._should_scale_action_in_train():
            action = rlt.FeatureVector(
                rescale_torch_tensor(
                    action.float_features,
                    new_min=self.min_action_range_tensor_training,
                    new_max=self.max_action_range_tensor_training,
                    prev_min=self.min_action_range_tensor_serving,
                    prev_max=self.max_action_range_tensor_serving,
                )
            )
        current_state_action = rlt.StateAction(state=state, action=action)
        q1_value = self.q1_network(current_state_action).q_value
        min_q_value = q1_value
        if self.q2_network:
            q2_value = self.q2_network(current_state_action).q_value
            min_q_value = torch.min(q1_value, q2_value)
        # Use the minimum as target, ensure no gradient going through
        min_q_value = min_q_value.detach()
        #
        # First, optimize value network; minimizing MSE between
        # V(s) & Q(s, a) - log(pi(a|s))
        #
        state_value = self.value_network(state.float_features)  # .q_value
        if self.logged_action_uniform_prior:
            log_prob_a = torch.zeros_like(min_q_value)
            target_value = min_q_value
        else:
            with torch.no_grad():
                log_prob_a = self.actor_network.get_log_prob(
                    state, action.float_features
                )
                # Clamp keeps the entropy term numerically stable.
                log_prob_a = log_prob_a.clamp(-20.0, 20.0)
                target_value = min_q_value - self.entropy_temperature * log_prob_a
        value_loss = F.mse_loss(state_value, target_value)
        self.value_network_optimizer.zero_grad()
        value_loss.backward()
        self.value_network_optimizer.step()
        #
        # Second, optimize Q networks; minimizing MSE between
        # Q(s, a) & r + discount * V'(next_s)
        #
        with torch.no_grad():
            # not_done_mask zeros out bootstrapping at terminal transitions.
            next_state_value = (
                self.value_network_target(learning_input.next_state.float_features)
                * not_done_mask.float()
            )
            if self.minibatch < self.reward_burnin:
                target_q_value = reward
            else:
                target_q_value = reward + discount * next_state_value
        q1_loss = F.mse_loss(q1_value, target_q_value)
        self.q1_network_optimizer.zero_grad()
        q1_loss.backward()
        self.q1_network_optimizer.step()
        if self.q2_network:
            q2_loss = F.mse_loss(q2_value, target_q_value)
            self.q2_network_optimizer.zero_grad()
            q2_loss.backward()
            self.q2_network_optimizer.step()
        #
        # Lastly, optimize the actor; minimizing KL-divergence between action propensity
        # & softmax of value. Due to reparameterization trick, it ends up being
        # log_prob(actor_action) - Q(s, actor_action)
        #
        actor_output = self.actor_network(rlt.StateInput(state=state))
        state_actor_action = rlt.StateAction(
            state=state, action=rlt.FeatureVector(float_features=actor_output.action)
        )
        q1_actor_value = self.q1_network(state_actor_action).q_value
        min_q_actor_value = q1_actor_value
        if self.q2_network:
            q2_actor_value = self.q2_network(state_actor_action).q_value
            min_q_actor_value = torch.min(q1_actor_value, q2_actor_value)
        actor_loss = (
            self.entropy_temperature * actor_output.log_prob - min_q_actor_value
        )
        # Do this in 2 steps so we can log histogram of actor loss
        actor_loss_mean = actor_loss.mean()
        self.actor_network_optimizer.zero_grad()
        actor_loss_mean.backward()
        self.actor_network_optimizer.step()
        if self.minibatch < self.reward_burnin:
            # Reward burnin: force target network
            self._soft_update(self.value_network, self.value_network_target, 1.0)
        else:
            # Use the soft update rule to update both target networks
            self._soft_update(self.value_network, self.value_network_target, self.tau)
        # Logging at the end to schedule all the cuda operations first
        if (
            self.tensorboard_logging_freq is not None
            and self.minibatch % self.tensorboard_logging_freq == 0
        ):
            SummaryWriterContext.add_histogram("q1/logged_state_value", q1_value)
            if self.q2_network:
                SummaryWriterContext.add_histogram("q2/logged_state_value", q2_value)
            SummaryWriterContext.add_histogram("log_prob_a", log_prob_a)
            SummaryWriterContext.add_histogram("value_network/target", target_value)
            SummaryWriterContext.add_histogram(
                "q_network/next_state_value", next_state_value
            )
            SummaryWriterContext.add_histogram(
                "q_network/target_q_value", target_q_value
            )
            SummaryWriterContext.add_histogram(
                "actor/min_q_actor_value", min_q_actor_value
            )
            SummaryWriterContext.add_histogram(
                "actor/action_log_prob", actor_output.log_prob
            )
            SummaryWriterContext.add_histogram("actor/loss", actor_loss)
        self.loss_reporter.report(
            td_loss=q1_loss,
            reward_loss=None,
            logged_rewards=reward,
            model_values_on_logged_actions=q1_value,
            model_propensities=actor_output.log_prob.exp(),
            model_values=min_q_actor_value,
        )
def _should_scale_action_in_train(self):
if (
self.min_action_range_tensor_training is not None
and self.max_action_range_tensor_training is not None
and self.min_action_range_tensor_serving is not None
and self.max_action_range_tensor_serving is not None
):
return True
return False
    def internal_prediction(self, states):
        """ Returns actions output by the actor network for the given states.

        :param states: list of states to produce actions for; converted to a
            tensor of ``self.dtype`` before being fed to the actor network.
        :return: tensor of actions rescaled into the serving range.
            (NOTE(review): docstring says "list of actions" but the code
            returns the rescaled tensor directly — confirm with callers.)
        """
        # Switch to eval mode for inference; restored to train mode below.
        self.actor_network.eval()
        state_examples = torch.from_numpy(np.array(states)).type(self.dtype)
        actions = self.actor_network(
            rlt.StateInput(rlt.FeatureVector(float_features=state_examples))
        )
        # clamp actions to make sure actions are in the range
        clamped_actions = torch.max(
            torch.min(actions.action, self.max_action_range_tensor_training),
            self.min_action_range_tensor_training,
        )
        # Map from the training action range back to the serving range.
        rescaled_actions = rescale_torch_tensor(
            clamped_actions,
            new_min=self.min_action_range_tensor_serving,
            new_max=self.max_action_range_tensor_serving,
            prev_min=self.min_action_range_tensor_training,
            prev_max=self.max_action_range_tensor_training,
        )
        self.actor_network.train()
        return rescaled_actions
| 38.351351 | 88 | 0.656889 |
import logging
from typing import Optional
import ml.rl.types as rlt
import numpy as np
import torch
import torch.nn.functional as F
from ml.rl.tensorboardX import SummaryWriterContext
from ml.rl.thrift.core.ttypes import SACModelParameters
from ml.rl.training._parametric_dqn_predictor import _ParametricDQNPredictor
from ml.rl.training.actor_predictor import ActorPredictor
from ml.rl.training.rl_exporter import ActorExporter, ParametricDQNExporter
from ml.rl.training.rl_trainer_pytorch import RLTrainer, rescale_torch_tensor
logger = logging.getLogger(__name__)
class SACTrainer(RLTrainer):
    """Soft Actor-Critic trainer: one Q-network (optionally two, taking the
    elementwise min), a state-value network with a target copy, and a
    stochastic actor trained with an entropy bonus.
    """
    def __init__(
        self,
        q1_network,
        value_network,
        value_network_target,
        actor_network,
        parameters: SACModelParameters,
        q2_network=None,
        min_action_range_tensor_training=None,
        max_action_range_tensor_training=None,
        min_action_range_tensor_serving=None,
        max_action_range_tensor_serving=None,
    ) -> None:
        """Build optimizers for each provided network and store action-range
        tensors used to rescale actions between serving and training space.

        :param q1_network: first Q-network.
        :param value_network: state-value network (trained directly).
        :param value_network_target: target copy of the value network,
            updated via soft updates in :meth:`train`.
        :param actor_network: stochastic policy network.
        :param parameters: SAC hyper-parameters (optimizers, entropy
            temperature, etc.).
        :param q2_network: optional second Q-network (twin-Q trick).
        :param min_action_range_tensor_training: see
            ``_should_scale_action_in_train`` — all four range tensors must
            be set for action rescaling to be applied.
        """
        # minibatch_size must be set before the base-class __init__ runs.
        self.minibatch_size = parameters.training.minibatch_size
        super(SACTrainer, self).__init__(
            parameters,
            use_gpu=False,
            additional_feature_types=None,
            gradient_handler=None,
        )
        self.q1_network = q1_network
        self.q1_network_optimizer = self._get_optimizer(
            q1_network, parameters.training.q_network_optimizer
        )
        self.q2_network = q2_network
        if self.q2_network is not None:
            # Both Q-networks share the same optimizer configuration.
            self.q2_network_optimizer = self._get_optimizer(
                q2_network, parameters.training.q_network_optimizer
            )
        self.value_network = value_network
        self.value_network_optimizer = self._get_optimizer(
            value_network, parameters.training.value_network_optimizer
        )
        self.value_network_target = value_network_target
        self.actor_network = actor_network
        self.actor_network_optimizer = self._get_optimizer(
            actor_network, parameters.training.actor_network_optimizer
        )
        self.entropy_temperature = parameters.training.entropy_temperature
        self.logged_action_uniform_prior = (
            parameters.training.logged_action_uniform_prior
        )
        self.min_action_range_tensor_training = min_action_range_tensor_training
        self.max_action_range_tensor_training = max_action_range_tensor_training
        self.min_action_range_tensor_serving = min_action_range_tensor_serving
        self.max_action_range_tensor_serving = max_action_range_tensor_serving
    def warm_start_components(self):
        """Return the attribute names to restore on warm start; the twin-Q
        components are included only when a second Q-network exists."""
        components = [
            "q1_network",
            "q1_network_optimizer",
            "value_network",
            "value_network_optimizer",
            "value_network_target",
            "actor_network",
            "actor_network_optimizer",
        ]
        if self.q2_network:
            components += ["q2_network", "q2_network_optimizer"]
        return components
    def train(self, training_batch) -> None:
        """Run one SAC update (value, Q, actor, target) on a minibatch."""
        if hasattr(training_batch, "as_parametric_sarsa_training_batch"):
            training_batch = training_batch.as_parametric_sarsa_training_batch()
        learning_input = training_batch.training_input
        self.minibatch += 1
        state = learning_input.state
        action = learning_input.action
        reward = learning_input.reward
        discount = torch.full_like(reward, self.gamma)
        not_done_mask = learning_input.not_terminal
        if self._should_scale_action_in_train():
            # Logged actions arrive in serving space; map them into the
            # range the networks were trained on.
            action = rlt.FeatureVector(
                rescale_torch_tensor(
                    action.float_features,
                    new_min=self.min_action_range_tensor_training,
                    new_max=self.max_action_range_tensor_training,
                    prev_min=self.min_action_range_tensor_serving,
                    prev_max=self.max_action_range_tensor_serving,
                )
            )
        current_state_action = rlt.StateAction(state=state, action=action)
        q1_value = self.q1_network(current_state_action).q_value
        min_q_value = q1_value
        if self.q2_network:
            q2_value = self.q2_network(current_state_action).q_value
            min_q_value = torch.min(q1_value, q2_value)
        # Detach: the Q-values serve only as a target for the value network.
        min_q_value = min_q_value.detach()
        state_value = self.value_network(state.float_features)
        if self.logged_action_uniform_prior:
            log_prob_a = torch.zeros_like(min_q_value)
            target_value = min_q_value
        else:
            with torch.no_grad():
                log_prob_a = self.actor_network.get_log_prob(
                    state, action.float_features
                )
                # Clamp to avoid extreme log-probabilities blowing up targets.
                log_prob_a = log_prob_a.clamp(-20.0, 20.0)
                target_value = min_q_value - self.entropy_temperature * log_prob_a
        value_loss = F.mse_loss(state_value, target_value)
        self.value_network_optimizer.zero_grad()
        value_loss.backward()
        self.value_network_optimizer.step()
        # Optimize the Q-network(s) toward reward + discounted target value.
        with torch.no_grad():
            next_state_value = (
                self.value_network_target(learning_input.next_state.float_features)
                * not_done_mask.float()
            )
            if self.minibatch < self.reward_burnin:
                # During reward burn-in, bootstrap from reward only.
                target_q_value = reward
            else:
                target_q_value = reward + discount * next_state_value
        q1_loss = F.mse_loss(q1_value, target_q_value)
        self.q1_network_optimizer.zero_grad()
        q1_loss.backward()
        self.q1_network_optimizer.step()
        if self.q2_network:
            q2_loss = F.mse_loss(q2_value, target_q_value)
            self.q2_network_optimizer.zero_grad()
            q2_loss.backward()
            self.q2_network_optimizer.step()
        #
        # Lastly, optimize the actor; minimizing KL-divergence between action propensity
        # & softmax of value. Due to reparameterization trick, it ends up being
        # log_prob(actor_action) - Q(s, actor_action)
        #
        actor_output = self.actor_network(rlt.StateInput(state=state))
        state_actor_action = rlt.StateAction(
            state=state, action=rlt.FeatureVector(float_features=actor_output.action)
        )
        q1_actor_value = self.q1_network(state_actor_action).q_value
        min_q_actor_value = q1_actor_value
        if self.q2_network:
            q2_actor_value = self.q2_network(state_actor_action).q_value
            min_q_actor_value = torch.min(q1_actor_value, q2_actor_value)
        actor_loss = (
            self.entropy_temperature * actor_output.log_prob - min_q_actor_value
        )
        # Do this in 2 steps so we can log histogram of actor loss
        actor_loss_mean = actor_loss.mean()
        self.actor_network_optimizer.zero_grad()
        actor_loss_mean.backward()
        self.actor_network_optimizer.step()
        if self.minibatch < self.reward_burnin:
            # Reward burnin: force target network
            self._soft_update(self.value_network, self.value_network_target, 1.0)
        else:
            # Use the soft update rule to update both target networks
            self._soft_update(self.value_network, self.value_network_target, self.tau)
        # Logging at the end to schedule all the cuda operations first
        if (
            self.tensorboard_logging_freq is not None
            and self.minibatch % self.tensorboard_logging_freq == 0
        ):
            SummaryWriterContext.add_histogram("q1/logged_state_value", q1_value)
            if self.q2_network:
                SummaryWriterContext.add_histogram("q2/logged_state_value", q2_value)
            SummaryWriterContext.add_histogram("log_prob_a", log_prob_a)
            SummaryWriterContext.add_histogram("value_network/target", target_value)
            SummaryWriterContext.add_histogram(
                "q_network/next_state_value", next_state_value
            )
            SummaryWriterContext.add_histogram(
                "q_network/target_q_value", target_q_value
            )
            SummaryWriterContext.add_histogram(
                "actor/min_q_actor_value", min_q_actor_value
            )
            SummaryWriterContext.add_histogram(
                "actor/action_log_prob", actor_output.log_prob
            )
            SummaryWriterContext.add_histogram("actor/loss", actor_loss)
        self.loss_reporter.report(
            td_loss=q1_loss,
            reward_loss=None,
            logged_rewards=reward,
            model_values_on_logged_actions=q1_value,
            model_propensities=actor_output.log_prob.exp(),
            model_values=min_q_actor_value,
        )
    def _should_scale_action_in_train(self):
        """Return True iff all four action-range tensors are configured."""
        if (
            self.min_action_range_tensor_training is not None
            and self.max_action_range_tensor_training is not None
            and self.min_action_range_tensor_serving is not None
            and self.max_action_range_tensor_serving is not None
        ):
            return True
        return False
    def internal_prediction(self, states):
        """Return actions from the actor network for *states*, clamped to the
        training range and rescaled into the serving range."""
        # eval mode for inference; restored to train mode before returning.
        self.actor_network.eval()
        state_examples = torch.from_numpy(np.array(states)).type(self.dtype)
        actions = self.actor_network(
            rlt.StateInput(rlt.FeatureVector(float_features=state_examples))
        )
        # clamp actions to make sure actions are in the range
        clamped_actions = torch.max(
            torch.min(actions.action, self.max_action_range_tensor_training),
            self.min_action_range_tensor_training,
        )
        rescaled_actions = rescale_torch_tensor(
            clamped_actions,
            new_min=self.min_action_range_tensor_serving,
            new_max=self.max_action_range_tensor_serving,
            prev_min=self.min_action_range_tensor_training,
            prev_max=self.max_action_range_tensor_training,
        )
        self.actor_network.train()
        return rescaled_actions
| true | true |
f7f623553cd305bdd22a56c07eddc9471f94fad5 | 18,112 | py | Python | python/graphscope/nx/tests/classes/test_reportviews.py | lnfjpt/GraphScope | 917146f86d8387302a2e1de6963115e7568bf3ee | [
"Apache-2.0"
] | 1 | 2021-12-30T02:55:16.000Z | 2021-12-30T02:55:16.000Z | python/graphscope/nx/tests/classes/test_reportviews.py | lnfjpt/GraphScope | 917146f86d8387302a2e1de6963115e7568bf3ee | [
"Apache-2.0"
] | null | null | null | python/graphscope/nx/tests/classes/test_reportviews.py | lnfjpt/GraphScope | 917146f86d8387302a2e1de6963115e7568bf3ee | [
"Apache-2.0"
] | null | null | null | #
# This file is referred and derived from project NetworkX
#
# which has the following license:
#
# Copyright (C) 2004-2020, NetworkX Developers
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
#
# This file is part of NetworkX.
#
# NetworkX is distributed under a BSD license; see LICENSE.txt for more
# information.
#
# fmt: off
import os
import networkx
import pytest
from networkx.classes.reportviews import NodeDataView
from networkx.classes.tests.test_reportviews import TestDegreeView as _TestDegreeView
from networkx.classes.tests.test_reportviews import \
TestEdgeDataView as _TestEdgeDataView
from networkx.classes.tests.test_reportviews import TestEdgeView as _TestEdgeView
from networkx.classes.tests.test_reportviews import \
TestInDegreeView as _TestInDegreeView
from networkx.classes.tests.test_reportviews import \
TestInEdgeDataView as _TestInEdgeDataView
from networkx.classes.tests.test_reportviews import TestInEdgeView as _TestInEdgeView
from networkx.classes.tests.test_reportviews import \
TestNodeDataView as _TestNodeDataView
from networkx.classes.tests.test_reportviews import TestNodeView as _TestNodeView
from networkx.classes.tests.test_reportviews import \
TestNodeViewSetOps as _TestNodeViewSetOps
from networkx.classes.tests.test_reportviews import \
TestOutDegreeView as _TestOutDegreeView
from networkx.classes.tests.test_reportviews import \
TestOutEdgeDataView as _TestOutEdgeDataView
from networkx.classes.tests.test_reportviews import TestOutEdgeView as _TestOutEdgeView
from graphscope import nx
# fmt:on
# Nodes
@pytest.mark.usefixtures("graphscope_session")
class TestNodeView(_TestNodeView):
    """graphscope variant of networkx's NodeView tests.

    String/repr tests only run on a standalone deployment; iteration order
    over graphscope graphs is not guaranteed, so ordered assertions from the
    upstream test are replaced with sorted comparisons.
    """
    # Consistency fix: every other test class in this file declares
    # setup_class as a classmethod; this one was missing the decorator.
    @classmethod
    def setup_class(cls):
        cls.G = nx.path_graph(9)
        cls.nv = cls.G.nodes  # NodeView(G)
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_str(self):
        assert str(self.nv) == "[0, 1, 2, 3, 4, 5, 6, 7, 8]"
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_repr(self):
        assert repr(self.nv) == "NodeView((0, 1, 2, 3, 4, 5, 6, 7, 8))"
    def test_iter(self):
        nv = self.nv
        nlist = list(self.G)
        # the order of iteration is not the same every time
        assert sorted(nlist) == sorted(nv)
        # odd case where NodeView calls NodeDataView with data=False
        nnv = nv(data=False)
        assert sorted(nlist) == sorted(nnv)
    def test_pickle(self):
        # Pickling graphscope views is unsupported; disable the upstream test.
        pass
@pytest.mark.usefixtures("graphscope_session")
class TestNodeDataView(_TestNodeDataView):
    """graphscope variant of networkx's NodeDataView tests; str/repr/iter
    tests only run on a standalone deployment where node order is stable."""
    # Consistency fix: declare setup_class as a classmethod like the other
    # test classes in this file.
    @classmethod
    def setup_class(cls):
        cls.G = nx.path_graph(9)
        cls.nv = NodeDataView(cls.G)
        cls.ndv = cls.G.nodes.data(True)
        cls.nwv = cls.G.nodes.data("foo")
    def test_pickle(self):
        # Pickling graphscope views is unsupported; disable the upstream test.
        pass
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_str(self):
        msg = str([(n, {}) for n in range(9)])
        assert str(self.ndv) == msg
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_repr(self):
        expected = "NodeDataView((0, 1, 2, 3, 4, 5, 6, 7, 8))"
        assert repr(self.nv) == expected
        expected = (
            "NodeDataView({0: {}, 1: {}, 2: {}, 3: {}, "
            + "4: {}, 5: {}, 6: {}, 7: {}, 8: {}})"
        )
        assert repr(self.ndv) == expected
        expected = (
            "NodeDataView({0: None, 1: None, 2: None, 3: None, 4: None, "
            + "5: None, 6: None, 7: None, 8: None}, data='foo')"
        )
        assert repr(self.nwv) == expected
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_iter(self):
        G = self.G.copy()
        nlist = list(G)
        nv = G.nodes.data()
        ndv = G.nodes.data(True)
        nwv = G.nodes.data("foo")
        for i, (n, d) in enumerate(nv):
            assert nlist[i] == n
            assert d == {}
        inv = iter(nv)
        assert next(inv) == (0, {})
        G.nodes[3]["foo"] = "bar"
        # default
        for n, d in nv:
            if n == 3:
                assert d == {"foo": "bar"}
            else:
                assert d == {}
        # data=True
        for n, d in ndv:
            if n == 3:
                assert d == {"foo": "bar"}
            else:
                assert d == {}
        # data='foo'
        for n, d in nwv:
            if n == 3:
                assert d == "bar"
            else:
                assert d is None
        # data='foo', default=1
        for n, d in G.nodes.data("foo", default=1):
            if n == 3:
                assert d == "bar"
            else:
                assert d == 1
@pytest.mark.usefixtures("graphscope_session")
class TestNodeViewSetOps(_TestNodeViewSetOps):
    """Set-operation tests on NodeView, run against a graphscope graph."""
    @classmethod
    def setup_class(cls):
        # Node 3 carries a "foo" attribute; subclasses project it via data().
        cls.G = nx.path_graph(9)
        cls.G.nodes[3]["foo"] = "bar"
        cls.nv = cls.G.nodes
@pytest.mark.usefixtures("graphscope_session")
class TestNodeDataViewSetOps(TestNodeViewSetOps):
    """Set-operation tests on NodeDataView with data='foo' (no default)."""
    @classmethod
    def setup_class(cls):
        cls.G = nx.path_graph(9)
        cls.G.nodes[3]["foo"] = "bar"
        # Removed leftover debug print of cls.nv from class setup.
        cls.nv = cls.G.nodes.data("foo")
    def n_its(self, nodes):
        """Map nodes to the (node, data) pairs the view should yield."""
        return {(node, "bar" if node == 3 else None) for node in nodes}
class TestNodeDataViewDefaultSetOps(TestNodeDataViewSetOps):
    """Same as TestNodeDataViewSetOps but with default=1 for missing data.

    The graphscope_session fixture marker is inherited from the base class.
    """
    @classmethod
    def setup_class(cls):
        cls.G = nx.path_graph(9)
        cls.G.nodes[3]["foo"] = "bar"
        cls.nv = cls.G.nodes.data("foo", default=1)
    def n_its(self, nodes):
        # Nodes without "foo" fall back to the default value 1.
        return {(node, "bar" if node == 3 else 1) for node in nodes}
# Edges Data View
@pytest.mark.usefixtures("graphscope_session")
class TestEdgeDataView(_TestEdgeDataView):
    """EdgeDataView tests on a graphscope graph; string-representation and
    pickle tests are disabled, and iteration-order assertions dropped."""
    @classmethod
    def setup_class(cls):
        cls.G = nx.path_graph(9)
        cls.eview = networkx.reportviews.EdgeView
    def test_pickle(self):
        pass
    def test_str(self):
        pass
    def test_repr(self):
        pass
    def test_iter(self):
        evr = self.eview(self.G)
        ev = evr()
        # Exhausting the view checks it is iterable without error.
        for u, v in ev:
            pass
        iev = iter(ev)
        # The node order of iteration is not start from 0 every time.
        # assert next(iev) == (0, 1)
        assert iter(ev) != ev
        assert iter(iev) == iev
@pytest.mark.usefixtures("graphscope_session")
class TestOutEdgeDataView(_TestOutEdgeDataView):
    """OutEdgeDataView tests on a directed graphscope graph; str/repr/pickle
    tests disabled, ordered-iteration assertion dropped."""
    @classmethod
    def setup_class(cls):
        cls.G = nx.path_graph(9, create_using=nx.DiGraph())
        cls.eview = networkx.reportviews.OutEdgeView
    def test_pickle(self):
        pass
    def test_str(self):
        pass
    def test_repr(self):
        pass
    def test_iter(self):
        evr = self.eview(self.G)
        ev = evr()
        # Exhausting the view checks it is iterable without error.
        for u, v in ev:
            pass
        iev = iter(ev)
        # The node order of iteration is not start from 0 every time.
        # assert next(iev) == (0, 1)
        assert iter(ev) != ev
        assert iter(iev) == iev
@pytest.mark.usefixtures("graphscope_session")
class TestInEdgeDataView(_TestInEdgeDataView):
    """InEdgeDataView tests on a directed graphscope graph; str/repr/pickle
    tests disabled, first-edge assertion relaxed to a membership check."""
    @classmethod
    def setup_class(cls):
        cls.G = nx.path_graph(9, create_using=nx.DiGraph())
        cls.eview = networkx.reportviews.InEdgeView
    def test_iter(self):
        evr = self.eview(self.G)
        ev = evr()
        for u, v in ev:
            pass
        iev = iter(ev)
        # Iteration may start from either of the first two in-edges.
        assert next(iev) in ((0, 1), (1, 2))
        assert iter(ev) != ev
        assert iter(iev) == iev
    def test_pickle(self):
        pass
    def test_str(self):
        pass
    def test_repr(self):
        pass
@pytest.mark.usefixtures("graphscope_session")
class TestEdgeView(_TestEdgeView):
    """EdgeView tests on a graphscope path graph; order-sensitive tests only
    run on a standalone deployment, and set-op results accept either edge
    orientation since graphscope may report (u, v) or (v, u)."""
    @classmethod
    def setup_class(cls):
        cls.G = nx.path_graph(9)
        cls.eview = networkx.reportviews.EdgeView
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_str(self):
        ev = self.eview(self.G)
        rep = str([(n, n + 1) for n in range(8)])
        assert str(ev) in rep
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_repr(self):
        ev = self.eview(self.G)
        rep = (
            "EdgeView([(0, 1), (1, 2), (2, 3), (3, 4), "
            + "(4, 5), (5, 6), (6, 7), (7, 8)])"
        )
        assert repr(ev) in rep
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_or(self):
        # print("G | H edges:", gnv | hnv)
        ev = self.eview(self.G)
        some_edges = {(0, 1), (1, 0), (0, 2)}
        # Accept both orientations of undirected edges in the union.
        result1 = {(n, n + 1) for n in range(8)}
        result1.update(some_edges)
        result2 = {(n + 1, n) for n in range(8)}
        result2.update(some_edges)
        assert (ev | some_edges) in (result1, result2)
        assert (some_edges | ev) in (result1, result2)
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_xor(self):
        # print("G ^ H edges:", gnv ^ hnv)
        ev = self.eview(self.G)
        some_edges = {(0, 1), (1, 0), (0, 2)}
        if self.G.is_directed():
            result = {(n, n + 1) for n in range(1, 8)}
            result.update({(1, 0), (0, 2)})
            assert ev ^ some_edges == result
        else:
            # Undirected: (0, 1) and (1, 0) are the same edge and cancel out.
            result = {(n, n + 1) for n in range(1, 8)}
            result.update({(0, 2)})
            assert ev ^ some_edges == result
        return
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_iter(self):
        ev = self.eview(self.G)
        for u, v in ev:
            pass
        iev = iter(ev)
        assert next(iev) == (0, 1)
        assert iter(ev) != ev
        assert iter(iev) == iev
    def test_pickle(self):
        pass
@pytest.mark.usefixtures("graphscope_session")
class TestOutEdgeView(_TestOutEdgeView):
    """OutEdgeView tests on a directed graphscope path graph;
    order-sensitive tests only run on a standalone deployment."""
    @classmethod
    def setup_class(cls):
        cls.G = nx.path_graph(9, nx.DiGraph())
        cls.eview = networkx.reportviews.OutEdgeView
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_str(self):
        ev = self.eview(self.G)
        rep = str([(n, n + 1) for n in range(8)])
        assert str(ev) == rep
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_repr(self):
        ev = self.eview(self.G)
        rep = (
            "OutEdgeView([(0, 1), (1, 2), (2, 3), (3, 4), "
            + "(4, 5), (5, 6), (6, 7), (7, 8)])"
        )
        assert repr(ev) == rep
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_iter(self):
        ev = self.eview(self.G)
        for u, v in ev:
            pass
        iev = iter(ev)
        assert next(iev) == (0, 1)
        assert iter(ev) != ev
        assert iter(iev) == iev
    def test_pickle(self):
        pass
@pytest.mark.usefixtures("graphscope_session")
class TestInEdgeView(_TestInEdgeView):
    """InEdgeView tests on a directed graphscope path graph; str/repr tests
    only run standalone, first-edge assertion relaxed to membership."""
    @classmethod
    def setup_class(cls):
        cls.G = nx.path_graph(9, nx.DiGraph())
        cls.eview = networkx.reportviews.InEdgeView
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_str(self):
        ev = self.eview(self.G)
        rep = str([(n, n + 1) for n in range(8)])
        assert str(ev) == rep
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_repr(self):
        ev = self.eview(self.G)
        rep = (
            "InEdgeView([(0, 1), (1, 2), (2, 3), (3, 4), "
            + "(4, 5), (5, 6), (6, 7), (7, 8)])"
        )
        assert repr(ev) == rep
    def test_iter(self):
        ev = self.eview(self.G)
        for u, v in ev:
            pass
        iev = iter(ev)
        # Iteration may start from either of the first two in-edges.
        assert next(iev) in ((0, 1), (1, 2))
        assert iter(ev) != ev
        assert iter(iev) == iev
    def test_pickle(self):
        pass
@pytest.mark.usefixtures("graphscope_session")
class TestDegreeView(_TestDegreeView):
    """DegreeView tests on a graphscope graph; order-sensitive tests only run
    on a standalone deployment, and iteration assertions index by the actual
    node order rather than assuming it starts at node 0."""
    GRAPH = nx.Graph
    dview = networkx.reportviews.DegreeView
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_str(self):
        dv = self.dview(self.G)
        rep = str([(0, 1), (1, 3), (2, 2), (3, 3), (4, 2), (5, 1)])
        assert str(dv) == rep
        dv = self.G.degree()
        assert str(dv) == rep
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_repr(self):
        dv = self.dview(self.G)
        rep = "DegreeView({0: 1, 1: 3, 2: 2, 3: 3, 4: 2, 5: 1})"
        assert repr(dv) == rep
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_iter(self):
        dv = self.dview(self.G)
        nlist = list(self.G)
        for n, d in dv:
            pass
        idv = iter(dv)
        assert iter(dv) != dv
        assert iter(idv) == idv
        assert next(idv) == (nlist[0], dv[nlist[0]])
        assert next(idv) == (nlist[1], dv[nlist[1]])
        # weighted
        dv = self.dview(self.G, weight="foo")
        for n, d in dv:
            pass
        idv = iter(dv)
        assert iter(dv) != dv
        assert iter(idv) == idv
        assert next(idv) == (nlist[0], dv[nlist[0]])
        assert next(idv) == (nlist[1], dv[nlist[1]])
    def test_pickle(self):
        # Pickling graphscope views is unsupported; disable the upstream
        # test. (Removed a leftover debug print of the graph type.)
        pass
class TestDiDegreeView(TestDegreeView):
    """Directed-graph variant; inherits tests (and the graphscope_session
    fixture marker) from TestDegreeView, overriding only the repr check."""
    GRAPH = nx.DiGraph
    dview = networkx.reportviews.DiDegreeView
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_repr(self):
        dv = self.G.degree()
        rep = "DiDegreeView({0: 1, 1: 3, 2: 2, 3: 3, 4: 2, 5: 1})"
        assert repr(dv) == rep
@pytest.mark.usefixtures("graphscope_session")
class TestOutDegreeView(_TestOutDegreeView):
    """OutDegreeView tests on a directed graphscope graph; order-sensitive
    tests only run standalone, iteration indexed by actual node order."""
    GRAPH = nx.DiGraph
    dview = networkx.reportviews.OutDegreeView
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_str(self):
        dv = self.dview(self.G)
        rep = str([(0, 1), (1, 2), (2, 1), (3, 1), (4, 1), (5, 0)])
        assert str(dv) == rep
        dv = self.G.out_degree()
        assert str(dv) == rep
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_repr(self):
        dv = self.G.out_degree()
        rep = "OutDegreeView({0: 1, 1: 2, 2: 1, 3: 1, 4: 1, 5: 0})"
        assert repr(dv) == rep
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_iter(self):
        dv = self.dview(self.G)
        nlist = list(self.G)
        for n, d in dv:
            pass
        idv = iter(dv)
        assert iter(dv) != dv
        assert iter(idv) == idv
        assert next(idv) == (nlist[0], dv[nlist[0]])
        assert next(idv) == (nlist[1], dv[nlist[1]])
        # weighted
        dv = self.dview(self.G, weight="foo")
        for n, d in dv:
            pass
        idv = iter(dv)
        assert iter(dv) != dv
        assert iter(idv) == idv
        assert next(idv) == (nlist[0], dv[nlist[0]])
        assert next(idv) == (nlist[1], dv[nlist[1]])
    def test_pickle(self):
        pass
@pytest.mark.usefixtures("graphscope_session")
class TestInDegreeView(_TestInDegreeView):
    """InDegreeView tests on a directed graphscope graph; order-sensitive
    tests only run standalone, iteration indexed by actual node order."""
    GRAPH = nx.DiGraph
    dview = networkx.reportviews.InDegreeView
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_str(self):
        dv = self.dview(self.G)
        rep = str([(0, 0), (1, 1), (2, 1), (3, 2), (4, 1), (5, 1)])
        assert str(dv) == rep
        dv = self.G.in_degree()
        assert str(dv) == rep
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_repr(self):
        dv = self.G.in_degree()
        rep = "InDegreeView({0: 0, 1: 1, 2: 1, 3: 2, 4: 1, 5: 1})"
        assert repr(dv) == rep
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_iter(self):
        dv = self.dview(self.G)
        nlist = list(self.G)
        for n, d in dv:
            pass
        idv = iter(dv)
        assert iter(dv) != dv
        assert iter(idv) == idv
        assert next(idv) == (nlist[0], dv[nlist[0]])
        assert next(idv) == (nlist[1], dv[nlist[1]])
        # weighted
        dv = self.dview(self.G, weight="foo")
        for n, d in dv:
            pass
        idv = iter(dv)
        assert iter(dv) != dv
        assert iter(idv) == idv
        assert next(idv) == (nlist[0], dv[nlist[0]])
        assert next(idv) == (nlist[1], dv[nlist[1]])
    def test_pickle(self):
        pass
| 29.450407 | 87 | 0.561396 |
import os
import networkx
import pytest
from networkx.classes.reportviews import NodeDataView
from networkx.classes.tests.test_reportviews import TestDegreeView as _TestDegreeView
from networkx.classes.tests.test_reportviews import \
TestEdgeDataView as _TestEdgeDataView
from networkx.classes.tests.test_reportviews import TestEdgeView as _TestEdgeView
from networkx.classes.tests.test_reportviews import \
TestInDegreeView as _TestInDegreeView
from networkx.classes.tests.test_reportviews import \
TestInEdgeDataView as _TestInEdgeDataView
from networkx.classes.tests.test_reportviews import TestInEdgeView as _TestInEdgeView
from networkx.classes.tests.test_reportviews import \
TestNodeDataView as _TestNodeDataView
from networkx.classes.tests.test_reportviews import TestNodeView as _TestNodeView
from networkx.classes.tests.test_reportviews import \
TestNodeViewSetOps as _TestNodeViewSetOps
from networkx.classes.tests.test_reportviews import \
TestOutDegreeView as _TestOutDegreeView
from networkx.classes.tests.test_reportviews import \
TestOutEdgeDataView as _TestOutEdgeDataView
from networkx.classes.tests.test_reportviews import TestOutEdgeView as _TestOutEdgeView
from graphscope import nx
@pytest.mark.usefixtures("graphscope_session")
class TestNodeView(_TestNodeView):
    """NodeView tests on a graphscope graph; str/repr only run standalone,
    iteration compared sorted since node order is not guaranteed."""
    # Consistency fix: declare setup_class as a classmethod like the other
    # test classes in this file.
    @classmethod
    def setup_class(cls):
        cls.G = nx.path_graph(9)
        cls.nv = cls.G.nodes
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_str(self):
        assert str(self.nv) == "[0, 1, 2, 3, 4, 5, 6, 7, 8]"
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_repr(self):
        assert repr(self.nv) == "NodeView((0, 1, 2, 3, 4, 5, 6, 7, 8))"
    def test_iter(self):
        nv = self.nv
        nlist = list(self.G)
        assert sorted(nlist) == sorted(nv)
        nnv = nv(data=False)
        assert sorted(nlist) == sorted(nnv)
    def test_pickle(self):
        pass
@pytest.mark.usefixtures("graphscope_session")
class TestNodeDataView(_TestNodeDataView):
    """NodeDataView tests on a graphscope graph; str/repr/iter tests only
    run on a standalone deployment where node order is stable."""
    # Consistency fix: declare setup_class as a classmethod like the other
    # test classes in this file.
    @classmethod
    def setup_class(cls):
        cls.G = nx.path_graph(9)
        cls.nv = NodeDataView(cls.G)
        cls.ndv = cls.G.nodes.data(True)
        cls.nwv = cls.G.nodes.data("foo")
    def test_pickle(self):
        pass
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_str(self):
        msg = str([(n, {}) for n in range(9)])
        assert str(self.ndv) == msg
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_repr(self):
        expected = "NodeDataView((0, 1, 2, 3, 4, 5, 6, 7, 8))"
        assert repr(self.nv) == expected
        expected = (
            "NodeDataView({0: {}, 1: {}, 2: {}, 3: {}, "
            + "4: {}, 5: {}, 6: {}, 7: {}, 8: {}})"
        )
        assert repr(self.ndv) == expected
        expected = (
            "NodeDataView({0: None, 1: None, 2: None, 3: None, 4: None, "
            + "5: None, 6: None, 7: None, 8: None}, data='foo')"
        )
        assert repr(self.nwv) == expected
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_iter(self):
        G = self.G.copy()
        nlist = list(G)
        nv = G.nodes.data()
        ndv = G.nodes.data(True)
        nwv = G.nodes.data("foo")
        for i, (n, d) in enumerate(nv):
            assert nlist[i] == n
            assert d == {}
        inv = iter(nv)
        assert next(inv) == (0, {})
        G.nodes[3]["foo"] = "bar"
        # default data dict
        for n, d in nv:
            if n == 3:
                assert d == {"foo": "bar"}
            else:
                assert d == {}
        # data=True
        for n, d in ndv:
            if n == 3:
                assert d == {"foo": "bar"}
            else:
                assert d == {}
        # data='foo'
        for n, d in nwv:
            if n == 3:
                assert d == "bar"
            else:
                assert d is None
        # data='foo', default=1
        for n, d in G.nodes.data("foo", default=1):
            if n == 3:
                assert d == "bar"
            else:
                assert d == 1
@pytest.mark.usefixtures("graphscope_session")
class TestNodeViewSetOps(_TestNodeViewSetOps):
    """Set-operation tests on NodeView, run against a graphscope graph."""
    @classmethod
    def setup_class(cls):
        # Node 3 carries a "foo" attribute; subclasses project it via data().
        cls.G = nx.path_graph(9)
        cls.G.nodes[3]["foo"] = "bar"
        cls.nv = cls.G.nodes
@pytest.mark.usefixtures("graphscope_session")
class TestNodeDataViewSetOps(TestNodeViewSetOps):
    """Set-operation tests on NodeDataView with data='foo' (no default)."""
    @classmethod
    def setup_class(cls):
        cls.G = nx.path_graph(9)
        cls.G.nodes[3]["foo"] = "bar"
        # Removed leftover debug print of cls.nv from class setup.
        cls.nv = cls.G.nodes.data("foo")
    def n_its(self, nodes):
        """Map nodes to the (node, data) pairs the view should yield."""
        return {(node, "bar" if node == 3 else None) for node in nodes}
class TestNodeDataViewDefaultSetOps(TestNodeDataViewSetOps):
    """Same as TestNodeDataViewSetOps but with default=1 for missing data.

    The graphscope_session fixture marker is inherited from the base class.
    """
    @classmethod
    def setup_class(cls):
        cls.G = nx.path_graph(9)
        cls.G.nodes[3]["foo"] = "bar"
        cls.nv = cls.G.nodes.data("foo", default=1)
    def n_its(self, nodes):
        # Nodes without "foo" fall back to the default value 1.
        return {(node, "bar" if node == 3 else 1) for node in nodes}
@pytest.mark.usefixtures("graphscope_session")
class TestEdgeDataView(_TestEdgeDataView):
    """EdgeDataView tests on a graphscope graph; str/repr/pickle tests are
    disabled and iteration-order assertions dropped."""
    @classmethod
    def setup_class(cls):
        cls.G = nx.path_graph(9)
        cls.eview = networkx.reportviews.EdgeView
    def test_pickle(self):
        pass
    def test_str(self):
        pass
    def test_repr(self):
        pass
    def test_iter(self):
        evr = self.eview(self.G)
        ev = evr()
        # Exhausting the view checks it is iterable without error.
        for u, v in ev:
            pass
        iev = iter(ev)
        assert iter(ev) != ev
        assert iter(iev) == iev
@pytest.mark.usefixtures("graphscope_session")
class TestOutEdgeDataView(_TestOutEdgeDataView):
    """OutEdgeDataView tests on a directed graphscope graph; str/repr/pickle
    tests disabled, iteration-order assertions dropped."""
    @classmethod
    def setup_class(cls):
        cls.G = nx.path_graph(9, create_using=nx.DiGraph())
        cls.eview = networkx.reportviews.OutEdgeView
    def test_pickle(self):
        pass
    def test_str(self):
        pass
    def test_repr(self):
        pass
    def test_iter(self):
        evr = self.eview(self.G)
        ev = evr()
        # Exhausting the view checks it is iterable without error.
        for u, v in ev:
            pass
        iev = iter(ev)
        assert iter(ev) != ev
        assert iter(iev) == iev
@pytest.mark.usefixtures("graphscope_session")
class TestInEdgeDataView(_TestInEdgeDataView):
    """InEdgeDataView tests on a directed graphscope graph; str/repr/pickle
    tests disabled, first-edge assertion relaxed to a membership check."""
    @classmethod
    def setup_class(cls):
        cls.G = nx.path_graph(9, create_using=nx.DiGraph())
        cls.eview = networkx.reportviews.InEdgeView
    def test_iter(self):
        evr = self.eview(self.G)
        ev = evr()
        for u, v in ev:
            pass
        iev = iter(ev)
        # Iteration may start from either of the first two in-edges.
        assert next(iev) in ((0, 1), (1, 2))
        assert iter(ev) != ev
        assert iter(iev) == iev
    def test_pickle(self):
        pass
    def test_str(self):
        pass
    def test_repr(self):
        pass
@pytest.mark.usefixtures("graphscope_session")
class TestEdgeView(_TestEdgeView):
    """EdgeView tests on a graphscope path graph; order-sensitive tests only
    run on a standalone deployment, and set-op results accept either edge
    orientation since graphscope may report (u, v) or (v, u)."""
    @classmethod
    def setup_class(cls):
        cls.G = nx.path_graph(9)
        cls.eview = networkx.reportviews.EdgeView
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_str(self):
        ev = self.eview(self.G)
        rep = str([(n, n + 1) for n in range(8)])
        assert str(ev) in rep
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_repr(self):
        ev = self.eview(self.G)
        rep = (
            "EdgeView([(0, 1), (1, 2), (2, 3), (3, 4), "
            + "(4, 5), (5, 6), (6, 7), (7, 8)])"
        )
        assert repr(ev) in rep
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_or(self):
        ev = self.eview(self.G)
        some_edges = {(0, 1), (1, 0), (0, 2)}
        # Accept both orientations of undirected edges in the union.
        result1 = {(n, n + 1) for n in range(8)}
        result1.update(some_edges)
        result2 = {(n + 1, n) for n in range(8)}
        result2.update(some_edges)
        assert (ev | some_edges) in (result1, result2)
        assert (some_edges | ev) in (result1, result2)
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_xor(self):
        ev = self.eview(self.G)
        some_edges = {(0, 1), (1, 0), (0, 2)}
        if self.G.is_directed():
            result = {(n, n + 1) for n in range(1, 8)}
            result.update({(1, 0), (0, 2)})
            assert ev ^ some_edges == result
        else:
            # Undirected: (0, 1) and (1, 0) are the same edge and cancel out.
            result = {(n, n + 1) for n in range(1, 8)}
            result.update({(0, 2)})
            assert ev ^ some_edges == result
        return
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_iter(self):
        ev = self.eview(self.G)
        for u, v in ev:
            pass
        iev = iter(ev)
        assert next(iev) == (0, 1)
        assert iter(ev) != ev
        assert iter(iev) == iev
    def test_pickle(self):
        pass
@pytest.mark.usefixtures("graphscope_session")
class TestOutEdgeView(_TestOutEdgeView):
    """OutEdgeView tests on a directed graphscope path graph;
    order-sensitive tests only run on a standalone deployment."""
    @classmethod
    def setup_class(cls):
        cls.G = nx.path_graph(9, nx.DiGraph())
        cls.eview = networkx.reportviews.OutEdgeView
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_str(self):
        ev = self.eview(self.G)
        rep = str([(n, n + 1) for n in range(8)])
        assert str(ev) == rep
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_repr(self):
        ev = self.eview(self.G)
        rep = (
            "OutEdgeView([(0, 1), (1, 2), (2, 3), (3, 4), "
            + "(4, 5), (5, 6), (6, 7), (7, 8)])"
        )
        assert repr(ev) == rep
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_iter(self):
        ev = self.eview(self.G)
        for u, v in ev:
            pass
        iev = iter(ev)
        assert next(iev) == (0, 1)
        assert iter(ev) != ev
        assert iter(iev) == iev
    def test_pickle(self):
        pass
@pytest.mark.usefixtures("graphscope_session")
class TestInEdgeView(_TestInEdgeView):
    """InEdgeView tests on a directed path graph (graphscope backend)."""
    @classmethod
    def setup_class(cls):
        cls.G = nx.path_graph(9, nx.DiGraph())
        cls.eview = networkx.reportviews.InEdgeView
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_str(self):
        ev = self.eview(self.G)
        rep = str([(n, n + 1) for n in range(8)])
        assert str(ev) == rep
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_repr(self):
        ev = self.eview(self.G)
        rep = (
            "InEdgeView([(0, 1), (1, 2), (2, 3), (3, 4), "
            + "(4, 5), (5, 6), (6, 7), (7, 8)])"
        )
        assert repr(ev) == rep
    def test_iter(self):
        ev = self.eview(self.G)
        for u, v in ev:
            pass
        iev = iter(ev)
        # Iteration order of in-edges is not fully pinned on this backend,
        # so either of the first two edges is accepted.
        assert next(iev) in ((0, 1), (1, 2))
        assert iter(ev) != ev
        assert iter(iev) == iev
    def test_pickle(self):
        # Pickling of views is not supported on this backend.
        pass
@pytest.mark.usefixtures("graphscope_session")
class TestDegreeView(_TestDegreeView):
    """DegreeView tests on the graphscope backend.

    String/repr/iteration tests are limited to the standalone deployment
    where node ordering is deterministic.
    """
    GRAPH = nx.Graph
    dview = networkx.reportviews.DegreeView
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_str(self):
        dv = self.dview(self.G)
        rep = str([(0, 1), (1, 3), (2, 2), (3, 3), (4, 2), (5, 1)])
        assert str(dv) == rep
        dv = self.G.degree()
        assert str(dv) == rep
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_repr(self):
        dv = self.dview(self.G)
        rep = "DegreeView({0: 1, 1: 3, 2: 2, 3: 3, 4: 2, 5: 1})"
        assert repr(dv) == rep
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_iter(self):
        # Exercise iteration both without and with a weight argument; the
        # iterator yields (node, degree) pairs in node-list order.
        dv = self.dview(self.G)
        nlist = list(self.G)
        for n, d in dv:
            pass
        idv = iter(dv)
        assert iter(dv) != dv
        assert iter(idv) == idv
        assert next(idv) == (nlist[0], dv[nlist[0]])
        assert next(idv) == (nlist[1], dv[nlist[1]])
        dv = self.dview(self.G, weight="foo")
        for n, d in dv:
            pass
        idv = iter(dv)
        assert iter(dv) != dv
        assert iter(idv) == idv
        assert next(idv) == (nlist[0], dv[nlist[0]])
        assert next(idv) == (nlist[1], dv[nlist[1]])
    def test_pickle(self):
        # Pickling of views is not supported on this backend.
        # (A leftover debug ``print(type(self.G))`` was removed here; it
        # only polluted captured test output.)
        pass
class TestDiDegreeView(TestDegreeView):
    """Same suite as TestDegreeView but for directed graphs/DiDegreeView."""
    GRAPH = nx.DiGraph
    dview = networkx.reportviews.DiDegreeView
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_repr(self):
        dv = self.G.degree()
        rep = "DiDegreeView({0: 1, 1: 3, 2: 2, 3: 3, 4: 2, 5: 1})"
        assert repr(dv) == rep
@pytest.mark.usefixtures("graphscope_session")
class TestOutDegreeView(_TestOutDegreeView):
    """OutDegreeView tests on the graphscope backend."""
    GRAPH = nx.DiGraph
    dview = networkx.reportviews.OutDegreeView
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_str(self):
        dv = self.dview(self.G)
        rep = str([(0, 1), (1, 2), (2, 1), (3, 1), (4, 1), (5, 0)])
        assert str(dv) == rep
        dv = self.G.out_degree()
        assert str(dv) == rep
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_repr(self):
        dv = self.G.out_degree()
        rep = "OutDegreeView({0: 1, 1: 2, 2: 1, 3: 1, 4: 1, 5: 0})"
        assert repr(dv) == rep
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_iter(self):
        # Exercise iteration both without and with a weight argument.
        dv = self.dview(self.G)
        nlist = list(self.G)
        for n, d in dv:
            pass
        idv = iter(dv)
        assert iter(dv) != dv
        assert iter(idv) == idv
        assert next(idv) == (nlist[0], dv[nlist[0]])
        assert next(idv) == (nlist[1], dv[nlist[1]])
        dv = self.dview(self.G, weight="foo")
        for n, d in dv:
            pass
        idv = iter(dv)
        assert iter(dv) != dv
        assert iter(idv) == idv
        assert next(idv) == (nlist[0], dv[nlist[0]])
        assert next(idv) == (nlist[1], dv[nlist[1]])
    def test_pickle(self):
        # Pickling of views is not supported on this backend.
        pass
@pytest.mark.usefixtures("graphscope_session")
class TestInDegreeView(_TestInDegreeView):
    """InDegreeView tests on the graphscope backend."""
    GRAPH = nx.DiGraph
    dview = networkx.reportviews.InDegreeView
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_str(self):
        dv = self.dview(self.G)
        rep = str([(0, 0), (1, 1), (2, 1), (3, 2), (4, 1), (5, 1)])
        assert str(dv) == rep
        dv = self.G.in_degree()
        assert str(dv) == rep
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_repr(self):
        dv = self.G.in_degree()
        rep = "InDegreeView({0: 0, 1: 1, 2: 1, 3: 2, 4: 1, 5: 1})"
        assert repr(dv) == rep
    @pytest.mark.skipif(
        os.environ.get("DEPLOYMENT", None) != "standalone",
        reason="Only need to test on standalone",
    )
    def test_iter(self):
        # Exercise iteration both without and with a weight argument.
        dv = self.dview(self.G)
        nlist = list(self.G)
        for n, d in dv:
            pass
        idv = iter(dv)
        assert iter(dv) != dv
        assert iter(idv) == idv
        assert next(idv) == (nlist[0], dv[nlist[0]])
        assert next(idv) == (nlist[1], dv[nlist[1]])
        dv = self.dview(self.G, weight="foo")
        for n, d in dv:
            pass
        idv = iter(dv)
        assert iter(dv) != dv
        assert iter(idv) == idv
        assert next(idv) == (nlist[0], dv[nlist[0]])
        assert next(idv) == (nlist[1], dv[nlist[1]])
    def test_pickle(self):
        # Pickling of views is not supported on this backend.
        pass
| true | true |
f7f6237fc7a77c65b100bd358433e664552c0ec9 | 12,055 | py | Python | nilearn/_utils/niimg_conversions.py | agramfort/nilearn | f075440e6d97b5bf359bb25e9197dbcbbc26e5f2 | [
"BSD-2-Clause"
] | null | null | null | nilearn/_utils/niimg_conversions.py | agramfort/nilearn | f075440e6d97b5bf359bb25e9197dbcbbc26e5f2 | [
"BSD-2-Clause"
] | null | null | null | nilearn/_utils/niimg_conversions.py | agramfort/nilearn | f075440e6d97b5bf359bb25e9197dbcbbc26e5f2 | [
"BSD-2-Clause"
] | null | null | null | """
Conversion utilities.
"""
# Author: Gael Varoquaux, Alexandre Abraham, Philippe Gervais
# License: simplified BSD
import warnings
import numpy as np
import itertools
from sklearn.externals.joblib import Memory
from .cache_mixin import cache
from .niimg import _safe_get_data, load_niimg, new_img_like
from .compat import _basestring
def _check_fov(img, affine, shape):
    """Tell whether *img* has exactly the given field of view.

    Returns True when the image's spatial shape equals *shape* and its
    affine is numerically close to *affine*, False otherwise.
    """
    checked = check_niimg(img)
    same_shape = checked.shape[:3] == shape
    return same_shape and np.allclose(checked.get_affine(), affine)
def _check_same_fov(img1, img2):
    """Tell whether two images share one field of view (shape + affine)."""
    first = check_niimg(img1)
    second = check_niimg(img2)
    if first.shape[:3] != second.shape[:3]:
        return False
    return np.allclose(first.get_affine(), second.get_affine())
def _index_img(img, index):
    """Extract the 3D volume at *index* from a 4D image.

    Helper for check_niimg_4d; header metadata is carried over.
    """
    frame = img.get_data()[:, :, :, index]
    return new_img_like(img, frame, img.get_affine(), copy_header=True)
def _iter_check_niimg(niimgs, ensure_ndim=None, atleast_4d=False,
                      target_fov=None,
                      memory=Memory(cachedir=None),
                      memory_level=0, verbose=0):
    """Iterate over a list of niimgs and do sanity checks and resampling
    Parameters
    ----------
    niimgs: list of niimg
        Image to iterate over
    ensure_ndim: integer, optional
        If specified, an error is raised if the data does not have the
        required dimension.
    atleast_4d: boolean, optional
        If True, any 3D image is converted to a 4D single scan.
    target_fov: tuple of affine and shape
        If specified, images are resampled to this field of view

    Yields
    ------
    niimg
        Each checked (and possibly resampled) image, one at a time.

    Raises
    ------
    ValueError
        If an image's field of view differs from the reference and no
        target_fov was given.
    TypeError
        Re-raised from check_niimg with the image index prepended.
    """
    ref_fov = None
    resample_to_first_img = False
    # Each yielded item is one dimension lower than the requested total.
    ndim_minus_one = ensure_ndim - 1 if ensure_ndim is not None else None
    if target_fov is not None and target_fov != "first":
        ref_fov = target_fov
    for i, niimg in enumerate(niimgs):
        try:
            niimg = check_niimg(
                niimg, ensure_ndim=ndim_minus_one, atleast_4d=atleast_4d)
            if i == 0:
                # The first image fixes dimensionality and, when no explicit
                # target was given, the reference field of view.
                ndim_minus_one = len(niimg.shape)
                if ref_fov is None:
                    ref_fov = (niimg.get_affine(), niimg.shape[:3])
                    resample_to_first_img = True
            if not _check_fov(niimg, ref_fov[0], ref_fov[1]):
                if target_fov is not None:
                    from nilearn import image  # we avoid a circular import
                    if resample_to_first_img:
                        warnings.warn('Affine is different across subjects.'
                                      ' Realignement on first subject '
                                      'affine forced')
                    # Resampling is the expensive step, hence the caching.
                    niimg = cache(
                        image.resample_img, memory, func_memory_level=2,
                        memory_level=memory_level)(
                            niimg, target_affine=ref_fov[0],
                            target_shape=ref_fov[1])
                else:
                    raise ValueError(
                        "Field of view of image #%d is different from "
                        "reference FOV.\n"
                        "Reference affine:\n%r\nImage affine:\n%r\n"
                        "Reference shape:\n%r\nImage shape:\n%r\n"
                        % (i, ref_fov[0], niimg.get_affine(), ref_fov[1],
                           niimg.shape))
            yield niimg
        except TypeError as exc:
            # Annotate the error with which image failed to load.
            img_name = ''
            if isinstance(niimg, _basestring):
                img_name = " (%s) " % niimg
            exc.args = (('Error encountered while loading image #%d%s'
                         % (i, img_name),) + exc.args)
            raise
def check_niimg(niimg, ensure_ndim=None, atleast_4d=False,
                return_iterator=False):
    """Check that niimg is a proper 3D/4D niimg. Turn filenames into objects.
    Parameters
    ----------
    niimg: Niimg-like object
        See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.
        If niimg is a string, consider it as a path to Nifti image and
        call nibabel.load on it. If it is an object, check if get_data()
        and get_affine() methods are present, raise TypeError otherwise.
    ensure_ndim: integer {3, 4}, optional
        Indicate the dimensionality of the expected niimg. An
        error is raised if the niimg is of another dimensionality.
    atleast_4d: boolean, optional
        Indicates if a 3d image should be turned into a single-scan 4d niimg.
    return_iterator: boolean, optional
        If True and the input is 4D (or iterable), return a generator of
        3D images instead of one concatenated image.
    Returns
    -------
    result: 3D/4D Niimg-like object
        Result can be nibabel.Nifti1Image or the input, as-is. It is guaranteed
        that the returned object has get_data() and get_affine() methods.
    Notes
    -----
    In nilearn, special care has been taken to make image manipulation easy.
    This method is a kind of pre-requisite for any data processing method in
    nilearn because it checks if data have a correct format and loads them if
    necessary.
    Its application is idempotent.
    """
    # in case of an iterable
    if hasattr(niimg, "__iter__") and not isinstance(niimg, _basestring):
        if ensure_ndim == 3:
            raise TypeError(
                "Data must be a 3D Niimg-like object but you provided a %s."
                " See http://nilearn.github.io/building_blocks/"
                "manipulating_mr_images.html#niimg." % type(niimg))
        if return_iterator:
            return _iter_check_niimg(niimg, ensure_ndim=ensure_ndim)
        return concat_niimgs(niimg, ensure_ndim=ensure_ndim)
    # Otherwise, it should be a filename or a SpatialImage, we load it
    niimg = load_niimg(niimg)
    if ensure_ndim == 3 and len(niimg.shape) == 4 and niimg.shape[3] == 1:
        # "squeeze" the image.
        data = _safe_get_data(niimg)
        affine = niimg.get_affine()
        niimg = new_img_like(niimg, data[:, :, :, 0], affine)
    if atleast_4d and len(niimg.shape) == 3:
        # Append a singleton last axis on a view, without copying the data.
        data = niimg.get_data().view()
        data.shape = data.shape + (1, )
        niimg = new_img_like(niimg, data, niimg.get_affine())
    if ensure_ndim is not None and len(niimg.shape) != ensure_ndim:
        raise TypeError(
            "Data must be a %iD Niimg-like object but you provided an "
            "image of shape %s. See "
            "http://nilearn.github.io/building_blocks/"
            "manipulating_mr_images.html#niimg." % (ensure_ndim, niimg.shape))
    if return_iterator:
        # Lazily yield one 3D volume per scan.
        return (_index_img(niimg, i) for i in range(niimg.shape[3]))
    return niimg
def check_niimg_3d(niimg):
    """Load *niimg* and verify that it is a 3D Niimg-like object.

    Parameters
    ----------
    niimg: Niimg-like object
        Either a path to a Nifti image (loaded via nibabel.load) or an
        object exposing get_data() and get_affine(); a TypeError is raised
        otherwise.
        See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.

    Returns
    -------
    result: 3D Niimg-like object
        A nibabel.Nifti1Image or the input unchanged; the returned object
        is guaranteed to expose get_data() and get_affine().

    Notes
    -----
    Idempotent convenience wrapper around check_niimg with ensure_ndim=3;
    it is the usual entry point for nilearn processing functions that
    expect a single 3D volume.
    """
    return check_niimg(niimg, ensure_ndim=3)
def check_niimg_4d(niimg, return_iterator=False):
    """Load *niimg* and verify that it is a 4D Niimg-like object.

    Parameters
    ----------
    niimg: 4D Niimg-like object
        A path to a Nifti image, an object exposing get_data/get_affine,
        or an iterable of 3D niimgs which will be treated as the scans of
        one 4D image.
        See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.
    return_iterator: boolean
        When True, yield the scans one by one as 3D images, which keeps
        memory usage low; when False, return one concatenated 4D image.

    Returns
    -------
    niimg: 4D nibabel.Nifti1Image or iterator of 3D nibabel.Nifti1Image

    Notes
    -----
    Session-level counterpart of check_niimg_3d; simply delegates to
    check_niimg with ensure_ndim=4. Idempotent.
    """
    return check_niimg(niimg, ensure_ndim=4, return_iterator=return_iterator)
def concat_niimgs(niimgs, dtype=np.float32, ensure_ndim=None,
                  memory=Memory(cachedir=None), memory_level=0,
                  auto_resample=False, verbose=0):
    """Concatenate a list of 3D/4D niimgs of varying lengths.
    The niimgs list can contain niftis/paths to images of varying dimensions
    (i.e., 3D or 4D) as well as different 3D shapes and affines, as they
    will be matched to the first image in the list if auto_resample=True.
    Parameters
    ----------
    niimgs: iterable of Niimg-like objects
        See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.
        Niimgs to concatenate.
    dtype: numpy dtype, optional
        the dtype of the returned image
    ensure_ndim: integer, optional
        Indicate the dimensionality of the expected niimg. An
        error is raised if the niimg is of another dimensionality.
    auto_resample: boolean
        Converts all images to the space of the first one.
    verbose: int
        Controls the amount of verbosity (0 means no messages).
    memory : instance of joblib.Memory or string
        Used to cache the resampling process.
        By default, no caching is done. If a string is given, it is the
        path to the caching directory.
    memory_level : integer, optional
        Rough estimator of the amount of memory used by caching. Higher value
        means more memory for caching.
    Returns
    -------
    concatenated: nibabel.Nifti1Image
        A single image.

    Raises
    ------
    TypeError
        If niimgs is empty or an element has the wrong dimensionality.
    """
    target_fov = 'first' if auto_resample else None
    # First niimg is extracted to get information and for new_img_like
    first_niimg = None
    # tee() lets us make two passes over a one-shot iterable: one to sum
    # scan lengths (to pre-allocate), one to actually copy the data.
    iterator, literator = itertools.tee(iter(niimgs))
    try:
        first_niimg = check_niimg(next(literator))
    except StopIteration:
        raise TypeError('Cannot concatenate empty objects')
    if ensure_ndim is None:
        ndim = len(first_niimg.shape)
    else:
        ndim = ensure_ndim - 1
    # Number of scans contributed by each input (1 per 3D image).
    lengths = [first_niimg.shape[-1] if ndim == 4 else 1]
    for niimg in literator:
        # We check the dimensionality of the niimg
        niimg = check_niimg(niimg, ensure_ndim=ndim)
        lengths.append(niimg.shape[-1] if ndim == 4 else 1)
    target_shape = first_niimg.shape[:3]
    data = np.ndarray(target_shape + (sum(lengths), ),
                      order="F", dtype=dtype)
    cur_4d_index = 0
    for index, (size, niimg) in enumerate(zip(lengths, _iter_check_niimg(
            iterator, atleast_4d=True, target_fov=target_fov,
            memory=memory, memory_level=memory_level))):
        if verbose > 0:
            # NOTE(review): _iter_check_niimg always yields image objects,
            # so the string branch below looks unreachable -- confirm.
            if isinstance(niimg, _basestring):
                nii_str = "image " + niimg
            else:
                nii_str = "image #" + str(index)
            print("Concatenating {0}: {1}".format(index + 1, nii_str))
        data[..., cur_4d_index:cur_4d_index + size] = niimg.get_data()
        cur_4d_index += size
    return new_img_like(first_niimg, data, first_niimg.get_affine())
| 36.865443 | 87 | 0.630942 |
import warnings
import numpy as np
import itertools
from sklearn.externals.joblib import Memory
from .cache_mixin import cache
from .niimg import _safe_get_data, load_niimg, new_img_like
from .compat import _basestring
def _check_fov(img, affine, shape):
    """Return True if ``img`` has the given field of view (shape + affine)."""
    img = check_niimg(img)
    return (img.shape[:3] == shape and
            np.allclose(img.get_affine(), affine))
def _check_same_fov(img1, img2):
    """Return True if both images share one field of view (shape + affine)."""
    img1 = check_niimg(img1)
    img2 = check_niimg(img2)
    return (img1.shape[:3] == img2.shape[:3]
            and np.allclose(img1.get_affine(), img2.get_affine()))
def _index_img(img, index):
    """Return the 3D volume at ``index`` of a 4D image (check_niimg_4d helper)."""
    return new_img_like(
        img, img.get_data()[:, :, :, index], img.get_affine(),
        copy_header=True)
def _iter_check_niimg(niimgs, ensure_ndim=None, atleast_4d=False,
                      target_fov=None,
                      memory=Memory(cachedir=None),
                      memory_level=0, verbose=0):
    """Yield each niimg after sanity checks and (optional) resampling.

    The first image fixes the expected dimensionality and, when no
    explicit ``target_fov`` is given, the reference field of view.
    Images with a different FOV are resampled if a target was requested,
    otherwise a ValueError is raised. TypeErrors from loading are
    re-raised with the image index prepended.
    """
    ref_fov = None
    resample_to_first_img = False
    ndim_minus_one = ensure_ndim - 1 if ensure_ndim is not None else None
    if target_fov is not None and target_fov != "first":
        ref_fov = target_fov
    for i, niimg in enumerate(niimgs):
        try:
            niimg = check_niimg(
                niimg, ensure_ndim=ndim_minus_one, atleast_4d=atleast_4d)
            if i == 0:
                ndim_minus_one = len(niimg.shape)
                if ref_fov is None:
                    ref_fov = (niimg.get_affine(), niimg.shape[:3])
                    resample_to_first_img = True
            if not _check_fov(niimg, ref_fov[0], ref_fov[1]):
                if target_fov is not None:
                    # local import avoids a circular dependency
                    from nilearn import image
                    if resample_to_first_img:
                        warnings.warn('Affine is different across subjects.'
                                      ' Realignement on first subject '
                                      'affine forced')
                    niimg = cache(
                        image.resample_img, memory, func_memory_level=2,
                        memory_level=memory_level)(
                            niimg, target_affine=ref_fov[0],
                            target_shape=ref_fov[1])
                else:
                    raise ValueError(
                        "Field of view of image #%d is different from "
                        "reference FOV.\n"
                        "Reference affine:\n%r\nImage affine:\n%r\n"
                        "Reference shape:\n%r\nImage shape:\n%r\n"
                        % (i, ref_fov[0], niimg.get_affine(), ref_fov[1],
                           niimg.shape))
            yield niimg
        except TypeError as exc:
            img_name = ''
            if isinstance(niimg, _basestring):
                img_name = " (%s) " % niimg
            exc.args = (('Error encountered while loading image #%d%s'
                         % (i, img_name),) + exc.args)
            raise
def check_niimg(niimg, ensure_ndim=None, atleast_4d=False,
                return_iterator=False):
    """Check that niimg is a proper 3D/4D niimg; turn filenames into objects.

    Iterables are concatenated (or yielded lazily when
    ``return_iterator=True``); strings are loaded via nibabel. Raises
    TypeError when the dimensionality does not match ``ensure_ndim``.
    Idempotent.
    """
    if hasattr(niimg, "__iter__") and not isinstance(niimg, _basestring):
        if ensure_ndim == 3:
            raise TypeError(
                "Data must be a 3D Niimg-like object but you provided a %s."
                " See http://nilearn.github.io/building_blocks/"
                "manipulating_mr_images.html#niimg." % type(niimg))
        if return_iterator:
            return _iter_check_niimg(niimg, ensure_ndim=ensure_ndim)
        return concat_niimgs(niimg, ensure_ndim=ensure_ndim)
    niimg = load_niimg(niimg)
    if ensure_ndim == 3 and len(niimg.shape) == 4 and niimg.shape[3] == 1:
        # squeeze a single-scan 4D image down to 3D
        data = _safe_get_data(niimg)
        affine = niimg.get_affine()
        niimg = new_img_like(niimg, data[:, :, :, 0], affine)
    if atleast_4d and len(niimg.shape) == 3:
        # append a singleton scan axis on a view (no data copy)
        data = niimg.get_data().view()
        data.shape = data.shape + (1, )
        niimg = new_img_like(niimg, data, niimg.get_affine())
    if ensure_ndim is not None and len(niimg.shape) != ensure_ndim:
        raise TypeError(
            "Data must be a %iD Niimg-like object but you provided an "
            "image of shape %s. See "
            "http://nilearn.github.io/building_blocks/"
            "manipulating_mr_images.html#niimg." % (ensure_ndim, niimg.shape))
    if return_iterator:
        return (_index_img(niimg, i) for i in range(niimg.shape[3]))
    return niimg
def check_niimg_3d(niimg):
    """Check that ``niimg`` is a proper 3D niimg-like object and load it."""
    return check_niimg(niimg, ensure_ndim=3)
def check_niimg_4d(niimg, return_iterator=False):
    """Check that ``niimg`` is a proper 4D niimg-like object and load it."""
    return check_niimg(niimg, ensure_ndim=4, return_iterator=return_iterator)
def concat_niimgs(niimgs, dtype=np.float32, ensure_ndim=None,
                  memory=Memory(cachedir=None), memory_level=0,
                  auto_resample=False, verbose=0):
    """Concatenate an iterable of 3D/4D niimgs into one 4D image.

    When ``auto_resample`` is True, later images are resampled to the
    field of view of the first one. Raises TypeError for an empty input.
    """
    target_fov = 'first' if auto_resample else None
    first_niimg = None
    # two passes over the (possibly one-shot) iterable: count, then copy
    iterator, literator = itertools.tee(iter(niimgs))
    try:
        first_niimg = check_niimg(next(literator))
    except StopIteration:
        raise TypeError('Cannot concatenate empty objects')
    if ensure_ndim is None:
        ndim = len(first_niimg.shape)
    else:
        ndim = ensure_ndim - 1
    lengths = [first_niimg.shape[-1] if ndim == 4 else 1]
    for niimg in literator:
        niimg = check_niimg(niimg, ensure_ndim=ndim)
        lengths.append(niimg.shape[-1] if ndim == 4 else 1)
    target_shape = first_niimg.shape[:3]
    data = np.ndarray(target_shape + (sum(lengths), ),
                      order="F", dtype=dtype)
    cur_4d_index = 0
    for index, (size, niimg) in enumerate(zip(lengths, _iter_check_niimg(
            iterator, atleast_4d=True, target_fov=target_fov,
            memory=memory, memory_level=memory_level))):
        if verbose > 0:
            if isinstance(niimg, _basestring):
                nii_str = "image " + niimg
            else:
                nii_str = "image #" + str(index)
            print("Concatenating {0}: {1}".format(index + 1, nii_str))
        data[..., cur_4d_index:cur_4d_index + size] = niimg.get_data()
        cur_4d_index += size
    return new_img_like(first_niimg, data, first_niimg.get_affine())
| true | true |
f7f6241ec5ac3f557a9cf8ec0c104e3f5bcfc370 | 3,780 | py | Python | advanced_filters/views.py | Bersam/django-advanced-filters | f1bcd00ed0320fd905112b72610143d1fa9a9c33 | [
"MIT"
] | null | null | null | advanced_filters/views.py | Bersam/django-advanced-filters | f1bcd00ed0320fd905112b72610143d1fa9a9c33 | [
"MIT"
] | null | null | null | advanced_filters/views.py | Bersam/django-advanced-filters | f1bcd00ed0320fd905112b72610143d1fa9a9c33 | [
"MIT"
] | 1 | 2017-01-23T19:14:35.000Z | 2017-01-23T19:14:35.000Z | from operator import itemgetter
import logging
try:
from django.apps import apps
get_model = apps.get_model
except ImportError:
# django < 1.7 support
from django.db.models import get_model
from django.conf import settings
try:
from django.contrib.admin.utils import get_fields_from_path
except ImportError:
# django < 1.7 support
from django.contrib.admin.util import get_fields_from_path
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.utils.encoding import force_text
from django.views.generic import View
from braces.views import (CsrfExemptMixin, StaffuserRequiredMixin,
JSONResponseMixin)
logger = logging.getLogger('advanced_filters.views')
class GetFieldChoices(CsrfExemptMixin, StaffuserRequiredMixin,
                      JSONResponseMixin, View):
    """
    A JSONResponse view that accepts a model and a field (path to field),
    resolves and returns the valid choices for that field.

    Model must use the "app.Model" notation.

    If this field is not a simple Integer/CharField with predefined choices,
    all distinct entries in the DB are presented, unless field name is in
    ADVANCED_FILTERS_DISABLE_FOR_FIELDS and limited to display only results
    under ADVANCED_FILTERS_MAX_CHOICES.
    """
    def get(self, request, model=None, field_name=None):
        """Return ``{'results': [{'id': ..., 'text': ...}, ...]}`` as JSON.

        Responds with HTTP 400 when arguments are missing or the
        model/field cannot be resolved.
        """
        # Fix: the original ``model is field_name is None`` guard only
        # caught the case where *both* arguments were None; a single
        # missing argument fell through and crashed (HTTP 500) on
        # ``model.split``. Reject either missing argument up front.
        if model is None or field_name is None:
            return self.render_json_response(
                {'error': "GetFieldChoices view requires 2 arguments"},
                status=400)
        app_label, model_name = model.split('.', 1)
        try:
            model_obj = get_model(app_label, model_name)
            field = get_fields_from_path(model_obj, field_name)[-1]
            model_obj = field.model  # use new model if followed a ForeignKey
        except AttributeError as e:
            logger.debug("Invalid kwargs passed to view: %s", e)
            return self.render_json_response(
                {'error': "No installed app/model: %s" % model}, status=400)
        except (LookupError, FieldDoesNotExist) as e:
            logger.debug("Invalid kwargs passed to view: %s", e)
            return self.render_json_response(
                {'error': force_text(e)}, status=400)

        choices = field.choices
        # if no choices, populate with distinct values from instances
        if not choices:
            choices = []
            disabled = getattr(settings, 'ADVANCED_FILTERS_DISABLE_FOR_FIELDS',
                               tuple())
            max_choices = getattr(settings, 'ADVANCED_FILTERS_MAX_CHOICES', 254)
            if field.name in disabled:
                logger.debug('Skipped lookup of choices for disabled fields')
            elif isinstance(field, (models.BooleanField, models.DateField,
                                    models.TimeField)):
                logger.debug('No choices calculated for field %s of type %s',
                             field, type(field))
            else:
                # the order_by() avoids ambiguity with values() and distinct()
                choices = model_obj.objects.order_by(field.name).values_list(
                    field.name, flat=True).distinct()
                # additional query is ok to avoid fetching too many values
                if choices.count() <= max_choices:
                    # materialize: zip() is a one-shot iterator on Python 3,
                    # so a list keeps the debug log below meaningful
                    choices = list(zip(choices, choices))
                    logger.debug('Choices found for field %s: %s',
                                 field.name, choices)
                else:
                    choices = []

        results = [{'id': c[0], 'text': force_text(c[1])} for c in sorted(
            choices, key=itemgetter(0))]
        return self.render_json_response({'results': results})
| 43.448276 | 80 | 0.626984 | from operator import itemgetter
import logging
try:
from django.apps import apps
get_model = apps.get_model
except ImportError:
from django.db.models import get_model
from django.conf import settings
try:
from django.contrib.admin.utils import get_fields_from_path
except ImportError:
from django.contrib.admin.util import get_fields_from_path
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.utils.encoding import force_text
from django.views.generic import View
from braces.views import (CsrfExemptMixin, StaffuserRequiredMixin,
JSONResponseMixin)
logger = logging.getLogger('advanced_filters.views')
class GetFieldChoices(CsrfExemptMixin, StaffuserRequiredMixin,
                      JSONResponseMixin, View):
    """JSON view returning the valid choices for ``app.Model`` + field path.

    Fields without predefined choices get their distinct DB values,
    unless disabled via ADVANCED_FILTERS_DISABLE_FOR_FIELDS or exceeding
    ADVANCED_FILTERS_MAX_CHOICES.
    """
    def get(self, request, model=None, field_name=None):
        # NOTE(review): this chained identity check only catches the case
        # where BOTH arguments are None; with a single missing argument it
        # falls through and crashes on model.split -- confirm if intended.
        if model is field_name is None:
            return self.render_json_response(
                {'error': "GetFieldChoices view requires 2 arguments"},
                status=400)
        app_label, model_name = model.split('.', 1)
        try:
            model_obj = get_model(app_label, model_name)
            field = get_fields_from_path(model_obj, field_name)[-1]
            # follow a ForeignKey path: the final field's model wins
            model_obj = field.model
        except AttributeError as e:
            logger.debug("Invalid kwargs passed to view: %s", e)
            return self.render_json_response(
                {'error': "No installed app/model: %s" % model}, status=400)
        except (LookupError, FieldDoesNotExist) as e:
            logger.debug("Invalid kwargs passed to view: %s", e)
            return self.render_json_response(
                {'error': force_text(e)}, status=400)
        choices = field.choices
        # no predefined choices: fall back to distinct DB values
        if not choices:
            choices = []
            disabled = getattr(settings, 'ADVANCED_FILTERS_DISABLE_FOR_FIELDS',
                               tuple())
            max_choices = getattr(settings, 'ADVANCED_FILTERS_MAX_CHOICES', 254)
            if field.name in disabled:
                logger.debug('Skipped lookup of choices for disabled fields')
            elif isinstance(field, (models.BooleanField, models.DateField,
                                    models.TimeField)):
                logger.debug('No choices calculated for field %s of type %s',
                             field, type(field))
            else:
                # order_by() avoids ambiguity with values() and distinct()
                choices = model_obj.objects.order_by(field.name).values_list(
                    field.name, flat=True).distinct()
                # count() first so we never fetch an oversized result set
                if choices.count() <= max_choices:
                    choices = zip(choices, choices)
                    logger.debug('Choices found for field %s: %s',
                                 field.name, choices)
                else:
                    choices = []
        results = [{'id': c[0], 'text': force_text(c[1])} for c in sorted(
            choices, key=itemgetter(0))]
        return self.render_json_response({'results': results})
| true | true |
f7f6247b8658fcb1b45d043bf87da4f954719256 | 954 | py | Python | settings.py | jin-cc/bastion-test | 9feecbe927e5446213ab25b4da4a5eca23cf6bae | [
"Apache-2.0"
] | 8 | 2021-12-23T03:33:10.000Z | 2022-03-29T03:29:01.000Z | settings.py | jin-cc/bastion-test | 9feecbe927e5446213ab25b4da4a5eca23cf6bae | [
"Apache-2.0"
] | null | null | null | settings.py | jin-cc/bastion-test | 9feecbe927e5446213ab25b4da4a5eca23cf6bae | [
"Apache-2.0"
] | 6 | 2021-12-23T03:33:05.000Z | 2022-03-03T11:11:23.000Z | # -*- coding: utf-8 -*-
"""
请不要修改该文件
如果你需要对settings里的内容做修改,config/default.py 文件中 添加即可
如有任何疑问,请联系 【蓝鲸助手】
"""
import os
run_env = ""
# V3判断环境的环境变量为BKPAAS_ENVIRONMENT
if 'BKPAAS_ENVIRONMENT' in os.environ:
ENVIRONMENT = os.getenv('BKPAAS_ENVIRONMENT', 'dev')
run_env = "dev"
# V2判断环境的环境变量为BK_ENV
else:
PAAS_V2_ENVIRONMENT = os.environ.get('BK_ENV', 'development')
ENVIRONMENT = {
'development': 'dev',
'testing': 'stag',
'production': 'prod',
}.get(PAAS_V2_ENVIRONMENT)
run_env = ENVIRONMENT
DJANGO_CONF_MODULE = 'config.{env}'.format(env=ENVIRONMENT)
try:
_module = __import__(DJANGO_CONF_MODULE, globals(), locals(), ['*'])
except ImportError as e:
raise ImportError("Could not import config '%s' (Is it on sys.path?): %s"
% (DJANGO_CONF_MODULE, e))
for _setting in dir(_module):
if _setting == _setting.upper():
locals()[_setting] = getattr(_module, _setting)
| 27.257143 | 77 | 0.660377 |
import os
run_env = ""
# PaaS V3 exposes the deployment environment via BKPAAS_ENVIRONMENT.
if 'BKPAAS_ENVIRONMENT' in os.environ:
    ENVIRONMENT = os.getenv('BKPAAS_ENVIRONMENT', 'dev')
    run_env = "dev"
# PaaS V2 exposes the environment via BK_ENV instead.
else:
    PAAS_V2_ENVIRONMENT = os.environ.get('BK_ENV', 'development')
    ENVIRONMENT = {
        'development': 'dev',
        'testing': 'stag',
        'production': 'prod',
    }.get(PAAS_V2_ENVIRONMENT)
    run_env = ENVIRONMENT
DJANGO_CONF_MODULE = 'config.{env}'.format(env=ENVIRONMENT)
try:
    _module = __import__(DJANGO_CONF_MODULE, globals(), locals(), ['*'])
except ImportError as e:
    raise ImportError("Could not import config '%s' (Is it on sys.path?): %s"
                      % (DJANGO_CONF_MODULE, e))
# Re-export every UPPER_CASE setting from the environment-specific module.
for _setting in dir(_module):
    if _setting == _setting.upper():
        locals()[_setting] = getattr(_module, _setting)
| true | true |
f7f624ef07680af5bcea54f212e0eb11ddb2c121 | 2,480 | py | Python | yt/data_objects/tests/test_points.py | kastalpes/yt | b1e197ca84433fbd61eaf44b28ff5cdb37981d4c | [
"BSD-3-Clause-Clear"
] | 1 | 2021-11-29T21:59:06.000Z | 2021-11-29T21:59:06.000Z | yt/data_objects/tests/test_points.py | kastalpes/yt | b1e197ca84433fbd61eaf44b28ff5cdb37981d4c | [
"BSD-3-Clause-Clear"
] | null | null | null | yt/data_objects/tests/test_points.py | kastalpes/yt | b1e197ca84433fbd61eaf44b28ff5cdb37981d4c | [
"BSD-3-Clause-Clear"
] | 1 | 2020-07-17T02:55:58.000Z | 2020-07-17T02:55:58.000Z | import numpy as np
import yt
from yt.testing import \
fake_random_ds, \
assert_equal, \
requires_file
def setup():
    """Tell yt's config that we are running inside the test suite."""
    from yt.config import ytcfg
    ytcfg["yt","__withintesting"] = "True"
def test_point_creation():
    """Points built from an array, a list and a unit-tagged list coincide."""
    ds = fake_random_ds(16)
    p1 = ds.point(ds.domain_center)
    p2 = ds.point([0.5, 0.5, 0.5])
    p3 = ds.point([0.5, 0.5, 0.5]*yt.units.cm)
    # ensure all three points are really at the same position
    for fname in 'xyz':
        assert_equal(p1[fname], p2[fname])
        assert_equal(p1[fname], p3[fname])
def test_domain_point():
    """find_field_values_at_point(s) agree with direct point-object access."""
    nparticles = 3
    ds = fake_random_ds(16, particles=nparticles)
    p = ds.point(ds.domain_center)
    # ensure accessing one field works, store for comparison later
    point_den = p['density']
    point_vel = p['velocity_x']
    ad = ds.all_data()
    ppos = ad['all', 'particle_position']
    fpoint_den = ds.find_field_values_at_point('density', ds.domain_center)
    fpoint_den_vel = ds.find_field_values_at_point(
        ['density', 'velocity_x'], ds.domain_center)
    assert_equal(point_den, fpoint_den)
    assert_equal(point_den, fpoint_den_vel[0])
    assert_equal(point_vel, fpoint_den_vel[1])
    # the plural variant returns one value per queried position
    ppos_den = ds.find_field_values_at_points('density', ppos)
    ppos_vel = ds.find_field_values_at_points('velocity_x', ppos)
    ppos_den_vel = ds.find_field_values_at_points(
        ['density', 'velocity_x'], ppos)
    assert_equal(ppos_den.shape, (nparticles,))
    assert_equal(ppos_vel.shape, (nparticles,))
    assert_equal(len(ppos_den_vel), 2)
    assert_equal(ppos_den_vel[0], ppos_den)
    assert_equal(ppos_den_vel[1], ppos_vel)
g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
@requires_file(g30)
def test_fast_find_field_values_at_points():
    """Spot-check find_field_values_at_points on a real dataset.

    Sampling is slow for large numbers of particles, so only 100
    randomly chosen particle positions are queried.
    """
    ds = yt.load(g30)
    ad = ds.all_data()
    nparticles = 100
    ppos = ad['all', 'particle_position']
    # np.random.random_integers is deprecated (removed in modern NumPy) and
    # its upper bound is INCLUSIVE, so index ``len(ppos)`` could be drawn
    # and raise IndexError. randint's half-open range fixes both issues.
    ppos = ppos[np.random.randint(0, len(ppos), size=nparticles)]
    ppos_den = ds.find_field_values_at_points('density', ppos)
    ppos_vel = ds.find_field_values_at_points('velocity_x', ppos)
    ppos_den_vel = ds.find_field_values_at_points(
        ['density', 'velocity_x'], ppos)
    assert_equal(ppos_den.shape, (nparticles,))
    assert_equal(ppos_vel.shape, (nparticles,))
    assert_equal(len(ppos_den_vel), 2)
    assert_equal(ppos_den_vel[0], ppos_den)
    assert_equal(ppos_den_vel[1], ppos_vel)
| 31.794872 | 75 | 0.7 | import numpy as np
import yt
from yt.testing import \
fake_random_ds, \
assert_equal, \
requires_file
def setup():
    """Tell yt's config that we are running inside the test suite."""
    from yt.config import ytcfg
    ytcfg["yt","__withintesting"] = "True"
def test_point_creation():
    """Points created from the domain center, a bare list, and a
    unit-tagged list must all select the same location."""
    ds = fake_random_ds(16)
    from_center = ds.point(ds.domain_center)
    from_list = ds.point([0.5, 0.5, 0.5])
    from_quantity = ds.point([0.5, 0.5, 0.5]*yt.units.cm)
    # ensure all three points are really at the same position
    for axis in 'xyz':
        assert_equal(from_center[axis], from_list[axis])
        assert_equal(from_center[axis], from_quantity[axis])
def test_domain_point():
    """Field values sampled through a point data object and through the
    ``find_field_values_at_point(s)`` helpers must agree."""
    num_particles = 3
    ds = fake_random_ds(16, particles=num_particles)
    probe = ds.point(ds.domain_center)
    probe_density = probe['density']
    probe_velocity = probe['velocity_x']
    ad = ds.all_data()
    positions = ad['all', 'particle_position']
    # lookups at a single point, one field and a list of fields
    one_field = ds.find_field_values_at_point('density', ds.domain_center)
    two_fields = ds.find_field_values_at_point(
        ['density', 'velocity_x'], ds.domain_center)
    assert_equal(probe_density, one_field)
    assert_equal(probe_density, two_fields[0])
    assert_equal(probe_velocity, two_fields[1])
    # lookups at every particle position at once
    dens = ds.find_field_values_at_points('density', positions)
    vels = ds.find_field_values_at_points('velocity_x', positions)
    both = ds.find_field_values_at_points(
        ['density', 'velocity_x'], positions)
    assert_equal(dens.shape, (num_particles,))
    assert_equal(vels.shape, (num_particles,))
    assert_equal(len(both), 2)
    assert_equal(both[0], dens)
    assert_equal(both[1], vels)
g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
@requires_file(g30)
def test_fast_find_field_values_at_points():
    """Sample 100 random particle positions from a real dataset and check
    that single- and multi-field lookups agree in shape and content."""
    ds = yt.load(g30)
    ad = ds.all_data()
    # sampling keeps the test fast for datasets with many particles
    nparticles = 100
    ppos = ad['all', 'particle_position']
    # Fix: np.random.randint draws from the half-open range [0, len(ppos)),
    # so indices are always valid.  np.random.random_integers included
    # len(ppos) itself (occasional IndexError) and was removed from NumPy.
    ppos = ppos[np.random.randint(0, len(ppos), size=nparticles)]
    ppos_den = ds.find_field_values_at_points('density', ppos)
    ppos_vel = ds.find_field_values_at_points('velocity_x', ppos)
    ppos_den_vel = ds.find_field_values_at_points(
        ['density', 'velocity_x'], ppos)
    assert_equal(ppos_den.shape, (nparticles,))
    assert_equal(ppos_vel.shape, (nparticles,))
    assert_equal(len(ppos_den_vel), 2)
    assert_equal(ppos_den_vel[0], ppos_den)
    assert_equal(ppos_den_vel[1], ppos_vel)
| true | true |
f7f625ae8448b77e1d829ba8037dc8d8f88e4f5c | 9,355 | py | Python | hw_asr/trainer/trainer.py | Mrrrat/asr_project_template | 50d264684d90bc45c59f3e9be5766fabaf090d25 | [
"MIT"
] | null | null | null | hw_asr/trainer/trainer.py | Mrrrat/asr_project_template | 50d264684d90bc45c59f3e9be5766fabaf090d25 | [
"MIT"
] | null | null | null | hw_asr/trainer/trainer.py | Mrrrat/asr_project_template | 50d264684d90bc45c59f3e9be5766fabaf090d25 | [
"MIT"
] | null | null | null | import random
from random import shuffle
import PIL
import torch
import torch.nn.functional as F
from torch.nn.utils import clip_grad_norm_
from torchvision.transforms import ToTensor
from tqdm import tqdm
from hw_asr.base import BaseTrainer
from hw_asr.logger.utils import plot_spectrogram_to_buf
from hw_asr.metric.utils import calc_cer, calc_wer
from hw_asr.utils import inf_loop, MetricTracker
class Trainer(BaseTrainer):
    """
    Trainer class

    Drives the per-epoch training loop: moves batches to the target device,
    runs the model, derives log-probabilities and the loss, optionally skips
    out-of-memory batches, and periodically logs metrics, example
    transcriptions and spectrograms through ``self.writer``.
    """
    def __init__(
            self,
            model,
            criterion,
            metrics,
            optimizer,
            config,
            device,
            data_loader,
            text_encoder,
            valid_data_loader=None,
            lr_scheduler=None,
            len_epoch=None,
            skip_oom=True,
    ):
        """
        :param data_loader: training data loader.
        :param text_encoder: encoder used to decode predictions for logging.
        :param valid_data_loader: optional validation loader (currently
            unused — see the OBO note below).
        :param len_epoch: if given, caps the number of batches per epoch;
            otherwise one epoch is a full pass over ``data_loader``.
        :param skip_oom: when True, out-of-memory batches are skipped
            instead of crashing the run.
        """
        super().__init__(model, criterion, metrics, optimizer, config, device)
        self.skip_oom = skip_oom
        self.text_encoder = text_encoder
        self.config = config
        self.data_loader = data_loader
        if len_epoch is None:
            # epoch-based training
            self.len_epoch = len(self.data_loader)
        else:
            # iteration-based training
            ###OBO
            #self.data_loader = inf_loop(data_loader)
            ###OBO
            self.len_epoch = len_epoch
        self.valid_data_loader = valid_data_loader
        ###OBO
        # NOTE(review): validation is hard-disabled here; the original
        # condition is kept in the trailing comment — confirm this is
        # intentional before relying on validation metrics.
        self.do_validation = False #self.valid_data_loader is not None
        ###OBO
        self.lr_scheduler = lr_scheduler
        self.log_step = 10  # log every `log_step` training batches
        self.train_metrics = MetricTracker(
            "loss", "grad norm", *[m.name for m in self.metrics], writer=self.writer
        )
        self.valid_metrics = MetricTracker(
            "loss", *[m.name for m in self.metrics], writer=self.writer
        )
    @staticmethod
    def move_batch_to_device(batch, device: torch.device):
        """
        Move the tensors the model consumes onto ``device`` (in place)
        and return the batch.
        """
        for tensor_for_gpu in ["spectrogram", "text_encoded"]:
            batch[tensor_for_gpu] = batch[tensor_for_gpu].to(device)
        return batch
    def _clip_grad_norm(self):
        """Clip gradient norm if ``trainer.grad_norm_clip`` is configured."""
        if self.config["trainer"].get("grad_norm_clip", None) is not None:
            clip_grad_norm_(
                self.model.parameters(), self.config["trainer"]["grad_norm_clip"]
            )
    def _train_epoch(self, epoch):
        """
        Training logic for an epoch
        :param epoch: Integer, current training epoch.
        :return: A log that contains average loss and metric in this epoch.
        """
        self.model.train()
        self.train_metrics.reset()
        self.writer.add_scalar("epoch", epoch)
        for batch_idx, batch in enumerate(
                tqdm(self.data_loader, desc="train", total=self.len_epoch)
        ):
            ###OBO
            # manual cap on batches per epoch (replaces the inf_loop approach)
            if batch_idx >= self.len_epoch:
                break
            ###OBO
            try:
                batch = self.process_batch(
                    batch,
                    is_train=True,
                    metrics=self.train_metrics,
                )
            except RuntimeError as e:
                if "out of memory" in str(e) and self.skip_oom:
                    self.logger.warning("OOM on batch. Skipping batch.")
                    for p in self.model.parameters():
                        if p.grad is not None:
                            del p.grad  # free some memory
                    torch.cuda.empty_cache()
                    continue
                else:
                    raise e
            self.train_metrics.update("grad norm", self.get_grad_norm())
            if batch_idx % self.log_step == 0:
                self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)
                self.logger.debug(
                    "Train Epoch: {} {} Loss: {:.6f}".format(
                        epoch, self._progress(batch_idx), batch["loss"].item()
                    )
                )
                self.writer.add_scalar(
                    "learning rate", self.lr_scheduler.get_last_lr()[0]
                )
                self._log_predictions(part="train", **batch)
                self._log_spectrogram(batch["spectrogram"])
                self._log_scalars(self.train_metrics)
        log = self.train_metrics.result()
        if self.do_validation:
            val_log = self._valid_epoch(epoch)
            log.update(**{"val_" + k: v for k, v in val_log.items()})
        return log
    def process_batch(self, batch, is_train: bool, metrics: MetricTracker):
        """Run the model on one batch and update ``metrics``.

        Adds ``logits``, ``log_probs``, ``log_probs_length`` and ``loss``
        to the batch dict; in training mode also runs backward, clips
        gradients, and steps the optimizer and scheduler.  Returns the
        enriched batch.
        """
        batch = self.move_batch_to_device(batch, self.device)
        if is_train:
            self.optimizer.zero_grad()
        outputs = self.model(**batch)
        if type(outputs) is dict:
            batch.update(outputs)
        else:
            batch["logits"] = outputs
        batch["log_probs"] = F.log_softmax(batch["logits"], dim=-1)
        batch["log_probs_length"] = self.model.transform_input_lengths(
            batch["spectrogram_length"]
        )
        batch["loss"] = self.criterion(**batch)
        if is_train:
            batch["loss"].backward()
            self._clip_grad_norm()
            self.optimizer.step()
            if self.lr_scheduler is not None:
                self.lr_scheduler.step()
        metrics.update("loss", batch["loss"].item())
        for met in self.metrics:
            metrics.update(met.name, met(**batch))
        return batch
    def _valid_epoch(self, epoch):
        """
        Validate after training an epoch
        :param epoch: Integer, current training epoch.
        :return: A log that contains information about validation
        """
        self.model.eval()
        self.valid_metrics.reset()
        with torch.no_grad():
            for batch_idx, batch in tqdm(
                    enumerate(self.valid_data_loader),
                    desc="validation",
                    total=len(self.valid_data_loader),
            ):
                batch = self.process_batch(
                    batch,
                    is_train=False,
                    metrics=self.valid_metrics,
                )
            self.writer.set_step(epoch * self.len_epoch, "valid")
            self._log_scalars(self.valid_metrics)
            # `batch` here is the last validation batch from the loop above
            self._log_predictions(part="val", **batch)
            self._log_spectrogram(batch["spectrogram"])
        # add histogram of model parameters to the tensorboard
        for name, p in self.model.named_parameters():
            self.writer.add_histogram(name, p, bins="auto")
        return self.valid_metrics.result()
    def _progress(self, batch_idx):
        """Format a "[current/total (pct%)]" progress string for logging."""
        base = "[{}/{} ({:.0f}%)]"
        if hasattr(self.data_loader, "n_samples"):
            current = batch_idx * self.data_loader.batch_size
            total = self.data_loader.n_samples
        else:
            current = batch_idx
            total = self.len_epoch
        return base.format(current, total, 100.0 * current / total)
    def _log_predictions(
            self,
            text,
            log_probs,
            log_probs_length,
            examples_to_log=20,
            *args,
            **kwargs,
    ):
        """Log up to ``examples_to_log`` random (target, prediction) pairs
        with their WER/CER, plus the raw (non-CTC-collapsed) decodings."""
        # TODO: implement logging of beam search results
        if self.writer is None:
            return
        argmax_inds = log_probs.cpu().argmax(-1)
        argmax_inds = [
            inds[: int(ind_len)]
            for inds, ind_len in zip(argmax_inds, log_probs_length)
        ]
        argmax_texts_raw = [self.text_encoder.decode(inds) for inds in argmax_inds]
        argmax_texts = [self.text_encoder.ctc_decode(inds) for inds in argmax_inds]
        tuples = list(zip(argmax_texts, text, argmax_texts_raw))
        shuffle(tuples)
        to_log_pred = []
        to_log_pred_raw = []
        for pred, target, raw_pred in tuples[:examples_to_log]:
            wer = calc_wer(target, pred) * 100
            cer = calc_cer(target, pred) * 100
            to_log_pred.append(
                f"true: '{target}' | pred: '{pred}' "
                f"| wer: {wer:.2f} | cer: {cer:.2f}"
            )
            to_log_pred_raw.append(f"true: '{target}' | pred: '{raw_pred}'\n")
        self.writer.add_text(f"predictions", "< < < < > > > >".join(to_log_pred))
        self.writer.add_text(
            f"predictions_raw", "< < < < > > > >".join(to_log_pred_raw)
        )
    def _log_spectrogram(self, spectrogram_batch):
        """Render one random spectrogram from the batch as an image."""
        spectrogram = random.choice(spectrogram_batch)
        image = PIL.Image.open(plot_spectrogram_to_buf(spectrogram.cpu().log()))
        self.writer.add_image("spectrogram", ToTensor()(image))
    @torch.no_grad()
    def get_grad_norm(self, norm_type=2):
        """Return the total ``norm_type`` norm over all parameter gradients."""
        parameters = self.model.parameters()
        if isinstance(parameters, torch.Tensor):
            parameters = [parameters]
        parameters = [p for p in parameters if p.grad is not None]
        total_norm = torch.norm(
            torch.stack(
                [torch.norm(p.grad.detach(), norm_type).cpu() for p in parameters]
            ),
            norm_type,
        )
        return total_norm.item()
    def _log_scalars(self, metric_tracker: MetricTracker):
        """Write every tracked metric's running average to the writer."""
        if self.writer is None:
            return
        for metric_name in metric_tracker.keys():
            self.writer.add_scalar(f"{metric_name}", metric_tracker.avg(metric_name))
| 35.570342 | 85 | 0.560128 | import random
from random import shuffle
import PIL
import torch
import torch.nn.functional as F
from torch.nn.utils import clip_grad_norm_
from torchvision.transforms import ToTensor
from tqdm import tqdm
from hw_asr.base import BaseTrainer
from hw_asr.logger.utils import plot_spectrogram_to_buf
from hw_asr.metric.utils import calc_cer, calc_wer
from hw_asr.utils import inf_loop, MetricTracker
class Trainer(BaseTrainer):
    """Per-epoch training loop driver.

    Moves batches to the target device, runs the model, derives
    log-probabilities and the loss, optionally skips out-of-memory batches,
    and periodically logs metrics, example transcriptions and spectrograms
    through ``self.writer``.
    """
    def __init__(
            self,
            model,
            criterion,
            metrics,
            optimizer,
            config,
            device,
            data_loader,
            text_encoder,
            valid_data_loader=None,
            lr_scheduler=None,
            len_epoch=None,
            skip_oom=True,
    ):
        """
        :param data_loader: training data loader.
        :param text_encoder: encoder used to decode predictions for logging.
        :param valid_data_loader: optional validation loader (note that
            ``do_validation`` is hard-coded to False below).
        :param len_epoch: if given, caps the number of batches per epoch;
            otherwise one epoch is a full pass over ``data_loader``.
        :param skip_oom: when True, out-of-memory batches are skipped
            instead of crashing the run.
        """
        super().__init__(model, criterion, metrics, optimizer, config, device)
        self.skip_oom = skip_oom
        self.text_encoder = text_encoder
        self.config = config
        self.data_loader = data_loader
        if len_epoch is None:
            # epoch-based training: one epoch covers the whole loader
            self.len_epoch = len(self.data_loader)
        else:
            # iteration-based training: fixed number of batches per epoch
            self.len_epoch = len_epoch
        self.valid_data_loader = valid_data_loader
        # NOTE(review): validation is hard-disabled — confirm this is
        # intentional before relying on validation metrics.
        self.do_validation = False
        self.lr_scheduler = lr_scheduler
        self.log_step = 10  # log every `log_step` training batches
        self.train_metrics = MetricTracker(
            "loss", "grad norm", *[m.name for m in self.metrics], writer=self.writer
        )
        self.valid_metrics = MetricTracker(
            "loss", *[m.name for m in self.metrics], writer=self.writer
        )
    @staticmethod
    def move_batch_to_device(batch, device: torch.device):
        """Move the tensors the model consumes onto ``device`` (in place)
        and return the batch."""
        for tensor_for_gpu in ["spectrogram", "text_encoded"]:
            batch[tensor_for_gpu] = batch[tensor_for_gpu].to(device)
        return batch
    def _clip_grad_norm(self):
        """Clip gradient norm if ``trainer.grad_norm_clip`` is configured."""
        if self.config["trainer"].get("grad_norm_clip", None) is not None:
            clip_grad_norm_(
                self.model.parameters(), self.config["trainer"]["grad_norm_clip"]
            )
    def _train_epoch(self, epoch):
        """Run one training epoch.

        :param epoch: integer, current training epoch.
        :return: a log dict with the averaged loss and metrics.
        """
        self.model.train()
        self.train_metrics.reset()
        self.writer.add_scalar("epoch", epoch)
        for batch_idx, batch in enumerate(
                tqdm(self.data_loader, desc="train", total=self.len_epoch)
        ):
            # manual cap on the number of batches per epoch
            if batch_idx >= self.len_epoch:
                break
            try:
                batch = self.process_batch(
                    batch,
                    is_train=True,
                    metrics=self.train_metrics,
                )
            except RuntimeError as e:
                if "out of memory" in str(e) and self.skip_oom:
                    self.logger.warning("OOM on batch. Skipping batch.")
                    for p in self.model.parameters():
                        if p.grad is not None:
                            del p.grad  # free some memory before retrying
                    torch.cuda.empty_cache()
                    continue
                else:
                    raise e
            self.train_metrics.update("grad norm", self.get_grad_norm())
            if batch_idx % self.log_step == 0:
                self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)
                self.logger.debug(
                    "Train Epoch: {} {} Loss: {:.6f}".format(
                        epoch, self._progress(batch_idx), batch["loss"].item()
                    )
                )
                self.writer.add_scalar(
                    "learning rate", self.lr_scheduler.get_last_lr()[0]
                )
                self._log_predictions(part="train", **batch)
                self._log_spectrogram(batch["spectrogram"])
                self._log_scalars(self.train_metrics)
        log = self.train_metrics.result()
        if self.do_validation:
            val_log = self._valid_epoch(epoch)
            log.update(**{"val_" + k: v for k, v in val_log.items()})
        return log
    def process_batch(self, batch, is_train: bool, metrics: MetricTracker):
        """Run the model on one batch and update ``metrics``.

        Adds ``logits``, ``log_probs``, ``log_probs_length`` and ``loss``
        to the batch dict; in training mode also runs backward, clips
        gradients, and steps the optimizer and scheduler.  Returns the
        enriched batch.
        """
        batch = self.move_batch_to_device(batch, self.device)
        if is_train:
            self.optimizer.zero_grad()
        outputs = self.model(**batch)
        if type(outputs) is dict:
            batch.update(outputs)
        else:
            batch["logits"] = outputs
        batch["log_probs"] = F.log_softmax(batch["logits"], dim=-1)
        batch["log_probs_length"] = self.model.transform_input_lengths(
            batch["spectrogram_length"]
        )
        batch["loss"] = self.criterion(**batch)
        if is_train:
            batch["loss"].backward()
            self._clip_grad_norm()
            self.optimizer.step()
            if self.lr_scheduler is not None:
                self.lr_scheduler.step()
        metrics.update("loss", batch["loss"].item())
        for met in self.metrics:
            metrics.update(met.name, met(**batch))
        return batch
    def _valid_epoch(self, epoch):
        """Validate after training an epoch.

        :param epoch: integer, current training epoch.
        :return: a log dict with the averaged validation loss and metrics.
        """
        self.model.eval()
        self.valid_metrics.reset()
        with torch.no_grad():
            for batch_idx, batch in tqdm(
                    enumerate(self.valid_data_loader),
                    desc="validation",
                    total=len(self.valid_data_loader),
            ):
                batch = self.process_batch(
                    batch,
                    is_train=False,
                    metrics=self.valid_metrics,
                )
            self.writer.set_step(epoch * self.len_epoch, "valid")
            self._log_scalars(self.valid_metrics)
            # `batch` here is the last validation batch from the loop above
            self._log_predictions(part="val", **batch)
            self._log_spectrogram(batch["spectrogram"])
        # add histograms of model parameters to the writer
        for name, p in self.model.named_parameters():
            self.writer.add_histogram(name, p, bins="auto")
        return self.valid_metrics.result()
    def _progress(self, batch_idx):
        """Format a "[current/total (pct%)]" progress string for logging."""
        base = "[{}/{} ({:.0f}%)]"
        if hasattr(self.data_loader, "n_samples"):
            current = batch_idx * self.data_loader.batch_size
            total = self.data_loader.n_samples
        else:
            current = batch_idx
            total = self.len_epoch
        return base.format(current, total, 100.0 * current / total)
    def _log_predictions(
            self,
            text,
            log_probs,
            log_probs_length,
            examples_to_log=20,
            *args,
            **kwargs,
    ):
        """Log up to ``examples_to_log`` random (target, prediction) pairs
        with their WER/CER, plus the raw (non-CTC-collapsed) decodings."""
        if self.writer is None:
            return
        argmax_inds = log_probs.cpu().argmax(-1)
        argmax_inds = [
            inds[: int(ind_len)]
            for inds, ind_len in zip(argmax_inds, log_probs_length)
        ]
        argmax_texts_raw = [self.text_encoder.decode(inds) for inds in argmax_inds]
        argmax_texts = [self.text_encoder.ctc_decode(inds) for inds in argmax_inds]
        tuples = list(zip(argmax_texts, text, argmax_texts_raw))
        shuffle(tuples)
        to_log_pred = []
        to_log_pred_raw = []
        for pred, target, raw_pred in tuples[:examples_to_log]:
            wer = calc_wer(target, pred) * 100
            cer = calc_cer(target, pred) * 100
            to_log_pred.append(
                f"true: '{target}' | pred: '{pred}' "
                f"| wer: {wer:.2f} | cer: {cer:.2f}"
            )
            to_log_pred_raw.append(f"true: '{target}' | pred: '{raw_pred}'\n")
        self.writer.add_text(f"predictions", "< < < < > > > >".join(to_log_pred))
        self.writer.add_text(
            f"predictions_raw", "< < < < > > > >".join(to_log_pred_raw)
        )
    def _log_spectrogram(self, spectrogram_batch):
        """Render one random spectrogram from the batch as an image."""
        spectrogram = random.choice(spectrogram_batch)
        image = PIL.Image.open(plot_spectrogram_to_buf(spectrogram.cpu().log()))
        self.writer.add_image("spectrogram", ToTensor()(image))
    @torch.no_grad()
    def get_grad_norm(self, norm_type=2):
        """Return the total ``norm_type`` norm over all parameter gradients."""
        parameters = self.model.parameters()
        if isinstance(parameters, torch.Tensor):
            parameters = [parameters]
        parameters = [p for p in parameters if p.grad is not None]
        total_norm = torch.norm(
            torch.stack(
                [torch.norm(p.grad.detach(), norm_type).cpu() for p in parameters]
            ),
            norm_type,
        )
        return total_norm.item()
    def _log_scalars(self, metric_tracker: MetricTracker):
        """Write every tracked metric's running average to the writer."""
        if self.writer is None:
            return
        for metric_name in metric_tracker.keys():
            self.writer.add_scalar(f"{metric_name}", metric_tracker.avg(metric_name))
| true | true |
f7f6277f21241662b4f6f990c90a88a716d3314b | 1,282 | py | Python | samples/interactive-tutorials/search/search_simple_query_test.py | tetiana-karasova/python-retail | b834c1fb16212e59241267e18d38b490e962af7f | [
"Apache-2.0"
] | 1 | 2022-02-11T14:00:31.000Z | 2022-02-11T14:00:31.000Z | samples/interactive-tutorials/search/search_simple_query_test.py | tetiana-karasova/python-retail | b834c1fb16212e59241267e18d38b490e962af7f | [
"Apache-2.0"
] | null | null | null | samples/interactive-tutorials/search/search_simple_query_test.py | tetiana-karasova/python-retail | b834c1fb16212e59241267e18d38b490e962af7f | [
"Apache-2.0"
] | 2 | 2022-01-28T09:53:16.000Z | 2022-02-07T14:27:38.000Z | # Copyright 2022 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Call Retail API to search for a products in a catalog using only search query.
#
import re
import subprocess
from search_simple_query import search
def test_search_simple_query_pass():
    """Run the sample as a script and sanity-check its console output."""
    console = str(subprocess.check_output("python search_simple_query.py", shell=True))
    expected_fragments = (
        ".*search request.*",
        ".*search response.*",
        # the response should list some products
        ".*results.*id.*",
    )
    for fragment in expected_fragments:
        assert re.match(fragment, console)
def test_search_simple_query_response():
    """Call search() in-process and validate the returned products."""
    results = search().results
    assert len(results) == 10
    # the query is expected to surface a hoodie first
    assert re.match(".*Hoodie.*", results[0].product.title)
| 33.736842 | 86 | 0.74415 |
import re
import subprocess
from search_simple_query import search
def test_search_simple_query_pass():
    """Execute the sample as a subprocess and sanity-check what it prints."""
    printed = str(subprocess.check_output("python search_simple_query.py", shell=True))
    for pattern in (".*search request.*", ".*search response.*", ".*results.*id.*"):
        assert re.match(pattern, printed)
def test_search_simple_query_response():
    """The in-process search call should return ten products, the first
    of which is a hoodie."""
    response = search()
    found = response.results
    assert len(found) == 10
    top_product = found[0].product
    assert re.match(".*Hoodie.*", top_product.title)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.