hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
486b981b713197860cdc4557a6d9d1421bd0f079 | 213 | py | Python | mtr/sync/context_processors.py | mtrgroup/django-mtr-import-export | b8e7a6fa1cbc58b9e2126526f418306a7490cb52 | [
"MIT"
] | null | null | null | mtr/sync/context_processors.py | mtrgroup/django-mtr-import-export | b8e7a6fa1cbc58b9e2126526f418306a7490cb52 | [
"MIT"
] | null | null | null | mtr/sync/context_processors.py | mtrgroup/django-mtr-import-export | b8e7a6fa1cbc58b9e2126526f418306a7490cb52 | [
"MIT"
] | null | null | null | from .models import Settings
def settings_global():
    """Return the Settings queryset flagged for display in the quick menu."""
    quick_menu_items = Settings.objects.filter(show_in_quick_menu=True)
    return quick_menu_items
def settings(request):
    """Context processor exposing the quick-menu settings callable as
    ``mtr.sync.settings`` in the template context."""
    sync_context = {'settings': settings_global}
    return {'mtr': {'sync': sync_context}}
| 17.75 | 59 | 0.680751 |
9850eec1067884d1a41034a147e545afdb86423f | 108 | py | Python | app/controller/__init__.py | Ezak91/flask-restx-boilerplate | 918982fd666d29497569bea9911774c2c76e9422 | [
"MIT"
] | null | null | null | app/controller/__init__.py | Ezak91/flask-restx-boilerplate | 918982fd666d29497569bea9911774c2c76e9422 | [
"MIT"
] | null | null | null | app/controller/__init__.py | Ezak91/flask-restx-boilerplate | 918982fd666d29497569bea9911774c2c76e9422 | [
"MIT"
] | null | null | null | from .user import api as user_namespace
from .auth import api as auth_api
from .role import api as role_api
| 27 | 39 | 0.805556 |
3cd4b406e39b5618a36ac18ef408d87d1a4c8105 | 40,942 | py | Python | planning/GamesTests/menu/mysql/connector/connection.py | ChristsY12Robots/Robots | 306f000f85b41731a9e1249847434858319e5415 | [
"MIT"
] | null | null | null | planning/GamesTests/menu/mysql/connector/connection.py | ChristsY12Robots/Robots | 306f000f85b41731a9e1249847434858319e5415 | [
"MIT"
] | null | null | null | planning/GamesTests/menu/mysql/connector/connection.py | ChristsY12Robots/Robots | 306f000f85b41731a9e1249847434858319e5415 | [
"MIT"
] | 1 | 2020-07-22T12:02:03.000Z | 2020-07-22T12:02:03.000Z | # MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2017, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Implementing communication with MySQL servers.
"""
from io import IOBase
import os
import time
from .authentication import get_auth_plugin
from .catch23 import PY2, isstr, UNICODE_TYPES
from .constants import (
ClientFlag, ServerCmd, ServerFlag,
flag_is_set, ShutdownType, NET_BUFFER_LENGTH
)
from . import errors
from .conversion import MySQLConverter
from .cursor import (
CursorBase, MySQLCursor, MySQLCursorRaw,
MySQLCursorBuffered, MySQLCursorBufferedRaw, MySQLCursorPrepared,
MySQLCursorDict, MySQLCursorBufferedDict, MySQLCursorNamedTuple,
MySQLCursorBufferedNamedTuple)
from .network import MySQLUnixSocket, MySQLTCPSocket
from .protocol import MySQLProtocol
from .utils import int4store
from .abstracts import MySQLConnectionAbstract
class MySQLConnection(MySQLConnectionAbstract):
"""Connection to a MySQL Server"""
    def __init__(self, *args, **kwargs):
        """Initialize connection state; connect immediately when kwargs given.

        All keyword arguments are forwarded to connect(). On a connection
        failure the partially opened socket is tidied up via close()
        before the exception propagates.
        """
        # Protocol/socket state is created lazily in _open_connection()
        self._protocol = None
        self._socket = None
        self._handshake = None
        super(MySQLConnection, self).__init__(*args, **kwargs)
        # Defaults below may be overridden by connect(**kwargs)
        self._converter_class = MySQLConverter
        self._client_flags = ClientFlag.get_default()
        self._charset_id = 33
        self._sql_mode = None
        self._time_zone = None
        self._autocommit = False
        self._user = ''
        self._password = ''
        self._database = ''
        self._host = '127.0.0.1'
        self._port = 3306
        self._unix_socket = None
        self._client_host = ''
        self._client_port = 0
        self._ssl = {}
        self._force_ipv6 = False
        self._use_unicode = True
        self._get_warnings = False
        self._raise_on_warnings = False
        self._buffered = False
        self._unread_result = False
        self._have_next_result = False
        self._raw = False
        self._in_transaction = False
        self._prepared_statements = None
        self._ssl_active = False
        self._auth_plugin = None
        self._pool_config_version = None
        if len(kwargs) > 0:
            try:
                self.connect(**kwargs)
            except:
                # Tidy-up underlying socket on failure, then re-raise
                self.close()
                raise
    def _do_handshake(self):
        """Read and parse the server's initial handshake packet.

        Stores the parsed handshake, records the server version, and
        negotiates the SSL and PLUGIN_AUTH client flags from the
        capabilities the server advertised.
        """
        packet = self._socket.recv()
        if packet[4] == 255:
            # Server answered with an Error packet instead of a handshake
            raise errors.get_exception(packet)
        self._handshake = None
        try:
            handshake = self._protocol.parse_handshake(packet)
        except Exception as err:
            raise errors.InterfaceError(
                'Failed parsing handshake; {0}'.format(err))
        self._server_version = self._check_server_version(
            handshake['server_version_original'])
        if not handshake['capabilities'] & ClientFlag.SSL:
            # Server cannot do SSL: drop our SSL flag, or fail hard when
            # certificate verification was explicitly requested
            self._client_flags &= ~ClientFlag.SSL
            if self._ssl.get('verify_cert'):
                raise errors.InterfaceError("SSL is required but the server "
                                            "doesn't support it", errno=2026)
        elif not self._ssl_disabled:
            self._client_flags |= ClientFlag.SSL
        if handshake['capabilities'] & ClientFlag.PLUGIN_AUTH:
            self.set_client_flags([ClientFlag.PLUGIN_AUTH])
        self._handshake = handshake
    def _do_auth(self, username=None, password=None, database=None,
                 client_flags=0, charset=33, ssl_options=None):
        """Authenticate with the MySQL server.

        Authentication happens in two parts: we first send a response to
        the handshake; the server then sends either an AuthSwitchRequest
        or an error packet (handled by _auth_switch_request).

        When the SSL client flag is set, the socket is switched to TLS
        *before* any credentials are sent.

        Raises NotSupportedError when we get the old, insecure password
        reply back. Raises any error coming from MySQL.
        """
        self._ssl_active = False
        if client_flags & ClientFlag.SSL:
            # Announce SSL, then upgrade the plain socket to TLS before
            # the real auth packet goes over the wire
            packet = self._protocol.make_auth_ssl(charset=charset,
                                                  client_flags=client_flags)
            self._socket.send(packet)
            self._socket.switch_to_ssl(ssl_options.get('ca'),
                                       ssl_options.get('cert'),
                                       ssl_options.get('key'),
                                       ssl_options.get('verify_cert') or False,
                                       ssl_options.get('cipher'))
            self._ssl_active = True
        packet = self._protocol.make_auth(
            handshake=self._handshake,
            username=username, password=password, database=database,
            charset=charset, client_flags=client_flags,
            ssl_enabled=self._ssl_active,
            auth_plugin=self._auth_plugin)
        self._socket.send(packet)
        self._auth_switch_request(username, password)
        if not (client_flags & ClientFlag.CONNECT_WITH_DB) and database:
            # Database was not part of the auth exchange; select it now
            self.cmd_init_db(database)
        return True
    def _auth_switch_request(self, username=None, password=None):
        """Handle the second part of authentication.

        Processes the server's reply to our auth response: a bare 0xFE
        packet (old insecure passwords — rejected), an AuthSwitchRequest
        (0xFE with payload), AuthMoreData (0x01), an OK or an Error
        packet.

        Raises NotSupportedError when we get the old, insecure password
        reply back. Raises any error coming from MySQL.
        """
        auth = None
        new_auth_plugin = self._auth_plugin or self._handshake["auth_plugin"]
        packet = self._socket.recv()
        if packet[4] == 254 and len(packet) == 5:
            # Bare 0xFE: server wants the pre-4.1 password scheme
            raise errors.NotSupportedError(
                "Authentication with old (insecure) passwords "
                "is not supported. For more information, lookup "
                "Password Hashing in the latest MySQL manual")
        elif packet[4] == 254:
            # AuthSwitchRequest
            (new_auth_plugin,
             auth_data) = self._protocol.parse_auth_switch_request(packet)
            auth = get_auth_plugin(new_auth_plugin)(
                auth_data, password=password, ssl_enabled=self._ssl_active)
            response = auth.auth_response()
            self._socket.send(response)
            packet = self._socket.recv()
        if packet[4] == 1:
            # AuthMoreData (e.g. caching_sha2_password full authentication)
            auth_data = self._protocol.parse_auth_more_data(packet)
            auth = get_auth_plugin(new_auth_plugin)(
                auth_data, password=password, ssl_enabled=self._ssl_active)
            if new_auth_plugin == "caching_sha2_password":
                response = auth.auth_response()
                if response:
                    self._socket.send(response)
                packet = self._socket.recv()
        if packet[4] == 0:
            return self._handle_ok(packet)
        elif packet[4] == 255:
            raise errors.get_exception(packet)
def _get_connection(self, prtcls=None):
"""Get connection based on configuration
This method will return the appropriated connection object using
the connection parameters.
Returns subclass of MySQLBaseSocket.
"""
# pylint: disable=R0204
conn = None
if self.unix_socket and os.name != 'nt':
conn = MySQLUnixSocket(unix_socket=self.unix_socket)
else:
conn = MySQLTCPSocket(host=self.server_host,
port=self.server_port,
force_ipv6=self._force_ipv6)
# pylint: enable=R0204
conn.set_connection_timeout(self._connection_timeout)
return conn
    def _open_connection(self):
        """Open and authenticate the connection to the MySQL server.

        Creates the protocol handler and socket, performs handshake and
        authentication, installs the converter class and — when the
        COMPRESS client flag is active — switches the socket over to the
        compressed send/recv implementations. Raises on errors.
        """
        self._protocol = MySQLProtocol()
        self._socket = self._get_connection()
        self._socket.open_connection()
        self._do_handshake()
        self._do_auth(self._user, self._password,
                      self._database, self._client_flags, self._charset_id,
                      self._ssl)
        self.set_converter_class(self._converter_class)
        if self._client_flags & ClientFlag.COMPRESS:
            # Swap in the compressed wire-format implementations
            self._socket.recv = self._socket.recv_compressed
            self._socket.send = self._socket.send_compressed
def shutdown(self):
"""Shut down connection to MySQL Server.
"""
if not self._socket:
return
try:
self._socket.shutdown()
except (AttributeError, errors.Error):
pass # Getting an exception would mean we are disconnected.
self._socket = None
    def close(self):
        """Disconnect from the MySQL server.

        Sends QUIT and closes the socket; errors during the exchange are
        ignored since they simply mean the connection is already gone.
        """
        if not self._socket:
            return
        try:
            self.cmd_quit()
            self._socket.close_connection()
        except (AttributeError, errors.Error):
            pass  # Getting an exception would mean we are disconnected.
        self._socket = None
    # Alias kept for API compatibility
    disconnect = close
    def _send_cmd(self, command, argument=None, packet_number=0, packet=None,
                  expect_response=True, compressed_packet_number=0):
        """Send a command to the MySQL server.

        When *packet* is given it is sent as-is and *argument* is
        ignored. *packet_number* should usually remain 0. Commands that
        produce no server reply must pass expect_response=False, in
        which case None is returned instead of the reply packet.

        Raises errors.OperationalError when the connection is gone and
        lets handle_unread_result() raise when a result is still unread.

        Returns a MySQL packet or None.
        """
        self.handle_unread_result()
        try:
            self._socket.send(
                self._protocol.make_command(command, packet or argument),
                packet_number, compressed_packet_number)
        except AttributeError:
            # self._socket is None once we are disconnected
            raise errors.OperationalError("MySQL Connection not available.")
        if not expect_response:
            return None
        return self._socket.recv()
    def _send_data(self, data_file, send_empty_packet=False):
        """Stream a file-like object's contents to the MySQL server.

        The file is sent in chunks slightly under the network buffer
        size. When send_empty_packet is True an extra empty packet is
        sent at the end (required for LOAD DATA LOCAL INFILE).

        Raises ValueError for non file-like input and
        errors.OperationalError when the connection is gone.

        Returns a MySQL packet.
        """
        self.handle_unread_result()
        if not hasattr(data_file, 'read'):
            raise ValueError("expecting a file-like object")
        try:
            # Keep 16 bytes of headroom below the network buffer size
            buf = data_file.read(NET_BUFFER_LENGTH - 16)
            while buf:
                self._socket.send(buf)
                buf = data_file.read(NET_BUFFER_LENGTH - 16)
        except AttributeError:
            raise errors.OperationalError("MySQL Connection not available.")
        if send_empty_packet:
            try:
                self._socket.send(b'')
            except AttributeError:
                raise errors.OperationalError(
                    "MySQL Connection not available.")
        return self._socket.recv()
def _handle_server_status(self, flags):
"""Handle the server flags found in MySQL packets
This method handles the server flags send by MySQL OK and EOF
packets. It, for example, checks whether there exists more result
sets or whether there is an ongoing transaction.
"""
self._have_next_result = flag_is_set(ServerFlag.MORE_RESULTS_EXISTS,
flags)
self._in_transaction = flag_is_set(ServerFlag.STATUS_IN_TRANS, flags)
@property
def in_transaction(self):
"""MySQL session has started a transaction"""
return self._in_transaction
def _handle_ok(self, packet):
"""Handle a MySQL OK packet
This method handles a MySQL OK packet. When the packet is found to
be an Error packet, an error will be raised. If the packet is neither
an OK or an Error packet, errors.InterfaceError will be raised.
Returns a dict()
"""
if packet[4] == 0:
ok_pkt = self._protocol.parse_ok(packet)
self._handle_server_status(ok_pkt['status_flag'])
return ok_pkt
elif packet[4] == 255:
raise errors.get_exception(packet)
raise errors.InterfaceError('Expected OK packet')
def _handle_eof(self, packet):
"""Handle a MySQL EOF packet
This method handles a MySQL EOF packet. When the packet is found to
be an Error packet, an error will be raised. If the packet is neither
and OK or an Error packet, errors.InterfaceError will be raised.
Returns a dict()
"""
if packet[4] == 254:
eof = self._protocol.parse_eof(packet)
self._handle_server_status(eof['status_flag'])
return eof
elif packet[4] == 255:
raise errors.get_exception(packet)
raise errors.InterfaceError('Expected EOF packet')
def _handle_load_data_infile(self, filename):
"""Handle a LOAD DATA INFILE LOCAL request"""
try:
data_file = open(filename, 'rb')
except IOError:
# Send a empty packet to cancel the operation
try:
self._socket.send(b'')
except AttributeError:
raise errors.OperationalError(
"MySQL Connection not available.")
raise errors.InterfaceError(
"File '{0}' could not be read".format(filename))
return self._handle_ok(self._send_data(data_file,
send_empty_packet=True))
    def _handle_result(self, packet):
        """Dispatch on the first reply packet of a command (text protocol).

        OK and EOF packets are parsed and returned as dicts; an Error
        packet raises; header byte 251 announces a LOAD DATA LOCAL INFILE
        request. Any other packet carries a column count: the column
        definitions of a text result set are then read, leaving the rows
        unread (unread_result is set).

        Returns a dict(); for a result set it has 'columns' and 'eof'
        keys.
        """
        if not packet or len(packet) < 4:
            raise errors.InterfaceError('Empty response')
        elif packet[4] == 0:
            return self._handle_ok(packet)
        elif packet[4] == 251:
            # Server requests a local file (LOAD DATA LOCAL INFILE)
            if PY2:
                filename = str(packet[5:])
            else:
                filename = packet[5:].decode()
            return self._handle_load_data_infile(filename)
        elif packet[4] == 254:
            return self._handle_eof(packet)
        elif packet[4] == 255:
            raise errors.get_exception(packet)
        # We have a text result set
        column_count = self._protocol.parse_column_count(packet)
        if not column_count or not isinstance(column_count, int):
            raise errors.InterfaceError('Illegal result set.')
        columns = [None,] * column_count
        for i in range(0, column_count):
            columns[i] = self._protocol.parse_column(
                self._socket.recv(), self.python_charset)
        # Column list is terminated by an EOF packet; rows stay on the wire
        eof = self._handle_eof(self._socket.recv())
        self.unread_result = True
        return {'columns': columns, 'eof': eof}
def get_row(self, binary=False, columns=None):
"""Get the next rows returned by the MySQL server
This method gets one row from the result set after sending, for
example, the query command. The result is a tuple consisting of the
row and the EOF packet.
If no row was available in the result set, the row data will be None.
Returns a tuple.
"""
(rows, eof) = self.get_rows(count=1, binary=binary, columns=columns)
if len(rows):
return (rows[0], eof)
return (None, eof)
    def get_rows(self, count=None, binary=False, columns=None):
        """Read rows of the pending result set from the server.

        Reads up to *count* rows (all remaining when count is None) using
        the text or binary protocol. The trailing OK/EOF packet updates
        the server status flags, after which unread_result is cleared;
        it is also cleared when reading fails.

        Raises errors.InternalError when no result set is pending.

        Returns a tuple (rows, eof_packet_info).
        """
        if not self.unread_result:
            raise errors.InternalError("No result set available.")
        try:
            if binary:
                rows = self._protocol.read_binary_result(
                    self._socket, columns, count)
            else:
                rows = self._protocol.read_text_result(self._socket,
                                                       self._server_version,
                                                       count=count)
        except errors.Error as err:
            self.unread_result = False
            raise err
        if rows[-1] is not None:
            # Trailing packet is OK ('status_flag') or EOF ('server_status')
            row = rows[-1] # OK or EOF
            self._handle_server_status(row['status_flag'] if 'status_flag' in
                                       row else row['server_status'])
            self.unread_result = False
        return rows
def consume_results(self):
"""Consume results
"""
if self.unread_result:
self.get_rows()
def cmd_init_db(self, database):
"""Change the current database
This method changes the current (default) database by sending the
INIT_DB command. The result is a dictionary containing the OK packet
information.
Returns a dict()
"""
return self._handle_ok(
self._send_cmd(ServerCmd.INIT_DB, database.encode('utf-8')))
def cmd_query(self, query, raw=False, buffered=False, raw_as_string=False):
"""Send a query to the MySQL server
This method send the query to the MySQL server and returns the result.
If there was a text result, a tuple will be returned consisting of
the number of columns and a list containing information about these
columns.
When the query doesn't return a text result, the OK or EOF packet
information as dictionary will be returned. In case the result was
an error, exception errors.Error will be raised.
Returns a tuple()
"""
if not isinstance(query, bytes):
query = query.encode('utf-8')
result = self._handle_result(self._send_cmd(ServerCmd.QUERY, query))
if self._have_next_result:
raise errors.InterfaceError(
'Use cmd_query_iter for statements with multiple queries.')
return result
    def cmd_query_iter(self, statements):
        """Send one or more statements and yield each result.

        Generator counterpart of cmd_query(): the first result is handled
        immediately, further results are read while the server signals
        MORE_RESULTS_EXISTS. Each result's rows must be consumed (e.g.
        via get_rows()) before advancing the generator.

        Example:
            statement = 'SELECT 1; INSERT INTO t1 VALUES (); SELECT 2'
            for result in cnx.cmd_query_iter(statement):
                if 'columns' in result:
                    rows = cnx.get_rows()

        Returns a generator.
        """
        if not isinstance(statements, bytearray):
            if isstr(statements) and isinstance(statements, UNICODE_TYPES):
                statements = statements.encode('utf8')
            statements = bytearray(statements)
        # Handle the first query result
        yield self._handle_result(self._send_cmd(ServerCmd.QUERY, statements))
        # Handle next results, if any
        while self._have_next_result:
            self.handle_unread_result()
            yield self._handle_result(self._socket.recv())
def cmd_refresh(self, options):
"""Send the Refresh command to the MySQL server
This method sends the Refresh command to the MySQL server. The options
argument should be a bitwise value using constants.RefreshOption.
Usage example:
RefreshOption = mysql.connector.RefreshOption
refresh = RefreshOption.LOG | RefreshOption.THREADS
cnx.cmd_refresh(refresh)
The result is a dictionary with the OK packet information.
Returns a dict()
"""
return self._handle_ok(
self._send_cmd(ServerCmd.REFRESH, int4store(options)))
    def cmd_quit(self):
        """Send the QUIT command, closing the connection server-side.

        The server sends no reply to QUIT, so the raw packet that was
        sent is returned instead.

        Returns a str()
        """
        self.handle_unread_result()
        packet = self._protocol.make_command(ServerCmd.QUIT)
        self._socket.send(packet, 0, 0)
        return packet
def cmd_shutdown(self, shutdown_type=None):
"""Shut down the MySQL Server
This method sends the SHUTDOWN command to the MySQL server and is only
possible if the current user has SUPER privileges. The result is a
dictionary containing the OK packet information.
Note: Most applications and scripts do not the SUPER privilege.
Returns a dict()
"""
if shutdown_type:
if not ShutdownType.get_info(shutdown_type):
raise errors.InterfaceError("Invalid shutdown type")
atype = shutdown_type
else:
atype = ShutdownType.SHUTDOWN_DEFAULT
return self._handle_eof(self._send_cmd(ServerCmd.SHUTDOWN,
int4store(atype)))
    def cmd_statistics(self):
        """Fetch server statistics.

        STATISTICS replies with a plain text blob rather than a regular
        result packet, so it is sent and parsed directly instead of
        going through _send_cmd().

        Returns a dict() with various statistical information.
        """
        self.handle_unread_result()
        packet = self._protocol.make_command(ServerCmd.STATISTICS)
        self._socket.send(packet, 0, 0)
        return self._protocol.parse_statistics(self._socket.recv())
def cmd_process_kill(self, mysql_pid):
"""Kill a MySQL process
This method send the PROCESS_KILL command to the server along with
the process ID. The result is a dictionary with the OK packet
information.
Returns a dict()
"""
return self._handle_ok(
self._send_cmd(ServerCmd.PROCESS_KILL, int4store(mysql_pid)))
    def cmd_debug(self):
        """Ask the server to dump debug info to its error log.

        Requires the MySQL user to have the SUPER privilege.

        Returns a dict() with the EOF packet information.
        """
        return self._handle_eof(self._send_cmd(ServerCmd.DEBUG))
    def cmd_ping(self):
        """Send PING to check that the connection is still valid.

        Raises when the connection is not available; see ping() and
        is_connected() for the non-raising wrappers.

        Returns a dict() with the OK packet information.
        """
        return self._handle_ok(self._send_cmd(ServerCmd.PING))
def cmd_change_user(self, username='', password='', database='',
charset=33):
"""Change the current logged in user
This method allows to change the current logged in user information.
The result is a dictionary with OK packet information.
Returns a dict()
"""
self.handle_unread_result()
if self._compress:
raise errors.NotSupportedError("Change user is not supported with "
"compression.")
packet = self._protocol.make_change_user(
handshake=self._handshake,
username=username, password=password, database=database,
charset=charset, client_flags=self._client_flags,
ssl_enabled=self._ssl_active,
auth_plugin=self._auth_plugin)
self._socket.send(packet, 0, 0)
ok_packet = self._auth_switch_request(username, password)
try:
if not (self._client_flags & ClientFlag.CONNECT_WITH_DB) \
and database:
self.cmd_init_db(database)
except:
raise
self._charset_id = charset
self._post_connection()
return ok_packet
@property
def database(self):
"""Get the current database"""
return self.info_query("SELECT DATABASE()")[0]
    @database.setter
    def database(self, value): # pylint: disable=W0221
        """Select *value* as the current database via a USE statement.

        NOTE(review): value is interpolated into the SQL unquoted and
        unescaped — only trusted identifiers should be passed here.
        """
        self.cmd_query("USE %s" % value)
def is_connected(self):
"""Reports whether the connection to MySQL Server is available
This method checks whether the connection to MySQL is available.
It is similar to ping(), but unlike the ping()-method, either True
or False is returned and no exception is raised.
Returns True or False.
"""
try:
self.cmd_ping()
except:
return False # This method does not raise
return True
    def reset_session(self, user_variables=None, session_variables=None):
        """Clear the state of the current session.

        Uses COM_RESET_CONNECTION when the server supports it (MySQL
        5.7.3+) and falls back to re-authenticating via cmd_change_user
        otherwise. Afterwards the optional *user_variables* and
        *session_variables* dicts are applied via SET statements.

        Raises OperationalError if not connected, InternalError if there
        are unread results and InterfaceError on errors.
        """
        if not self.is_connected():
            raise errors.OperationalError("MySQL Connection not available.")
        try:
            self.cmd_reset_connection()
        except errors.NotSupportedError:
            # Server too old for COM_RESET_CONNECTION: re-authenticate
            self.cmd_change_user(self._user, self._password,
                                 self._database, self._charset_id)
        cur = self.cursor()
        if user_variables:
            for key, value in user_variables.items():
                cur.execute("SET @`{0}` = %s".format(key), (value,))
        if session_variables:
            for key, value in session_variables.items():
                cur.execute("SET SESSION `{0}` = %s".format(key), (value,))
def reconnect(self, attempts=1, delay=0):
"""Attempt to reconnect to the MySQL server
The argument attempts should be the number of times a reconnect
is tried. The delay argument is the number of seconds to wait between
each retry.
You may want to set the number of attempts higher and use delay when
you expect the MySQL server to be down for maintenance or when you
expect the network to be temporary unavailable.
Raises InterfaceError on errors.
"""
counter = 0
while counter != attempts:
counter = counter + 1
try:
self.disconnect()
self.connect()
if self.is_connected():
break
except Exception as err: # pylint: disable=W0703
if counter == attempts:
msg = "Can not reconnect to MySQL after {0} "\
"attempt(s): {1}".format(attempts, str(err))
raise errors.InterfaceError(msg)
if delay > 0:
time.sleep(delay)
def ping(self, reconnect=False, attempts=1, delay=0):
"""Check availability of the MySQL server
When reconnect is set to True, one or more attempts are made to try
to reconnect to the MySQL server using the reconnect()-method.
delay is the number of seconds to wait between each retry.
When the connection is not available, an InterfaceError is raised. Use
the is_connected()-method if you just want to check the connection
without raising an error.
Raises InterfaceError on errors.
"""
try:
self.cmd_ping()
except:
if reconnect:
self.reconnect(attempts=attempts, delay=delay)
else:
raise errors.InterfaceError("Connection to MySQL is"
" not available.")
    @property
    def connection_id(self):
        """MySQL connection (thread) ID, or None when the handshake did
        not provide one."""
        try:
            return self._handshake['server_threadid']
        except KeyError:
            return None
    def cursor(self, buffered=None, raw=None, prepared=None, cursor_class=None,
               dictionary=None, named_tuple=None):
        """Instantiate and return a cursor for this connection.

        The cursor flavour is selected from the keyword arguments
        (falling back to the connection-wide buffered/raw settings), or
        a custom *cursor_class* subclassing CursorBase may be supplied.
        Dictionary and named-tuple cursors exist buffered and unbuffered
        but not raw.

        Raises ProgrammingError when cursor_class is not a subclass of
        CursorBase, ValueError when the requested combination is
        unsupported and OperationalError when not connected.

        Returns a cursor object.
        """
        self.handle_unread_result()
        if not self.is_connected():
            raise errors.OperationalError("MySQL Connection not available.")
        if cursor_class is not None:
            if not issubclass(cursor_class, CursorBase):
                raise errors.ProgrammingError(
                    "Cursor class needs be to subclass of cursor.CursorBase")
            return (cursor_class)(self)
        buffered = buffered if buffered is not None else self._buffered
        raw = raw if raw is not None else self._raw
        # Encode the requested options as a bitmask used as lookup key:
        # 1=buffered, 2=raw, 4=dictionary, 8=named_tuple, 16=prepared
        cursor_type = 0
        if buffered is True:
            cursor_type |= 1
        if raw is True:
            cursor_type |= 2
        if dictionary is True:
            cursor_type |= 4
        if named_tuple is True:
            cursor_type |= 8
        if prepared is True:
            cursor_type |= 16
        # Only these combinations are implemented; others raise below
        types = {
            0: MySQLCursor, # 0
            1: MySQLCursorBuffered,
            2: MySQLCursorRaw,
            3: MySQLCursorBufferedRaw,
            4: MySQLCursorDict,
            5: MySQLCursorBufferedDict,
            8: MySQLCursorNamedTuple,
            9: MySQLCursorBufferedNamedTuple,
            16: MySQLCursorPrepared
        }
        try:
            return (types[cursor_type])(self)
        except KeyError:
            # Unsupported combination: report which flags were requested
            args = ('buffered', 'raw', 'dictionary', 'named_tuple', 'prepared')
            raise ValueError('Cursor not available with given criteria: ' +
                             ', '.join([args[i] for i in range(5)
                                        if cursor_type & (1 << i) != 0]))
    def commit(self):
        """Commit the current transaction."""
        self._execute_query("COMMIT")
    def rollback(self):
        """Roll back the current transaction."""
        if self.unread_result:
            # Drain pending rows first; a new statement cannot run with
            # unread results on the wire
            self.get_rows()
        self._execute_query("ROLLBACK")
def _execute_query(self, query):
"""Execute a query
This method simply calls cmd_query() after checking for unread
result. If there are still unread result, an errors.InterfaceError
is raised. Otherwise whatever cmd_query() returns is returned.
Returns a dict()
"""
self.handle_unread_result()
self.cmd_query(query)
def info_query(self, query):
"""Send a query which only returns 1 row"""
cursor = self.cursor(buffered=True)
cursor.execute(query)
return cursor.fetchone()
def _handle_binary_ok(self, packet):
"""Handle a MySQL Binary Protocol OK packet
This method handles a MySQL Binary Protocol OK packet. When the
packet is found to be an Error packet, an error will be raised. If
the packet is neither an OK or an Error packet, errors.InterfaceError
will be raised.
Returns a dict()
"""
if packet[4] == 0:
return self._protocol.parse_binary_prepare_ok(packet)
elif packet[4] == 255:
raise errors.get_exception(packet)
raise errors.InterfaceError('Expected Binary OK packet')
    def _handle_binary_result(self, packet):
        """Dispatch on the first reply packet of a binary-protocol command.

        OK and EOF packets are parsed and returned as dicts; an Error
        packet raises. Any other packet carries a column count, after
        which the column definitions of the binary result set are read
        up to the terminating EOF packet.

        Returns a dict (OK/EOF) or a tuple
        (column_count, columns, eof_packet_info).
        """
        if not packet or len(packet) < 4:
            raise errors.InterfaceError('Empty response')
        elif packet[4] == 0:
            return self._handle_ok(packet)
        elif packet[4] == 254:
            return self._handle_eof(packet)
        elif packet[4] == 255:
            raise errors.get_exception(packet)
        # We have a binary result set
        column_count = self._protocol.parse_column_count(packet)
        if not column_count or not isinstance(column_count, int):
            raise errors.InterfaceError('Illegal result set.')
        columns = [None] * column_count
        for i in range(0, column_count):
            columns[i] = self._protocol.parse_column(
                self._socket.recv(), self.python_charset)
        # Column list is terminated by an EOF packet
        eof = self._handle_eof(self._socket.recv())
        return (column_count, columns, eof)
def cmd_stmt_fetch(self, statement_id, rows=1):
"""Fetch a MySQL statement Result Set
This method will send the FETCH command to MySQL together with the
given statement id and the number of rows to fetch.
"""
packet = self._protocol.make_stmt_fetch(statement_id, rows)
self.unread_result = False
self._send_cmd(ServerCmd.STMT_FETCH, packet, expect_response=False)
self.unread_result = True
    def cmd_stmt_prepare(self, statement):
        """Prepare *statement* on the MySQL server.

        Sends STMT_PREPARE and then reads the parameter and column
        definitions announced by the binary OK reply; each definition
        list is terminated by an EOF packet.

        Returns a dict() extended with 'parameters' and 'columns' lists.
        """
        packet = self._send_cmd(ServerCmd.STMT_PREPARE, statement)
        result = self._handle_binary_ok(packet)
        result['columns'] = []
        result['parameters'] = []
        if result['num_params'] > 0:
            for _ in range(0, result['num_params']):
                result['parameters'].append(
                    self._protocol.parse_column(self._socket.recv(),
                                                self.python_charset))
            self._handle_eof(self._socket.recv())
        if result['num_columns'] > 0:
            for _ in range(0, result['num_columns']):
                result['columns'].append(
                    self._protocol.parse_column(self._socket.recv(),
                                                self.python_charset))
            self._handle_eof(self._socket.recv())
        return result
    def cmd_stmt_execute(self, statement_id, data=(), parameters=(), flags=0):
        """Execute a prepared MySQL statement.

        File-like values in ``data`` are streamed to the server first via
        cmd_stmt_send_long_data(); the remaining values are packed into the
        COM_STMT_EXECUTE packet. Returns the parsed binary result.
        """
        parameters = list(parameters)
        long_data_used = {}
        if data:
            for param_id, _ in enumerate(parameters):
                if isinstance(data[param_id], IOBase):
                    binary = True
                    try:
                        # NOTE(review): 'b' not in mode yields True for text
                        # mode files here — confirm this flag's intended
                        # meaning against make_stmt_execute().
                        binary = 'b' not in data[param_id].mode
                    except AttributeError:
                        # objects without a .mode attribute are treated as
                        # binary streams
                        pass
                    self.cmd_stmt_send_long_data(statement_id, param_id,
                                                 data[param_id])
                    long_data_used[param_id] = (binary,)
        execute_packet = self._protocol.make_stmt_execute(
            statement_id, data, tuple(parameters), flags,
            long_data_used, self.charset)
        packet = self._send_cmd(ServerCmd.STMT_EXECUTE, packet=execute_packet)
        result = self._handle_binary_result(packet)
        return result
    def cmd_stmt_close(self, statement_id):
        """Deallocate a prepared MySQL statement.

        This method deallocates the prepared statement using the
        statement_id. Note that the MySQL server does not return
        anything, so no response packet is read.

        :param statement_id: id of the prepared statement to deallocate.
        """
        self._send_cmd(ServerCmd.STMT_CLOSE, int4store(statement_id),
                       expect_response=False)
    def cmd_stmt_send_long_data(self, statement_id, param_id, data):
        """Send data for a column.

        This method sends data for a column (for example BLOB) for the
        statement identified by statement_id. The param_id indicates which
        parameter the data belongs to.

        The data argument should be a file-like object.

        Since MySQL does not send anything back, no error is raised. When
        the MySQL server is not reachable, an OperationalError is raised.

        cmd_stmt_send_long_data should be called before cmd_stmt_execute.

        The total bytes sent is returned.

        Returns int.
        """
        chunk_size = 8192
        total_sent = 0
        # pylint: disable=W0212
        prepare_packet = self._protocol._prepare_stmt_send_long_data
        # pylint: enable=W0212
        try:
            # Stream the file-like object in fixed-size chunks; each chunk is
            # sent as its own COM_STMT_SEND_LONG_DATA packet with no response.
            buf = data.read(chunk_size)
            while buf:
                packet = prepare_packet(statement_id, param_id, buf)
                self._send_cmd(ServerCmd.STMT_SEND_LONG_DATA, packet=packet,
                               expect_response=False)
                total_sent += len(buf)
                buf = data.read(chunk_size)
        except AttributeError:
            # e.g. the socket object is gone; surface it as a connection error
            raise errors.OperationalError("MySQL Connection not available.")
        return total_sent
def cmd_stmt_reset(self, statement_id):
"""Reset data for prepared statement sent as long data
The result is a dictionary with OK packet information.
Returns a dict()
"""
self._handle_ok(self._send_cmd(ServerCmd.STMT_RESET,
int4store(statement_id)))
def cmd_reset_connection(self):
"""Resets the session state without re-authenticating
Works only for MySQL server 5.7.3 or later.
The result is a dictionary with OK packet information.
Returns a dict()
"""
if self._server_version < (5, 7, 3):
raise errors.NotSupportedError("MySQL version 5.7.2 and "
"earlier does not support "
"COM_RESET_CONNECTION.")
self._handle_ok(self._send_cmd(ServerCmd.RESET_CONNECTION))
self._post_connection()
    def handle_unread_result(self):
        """Check whether there is an unread result.

        Either consumes the pending result set automatically (when
        ``can_consume_results`` is set) or raises InternalError so the
        caller fetches/flushes the pending rows first.
        """
        if self.can_consume_results:
            self.consume_results()
        elif self.unread_result:
            raise errors.InternalError("Unread result found")
| 37.051584 | 79 | 0.613258 |
37c7836bbda3e5225254b0bd342f79e9e66b2315 | 2,847 | py | Python | demos/unsteady_default_lspg_advdiff1d_mlp/autoencoder_keras.py | Pressio/pressio4py | 36676dbd112a7c7960ccbf302ff14d4376c819ec | [
"Unlicense",
"BSD-3-Clause"
] | 4 | 2020-07-06T20:01:39.000Z | 2022-03-05T09:23:40.000Z | demos/unsteady_default_lspg_advdiff1d_mlp/autoencoder_keras.py | Pressio/pressio4py | 36676dbd112a7c7960ccbf302ff14d4376c819ec | [
"Unlicense",
"BSD-3-Clause"
] | 19 | 2020-02-27T20:52:53.000Z | 2022-01-13T16:24:49.000Z | demos/unsteady_default_lspg_advdiff1d_mlp/autoencoder_keras.py | Pressio/pressio4py | 36676dbd112a7c7960ccbf302ff14d4376c819ec | [
"Unlicense",
"BSD-3-Clause"
] | 1 | 2022-03-03T16:05:09.000Z | 2022-03-03T16:05:09.000Z | import numpy as np
import tensorflow as tf
class myAutoencoder(tf.keras.Model):
  """Fully-connected autoencoder mapping a full-order state of size
  ``fomSize`` to a latent code of size ``romSize`` and back.

  Encoder: fomSize -> 200 -> 64 -> romSize; the decoder mirrors it.
  """
  def __init__(self, fomSize, romSize=10):
    super(myAutoencoder, self).__init__()
    # full-order (input/output) and reduced-order (latent) dimensions
    self.fomSize = fomSize
    self.romSize = romSize
  def build(self, input_shape):
    # Layers are created lazily on first call, per the Keras subclassing API.
    # NOTE(review): `input_dim` is a legacy Keras kwarg; the sizes are
    # inferred from the data anyway — confirm it has the intended effect.
    self.dense1 = tf.keras.layers.Dense(200, input_dim=input_shape, activation=tf.nn.relu)
    self.dense2 = tf.keras.layers.Dense(64, input_dim=200, activation=tf.nn.relu)
    self.dense3 = tf.keras.layers.Dense(self.romSize, input_dim=64, activation=tf.nn.relu)
    self.dense4 = tf.keras.layers.Dense(64, input_dim=self.romSize, activation=tf.nn.relu)
    self.dense5 = tf.keras.layers.Dense(200, input_dim=64, activation=tf.nn.relu)
    # final layer is linear so reconstructed states are unbounded
    self.dense6 = tf.keras.layers.Dense(self.fomSize, input_dim=200, activation=None)
  def encoder(self, x):
    # compress a (batch, fomSize) state into a (batch, romSize) code
    x = self.dense1(x)
    x = self.dense2(x)
    x = self.dense3(x)
    return x
  def decoder(self, x):
    # reconstruct a (batch, fomSize) state from a (batch, romSize) code
    x = self.dense4(x)
    x = self.dense5(x)
    x = self.dense6(x)
    return x
  def call(self, x):
    # full round trip: encode then decode (used for training)
    code = self.encoder(x)
    x = self.decoder(code)
    return x
class MyMapper:
  """Adapter exposing the trained autoencoder as a pressio-style mapper:
  decode (ROM -> FOM), encode (FOM -> ROM) and a finite-difference
  Jacobian of the decoder.
  """
  def __init__(self, autoencoderObj):
    self.model_ = autoencoderObj
    self.numModes_ = autoencoderObj.romSize
    fomSize = autoencoderObj.fomSize
    # scratch full-order states reused by updateJacobian to avoid
    # reallocating on every call
    self.fomState0 = np.zeros(fomSize)
    self.fomState1 = np.zeros(fomSize)
    # attention: the jacobian of the mapping must be column-major order
    # so that pressio can view it without deep copying it, this enables
    # to keep only one jacobian object around and to call the update
    # method below correctly
    self.jacobian_ = np.zeros((fomSize,self.numModes_), order='F')
  def jacobian(self): return self.jacobian_
  def applyMapping(self, romState, fomState):
    # decode: write the reconstructed FOM state into fomState in place
    fomState[:] = self.model_.decoder(romState.reshape((1,-1)))[:]
  def applyInverseMapping(self, fomState):
    # encode: return a fresh ROM state for the given FOM state
    romState = np.zeros(self.numModes_)
    romState[:] = self.model_.encoder(fomState.reshape((1,-1)))[:]
    return romState
  def updateJacobian(self, romState):
    # finite difference to approximate jacobian of the mapping:
    # perturb one latent coordinate at a time by eps (one-sided forward
    # difference) and store each column into the preallocated
    # column-major jacobian_.
    romStateLocal = romState.copy()
    self.applyMapping(romStateLocal,self.fomState0)
    eps = 0.001
    for i in range(self.numModes_):
      romStateLocal[i] += eps
      self.applyMapping(romStateLocal, self.fomState1)
      self.jacobian_[:,i] = (self.fomState1 - self.fomState0) / eps
      # undo the perturbation before moving to the next coordinate
      romStateLocal[i] -= eps
def trainMapping(snapshots, romSize, epochs):
  """Train the autoencoder on snapshot data and wrap it in a MyMapper.

  :param snapshots: array of shape (fomSize, numSnapshots); columns are
      individual states (the transpose is fed to Keras as samples).
  :param romSize: latent (reduced-order) dimension.
  :param epochs: number of training epochs.
  :return: a MyMapper wrapping the trained model.
  """
  fomSize = snapshots.shape[0]
  # double precision to match the numpy states used by the mapper
  tf.keras.backend.set_floatx('float64')
  model = myAutoencoder(fomSize, romSize)
  model.compile(loss='mean_squared_error', optimizer = tf.keras.optimizers.Adam())
  autoencoder_train = model.fit(snapshots.T, snapshots.T, batch_size=500,epochs=epochs,verbose=0)
  return MyMapper(model)
| 35.5875 | 98 | 0.686336 |
1c91826bf26c7f808ceaeba7279d53ba02d0f028 | 445 | py | Python | src/compas_rv2/singular/rhino/artists/patternartist.py | selinabitting/compas-RV2 | 0884cc00d09c8f4a75eb2b97614105e4c8bfd818 | [
"MIT"
] | 4 | 2022-01-17T19:17:22.000Z | 2022-01-21T18:06:02.000Z | src/compas_rv2/singular/rhino/artists/patternartist.py | selinabitting/compas-RV2 | 0884cc00d09c8f4a75eb2b97614105e4c8bfd818 | [
"MIT"
] | null | null | null | src/compas_rv2/singular/rhino/artists/patternartist.py | selinabitting/compas-RV2 | 0884cc00d09c8f4a75eb2b97614105e4c8bfd818 | [
"MIT"
] | null | null | null | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas_rhino.artists import MeshArtist
class PatternArtist(MeshArtist):
    """Rhino artist for pattern data structures.

    Thin wrapper over :class:`MeshArtist` that exposes the underlying
    ``mesh`` attribute under the domain-specific name ``pattern``.
    """

    def __init__(self, pattern, layer=None):
        super(PatternArtist, self).__init__(pattern, layer=layer)

    def _get_pattern(self):
        """Return the wrapped mesh as the pattern."""
        return self.mesh

    def _set_pattern(self, pattern):
        """Replace the wrapped mesh with the given pattern."""
        self.mesh = pattern

    # alias: ``artist.pattern`` reads/writes ``artist.mesh``
    pattern = property(_get_pattern, _set_pattern)
1700d33e51c617b7a22ce86ace6429e9be391c28 | 1,972 | py | Python | Source/Basic/Define.py | Archaic-Atom/SDA_network | 346465617e837cd32a4e789f83908b171a3a7b3a | [
"MIT"
] | 6 | 2021-08-10T01:55:14.000Z | 2022-03-16T23:17:21.000Z | Source/Basic/Define.py | Archaic-Atom/SDA_network | 346465617e837cd32a4e789f83908b171a3a7b3a | [
"MIT"
] | null | null | null | Source/Basic/Define.py | Archaic-Atom/SDA_network | 346465617e837cd32a4e789f83908b171a3a7b3a | [
"MIT"
] | 2 | 2021-08-10T01:55:17.000Z | 2022-01-03T07:14:43.000Z | # -*- coding: utf-8 -*-
#
# The program's define
# Date: 2018-05-04
# import some lib
import os
import sys
import time
import tensorflow as tf # tensorflow
import numpy as np # mat
# the paths' definitions; you can change the content
# output path
OUTPUT_PATH = './Result/'
# model checkpoint path
MODEL_PATH = './Model/'
# result image path
RESULT_IMG_PATH = './ResultImg/'
# training list file
TRAIN_LIST_PATH = './dataset/trainlist.txt'
# label list file
LABEL_LIST_PATH = './dataset/label.txt'
# log path
LOG_PATH = './log/'
DATASET_NUM = 22
# dataset definitions
# left image name format (printf-style, filled with the frame index)
LEFT_IMAGE_NAME_FORMAT = "training/image_2/%06d_10"
# right image name format
RIGHT_IMAGE_NAME_FORMAT = "training/image_3/%06d_10"
# save-name format
SAVE_FORMAT = "%06d_10"
# image file extension
IMAGE_TYPE = ".png"
# cropped image width
IMAGE_WIDTH = 512
# cropped image height
IMAGE_HEIGHT = 256
# original image width
IMAGE_ORG_WIDTH = 1280
# original image height
IMAGE_ORG_HEIGHT = 384
# image channel count
IMG_DEPTH = 3
# number of images
IMAGE_NUM = 20000
# program settings
SEARCH_SPACE = 288 # the image's disparity search range
ORIGINAL_SEARCH_SPACE = 288 # the original disparity search range
X_OFFSET = 0
Y_OFFSET = 0
MAX_STEPS = 500000 # the max training step
LEARNING_RATE = 0.001 # learning rate
AUTO_SAVE_NUM = 20 # save the model every N steps
MODEL_NAME = "model.ckpt" # model file name
VAL_SET = 20 # every 20th sample is validation
SHOW_SET = 40 # show the result every N steps
BATCH_SIZE = 2 # batch size
VAL_TIMES = 1 # number of validation passes
FILE_NUM = 5
CIFAR_10_LABLE_NUM = 10
GPU_NUM = 1
LABLE_NUM = 6
# network parameters
# constant parameters
CONV_BLOCK_NUM = 4
DENSE_BLOCK_NUM = 4
RES_3D_BLOCK = 4
FEATURE_NUM = 32
FEATURE_SCALE_NUM = 3
NON_LOCAL_BLOCK_NUM = 4
735a757d8a8f76a8b095aa106b83c6ad7cf83777 | 25,519 | py | Python | models/resnet_.py | TonyKuo777/model_quantization | c49acb20a261d27c239d1597a8faffb38b3b4f43 | [
"BSD-2-Clause"
] | 3 | 2020-08-18T17:36:04.000Z | 2021-06-22T08:11:56.000Z | models/resnet_.py | loganyan/model_quantization | c49acb20a261d27c239d1597a8faffb38b3b4f43 | [
"BSD-2-Clause"
] | null | null | null | models/resnet_.py | loganyan/model_quantization | c49acb20a261d27c239d1597a8faffb38b3b4f43 | [
"BSD-2-Clause"
] | 1 | 2020-08-18T17:37:57.000Z | 2020-08-18T17:37:57.000Z | import torch
import torch.nn as nn
import logging
import numpy as np
from .quant import conv3x3, conv1x1
from .layers import norm, actv
from .prone import qprone
# double_channel_half_resolution
class DCHR(nn.Module):
    """Double the channel count while halving (by ``stride``) the resolution.

    The input is average-pooled, then zero-filled channels (half the input
    channel count on each side) are concatenated around the pooled tensor,
    giving a parameter-free downsample shortcut.
    """

    def __init__(self, stride):
        super(DCHR, self).__init__()
        self.pool = nn.AvgPool2d(kernel_size=stride)

    def forward(self, x):
        pooled = self.pool(x)
        # zero padding block with half as many channels as the input
        pad_shape = list(pooled.shape)
        pad_shape[1] = pad_shape[1] // 2
        pad = x.new_zeros(pad_shape)
        return torch.cat((pad, pooled, pad), 1)
# TResNet: High Performance GPU-Dedicated Architecture (https://arxiv.org/pdf/2003.13630v1.pdf)
class TResNetStem(nn.Module):
    """TResNet-style stem: space-to-depth rearrangement followed by a single
    (possibly quantized) 1x1 or 3x3 convolution.

    Each ``stride x stride`` spatial patch is folded into the channel
    dimension, so the conv sees ``in_channel * stride**2`` channels at
    ``1/stride`` resolution.
    """
    def __init__(self, out_channel, in_channel=3, stride=4, kernel_size=1, force_fp=True, args=None):
        super(TResNetStem, self).__init__()
        self.stride = stride
        assert kernel_size in [1, 3], "Error reshape conv kernel"
        if kernel_size == 1:
            self.conv = conv1x1(in_channel*stride*stride, out_channel, args=args, force_fp=force_fp)
        elif kernel_size == 3:
            self.conv = conv3x3(in_channel*stride*stride, out_channel, args=args, force_fp=force_fp)
    def forward(self, x):
        B, C, H, W = x.shape
        # space-to-depth: split H and W into (H/s, s) and (W/s, s), then move
        # the two small factors into the channel axis. The transpose/reshape
        # chain preserves the exact element ordering the conv was trained on;
        # do not reorder these steps.
        x = x.reshape(B, C, H // self.stride, self.stride, W // self.stride, self.stride)
        x = x.transpose(4, 3).reshape(B, C, 1, H // self.stride, W // self.stride, self.stride * self.stride)
        x = x.transpose(2, 5).reshape(B, C * self.stride * self.stride, H // self.stride, W // self.stride)
        x = self.conv(x)
        return x
def seq_c_b_a_s(x, conv, relu, bn, skip, skip_enbale):
    """Conv -> BatchNorm -> Activation, then optional skip addition."""
    out = relu(bn(conv(x)))
    if skip_enbale:
        out += skip
    return out
def seq_c_b_s_a(x, conv, relu, bn, skip, skip_enbale):
    """Conv -> BatchNorm -> optional skip addition -> Activation
    (the default ordering used by torchvision ResNets)."""
    out = bn(conv(x))
    if skip_enbale:
        out += skip
    return relu(out)
def seq_c_a_b_s(x, conv, relu, bn, skip, skip_enbale):
    """Conv -> Activation -> BatchNorm, then optional skip addition
    (the ordering used by Group-Net)."""
    out = bn(relu(conv(x)))
    if skip_enbale:
        out += skip
    return out
def seq_b_c_a_s(x, conv, relu, bn, skip, skip_enbale):
    """BatchNorm -> Conv -> Activation, then optional skip addition."""
    out = relu(conv(bn(x)))
    if skip_enbale:
        out += skip
    return out
def seq_b_a_c_s(x, conv, relu, bn, skip, skip_enbale):
    """Pre-activation ordering (BatchNorm -> Activation -> Conv), then
    optional skip addition."""
    out = conv(relu(bn(x)))
    if skip_enbale:
        out += skip
    return out
'''
BasicBlock:
    different variants of the architecture are supported (mainly controlled
    by the order string in args.keyword)
'''
class BasicBlock(nn.Module):
    """ResNet basic block (two 3x3 convs) with a configurable layer order.

    The order token in ``args.keyword`` ('cbas'/'cbsa'/'cabs'/'bacs'/'bcas')
    selects the conv/BN/activation/skip sequence; ``args.base`` replicates
    the branch and sums the copies with learned scales; a Bi-Real style
    extra skip is added unless 'origin' is in ``args.keyword``.
    """
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, args=None, feature_stride=1):
        super(BasicBlock, self).__init__()
        self.args = args
        # Bi-Real structure or original structure
        if 'origin' in args.keyword:
            self.addition_skip = False
        else:
            self.addition_skip = True
        if self.addition_skip and args.verbose:
            logging.info("warning: add addition skip, not the origin resnet")
        # quantize skip connection ?
        real_skip = 'real_skip' in args.keyword
        # one activation list per conv stage, replicated args.base times
        for i in range(2):
            setattr(self, 'relu%d' % (i+1), nn.ModuleList([actv(args) for j in range(args.base)]))
        if 'fix' in self.args.keyword and ('cbas' in self.args.keyword or 'cbsa' in self.args.keyword):
            # 'fix': move the final activation after the residual addition
            self.fix_relu = actv(args)
            setattr(self, 'relu2', nn.ModuleList([nn.Sequential() for j in range(args.base)]))
        # choose the per-stage execution order (see seq_* helpers above)
        if 'cbas' in args.keyword:
            self.seq = seq_c_b_a_s
            order = 'cbas'
        elif 'cbsa' in args.keyword: # default architecture in Pytorch
            self.seq = seq_c_b_s_a
            order = 'cbsa'
        elif 'cabs' in args.keyword: # group-net
            self.seq = seq_c_a_b_s
            order = 'cabs'
        elif 'bacs' in args.keyword:
            self.seq = seq_b_a_c_s
            order = 'bacs'
        elif 'bcas' in args.keyword:
            self.seq = seq_b_c_a_s
            order = 'bcas'
        else:
            self.seq = None
            order = 'none'
        # pre-activation orders normalize the block input; post-activation
        # orders normalize the conv output
        if 'bacs' in args.keyword or 'bcas' in args.keyword:
            self.bn1 = nn.ModuleList([norm(inplanes, args, feature_stride=feature_stride) for j in range(args.base)])
            if 'fix' in self.args.keyword:
                self.fix_bn = norm(planes, args, feature_stride=feature_stride*stride)
        else:
            self.bn1 = nn.ModuleList([norm(planes, args, feature_stride=feature_stride*stride) for j in range(args.base)])
        self.bn2 = nn.ModuleList([norm(planes, args, feature_stride=feature_stride*stride) for j in range(args.base)])
        keepdim = True
        qconv3x3 = conv3x3
        qconv1x1 = conv1x1
        extra_padding = 0
        # downsample branch
        self.enable_skip = stride != 1 or inplanes != planes
        downsample = []
        if stride != 1:
            downsample.append(nn.AvgPool2d(stride))
        else:
            downsample.append(nn.Sequential())
        if inplanes != planes:
            if 'bacs' in args.keyword:
                downsample.append(norm(inplanes, args, feature_stride=feature_stride))
                downsample.append(actv(args))
                downsample.append(qconv1x1(inplanes, planes, stride=1, args=args, force_fp=real_skip, feature_stride=feature_stride*stride))
                if 'fix' in args.keyword:
                    downsample.append(norm(planes, args, feature_stride=feature_stride*stride))
            elif 'bcas' in args.keyword:
                downsample.append(norm(inplanes, args, feature_stride=feature_stride))
                downsample.append(qconv1x1(inplanes, planes, stride=1, args=args, force_fp=real_skip, feature_stride=feature_stride*stride))
                downsample.append(actv(args))
                if 'fix' in args.keyword: # remove the ReLU in skip connection
                    downsample.append(norm(planes, args, feature_stride=feature_stride*stride))
            else:
                downsample.append(qconv1x1(inplanes, planes, args=args, force_fp=real_skip, feature_stride=feature_stride*stride))
                downsample.append(norm(planes, args, feature_stride=feature_stride*stride))
                if 'fix' not in args.keyword:
                    downsample.append(actv(args))
        if 'singleconv' in args.keyword: # pytorch official branch employs a single strided convolution layer
            for i, n in enumerate(downsample):
                if isinstance(n, nn.AvgPool2d):
                    downsample[i] = nn.Sequential()
                if isinstance(n, nn.Conv2d):
                    downsample[i] = qconv1x1(inplanes, planes, stride=stride, padding=extra_padding, args=args, force_fp=real_skip, feature_stride=feature_stride)
        if 'DCHR' in args.keyword: # try if any performance improvement when aligning resolution without downsample
            if args.verbose:
                logging.warning("warning: DCHR is used in the block")
            self.skip = DCHR(stride)
        else:
            self.skip = nn.Sequential(*downsample)
        self.conv1 = nn.ModuleList([qconv3x3(inplanes, planes, stride, 1, padding=extra_padding+1, args=args, feature_stride=feature_stride, keepdim=keepdim) for j in range(args.base)])
        self.conv2 = nn.ModuleList([qconv3x3(planes, planes, 1, 1, args=args, feature_stride=feature_stride*stride) for j in range(args.base)])
        # scales: learned weights for summing the args.base parallel branches
        if args.base == 1:
            self.scales = [1]
        else:
            self.scales = nn.ParameterList([nn.Parameter(torch.ones(1) / args.base, requires_grad=True) for i in range(args.base)])
        # Fixup initialization (https://arxiv.org/abs/1901.09321): drop the
        # norm layers and use learned scalar scale/bias parameters instead
        if 'fixup' in args.keyword:
            self.bn1 = nn.ModuleList([nn.Sequential()])
            self.bn2 = nn.ModuleList([nn.Sequential()])
            for i, n in enumerate(self.skip):
                if isinstance(n, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.skip[i] = nn.Sequential()
            self.fixup_scale = nn.Parameter(torch.ones(1))
            if 'bias' in args.keyword:
                self.fixup_bias1a = nn.Parameter(torch.zeros(1))
                self.fixup_bias1b = nn.Parameter(torch.zeros(1))
                self.fixup_bias2a = nn.Parameter(torch.zeros(1))
                self.fixup_bias2b = nn.Parameter(torch.zeros(1))
    def forward(self, x):
        if not self.enable_skip:
            residual = x
        if 'fixup' in self.args.keyword:
            if 'bias' in self.args.keyword:
                x = x + self.fixup_bias1a
        if self.enable_skip:
            residual = self.skip(x)
        # run the args.base parallel branches and sum them with the scales
        result = None
        for conv1, conv2, bn1, bn2, relu1, relu2, scale in zip(self.conv1, self.conv2, \
                self.bn1, self.bn2, self.relu1, self.relu2, self.scales):
            if 'fixup' in self.args.keyword and 'bias' in self.args.keyword:
                out = self.seq(x, conv1, relu1, bn1, self.fixup_bias1b, True) + self.fixup_bias2a
            else:
                out = self.seq(x, conv1, relu1, bn1, residual, self.addition_skip)
            output = self.seq(out, conv2, relu2, bn2, out, self.addition_skip)
            if result is None:
                result = scale * output
            else:
                result = result + scale * output
        output = result
        if 'fixup' in self.args.keyword:
            output = output * self.fixup_scale
            if 'bias' in self.args.keyword:
                output = output + self.fixup_bias2b
        # original (non Bi-Real) residual: add the skip once at the end
        if not self.addition_skip:
            if 'fix' in self.args.keyword and ('bacs' in self.args.keyword or 'bcas' in self.args.keyword):
                output = self.fix_bn(output)
            output = output + residual
            if 'fix' in self.args.keyword and ('cbas' in self.args.keyword or 'cbsa' in self.args.keyword):
                output = self.fix_relu(output)
        return output
class BottleNeck(nn.Module):
    """ResNet bottleneck block (1x1 -> 3x3 -> 1x1, 4x channel expansion)
    with the same configurable ordering / multi-base / Fixup options as
    :class:`BasicBlock`.
    """
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, args=None, feature_stride=1):
        super(BottleNeck, self).__init__()
        self.args = args
        # Bi-Real structure or original structure
        if 'origin' in args.keyword:
            self.addition_skip = False
        else:
            self.addition_skip = True
        if self.addition_skip and args.verbose:
            logging.info("warning: add addition skip, not the origin resnet")
        qconv3x3 = conv3x3
        qconv1x1 = conv1x1
        # one activation list per conv stage, replicated args.base times
        for i in range(3):
            setattr(self, 'relu%d' % (i+1), nn.ModuleList([actv(args) for j in range(args.base)]))
        if 'fix' in self.args.keyword and ('cbas' in self.args.keyword or 'cbsa' in self.args.keyword):
            # 'fix': move the final activation after the residual addition
            setattr(self, 'relu3', nn.ModuleList([nn.Sequential() for j in range(args.base)]))
            self.fix_relu = actv(args)
        # choose the per-stage execution order (see seq_* helpers above)
        if 'cbas' in args.keyword:
            self.seq = seq_c_b_a_s
        elif 'cbsa' in args.keyword: # default Pytorch
            self.seq = seq_c_b_s_a
        elif 'cabs' in args.keyword: # group-net
            self.seq = seq_c_a_b_s
        elif 'bacs' in args.keyword:
            self.seq = seq_b_a_c_s
        elif 'bcas' in args.keyword:
            self.seq = seq_b_c_a_s
        else:
            self.seq = None
        if 'bacs' in args.keyword:
            self.bn1 = nn.ModuleList([norm(inplanes, args) for j in range(args.base)])
            self.bn3 = nn.ModuleList([norm(planes, args) for j in range(args.base)])
            if 'fix' in self.args.keyword:
                self.fix_bn = norm(planes * self.expansion, args)
        else:
            self.bn1 = nn.ModuleList([norm(planes, args) for j in range(args.base)])
            self.bn3 = nn.ModuleList([norm(planes * self.expansion, args) for j in range(args.base)])
        self.bn2 = nn.ModuleList([norm(planes, args) for j in range(args.base)])
        # downsample branch
        self.enable_skip = stride != 1 or inplanes != planes * self.expansion
        real_skip = 'real_skip' in args.keyword
        downsample = []
        if stride != 1:
            downsample.append(nn.AvgPool2d(stride))
        else:
            downsample.append(nn.Sequential())
        if inplanes != planes * self.expansion:
            if 'bacs' in args.keyword:
                downsample.append(norm(inplanes, args))
                downsample.append(actv(args))
                downsample.append(qconv1x1(inplanes, planes * self.expansion, stride=1, args=args, force_fp=real_skip, feature_stride=feature_stride*stride))
                if 'fix' in args.keyword:
                    downsample.append(norm(planes * self.expansion, args))
            else:
                downsample.append(qconv1x1(inplanes, planes * self.expansion, stride=1, args=args, force_fp=real_skip, feature_stride=feature_stride*stride))
                downsample.append(norm(planes * self.expansion, args))
                if 'fix' not in args.keyword:
                    downsample.append(actv(args))
        if 'singleconv' in args.keyword:
            # pytorch official branch employs a single strided convolution
            for i, n in enumerate(downsample):
                if isinstance(n, nn.AvgPool2d):
                    downsample[i] = nn.Sequential()
                if isinstance(n, nn.Conv2d):
                    downsample[i] = qconv1x1(inplanes, planes * self.expansion, stride=stride, args=args, force_fp=real_skip, feature_stride=feature_stride)
        if 'DCHR' in args.keyword:
            if args.verbose:
                logging.info("warning: DCHR is used in the block")
            self.skip = DCHR(stride)
        else:
            self.skip = nn.Sequential(*downsample)
        self.conv1 = nn.ModuleList([qconv1x1(inplanes, planes, 1, args=args, feature_stride=feature_stride) for j in range(args.base)])
        self.conv2 = nn.ModuleList([qconv3x3(planes, planes, stride, 1, args=args, feature_stride=feature_stride) for j in range(args.base)])
        feature_stride = feature_stride * stride
        self.conv3 = nn.ModuleList([qconv1x1(planes, planes * self.expansion, 1, args=args, feature_stride=feature_stride) for j in range(args.base)])
        # scales: learned weights for summing the args.base parallel branches
        if args.base == 1:
            self.scales = [1]
        else:
            self.scales = nn.ParameterList([nn.Parameter(torch.ones(1) / args.base, requires_grad=True) for i in range(args.base)])
        # Fixup initialization: drop the norm layers, use scalar scale/bias
        if 'fixup' in args.keyword:
            assert args.base == 1, 'Base should be 1 in Fixup'
            self.bn1 = nn.ModuleList([nn.Sequential()])
            self.bn2 = nn.ModuleList([nn.Sequential()])
            self.bn3 = nn.ModuleList([nn.Sequential()])
            for i, n in enumerate(self.skip):
                if isinstance(n, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.skip[i] = nn.Sequential()
            self.fixup_scale = nn.Parameter(torch.ones(1))
            if 'bias' in args.keyword:
                self.fixup_bias1a = nn.Parameter(torch.zeros(1))
                self.fixup_bias1b = nn.Parameter(torch.zeros(1))
                self.fixup_bias2a = nn.Parameter(torch.zeros(1))
                self.fixup_bias2b = nn.Parameter(torch.zeros(1))
                self.fixup_bias3a = nn.Parameter(torch.zeros(1))
                self.fixup_bias3b = nn.Parameter(torch.zeros(1))
        else:
            pass
    def forward(self, x):
        if not self.enable_skip:
            residual = x
        if 'fixup' in self.args.keyword:
            if 'bias' in self.args.keyword:
                x = x + self.fixup_bias1a
        if self.enable_skip:
            residual = self.skip(x)
        # run the args.base parallel branches and sum them with the scales
        result = None
        for conv1, conv2, conv3, bn1, bn2, bn3, relu1, relu2, relu3, scale in zip(self.conv1, self.conv2, self.conv3, \
                self.bn1, self.bn2, self.bn3, self.relu1, self.relu2, self.relu3, self.scales):
            if 'fixup' in self.args.keyword and 'bias' in self.args.keyword:
                out = self.seq(x, conv1, relu1, bn1, self.fixup_bias1b, True) + self.fixup_bias2a
                out = self.seq(out, conv2, relu2, bn2, self.fixup_bias2b, True) + self.fixup_bias3a
            else:
                out = self.seq(x, conv1, relu1, bn1, residual, self.addition_skip)
                out = self.seq(out, conv2, relu2, bn2, out, self.addition_skip)
            output = self.seq(out, conv3, relu3, bn3, out, self.addition_skip)
            if result is None:
                result = scale * output
            else:
                result = result + scale * output
        output = result
        if 'fixup' in self.args.keyword:
            output = output * self.fixup_scale
            if 'bias' in self.args.keyword:
                output = output + self.fixup_bias3b
        # original (non Bi-Real) residual: add the skip once at the end
        if not self.addition_skip:
            if 'fix' in self.args.keyword and ('bacs' in self.args.keyword or 'bcas' in self.args.keyword):
                output = self.fix_bn(output)
            output += residual
            if 'fix' in self.args.keyword and ('cbas' in self.args.keyword or 'cbsa' in self.args.keyword):
                output = self.fix_relu(output)
        return output
class ResNet(nn.Module):
    """Configurable (quantization-oriented) ResNet.

    The stem, layer ordering, normalization placement and initialization
    are all driven by tokens in ``args.keyword``; ``block`` is BasicBlock
    or BottleNeck and ``layers`` gives the per-stage block counts.
    """
    def __init__(self, block, layers, args):
        super(ResNet, self).__init__()
        self.args = args
        assert args is not None, "args is None"
        self.layer_count = len(layers)
        self.inplanes = 64
        # optional uniform width multiplier
        self.width_alpha = getattr(args, 'width_alpha', 1.0)
        self.inplanes = int(self.inplanes * self.width_alpha)
        self.input_channel = self.inplanes
        self.feature_stride = 1
        # stem: 3x3 for CIFAR, TResNet space-to-depth stem, or the standard
        # ImageNet 7x7 conv + maxpool
        if 'cifar10' in args.keyword or 'cifar100' in args.keyword:
            self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
            self.maxpool = nn.Sequential()
        elif 'TResNetStem' in args.keyword or 'TResNetStemMaxPool' in args.keyword:
            if 'TResNetStemMaxPool' in args.keyword:
                self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
                self.conv1 = TResNetStem(self.input_channel, stride=2, kernel_size=args.stem_kernel)
            else:
                self.maxpool = nn.Sequential()
                self.conv1 = TResNetStem(self.input_channel, stride=4, kernel_size=args.stem_kernel)
            self.feature_stride = 4
        else:
            self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
            self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
            self.feature_stride = 4
        outplanes = self.inplanes
        # stage i doubles the channels and (except stage 0) halves resolution
        for i in range(self.layer_count):
            index = i + 1
            channel_scale = 2 ** i
            outplanes = self.input_channel * channel_scale
            stride = 1 if i == 0 else 2
            setattr(self, 'layer%d' % index, self._make_layer(block, outplanes, layers[i], stride=stride, feature_stride=self.feature_stride))
            self.feature_stride = self.feature_stride * stride
        # bn1 normalizes the stem output, bn2 the final feature map; their
        # content depends on pre-/post-BN style
        if 'preBN' in args.keyword:
            self.bn1 = nn.Sequential()
            self.bn2 = nn.Sequential(norm(outplanes * block.expansion, args))
            if 'patch_preBN_stem' in args.keyword:
                if 'fix_pooling' in self.args.keyword:
                    self.bn1 = nn.Sequential(norm(self.input_channel, args), actv(args))
                    self.bn2 = nn.Sequential(norm(outplanes * block.expansion, args), actv(args))
        else:
            self.bn1 = nn.Sequential(norm(self.input_channel, args), actv(args))
            self.bn2 = nn.Sequential()
            if 'group-net' in args.keyword:
                # group-net: drop the stem activation
                self.bn1[1] = nn.Sequential()
        self.avgpool = nn.AdaptiveAvgPool2d((1,1))
        self.fc = nn.Linear(outplanes * block.expansion, args.num_classes)
        if 'debug' in args.keyword:
            logging.info("Resnet has attr '_out_features' %r" % hasattr(self, '_out_features'))
        # when used as a backbone (_out_features set by a subclass/mixin)
        # without the 'linear' head, drop the classification layers
        if hasattr(self, '_out_features') and 'linear' not in self._out_features:
            self.avgpool = None
            self.fc = None
            self.bn2 = None
        # default init: He for convs, (1, 0) for norm layers
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # 'zir': zero-init the last norm layer of every residual branch
        if 'zir' in args.keyword:
            for m in self.modules():
                if isinstance(m, BottleNeck):
                    fix_bn = getattr(m, 'fix_bn', None)
                    if fix_bn is not None:
                        nn.init.constant_(m.fix_bn.weight, 0)
                    else:
                        for b in getattr(m, 'bn3', []):
                            nn.init.constant_(b.weight, 0)
                elif isinstance(m, BasicBlock):
                    fix_bn = getattr(m, 'fix_bn', None)
                    if fix_bn is not None:
                        nn.init.constant_(m.fix_bn.weight, 0)
                    else:
                        for b in getattr(m, 'bn2', []):
                            nn.init.constant_(b.weight, 0)
        # Fixup: depth-scaled init of conv weights, zero-init last conv/fc
        if 'fixup' in args.keyword:
            self.stem_relu = actv(args)
            self.bn1 = nn.Sequential()
            self.bn2 = nn.Sequential()
            if 'bias' in args.keyword:
                self.fixup_bias1 = nn.Parameter(torch.zeros(1))
                self.fixup_bias2 = nn.Parameter(torch.zeros(1))
            for m in self.modules():
                if isinstance(m, BasicBlock):
                    for i, n in enumerate(m.conv1):
                        nn.init.normal_(n.weight, mean=0, std=np.sqrt(2 / (n.weight.shape[0] * np.prod(n.weight.shape[2:]))) * self.layer_count ** (-0.5))
                    for i, n in enumerate(m.conv2):
                        nn.init.constant_(n.weight, 0)
                    for i, n in enumerate(m.skip):
                        if isinstance(n, nn.Conv2d):
                            nn.init.normal_(n.weight, mean=0, std=np.sqrt(2 / (n.weight.shape[0] * np.prod(n.weight.shape[2:]))))
                elif isinstance(m, BottleNeck):
                    for i, n in enumerate(m.conv1):
                        nn.init.normal_(n.weight, mean=0, std=np.sqrt(2 / (n.weight.shape[0] * np.prod(n.weight.shape[2:]))) * self.layer_count ** (-0.25))
                    for i, n in enumerate(m.conv2):
                        nn.init.normal_(n.weight, mean=0, std=np.sqrt(2 / (n.weight.shape[0] * np.prod(n.weight.shape[2:]))) * self.layer_count ** (-0.25))
                    for i, n in enumerate(m.conv3):
                        nn.init.constant_(n.weight, 0)
                    for i, n in enumerate(m.skip):
                        if isinstance(n, nn.Conv2d):
                            nn.init.normal_(n.weight, mean=0, std=np.sqrt(2 / (n.weight.shape[0] * np.prod(n.weight.shape[2:]))))
                elif isinstance(m, nn.Linear):
                    nn.init.constant_(m.weight, 0)
                    nn.init.constant_(m.bias, 0)
    def _make_layer(self, block, planes, blocks, stride=1, feature_stride=1):
        """Build one stage: the first block may stride, the rest do not."""
        strides = [stride] + [1]*(blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.inplanes, planes, stride, self.args, feature_stride))
            feature_stride = feature_stride * stride
            self.inplanes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        outputs = {}
        x = self.conv1(x)
        if 'fixup' in self.args.keyword:
            if 'bias' in self.args.keyword:
                x = self.stem_relu(x + self.fixup_bias1)
            else:
                x = self.stem_relu(x)
        # 'fix_pooling' normalizes before pooling instead of after
        if 'fix_pooling' in self.args.keyword:
            x = self.bn1(x)
            x = self.maxpool(x)
        else:
            x = self.maxpool(x)
            x = self.bn1(x)
        if hasattr(self, '_out_features') and 'stem' in self._out_features:
            outputs["stem"] = x
        for i in range(self.layer_count):
            layer = 'layer%d' % (i + 1)
            x = getattr(self, layer)(x)
            if hasattr(self, '_out_features') and layer in self._out_features:
                outputs[layer] = x
        # backbone mode: return the requested intermediate feature maps
        if hasattr(self, '_out_features') and 'linear' not in self._out_features:
            return outputs
        #if 'keep_resolution' in self.args.keyword:
        #    B, C, H, W = x.shape
        #    if H == 8:
        #        x = x[:, :, 0:H-1, 0:W-1]
        x = self.bn2(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        if 'fixup' in self.args.keyword:
            if 'bias' in self.args.keyword:
                x = x + self.fixup_bias2
        x = self.fc(x)
        return x
def resnet18(args):
    """Construct a ResNet-18 (BasicBlock x [2, 2, 2, 2])."""
    return ResNet(BasicBlock, [2, 2, 2, 2], args)
def resnet20(args):
    """Construct a CIFAR-style ResNet-20 (BasicBlock x [3, 3, 3])."""
    return ResNet(BasicBlock, [3, 3, 3], args)
def resnet32(args):
    """Construct a CIFAR-style ResNet-32 (BasicBlock x [5, 5, 5])."""
    return ResNet(BasicBlock, [5, 5, 5], args)
def resnet34(args):
    """Construct a ResNet-34 (BasicBlock x [3, 4, 6, 3])."""
    return ResNet(BasicBlock, [3, 4, 6, 3], args)
def resnet44(args):
    """Construct a CIFAR-style ResNet-44 (BasicBlock x [7, 7, 7])."""
    return ResNet(BasicBlock, [7, 7, 7], args)
def resnet50(args):
    """Construct a ResNet-50 (BottleNeck x [3, 4, 6, 3])."""
    return ResNet(BottleNeck, [3, 4, 6, 3], args)
def resnet56(args):
    """Construct a CIFAR-style ResNet-56 (BasicBlock x [9, 9, 9])."""
    return ResNet(BasicBlock, [9, 9, 9], args)
def resnet101(args):
    """Construct a ResNet-101 (BottleNeck x [3, 4, 23, 3])."""
    return ResNet(BottleNeck, [3, 4, 23, 3], args)
| 42.320066 | 185 | 0.576786 |
fb55d6f84662720b055f70146650cdaf14a207ec | 7,325 | py | Python | girder/models/file.py | cjh1/girder | 293e238b56a4b7a0a413648b5f324876d577ebb0 | [
"Apache-2.0"
] | null | null | null | girder/models/file.py | cjh1/girder | 293e238b56a4b7a0a413648b5f324876d577ebb0 | [
"Apache-2.0"
] | null | null | null | girder/models/file.py | cjh1/girder | 293e238b56a4b7a0a413648b5f324876d577ebb0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import cherrypy
import datetime
from .model_base import Model, ValidationException
from girder.utility import assetstore_utilities
class File(Model):
"""
This model represents a File, which is stored in an assetstore.
"""
    def initialize(self):
        # Collection name plus indexes: files are looked up by parent item,
        # by assetstore, by extension, and by any assetstore-specific fields
        # declared by the installed adapters.
        self.name = 'file'
        self.ensureIndices(
            ['itemId', 'assetstoreId', 'exts'] +
            assetstore_utilities.fileIndexFields())
    def remove(self, file, updateItemSize=True):
        """
        Use the appropriate assetstore adapter for whatever assetstore the
        file is stored in, and call deleteFile on it, then delete the file
        record from the database.

        :param file: The file document to remove.
        :param updateItemSize: Whether to update the item size. Only set this
        to False if you plan to delete the item and do not care about updating
        its size.
        """
        # Link files carry no assetstoreId; only real stored files need the
        # backing bytes removed from the assetstore first.
        if file.get('assetstoreId'):
            assetstore = self.model('assetstore').load(file['assetstoreId'])
            adapter = assetstore_utilities.getAssetstoreAdapter(assetstore)
            adapter.deleteFile(file)
        # Shrink the parent item's recorded size before deleting the record.
        item = self.model('item').load(file['itemId'], force=True)
        self.propagateSizeChange(item, -file['size'], updateItemSize)
        Model.remove(self, file)
def download(self, file, offset=0, headers=True):
"""
Use the appropriate assetstore adapter for whatever assetstore the
file is stored in, and call downloadFile on it. If the file is a link
file rather than a file in an assetstore, we redirect to it.
"""
if file.get('assetstoreId'):
assetstore = self.model('assetstore').load(file['assetstoreId'])
adapter = assetstore_utilities.getAssetstoreAdapter(assetstore)
return adapter.downloadFile(file, offset=offset, headers=headers)
elif file.get('linkUrl'):
if headers:
raise cherrypy.HTTPRedirect(file['linkUrl'])
else:
def stream():
yield file['linkUrl']
return stream
else: # pragma: no cover
raise Exception('File has no known download mechanism.')
def validate(self, doc):
if doc.get('assetstoreId') is None:
if 'linkUrl' not in doc:
raise ValidationException(
'File must have either an assetstore ID or a link URL.',
'linkUrl')
doc['linkUrl'] = doc['linkUrl'].strip()
if not doc['linkUrl'].startswith(('http:', 'https:')):
raise ValidationException(
'Linked file URL must start with http: or https:.',
'linkUrl')
if 'name' not in doc or not doc['name']:
raise ValidationException('File name must not be empty.', 'name')
doc['exts'] = doc['name'].split('.')[1:]
return doc
def createLinkFile(self, name, parent, parentType, url, creator):
"""
Create a file that is a link to a URL rather than something we maintain
in an assetstore.
:param name: The local name for the file.
:type name: str
:param parent: The parent object for this file.
:type parent: folder or item
:param parentType: The parent type (folder or item)
:type parentType: str
:param url: The URL that this file points to
:param creator: The user creating the file.
:type user: user
"""
if parentType == 'folder':
# Create a new item with the name of the file.
item = self.model('item').createItem(
name=name, creator=creator, folder=parent)
elif parentType == 'item':
item = parent
file = {
'created': datetime.datetime.now(),
'itemId': item['_id'],
'creatorId': creator['_id'],
'assetstoreId': None,
'name': name,
'linkUrl': url
}
try:
file = self.save(file)
return file
except ValidationException:
if parentType == 'folder':
self.model('item').remove(item)
raise
def propagateSizeChange(self, item, sizeIncrement, updateItemSize=True):
"""
Propagates a file size change (or file creation) to the necessary
parents in the hierarchy. Internally, this records subtree size in
the item, the parent folder, and the root node under which the item
lives. Should be called anytime a new file is added, a file is
deleted, or a file size changes.
:param item: The parent item of the file.
:type item: dict
:param sizeIncrement: The change in size to propagate.
:type sizeIncrement: int
:param updateItemSize: Whether the item size should be updated. Set to
False if you plan to delete the item immediately and don't care to
update its size.
"""
if updateItemSize:
# Propagate size up to item
self.model('item').increment(query={
'_id': item['_id']
}, field='size', amount=sizeIncrement, multi=False)
# Propagate size to direct parent folder
self.model('folder').increment(query={
'_id': item['folderId']
}, field='size', amount=sizeIncrement, multi=False)
# Propagate size up to root data node
self.model(item['baseParentType']).increment(query={
'_id': item['baseParentId']
}, field='size', amount=sizeIncrement, multi=False)
def createFile(self, creator, item, name, size, assetstore, mimeType):
"""
Create a new file record in the database.
:param item: The parent item.
:param creator: The user creating the file.
:param assetstore: The assetstore this file is stored in.
:param name: The filename.
:type name: str
:param size: The size of the file in bytes.
:type size: int
:param mimeType: The mimeType of the file.
:type mimeType: str
"""
file = {
'created': datetime.datetime.now(),
'itemId': item['_id'],
'creatorId': creator['_id'],
'assetstoreId': assetstore['_id'],
'name': name,
'mimeType': mimeType,
'size': size
}
self.propagateSizeChange(item, size)
return self.save(file)
| 37.757732 | 79 | 0.584846 |
b4c6112d088aca3d44442cce983015699b5736c5 | 291 | py | Python | tests/testlib/__init__.py | Open-Technology/Graph-Database | 0b5bd2414b888c4709af3390f9d41dc8e0d4a863 | [
"BSD-3-Clause"
] | 30 | 2015-02-24T12:47:10.000Z | 2022-02-02T10:41:54.000Z | tests/testlib/__init__.py | Open-Technology/Graph-Database | 0b5bd2414b888c4709af3390f9d41dc8e0d4a863 | [
"BSD-3-Clause"
] | 4 | 2015-08-08T07:30:35.000Z | 2021-11-18T18:50:01.000Z | tests/testlib/__init__.py | Open-Technology/Graph-Database | 0b5bd2414b888c4709af3390f9d41dc8e0d4a863 | [
"BSD-3-Clause"
] | 14 | 2015-02-12T02:55:45.000Z | 2021-11-24T19:43:40.000Z | __all__ = ['logger', 'testoptions', 'testutil']
# Fix import paths first so that the right (dev) version of pygr is imported;
# pathfix presumably adjusts sys.path as an import side effect, so it must
# stay the very first import — TODO confirm against pathfix itself.
import pathfix

# Import the rest of the shared test utilities.
import testoptions
import testutil

# Re-export SkipTest / PygrTestProgram so individual test modules can import
# them from this package.
from unittest_extensions import SkipTest, PygrTestProgram
| 24.25 | 76 | 0.786942 |
2523bac754150e5d90a235854a7dfa29691aafa1 | 311 | py | Python | coursework1/task4/mapper.py | foundnet/UOE_EP_coursework1 | 25ab8ff2beaa4fbde5a0d4519abb84d8e43cf9d8 | [
"Apache-2.0"
] | null | null | null | coursework1/task4/mapper.py | foundnet/UOE_EP_coursework1 | 25ab8ff2beaa4fbde5a0d4519abb84d8e43cf9d8 | [
"Apache-2.0"
] | null | null | null | coursework1/task4/mapper.py | foundnet/UOE_EP_coursework1 | 25ab8ff2beaa4fbde5a0d4519abb84d8e43cf9d8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import sys


def four_grams(line):
    """Return one 'w1 w2 w3 w4<TAB>1' record for every 4-gram of the
    whitespace-split tokens of *line* (empty list if fewer than 4 tokens)."""
    tokens = line.strip().split()
    records = []
    if len(tokens) > 3:
        for index in range(len(tokens) - 3):
            records.append("%s %s %s %s\t1" % (tokens[index], tokens[index + 1],
                                               tokens[index + 2], tokens[index + 3]))
    return records


def main():
    """Hadoop-streaming mapper: stream stdin to stdout, one count record per
    4-gram. (The original used the py2-only ``print (..)%(..)`` idiom, which
    raises TypeError on Python 3; this form works on both.)"""
    for line in sys.stdin:
        for record in four_grams(line):
            print(record)


if __name__ == "__main__":
    main()
| 22.214286 | 69 | 0.517685 |
195680d123298a94eebde27db80286875fb1f64b | 2,782 | py | Python | bittrexticker.py | bitsanity/rateboard | f362505d6b9672d2f22bc6e6650975899d2ea127 | [
"Apache-2.0"
] | 3 | 2017-08-24T14:36:49.000Z | 2018-11-21T21:14:06.000Z | bittrexticker.py | bitsanity/rateboard | f362505d6b9672d2f22bc6e6650975899d2ea127 | [
"Apache-2.0"
] | null | null | null | bittrexticker.py | bitsanity/rateboard | f362505d6b9672d2f22bc6e6650975899d2ea127 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys, traceback
import threading
import time
import simplejson as json
import urllib2
from PyQt4 import QtGui,QtCore
from boardlet import Boardlet
from modellet import Modellet
class BittrexTicker(Boardlet):
    """Boardlet widget showing the Bittrex BTC/<target> ticker, refreshed by
    a background thread."""

    def __init__(self, parent, targetCurr):
        super(BittrexTicker, self).__init__(parent)
        # Model that fetches and caches the latest bid/ask from Bittrex.
        self.p_model = Bittrex( targetCurr )
        self.initUI()

    def initUI(self):
        """Build the widget contents and start the refresh thread."""
        super(BittrexTicker, self).initUI()

        self.p_icon = QtGui.QLabel(self)
        self.p_icon.setGeometry( self.b_imgx(), self.b_imgy(),
                                 self.b_iconwidth(),self.b_iconheight() )
        # NOTE(review): the icon is hard-coded to xmr.png even though
        # targetCurr is configurable — confirm whether it should vary.
        self.p_icon.setPixmap( QtGui.QPixmap(os.getcwd() + "/img/xmr.png" ) )

        # Daemon thread so the refresh loop dies with the application.
        t = threading.Thread(target=self.periodicUpdate)
        t.setDaemon(True)
        t.start()

    def paintEvent(self, e):
        """Draw the market pair, best bid/ask and last-refresh timestamp."""
        super(BittrexTicker, self).paintEvent(e)

        qp = QtGui.QPainter()
        qp.begin(self)

        # Header: market pair in the dimmer "pair" font.
        qp.setPen( self.p_grayPen )
        qp.setFont( self.p_pairFont )
        qp.drawText( self.b_col1x(), self.b_row1y(),
                     'Bittrex BTC' + self.p_model.getTargetCurr() )

        # Bid / ask rows in the normal font.
        qp.setPen( self.p_whitePen )
        qp.setFont( self.p_normFont )
        qp.drawText( self.b_col1x(), self.b_row2y() - 5,
                     'bid: ' + self.p_model.getBestBid() )
        qp.drawText( self.b_col1x(), self.b_row3y() - 5,
                     'ask: ' + self.p_model.getBestAsk() )

        # Footer: timestamp of the last successful refresh.
        qp.setFont( self.p_timeFont )
        qp.setPen( self.p_grayPen )
        qp.drawText( self.b_imgx(), self.b_row4y(),
                     'Refreshed: ' + self.p_model.getLastUpdated() )

        qp.end()

    def periodicUpdate(self):
        """Background loop: sleep for the boardlet's wait interval, then ask
        the model to re-fetch the ticker."""
        while(True):
            st = self.getNextWaitTimeSeconds()
            time.sleep( st )
            self.p_model.doRefresh()
class Bittrex(Modellet):
    """
    Model for the Bittrex public ticker API: fetches the best bid/ask for the
    BTC-<targetCurr> market and caches the values as display strings.
    """

    def __init__(self, targetCurr='XMR'):
        self.p_targetCurr = targetCurr
        self.p_refreshTime = None
        # Placeholder values shown until the first successful refresh.
        self.p_bestBid = '000.00'
        self.p_bestAsk = '000.00'

    def getBestBid(self):
        """Return the most recently fetched best bid (string)."""
        return self.p_bestBid

    def getBestAsk(self):
        """Return the most recently fetched best ask (string)."""
        return self.p_bestAsk

    def getTargetCurr(self):
        """Return the target currency code (e.g. 'XMR')."""
        return self.p_targetCurr

    def doRefresh(self):
        """Fetch the current bid/ask from Bittrex and update cached values.

        On any failure the fault flag is set and the previous values are kept.
        """
        # Build the market pair from the configured target currency. The URL
        # was previously hard-coded to BTC-XMR, so non-XMR tickers silently
        # displayed the wrong market's prices.
        url = ('https://bittrex.com/api/v1.1/public/getticker?market=BTC-'
               + self.p_targetCurr)
        headers = {'User-agent' : 'Mozilla/5.0'}
        req = urllib2.Request(url, None, headers)
        try:
            resp = urllib2.urlopen(req).read()
            # Parse the response once instead of twice.
            result = json.loads(resp)['result']
            self.p_bestBid = str(result['Bid'])
            self.p_bestAsk = str(result['Ask'])
            super(Bittrex, self).setFaultFlag(False)
            super(Bittrex, self).setLastUpdatedNow()
        except Exception:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
            print(''.join('!! ' + line for line in lines))
            super(Bittrex, self).setFaultFlag(True)
| 29.284211 | 106 | 0.653846 |
221792c09d1be0bac740ff0b3f19cfab1f086ad1 | 125 | py | Python | check_pyexample.py | phobson/pyexample | 133598954b514a80dc0f65c02c8740b626e569d2 | [
"BSD-3-Clause"
] | null | null | null | check_pyexample.py | phobson/pyexample | 133598954b514a80dc0f65c02c8740b626e569d2 | [
"BSD-3-Clause"
] | null | null | null | check_pyexample.py | phobson/pyexample | 133598954b514a80dc0f65c02c8740b626e569d2 | [
"BSD-3-Clause"
] | null | null | null | import sys
import matplotlib
# Select the non-interactive Agg backend before anything imports pyplot, so
# this check can run headless (e.g. on CI).
matplotlib.use('agg')
import pyexample

# Run the package's test entry point with any command-line arguments and
# propagate its status as the process exit code.
status = pyexample.test(*sys.argv[1:])
sys.exit(status)
| 15.625 | 38 | 0.768 |
bfd4d8af11069accf59edc84b703d1c021559b4c | 2,299 | py | Python | boa_web/js/plugin.py | atharva-naik/boa_web | 5718135d0e52440a2018b63eabe14cd3f78671bd | [
"MIT"
] | null | null | null | boa_web/js/plugin.py | atharva-naik/boa_web | 5718135d0e52440a2018b63eabe14cd3f78671bd | [
"MIT"
] | null | null | null | boa_web/js/plugin.py | atharva-naik/boa_web | 5718135d0e52440a2018b63eabe14cd3f78671bd | [
"MIT"
] | null | null | null | import os, requests, pathlib
class JSPlugin:
    """A JavaScript plugin served either straight from its CDN urls or from
    the local static folder (after downloading the formatted sources).

    __str__ renders the ``<script>`` include tags for the plugin; when not
    using the CDN, a templated static url (presumably Flask/Jinja ``url_for``
    — confirm against the consuming templates) is emitted instead.
    """

    def __init__(self, **kwargs):
        self.kwargs = kwargs
        self.name = "plugin"  # the name of the javascript plugin
        self.static = self.kwargs.get("static", "./static/")  # path of static folder
        self.cdn_urls = kwargs.get("cdn_urls", ["https://www.cdn.com/<plugin>.js"])  # cdn urls to formatted js
        self.cdn_min_urls = kwargs.get("cdn_min_urls", ["https://www.cdn.com/<plugin>.min.js"])  # cdn urls to minified js
        # Map every known CDN url to its local path under the static folder.
        self.file_map = {}
        for url in self.cdn_urls + self.cdn_min_urls:
            self.file_map[url] = os.path.join(self.static, pathlib.Path(url).name)
        self.download = self.kwargs.get("download", True)  # download to static (cdn is used otherwise)
        self.force_download = self.kwargs.get("force_download", False)  # force download from cdn link every time.
        self.use_cdn = not self.download  # serve straight from the cdn
        self._codes = []  # source text of each file downloaded this session
        self._fetch()  # fetch js files if needed

    def _fetch(self):
        """Download the formatted JS files into the static folder when
        ``download`` is set and the file is missing (or ``force_download``)."""
        if not self.download:
            return
        for url in self.cdn_urls:
            path = self.file_map[url]
            if self.force_download or not os.path.exists(path):
                code = requests.get(url).text
                self._codes.append(code)
                # Context manager so the handle is closed promptly (the
                # original leaked the file object from open(...).write(...)).
                with open(path, "w") as fp:
                    fp.write(code)

    def __str__(self):
        """Render the <script> include tags for this plugin."""
        # Prefer whichever CDN list is more complete (strictly longer).
        urls = (self.cdn_min_urls
                if len(self.cdn_min_urls) > len(self.cdn_urls)
                else self.cdn_urls)
        includes = ""
        # Bug fix: the loop previously iterated self.cdn_min_urls, leaving
        # the computed ``urls`` selection unused.
        for url in urls:
            if self.use_cdn:
                includes += f'<script src="{url}"></script>\n'
            else:
                static_url = "{{ " + f"url_for('static', '{self.name}')" + " }}"
                includes += f'<script src="{static_url}"></script>\n'
        return includes
437e553987342f51c50887918f3f0ff9535b8a10 | 8,259 | py | Python | opensocial/simplejson/__init__.py | gitdaniel228/realtor | 4366d57b064be87b31c8a036b3ed7a99b2036461 | [
"BSD-3-Clause"
] | 6 | 2015-01-23T20:09:49.000Z | 2015-01-25T12:28:16.000Z | opensocial/simplejson/__init__.py | gitdaniel228/realtor | 4366d57b064be87b31c8a036b3ed7a99b2036461 | [
"BSD-3-Clause"
] | null | null | null | opensocial/simplejson/__init__.py | gitdaniel228/realtor | 4366d57b064be87b31c8a036b3ed7a99b2036461 | [
"BSD-3-Clause"
] | null | null | null | r"""
A simple, fast, extensible JSON encoder and decoder
JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
simplejson exposes an API familiar to uses of the standard library
marshal and pickle modules.
Encoding basic Python object hierarchies::
>>> import simplejson
>>> simplejson.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print simplejson.dumps("\"foo\bar")
"\"foo\bar"
>>> print simplejson.dumps(u'\u1234')
"\u1234"
>>> print simplejson.dumps('\\')
"\\"
>>> print simplejson.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> simplejson.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Decoding JSON::
>>> import simplejson
>>> simplejson.loads('["foo", {"bar":["baz", null, 1.0, 2]}]')
[u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> simplejson.loads('"\\"foo\\bar"')
u'"foo\x08ar'
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> simplejson.load(io)
[u'streaming API']
Specializing JSON object decoding::
>>> import simplejson
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> simplejson.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
Extending JSONEncoder::
>>> import simplejson
>>> class ComplexEncoder(simplejson.JSONEncoder):
... def default(self, obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... return simplejson.JSONEncoder.default(self, obj)
...
>>> simplejson.dumps(2 + 1j, cls=ComplexEncoder)
'[2.0, 1.0]'
>>> ComplexEncoder().encode(2 + 1j)
'[2.0, 1.0]'
>>> list(ComplexEncoder().iterencode(2 + 1j))
['[', '2.0', ', ', '1.0', ']']
Note that the JSON produced by this module is a subset of YAML,
so it may be used as a serializer for that as well.
"""
__version__ = '1.3'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONEncoder',
]
from decoder import JSONDecoder
from encoder import JSONEncoder
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, **kw):
    """
    Serialize ``obj`` to ``fp`` (a ``.write()``-supporting file-like object)
    as a JSON formatted stream.

    ``skipkeys``: when ``True``, dict keys that are not of a basic type
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    are skipped instead of raising ``TypeError``.

    ``ensure_ascii``: when ``False``, some chunks written to ``fp`` may be
    ``unicode`` instances; ``fp.write()`` must understand ``unicode`` (as in
    ``codecs.getwriter()``) or this is likely to error.

    ``check_circular``: when ``False``, the circular-reference check for
    container types is skipped; a circular structure then raises
    ``OverflowError`` (or worse).

    ``allow_nan``: when ``False``, serializing out-of-range ``float`` values
    (``nan``, ``inf``, ``-inf``) raises ``ValueError`` in strict compliance
    with the JSON specification, instead of emitting the JavaScript
    equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    ``cls``: a ``JSONEncoder`` subclass to use (e.g. one overriding
    ``.default()`` to serialize additional types).
    """
    encoder_cls = JSONEncoder if cls is None else cls
    encoder = encoder_cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
                          check_circular=check_circular, allow_nan=allow_nan,
                          **kw)
    # Stream the document chunk by chunk; this keeps memory bounded and
    # (per the original note) is easier to debug than writelines().
    for chunk in encoder.iterencode(obj):
        fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, **kw):
    """
    Serialize ``obj`` to a JSON formatted ``str``.

    ``skipkeys``: when ``True``, dict keys that are not of a basic type
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    are skipped instead of raising ``TypeError``.

    ``ensure_ascii``: when ``False``, the return value may be a ``unicode``
    instance (normal ``str``/``unicode`` coercion rules apply) instead of an
    ASCII-escaped ``str``.

    ``check_circular``: when ``False``, the circular-reference check for
    container types is skipped; a circular structure then raises
    ``OverflowError`` (or worse).

    ``allow_nan``: when ``False``, serializing out-of-range ``float`` values
    (``nan``, ``inf``, ``-inf``) raises ``ValueError`` in strict compliance
    with the JSON specification.

    ``cls``: a ``JSONEncoder`` subclass to use (e.g. one overriding
    ``.default()`` to serialize additional types).
    """
    encoder_cls = JSONEncoder if cls is None else cls
    encoder = encoder_cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
                          check_circular=check_circular, allow_nan=allow_nan,
                          **kw)
    return encoder.encode(obj)
def load(fp, encoding=None, cls=None, object_hook=None, **kw):
    """
    Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
    a JSON document) to a Python object.

    ``encoding``: required when the contents of ``fp`` use an ASCII-based
    encoding other than utf-8 (e.g. latin-1). Non-ASCII-based encodings
    (such as UCS-2) are not allowed; wrap with
    ``codecs.getreader(fp)(encoding)`` or decode to ``unicode`` and use
    ``loads()``.

    ``object_hook``: optional callable invoked with the result of every
    object literal decode (a ``dict``); its return value replaces the dict.
    Useful for custom decoders (e.g. JSON-RPC class hinting).

    ``cls``: a ``JSONDecoder`` subclass to use instead of the default.
    """
    decoder_cls = JSONDecoder if cls is None else cls
    if object_hook is not None:
        kw['object_hook'] = object_hook
    document = fp.read()
    return decoder_cls(encoding=encoding, **kw).decode(document)
def loads(s, encoding=None, cls=None, object_hook=None, **kw):
    """
    Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
    document) to a Python object.

    ``encoding``: required when ``s`` is a ``str`` in an ASCII-based encoding
    other than utf-8 (e.g. latin-1). Non-ASCII-based encodings (such as
    UCS-2) are not allowed; decode those to ``unicode`` first.

    ``object_hook``: optional callable invoked with the result of every
    object literal decode (a ``dict``); its return value replaces the dict.
    Useful for custom decoders (e.g. JSON-RPC class hinting).

    ``cls``: a ``JSONDecoder`` subclass to use instead of the default.
    """
    decoder_cls = JSONDecoder if cls is None else cls
    if object_hook is not None:
        kw['object_hook'] = object_hook
    return decoder_cls(encoding=encoding, **kw).decode(s)
def read(s):
    """
    json-py API compatibility hook. Use loads(s) instead.
    """
    # Deprecated shim kept for callers of the old json-py API: warn, then
    # delegate to loads().
    import warnings
    warnings.warn("simplejson.loads(s) should be used instead of read(s)",
        DeprecationWarning)
    return loads(s)
def write(obj):
    """
    json-py API compatibility hook. Use dumps(s) instead.
    """
    # Deprecated shim kept for callers of the old json-py API: warn, then
    # delegate to dumps().
    import warnings
    warnings.warn("simplejson.dumps(s) should be used instead of write(s)",
        DeprecationWarning)
    return dumps(obj)
| 37.202703 | 78 | 0.630585 |
763cb952c1ae596479123324a6b588bce60fbaa8 | 1,602 | py | Python | siphon/cdmr/xarray_support.py | dopplershift/siphon | 2508ad35b0e5de12e3ed3af2c64f013439301940 | [
"MIT"
] | null | null | null | siphon/cdmr/xarray_support.py | dopplershift/siphon | 2508ad35b0e5de12e3ed3af2c64f013439301940 | [
"MIT"
] | null | null | null | siphon/cdmr/xarray_support.py | dopplershift/siphon | 2508ad35b0e5de12e3ed3af2c64f013439301940 | [
"MIT"
] | null | null | null | # Copyright (c) 2016 University Corporation for Atmospheric Research/Unidata.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Implement an experimental backend for using xarray to talk to TDS over CDMRemote."""
from xarray import Variable
from xarray.backends.common import AbstractDataStore
from xarray.core import indexing
from xarray.core.utils import FrozenOrderedDict
from . import Dataset
class CDMRemoteStore(AbstractDataStore):
"""Manage a store for accessing CDMRemote datasets with Siphon."""
def __init__(self, url, deflate=None):
"""Initialize the data store."""
self.ds = Dataset(url)
if deflate is not None:
self.ds.cdmr.deflate = deflate
@staticmethod
def open_store_variable(var):
"""Turn CDMRemote variable into something like a numpy.ndarray."""
data = indexing.LazilyIndexedArray(var)
return Variable(var.dimensions, data, {a: getattr(var, a) for a in var.ncattrs()})
def get_variables(self):
"""Get the variables from underlying data set."""
return FrozenOrderedDict((k, self.open_store_variable(v))
for k, v in self.ds.variables.items())
def get_attrs(self):
"""Get the global attributes from underlying data set."""
return FrozenOrderedDict((a, getattr(self.ds, a)) for a in self.ds.ncattrs())
def get_dimensions(self):
"""Get the dimensions from underlying data set."""
return FrozenOrderedDict((k, len(v)) for k, v in self.ds.dimensions.items())
| 39.073171 | 90 | 0.689139 |
ff221e9ebe063a85b8b06891fef6542be1a398f7 | 2,241 | py | Python | oslo/torch/nn/parallel/expert_parallel/utils.py | lipovsek/oslo | c2cde6229068808bf691e200f8af8c97c1631eb4 | [
"Apache-2.0"
] | null | null | null | oslo/torch/nn/parallel/expert_parallel/utils.py | lipovsek/oslo | c2cde6229068808bf691e200f8af8c97c1631eb4 | [
"Apache-2.0"
] | null | null | null | oslo/torch/nn/parallel/expert_parallel/utils.py | lipovsek/oslo | c2cde6229068808bf691e200f8af8c97c1631eb4 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn.functional as F
from torch import Tensor
from ._ops import OSLO_EP_KERNEL_FLAG
def get_current_device():
    """Return the torch device for this process: the currently selected CUDA
    device when CUDA is available, otherwise the CPU."""
    if not torch.cuda.is_available():
        return torch.device("cpu")
    return torch.device("cuda", torch.cuda.current_device())
def cum_sum_d0_minus_one(inputs: Tensor):
    """Return ``cumsum(inputs, dim=0) - 1``, dispatching to the fused CUDA
    kernel when the leading dimension is compatible and the kernel flag is
    set; otherwise fall back to plain torch ops."""
    dim0 = inputs.size(0)
    # Kernel-compatible sizes: up to 1024, even sizes up to 2048, or any
    # multiple of four.
    kernel_ok = (
        dim0 <= 1024
        or (dim0 % 2 == 0 and dim0 <= 2048)
        or dim0 % 4 == 0
    )

    if kernel_ok and OSLO_EP_KERNEL_FLAG:
        from ._ops import oslo_expert_parallel_cuda

        return oslo_expert_parallel_cuda.cumsum_sub_one(inputs)

    return torch.cumsum(inputs, dim=0) - 1
def auto_cast_softmax(logit: torch.Tensor, dim: int):
    """Softmax computed in float32: any non-float32 logit is promoted first
    so the normalization is numerically done at (at least) single precision."""
    promoted = logit if logit.dtype == torch.float32 else logit.float()
    return F.softmax(promoted, dim=dim)
class _ForceFP32Parameter(torch.nn.Parameter):
    """Parameter whose ``half()`` is a no-op, so precision-lowering sweeps
    cannot downcast it."""

    def half(self):
        # Return the underlying tensor unchanged instead of a half-precision
        # copy. NOTE(review): this returns self.data (a plain Tensor), not
        # the Parameter itself — presumably intentional; confirm callers.
        return self.data
class NormalNoiseSampler:
    """
    Samples additive noise for a logit tensor.

    All noise is drawn from a normal distribution N(0, 1 / num_experts^2).

    Args:
        num_experts (int): the number of experts
    """

    def __init__(self, num_experts: int):
        device = get_current_device()
        mean = torch.tensor(0.0, device=device)
        std = torch.tensor(1.0 / num_experts**2, device=device)
        # rsample keeps sampling differentiable (reparameterization trick).
        self.popl = torch.distributions.normal.Normal(loc=mean, scale=std).rsample

    def __call__(self, inputs: torch.Tensor):
        # Additive noise: one independent draw per element of ``inputs``.
        noise = self.popl(inputs.shape)

        return inputs + noise
class UniformNoiseSampler:
    """
    Samples multiplicative noise for a logit tensor.

    All noise is drawn from a uniform distribution uniform(1.0 - eps, 1.0 + eps).

    Args:
        eps (float, optional): Half-width of the sampling interval, defaults 1e-2.
    """

    def __init__(self, eps: float = 1e-2):
        device = get_current_device()
        lower_bound = torch.tensor(1.0 - eps, device=device)
        upper_bound = torch.tensor(1.0 + eps, device=device)
        # rsample keeps sampling differentiable (reparameterization trick).
        self.popl = torch.distributions.uniform.Uniform(
            low=lower_bound, high=upper_bound
        ).rsample

    def __call__(self, inputs: torch.Tensor):
        # Multiplicative noise: scale each element by an independent draw.
        noise = self.popl(inputs.shape)

        return inputs * noise
| 27.329268 | 86 | 0.656403 |
a130018ecbfe6e399f43567a15a40e5dd8f8b340 | 34,410 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_04_01/operations/_vpn_gateways_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_04_01/operations/_vpn_gateways_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_04_01/operations/_vpn_gateways_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VpnGatewaysOperations(object):
"""VpnGatewaysOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client plus (de)serializers shared by every operation in
        # this group; instances are created by the service client, not
        # directly by users (see the class docstring).
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def get(
        self,
        resource_group_name,  # type: str
        gateway_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.VpnGateway"
        """Retrieves the details of a virtual wan vpn gateway.

        :param resource_group_name: The resource group name of the VpnGateway.
        :type resource_group_name: str
        :param gateway_name: The name of the gateway.
        :type gateway_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: VpnGateway, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_04_01.models.VpnGateway
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.VpnGateway"]
        # Map auth/404/409 responses onto azure-core exception types; callers
        # may override or extend the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        accept = "application/json"

        # Construct URL from the templated metadata path below.
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Anything other than 200 is surfaced as an exception (mapped above,
        # else a generic HttpResponseError with the deserialized service error).
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('VpnGateway', pipeline_response)

        if cls:
            # Hand the raw pipeline response to the caller-supplied callback.
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'}  # type: ignore
    def _create_or_update_initial(
        self,
        resource_group_name,  # type: str
        gateway_name,  # type: str
        vpn_gateway_parameters,  # type: "models.VpnGateway"
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.VpnGateway"
        """Initial PUT request of the create-or-update long-running operation.

        Sends the HTTP PUT and deserializes the immediate 200/201 body; the
        public ``begin_create_or_update`` wraps this with LRO polling.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.VpnGateway"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the gateway model as the request body.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(vpn_gateway_parameters, 'VpnGateway')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        # 200 = update of an existing gateway, 201 = newly created; both
        # bodies deserialize to the same model type.
        if response.status_code == 200:
            deserialized = self._deserialize('VpnGateway', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('VpnGateway', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'}  # type: ignore
def begin_create_or_update(
    self,
    resource_group_name,  # type: str
    gateway_name,  # type: str
    vpn_gateway_parameters,  # type: "models.VpnGateway"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["models.VpnGateway"]
    """Creates a virtual wan vpn gateway if it doesn't exist else updates the existing gateway.

    :param resource_group_name: The resource group name of the VpnGateway.
    :type resource_group_name: str
    :param gateway_name: The name of the gateway.
    :type gateway_name: str
    :param vpn_gateway_parameters: Parameters supplied to create or Update a virtual wan vpn
     gateway.
    :type vpn_gateway_parameters: ~azure.mgmt.network.v2019_04_01.models.VpnGateway
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either VpnGateway or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_04_01.models.VpnGateway]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # polling: True -> ARMPolling, False -> NoPolling, else a caller-supplied
    # PollingMethod object (see docstring).
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["models.VpnGateway"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved poller state: issue the initial PUT. cls is overridden so
        # the raw pipeline response is handed to the poller untouched.
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            gateway_name=gateway_name,
            vpn_gateway_parameters=vpn_gateway_parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These kwargs were consumed by the initial call and must not be
    # forwarded into the polling method below.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal LRO response into the result model.
        deserialized = self._deserialize('VpnGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a poller from previously saved state.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'}  # type: ignore
def _update_tags_initial(
    self,
    resource_group_name,  # type: str
    gateway_name,  # type: str
    vpn_gateway_parameters,  # type: "models.TagsObject"
    **kwargs  # type: Any
):
    # type: (...) -> "models.VpnGateway"
    """Issue the raw PATCH request that updates a VpnGateway's tags.

    Internal first step of the long-running operation started by
    ``begin_update_tags``; returns the deserialized gateway from the
    immediate response.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.VpnGateway"]
    # Default HTTP error code -> azure-core exception mapping; callers may
    # extend or override it via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-04-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._update_tags_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the tags object and send the PATCH through the pipeline.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(vpn_gateway_parameters, 'TagsObject')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.Error, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    # Both 200 and 201 responses carry a VpnGateway payload.
    if response.status_code == 200:
        deserialized = self._deserialize('VpnGateway', pipeline_response)
    if response.status_code == 201:
        deserialized = self._deserialize('VpnGateway', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'}  # type: ignore
def begin_update_tags(
    self,
    resource_group_name,  # type: str
    gateway_name,  # type: str
    vpn_gateway_parameters,  # type: "models.TagsObject"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["models.VpnGateway"]
    """Updates virtual wan vpn gateway tags.

    :param resource_group_name: The resource group name of the VpnGateway.
    :type resource_group_name: str
    :param gateway_name: The name of the gateway.
    :type gateway_name: str
    :param vpn_gateway_parameters: Parameters supplied to update a virtual wan vpn gateway tags.
    :type vpn_gateway_parameters: ~azure.mgmt.network.v2019_04_01.models.TagsObject
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either VpnGateway or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_04_01.models.VpnGateway]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["models.VpnGateway"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: send the initial PATCH; cls is overridden so the
        # raw pipeline response is returned for the poller.
        raw_result = self._update_tags_initial(
            resource_group_name=resource_group_name,
            gateway_name=gateway_name,
            vpn_gateway_parameters=vpn_gateway_parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Consumed by the initial call; must not leak into the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal LRO response into the result model.
        deserialized = self._deserialize('VpnGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # Note: unlike create_or_update, no explicit 'final-state-via' override.
    if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'}  # type: ignore
def _delete_initial(
    self,
    resource_group_name,  # type: str
    gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Issue the raw DELETE request for a VpnGateway.

    Internal first step of the long-running operation started by
    ``begin_delete``. Returns ``None`` unless a custom ``cls`` callback
    is supplied via kwargs.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default HTTP error -> exception mapping, extensible via 'error_map'.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-04-01"
    accept = "application/json"

    # Expand the URL template with the serialized path arguments.
    url_template = self._delete_initial.metadata['url']  # type: ignore
    url = self._client.format_url(
        url_template,
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        gatewayName=self._serialize.url("gateway_name", gateway_name, 'str'),
    )

    # Query string and headers for the request.
    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    header_parameters = {
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in (200, 202, 204):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.Error, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'}  # type: ignore
def begin_delete(
    self,
    resource_group_name,  # type: str
    gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Deletes a virtual wan vpn gateway.

    :param resource_group_name: The resource group name of the VpnGateway.
    :type resource_group_name: str
    :param gateway_name: The name of the gateway.
    :type gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: send the initial DELETE; cls is overridden so the
        # raw pipeline response is returned for the poller.
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            gateway_name=gateway_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Consumed by the initial call; must not leak into the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Delete has no payload: implicitly returns None unless a custom
        # cls callback was supplied.
        if cls:
            return cls(pipeline_response, None, {})

    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'}  # type: ignore
def _reset_initial(
    self,
    resource_group_name,  # type: str
    gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Optional["models.VpnGateway"]
    """Issue the raw POST to the gateway's ``/reset`` action.

    Internal first step of the long-running operation started by
    ``begin_reset``. Returns a VpnGateway on 200, None on 202 (accepted).
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["models.VpnGateway"]]
    # Default HTTP error -> exception mapping, extensible via 'error_map'.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-04-01"
    accept = "application/json"

    # Construct URL
    url = self._reset_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Note: unlike the other *_initial helpers, errors here are raised
    # without deserializing a model into HttpResponseError.
    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('VpnGateway', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_reset_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/reset'}  # type: ignore
def begin_reset(
    self,
    resource_group_name,  # type: str
    gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["models.VpnGateway"]
    """Resets the primary of the vpn gateway in the specified resource group.

    :param resource_group_name: The resource group name of the VpnGateway.
    :type resource_group_name: str
    :param gateway_name: The name of the gateway.
    :type gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either VpnGateway or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_04_01.models.VpnGateway]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["models.VpnGateway"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: send the initial POST; cls is overridden so the
        # raw pipeline response is returned for the poller.
        raw_result = self._reset_initial(
            resource_group_name=resource_group_name,
            gateway_name=gateway_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Consumed by the initial call; must not leak into the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal LRO response into the result model.
        deserialized = self._deserialize('VpnGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reset.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/reset'}  # type: ignore
def list_by_resource_group(
    self,
    resource_group_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["models.ListVpnGatewaysResult"]
    """Lists all the VpnGateways in a resource group.

    :param resource_group_name: The resource group name of the VpnGateway.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ListVpnGatewaysResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_04_01.models.ListVpnGatewaysResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.ListVpnGatewaysResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-04-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # First page: build the full URL with path and query parameters.
        # Later pages: the service-provided next_link is already complete.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            # Construct URL
            url = self.list_by_resource_group.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Return (continuation link or None, iterator over this page's items).
        deserialized = self._deserialize('ListVpnGatewaysResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch one page through the pipeline, raising on non-200 responses.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize(models.Error, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways'}  # type: ignore
def list(
    self,
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["models.ListVpnGatewaysResult"]
    """Lists all the VpnGateways in a subscription.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ListVpnGatewaysResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_04_01.models.ListVpnGatewaysResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.ListVpnGatewaysResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-04-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # First page: build the subscription-level URL.
        # Later pages: the service-provided next_link is already complete.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            # Construct URL
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Return (continuation link or None, iterator over this page's items).
        deserialized = self._deserialize('ListVpnGatewaysResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch one page through the pipeline, raising on non-200 responses.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize(models.Error, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/vpnGateways'}  # type: ignore
| 47.991632 | 188 | 0.655681 |
55947787237c36d099e4eb33c69c8bc2b7eb9866 | 3,745 | py | Python | statbus/routes/rounds.py | psykzz/statbus | 67fb9ed487b45cdab36b61e7d037952a13fff492 | [
"MIT"
] | null | null | null | statbus/routes/rounds.py | psykzz/statbus | 67fb9ed487b45cdab36b61e7d037952a13fff492 | [
"MIT"
] | null | null | null | statbus/routes/rounds.py | psykzz/statbus | 67fb9ed487b45cdab36b61e7d037952a13fff492 | [
"MIT"
] | null | null | null | from datetime import date, datetime, timedelta
from flask import Blueprint, render_template, request, abort
from playhouse.flask_utils import PaginatedQuery
from statbus.models.player import Player
from statbus.models.round import Round
from statbus.ext import cache
from statbus.utils import github
bp = Blueprint("rounds", __name__)
def rolling_win_rates(game_states, rounds, period=7, granular_hours=24):
results = {}
now = date.today()
tmpl = {}
for state in game_states:
tmpl[state] = -1
for window in range(period):
results[window] = tmpl.copy()
print(results)
@bp.route("/round")
@bp.route("/rounds")
@cache.cached()
def index():
rounds = Round.select().order_by(Round.id.desc())
pages = PaginatedQuery(rounds, 10)
return render_template("rounds/rounds.html", pages=pages)
@bp.route("/rounds/<int:round_id>")
@cache.memoize()
def detail(round_id):
print(datetime.now())
round_info = Round.select().where(Round.id == round_id).first()
if not round_info or not round_info.end_datetime:
abort(404)
print(datetime.now())
pr_list = round_info.merged_prs.values()
print(datetime.now())
balance_prs = github.get_balance_prs(pr_list)
print(datetime.now())
return render_template(
"rounds/round_info.html", round_info=round_info, balance_prs=balance_prs
)
@bp.route("/rounds/<string:player_name>")
@cache.memoize()
def by_player(player_name):
player = Player.select().where(Player.ckey == player_name).first()
if not player:
abort(404)
pages = PaginatedQuery(player.rounds, 10)
return render_template("rounds/rounds.html", pages=pages, for_player=player_name)
@bp.route("/rounds/winrates")
@cache.cached()
def recent_winrates():
colors_template = {
"Xeno": ("rgb(147,112,219)", "rgb(138,43,226)"),
"Marine": ("rgb(30,144,255)", "rgb(0,0,255)"),
"default": ("rgb(211,211,211)", "rgb(175,175,175)"),
}
colors = {}
date_range = min(
30, int(request.args.get("limit", 7))
) # Cap at 30, so we don't hammer a db
rounds = (
Round.select()
.where(
Round.initialize_datetime > (datetime.now() - timedelta(days=date_range))
)
.order_by(Round.id.desc())
)
game_results = set(
[r.game_mode_result for r in rounds if r.game_mode_result is not None]
)
date_range_iter = reversed(range(date_range))
today = date.today()
time_periods = [today - timedelta(days=day) for day in date_range_iter]
day_results = {}
for result in game_results:
# Assign colours - need a better way
if result not in colors:
colors[result] = {}
if result.startswith("Xeno"):
colors[result]["background"] = colors_template["Xeno"][0]
colors[result]["border"] = colors_template["Xeno"][1]
elif result.startswith("Marine"):
colors[result]["background"] = colors_template["Marine"][0]
colors[result]["border"] = colors_template["Marine"][1]
else:
colors[result]["background"] = colors_template["default"][0]
colors[result]["border"] = colors_template["default"][1]
day_results[result] = {}
for period in time_periods:
wins = [
1
for r in rounds
if r.game_mode_result == result
and r.initialize_datetime.day == period.day
]
day_results[result][period] = sum(wins)
return render_template(
"rounds/winrates.html",
labels=time_periods,
day_results=day_results,
colors=colors,
)
| 28.807692 | 85 | 0.622697 |
3c88b1ea2cc8ce3d47898a92fb657b5bef136ca4 | 1,074 | py | Python | tests/ifchain_test.py | targetsm/dace | 297b12804a334df8cc6fad5250d5fb0cce20dc6e | [
"BSD-3-Clause"
] | null | null | null | tests/ifchain_test.py | targetsm/dace | 297b12804a334df8cc6fad5250d5fb0cce20dc6e | [
"BSD-3-Clause"
] | null | null | null | tests/ifchain_test.py | targetsm/dace | 297b12804a334df8cc6fad5250d5fb0cce20dc6e | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2019-2020 ETH Zurich and the DaCe authors. All rights reserved.
import dace
import numpy as np
@dace.program
def noelse(A: dace.float32[1]):
if A[0] > 0:
@dace.tasklet
def mytask():
o >> A[0]
o = 5
# DaCe program exercising a chain of independent `if` statements plus a final
# if/else. Starting from A[0] == 1: first branch (>0) sets 0, second (>1) is
# then false, third (>0) is false so the else sets 9.
@dace.program
def ifchain(A: dace.float32[1]):
    if A[0] > 0:
        @dace.tasklet
        def mytask():
            o >> A[0]
            o = 0
    if A[0] > 1:
        @dace.tasklet
        def mytask():
            o >> A[0]
            o = 1
    if A[0] > 0:
        @dace.tasklet
        def mytask():
            o >> A[0]
            o = -5
    else:
        @dace.tasklet
        def mytask():
            o >> A[0]
            o = 9
if __name__ == '__main__':
    # One shared single-element buffer is threaded through both programs.
    buf = np.array([1], dtype=np.float32)

    print('If without else test')
    noelse(buf)
    if buf[0] != 5:
        print("ERROR in test: %f != 5" % buf[0])
        exit(1)

    print('If chain test')
    ifchain(buf)
    if buf[0] != 9:
        print("ERROR in test: %f != 9" % buf[0])
        exit(1)

    print("Success!")
    exit(0)
| 17.606557 | 75 | 0.439479 |
cdd8965ce3419cbb6cbc7e54d9e775a988965e09 | 804 | py | Python | analysis/lib/stats/__init__.py | astutespruce/sa-blueprint-sv | 816dbad42b9fcfc771d9852bcb257f3e4606126b | [
"MIT"
] | null | null | null | analysis/lib/stats/__init__.py | astutespruce/sa-blueprint-sv | 816dbad42b9fcfc771d9852bcb257f3e4606126b | [
"MIT"
] | null | null | null | analysis/lib/stats/__init__.py | astutespruce/sa-blueprint-sv | 816dbad42b9fcfc771d9852bcb257f3e4606126b | [
"MIT"
] | null | null | null | from analysis.lib.stats.blueprint import (
extract_by_geometry as extract_blueprint_by_geometry,
summarize_by_huc12 as summarize_bluprint_by_huc12,
summarize_by_marine_block as summarize_bluprint_by_marine_block,
)
from analysis.lib.stats.ownership import (
summarize_by_huc12 as summarize_ownership_by_huc12,
)
from analysis.lib.stats.counties import (
summarize_by_huc12 as summarize_counties_by_huc12,
)
from analysis.lib.stats.urban import (
extract_by_geometry as extract_urban_by_geometry,
summarize_by_huc12 as summarize_urban_by_huc12,
)
from analysis.lib.stats.slr import (
extract_by_geometry as extract_slr_by_geometry,
summarize_by_huc12 as summarize_slr_by_huc12,
)
from analysis.lib.stats.parca import summarize_by_huc12 as summarize_parca_by_huc12
| 30.923077 | 83 | 0.830846 |
f51ec755111a2b98c01218d6780949ec784022b3 | 1,279 | py | Python | barplot FISH results.py | BioJoe/automated-FISH-analyses | c2859fe9ee8fc122e3651537ead3c5ccb7e270a1 | [
"MIT"
] | null | null | null | barplot FISH results.py | BioJoe/automated-FISH-analyses | c2859fe9ee8fc122e3651537ead3c5ccb7e270a1 | [
"MIT"
] | null | null | null | barplot FISH results.py | BioJoe/automated-FISH-analyses | c2859fe9ee8fc122e3651537ead3c5ccb7e270a1 | [
"MIT"
] | null | null | null | #load python included modules
import tkinter as tk
from tkinter import filedialog
#load additional python modules
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Script: interactively pick a compiled FISH results spreadsheet and draw a
# bar plot of aneuploidy ratio per genotype (mean bar + SD error bars) with
# individual cyst measurements overlaid as dots.

# Hidden Tk root so only the file dialog is shown, not an empty window.
root = tk.Tk()
root.withdraw()
# Column names expected in the spreadsheet (x = category, y = measurement).
x_axis_name = "genotype"
y_axis_name = "aneuploidy ratio"
# Ask the user for the Excel data file.
file_path = filedialog.askopenfilename(
    title = "Select a compiled FISH results file")
data = pd.read_excel(file_path, index_col=None)
# Genotype order for plotting = first-appearance order in the data.
ls_gt = list(data['genotype'].unique())
# Create the figure; narrow/tall aspect suits few categories.
plt.figure(figsize=(4,6))
plt.rcParams.update({'font.size': 18})
ax = plt.subplot(1,1,1)
ax.tick_params(axis='x', which='major', labelsize=10)
# Plot mean bars with standard-deviation error bars (ci="sd").
sns.barplot(x=x_axis_name, y=y_axis_name,
            data=data, errwidth=1, capsize=0.4,
            color="white", ci="sd", linewidth=1,
            edgecolor="black", order = ls_gt)
# Overlay the individual cyst measurements as jittered dots.
sns.stripplot(x=x_axis_name, y=y_axis_name,
              data=data, jitter=True, color="green",
              size=4, linewidth=0, order = ls_gt)
# Format layout: rotate genotype labels, add vertical headroom.
plt.xlabel('')
plt.xticks(rotation=90)
plt.margins(x=None, y=0.2, tight=True)
plt.tight_layout()
# Show the plot (blocks until the window is closed).
plt.show()
| 26.102041 | 54 | 0.68491 |
6f624ac4bd9792b25f272f56fe4c7a93d9356f34 | 20,049 | py | Python | mayan/apps/sources/views.py | Fourdee/mayan-edms | 39a94f8b4fed519a3b20ab419e920ea53c11eb84 | [
"Apache-2.0"
] | null | null | null | mayan/apps/sources/views.py | Fourdee/mayan-edms | 39a94f8b4fed519a3b20ab419e920ea53c11eb84 | [
"Apache-2.0"
] | null | null | null | mayan/apps/sources/views.py | Fourdee/mayan-edms | 39a94f8b4fed519a3b20ab419e920ea53c11eb84 | [
"Apache-2.0"
] | 1 | 2020-02-05T18:07:08.000Z | 2020-02-05T18:07:08.000Z | from __future__ import absolute_import, unicode_literals
import logging
from django.contrib import messages
from django.http import HttpResponseRedirect, JsonResponse
from django.shortcuts import get_object_or_404
from django.urls import reverse, reverse_lazy
from django.utils.encoding import force_text, uri_to_iri
from django.utils.translation import ugettext_lazy as _
from acls.models import AccessControlList
from checkouts.models import NewVersionBlock
from common import menu_facet
from common.models import SharedUploadedFile
from common.utils import encapsulate
from common.views import (
ConfirmView, MultiFormView, SingleObjectCreateView,
SingleObjectDeleteView, SingleObjectEditView, SingleObjectListView
)
from common.widgets import two_state_template
from documents.models import DocumentType, Document
from documents.permissions import (
permission_document_create, permission_document_new_version
)
from documents.tasks import task_upload_new_version
from navigation import Link
from .exceptions import SourceException
from .forms import (
NewDocumentForm, NewVersionForm, WebFormUploadForm, WebFormUploadFormHTML5
)
from .icons import icon_upload_view_link
from .literals import SOURCE_UNCOMPRESS_CHOICE_ASK, SOURCE_UNCOMPRESS_CHOICE_Y
from .models import (
InteractiveSource, Source, SaneScanner, StagingFolderSource
)
from .permissions import (
permission_sources_setup_create, permission_sources_setup_delete,
permission_sources_setup_edit, permission_sources_setup_view,
permission_staging_file_delete
)
from .tasks import task_check_interval_source, task_source_handle_upload
from .utils import get_class, get_form_class, get_upload_form_class
logger = logging.getLogger(__name__)
class SourceLogListView(SingleObjectListView):
    """List the log entries recorded for a single document source."""
    view_permission = permission_sources_setup_view

    def get_source(self):
        # Resolve the concrete source subclass from the URL pk or 404.
        return get_object_or_404(
            Source.objects.select_subclasses(), pk=self.kwargs['pk']
        )

    def get_object_list(self):
        return self.get_source().logs.all()

    def get_extra_context(self):
        source = self.get_source()
        return {
            'hide_object': True,
            'object': source,
            'title': _('Log entries for source: %s') % source,
        }
class UploadBaseView(MultiFormView):
    """Common scaffolding for interactive document/version upload views.

    Renders a source form plus a document form, and builds the per-source
    navigation tab links shown above the upload widgets.  Subclasses
    (new document / new version) set ``self.tab_links`` in ``dispatch``.
    """
    template_name = 'appearance/generic_form.html'
    # Form prefixes used by MultiFormView to namespace POST data.
    prefixes = {'source_form': 'source', 'document_form': 'document'}

    @staticmethod
    def get_tab_link_for_source(source, document=None):
        # With a document the tab targets the new-version upload view,
        # otherwise the new-document upload view.
        if document:
            view = 'sources:upload_version'
            args = ('"{}"'.format(document.pk), '"{}"'.format(source.pk),)
        else:
            view = 'sources:upload_interactive'
            args = ('"{}"'.format(source.pk),)
        return Link(
            args=args,
            icon_class=icon_upload_view_link,
            keep_query=True,
            # Drop pagination state when switching source tabs.
            remove_from_query=['page'],
            text=source.label,
            view=view,
        )

    @staticmethod
    def get_active_tab_links(document=None):
        # One tab per enabled interactive source.
        return [
            UploadBaseView.get_tab_link_for_source(source, document)
            for source in InteractiveSource.objects.filter(enabled=True).select_subclasses()
        ]

    def dispatch(self, request, *args, **kwargs):
        # Pick the explicitly requested source, or fall back to the first
        # enabled interactive source.
        if 'source_id' in kwargs:
            self.source = get_object_or_404(
                Source.objects.filter(enabled=True).select_subclasses(),
                pk=kwargs['source_id']
            )
        else:
            self.source = InteractiveSource.objects.filter(
                enabled=True
            ).select_subclasses().first()
        # No usable source at all: warn and send the user to the setup list.
        if not InteractiveSource.objects.filter(enabled=True).exists():
            messages.error(
                request,
                _(
                    'No interactive document sources have been defined or '
                    'none have been enabled, create one before proceeding.'
                )
            )
            return HttpResponseRedirect(reverse('sources:setup_source_list'))
        return super(UploadBaseView, self).dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super(UploadBaseView, self).get_context_data(**kwargs)
        subtemplates_list = []
        context['source'] = self.source
        if isinstance(self.source, StagingFolderSource):
            # Staging folders additionally show the files awaiting upload;
            # listing errors are reported but do not abort rendering.
            try:
                staging_filelist = list(self.source.get_files())
            except Exception as exception:
                messages.error(self.request, exception)
                staging_filelist = []
            finally:
                subtemplates_list = [
                    {
                        'name': 'appearance/generic_multiform_subtemplate.html',
                        'context': {
                            'forms': context['forms'],
                            'title': _('Document properties'),
                        }
                    },
                    {
                        'name': 'appearance/generic_list_subtemplate.html',
                        'context': {
                            'hide_link': True,
                            'object_list': staging_filelist,
                            'title': _('Files in staging path'),
                        }
                    },
                ]
        elif isinstance(self.source, SaneScanner):
            subtemplates_list.append({
                'name': 'sources/upload_multiform_subtemplate.html',
                'context': {
                    'forms': context['forms'],
                    'is_multipart': True,
                    'title': _('Document properties'),
                    'submit_label': _('Scan'),
                },
            })
        else:
            subtemplates_list.append({
                'name': 'sources/upload_multiform_subtemplate.html',
                'context': {
                    'forms': context['forms'],
                    'is_multipart': True,
                    'title': _('Document properties'),
                },
            })
        # NOTE(review): mutates global menu state on every render so the
        # upload views show the freshly computed source tabs.
        menu_facet.bound_links['sources:upload_interactive'] = self.tab_links
        menu_facet.bound_links['sources:upload_version'] = self.tab_links
        context.update({
            'subtemplates_list': subtemplates_list,
        })
        return context
class UploadInteractiveView(UploadBaseView):
    """Upload a new document from an interactive source.

    The actual document creation is deferred to the
    ``task_source_handle_upload`` background task; this view only stores
    the uploaded payload as a ``SharedUploadedFile`` and queues the task.
    """

    def dispatch(self, request, *args, **kwargs):
        self.subtemplates_list = []
        # document_type_id may arrive via GET (HTML5 uploader) or POST.
        self.document_type = get_object_or_404(
            DocumentType,
            pk=self.request.GET.get(
                'document_type_id', self.request.POST.get('document_type_id')
            )
        )
        AccessControlList.objects.check_access(
            permissions=permission_document_create, user=request.user,
            obj=self.document_type
        )
        self.tab_links = UploadBaseView.get_active_tab_links()
        try:
            return super(
                UploadInteractiveView, self
            ).dispatch(request, *args, **kwargs)
        except Exception as exception:
            # AJAX (dropzone) clients expect a JSON error payload instead
            # of an HTML error page.
            if request.is_ajax():
                return JsonResponse(
                    data={'error': force_text(exception)}, status=500
                )
            else:
                raise

    def forms_valid(self, forms):
        # Decide whether compressed uploads should be expanded: either the
        # user chose (ASK) or the source configuration dictates it.
        if self.source.can_compress:
            if self.source.uncompress == SOURCE_UNCOMPRESS_CHOICE_ASK:
                expand = forms['source_form'].cleaned_data.get('expand')
            else:
                if self.source.uncompress == SOURCE_UNCOMPRESS_CHOICE_Y:
                    expand = True
                else:
                    expand = False
        else:
            expand = False
        try:
            uploaded_file = self.source.get_upload_file_object(
                forms['source_form'].cleaned_data
            )
        except SourceException as exception:
            messages.error(self.request, exception)
        else:
            # Persist the payload so the async task can read it later.
            shared_uploaded_file = SharedUploadedFile.objects.create(
                file=uploaded_file.file
            )
            # Optional preset filename taken from the document type.
            label = None
            if 'document_type_available_filenames' in forms['document_form'].cleaned_data:
                if forms['document_form'].cleaned_data['document_type_available_filenames']:
                    label = forms['document_form'].cleaned_data['document_type_available_filenames'].filename
            if not self.request.user.is_anonymous:
                user_id = self.request.user.pk
            else:
                user_id = None
            # Source cleanup failures are reported but non-fatal.
            try:
                self.source.clean_up_upload_file(uploaded_file)
            except Exception as exception:
                messages.error(self.request, exception)
            try:
                task_source_handle_upload.apply_async(
                    kwargs=dict(
                        description=forms['document_form'].cleaned_data.get('description'),
                        document_type_id=self.document_type.pk,
                        expand=expand,
                        label=label,
                        language=forms['document_form'].cleaned_data.get('language'),
                        querystring=uri_to_iri(
                            '?{}&{}'.format(
                                self.request.GET.urlencode(), self.request.POST.urlencode()
                            )
                        ),
                        shared_uploaded_file_id=shared_uploaded_file.pk,
                        source_id=self.source.pk,
                        user_id=user_id,
                    )
                )
            except Exception as exception:
                # Re-raise with context so the failure is both logged and
                # surfaced (as JSON for AJAX clients via dispatch()).
                message = _(
                    'Error executing document upload task; '
                    '%(exception)s, %(exception_class)s'
                ) % {
                    'exception': exception,
                    'exception_class': type(exception),
                }
                logger.critical(
                    message, exc_info=True
                )
                raise type(exception)(message)
            else:
                messages.success(
                    self.request,
                    _(
                        'New document queued for uploaded and will be available '
                        'shortly.'
                    )
                )
        # Redirect back to the same upload view, preserving the query string.
        return HttpResponseRedirect(
            '{}?{}'.format(
                reverse(
                    self.request.resolver_match.view_name,
                    kwargs=self.request.resolver_match.kwargs
                ), self.request.META['QUERY_STRING']
            ),
        )

    def create_source_form_form(self, **kwargs):
        # Only offer the 'expand' checkbox when the source leaves the
        # decompression decision to the user.
        if hasattr(self.source, 'uncompress'):
            show_expand = self.source.uncompress == SOURCE_UNCOMPRESS_CHOICE_ASK
        else:
            show_expand = False
        return self.get_form_classes()['source_form'](
            prefix=kwargs['prefix'],
            source=self.source,
            show_expand=show_expand,
            data=kwargs.get('data', None),
            files=kwargs.get('files', None),
        )

    def create_document_form_form(self, **kwargs):
        return self.get_form_classes()['document_form'](
            prefix=kwargs['prefix'],
            document_type=self.document_type,
            data=kwargs.get('data', None),
            files=kwargs.get('files', None),
        )

    def get_form_classes(self):
        source_form_class = get_upload_form_class(self.source.source_type)
        # Override source form class to enable the HTML5 file uploader
        if source_form_class == WebFormUploadForm:
            source_form_class = WebFormUploadFormHTML5
        return {
            'document_form': NewDocumentForm,
            'source_form': source_form_class
        }

    def get_context_data(self, **kwargs):
        context = super(UploadInteractiveView, self).get_context_data(**kwargs)
        context['title'] = _(
            'Upload a local document from source: %s'
        ) % self.source.label
        # Web form sources get the dropzone (HTML5) uploader wiring.
        if not isinstance(self.source, StagingFolderSource) and not isinstance(self.source, SaneScanner):
            context['subtemplates_list'][0]['context'].update(
                {
                    'form_action': '{}?{}'.format(
                        reverse(
                            self.request.resolver_match.view_name,
                            kwargs=self.request.resolver_match.kwargs
                        ), self.request.META['QUERY_STRING']
                    ),
                    'form_class': 'dropzone',
                    'form_disable_submit': True,
                    'form_id': 'html5upload',
                }
            )
        return context
class UploadInteractiveVersionView(UploadBaseView):
    """Upload a new version of an existing document from a source.

    Version creation itself is delegated to the ``task_upload_new_version``
    background task.
    """

    def dispatch(self, request, *args, **kwargs):
        self.subtemplates_list = []
        self.document = get_object_or_404(Document, pk=kwargs['document_pk'])
        # TODO: Try to remove this new version block check from here
        if NewVersionBlock.objects.is_blocked(self.document):
            messages.error(
                self.request,
                _(
                    'Document "%s" is blocked from uploading new versions.'
                ) % self.document
            )
            return HttpResponseRedirect(
                reverse(
                    'documents:document_version_list', args=(self.document.pk,)
                )
            )
        AccessControlList.objects.check_access(
            permissions=permission_document_new_version,
            user=self.request.user, obj=self.document
        )
        self.tab_links = UploadBaseView.get_active_tab_links(self.document)
        return super(
            UploadInteractiveVersionView, self
        ).dispatch(request, *args, **kwargs)

    def forms_valid(self, forms):
        try:
            uploaded_file = self.source.get_upload_file_object(
                forms['source_form'].cleaned_data
            )
        except SourceException as exception:
            messages.error(self.request, exception)
        else:
            # Persist the payload for the async task to consume.
            shared_uploaded_file = SharedUploadedFile.objects.create(
                file=uploaded_file.file
            )
            # Source cleanup failures are reported but non-fatal.
            try:
                self.source.clean_up_upload_file(uploaded_file)
            except Exception as exception:
                messages.error(self.request, exception)
            if not self.request.user.is_anonymous:
                user_id = self.request.user.pk
            else:
                user_id = None
            task_upload_new_version.apply_async(kwargs=dict(
                shared_uploaded_file_id=shared_uploaded_file.pk,
                document_id=self.document.pk,
                user_id=user_id,
                comment=forms['document_form'].cleaned_data.get('comment')
            ))
            messages.success(
                self.request,
                _(
                    'New document version queued for uploaded and will be '
                    'available shortly.'
                )
            )
        # Always land on the document's version list.
        return HttpResponseRedirect(
            reverse(
                'documents:document_version_list', args=(self.document.pk,)
            )
        )

    def create_source_form_form(self, **kwargs):
        return self.get_form_classes()['source_form'](
            prefix=kwargs['prefix'],
            source=self.source,
            show_expand=False,
            data=kwargs.get('data', None),
            files=kwargs.get('files', None),
        )

    def create_document_form_form(self, **kwargs):
        return self.get_form_classes()['document_form'](
            prefix=kwargs['prefix'],
            data=kwargs.get('data', None),
            files=kwargs.get('files', None),
        )

    def get_form_classes(self):
        return {
            'document_form': NewVersionForm,
            'source_form': get_upload_form_class(self.source.source_type)
        }

    def get_context_data(self, **kwargs):
        context = super(
            UploadInteractiveVersionView, self
        ).get_context_data(**kwargs)
        context['object'] = self.document
        context['title'] = _(
            'Upload a new version from source: %s'
        ) % self.source.label
        return context
class StagingFileDeleteView(SingleObjectDeleteView):
    """Confirm and perform deletion of a single staging folder file."""

    object_permission = permission_staging_file_delete
    object_permission_related = 'staging_folder'

    def get_source(self):
        # Staging folder addressed by the URL pk.
        return get_object_or_404(
            StagingFolderSource, pk=self.kwargs['pk']
        )

    def get_object(self):
        # Staging files are looked up inside their folder by encoded name.
        return self.get_source().get_file(
            encoded_filename=self.kwargs['encoded_filename']
        )

    def get_extra_context(self):
        return {
            'object': self.get_object(),
            'object_name': _('Staging file'),
            'source': self.get_source(),
        }
# Setup views
class SetupSourceCheckView(ConfirmView):
    """Queue an immediate ``task_check_interval_source`` run for a source.

    Lets an administrator test/debug a source configuration without
    waiting for the scheduled interval task.
    """
    view_permission = permission_sources_setup_view

    def get_object(self):
        return get_object_or_404(
            Source.objects.select_subclasses(), pk=self.kwargs['pk']
        )

    def get_extra_context(self):
        source = self.get_object()
        return {
            'object': source,
            'title': _('Trigger check for source "%s"?') % source,
        }

    def view_action(self):
        # Fire-and-forget; the task itself records the outcome.
        task_check_interval_source.apply_async(
            kwargs={'source_id': self.get_object().pk}
        )
        messages.success(self.request, _('Source check queued.'))
class SetupSourceCreateView(SingleObjectCreateView):
    """Create a new source of the type named in the URL."""

    post_action_redirect = reverse_lazy('sources:setup_source_list')
    view_permission = permission_sources_setup_create

    def get_extra_context(self):
        source_type = self.kwargs['source_type']
        return {
            'object': source_type,
            'title': _(
                'Create new source of type: %s'
            ) % get_class(source_type).class_fullname(),
        }

    def get_form_class(self):
        # Each source type provides its own setup form.
        return get_form_class(self.kwargs['source_type'])
class SetupSourceDeleteView(SingleObjectDeleteView):
    """Confirm and delete an existing source."""

    post_action_redirect = reverse_lazy('sources:setup_source_list')
    view_permission = permission_sources_setup_delete

    def get_object(self):
        return get_object_or_404(
            Source.objects.select_subclasses(), pk=self.kwargs['pk']
        )

    def get_form_class(self):
        # The form class depends on the concrete source type.
        return get_form_class(self.get_object().source_type)

    def get_extra_context(self):
        source = self.get_object()
        return {
            'object': source,
            'title': _('Delete the source: %s?') % source,
        }
class SetupSourceEditView(SingleObjectEditView):
    """Edit an existing source using its type-specific form."""

    post_action_redirect = reverse_lazy('sources:setup_source_list')
    view_permission = permission_sources_setup_edit

    def get_object(self):
        return get_object_or_404(
            Source.objects.select_subclasses(), pk=self.kwargs['pk']
        )

    def get_form_class(self):
        # The form class depends on the concrete source type.
        return get_form_class(self.get_object().source_type)

    def get_extra_context(self):
        source = self.get_object()
        return {
            'object': source,
            'title': _('Edit source: %s') % source,
        }
class SetupSourceListView(SingleObjectListView):
    """List all configured sources with their type and enabled state."""

    # Static template context; the extra columns render each source's
    # concrete class name and a two-state enabled/disabled widget.
    extra_context = {
        'extra_columns': (
            {
                'name': _('Type'),
                'attribute': encapsulate(lambda entry: entry.class_fullname())
            },
            {
                'name': _('Enabled'),
                'attribute': encapsulate(
                    lambda entry: two_state_template(entry.enabled)
                )
            },
        ),
        'hide_link': True,
        'title': _('Sources'),
    }
    # select_subclasses() yields the concrete Source subclass instances.
    queryset = Source.objects.select_subclasses()
    view_permission = permission_sources_setup_view
| 34.213311 | 109 | 0.576388 |
c27447350f222d1794c16e705b2273133332a592 | 6,262 | py | Python | NT_SSDA/demo_seed_ENT.py | chamwen/NT-Benchmark | d5a17a07fdfa89d80d47843c35ecf3e078b94371 | [
"MIT"
] | 1 | 2022-03-21T16:30:40.000Z | 2022-03-21T16:30:40.000Z | NT_SSDA/demo_seed_ENT.py | chamwen/NT-Benchmark | d5a17a07fdfa89d80d47843c35ecf3e078b94371 | [
"MIT"
] | null | null | null | NT_SSDA/demo_seed_ENT.py | chamwen/NT-Benchmark | d5a17a07fdfa89d80d47843c35ecf3e078b94371 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# A Survey on Negative Transfer
# https://github.com/chamwen/NT-Benchmark
import numpy as np
import argparse
import os
import torch as tr
import torch.nn as nn
import torch.optim as optim
from utils import network, loss, utils
from utils.LogRecord import LogRecord
from utils.dataloader import read_seed_src_tar
from utils.utils import lr_scheduler, fix_random_seed, data_load_noimg_ssda, op_copy
from utils.loss import entropy
def train_target(args):
    """Train a semi-supervised DA model with entropy minimization (ENT).

    Loads the SEED source/target splits, optimizes the backbone (netF) and
    classifier (netC) with cross-entropy on the labeled source and labeled
    target samples plus an entropy penalty on unlabeled target samples.

    :param args: argparse.Namespace carrying all hyper-parameters and paths
        (lr, lr_decay1/2, max_epoch, mdl_init_dir, task_str, log, ...).
    :return: target test accuracy (percent) measured at the last evaluation.
    """
    X_src, y_src, X_tar, y_tar = read_seed_src_tar(args)
    dset_loaders = data_load_noimg_ssda(X_src, y_src, X_tar, y_tar, args)
    netF, netC = network.backbone_net(args, args.bottleneck)
    netF.load_state_dict(tr.load(args.mdl_init_dir + 'netF.pt'))
    netC.load_state_dict(tr.load(args.mdl_init_dir + 'netC.pt'))
    # Build per-parameter learning rates; a decay factor of 0 freezes the
    # corresponding sub-network.
    param_group = []
    for k, v in netF.named_parameters():
        if args.lr_decay1 > 0:
            param_group += [{'params': v, 'lr': args.lr * args.lr_decay1}]
        else:
            v.requires_grad = False
    for k, v in netC.named_parameters():
        if args.lr_decay2 > 0:
            param_group += [{'params': v, 'lr': args.lr * args.lr_decay2}]
        else:
            v.requires_grad = False
    optimizer = optim.SGD(param_group)
    optimizer = op_copy(optimizer)
    max_iter = args.max_epoch * len(dset_loaders["source"])
    interval_iter = max_iter // 10
    args.max_iter = max_iter
    iter_num = 0
    netF.train()
    netC.train()
    # Create the three loader iterators up front; they are re-created
    # whenever exhausted.  Using the builtin next() instead of the removed
    # Python 2 style iterator.next() keeps this Python 3 compatible.
    iter_source = iter(dset_loaders["source"])
    iter_target_tr = iter(dset_loaders["target_tr"])
    iter_target = iter(dset_loaders["target_te"])
    acc_t_te = 0.0  # defined even if max_iter == 0
    while iter_num < max_iter:
        try:
            inputs_source, labels_source = next(iter_source)
        except StopIteration:
            iter_source = iter(dset_loaders["source"])
            inputs_source, labels_source = next(iter_source)
        try:
            inputs_target_tr, labels_target_tr = next(iter_target_tr)
        except StopIteration:
            iter_target_tr = iter(dset_loaders["target_tr"])
            inputs_target_tr, labels_target_tr = next(iter_target_tr)
        try:
            inputs_target, _ = next(iter_target)
        except StopIteration:
            iter_target = iter(dset_loaders["target_te"])
            inputs_target, _ = next(iter_target)
        # Skip singleton batches (BatchNorm cannot normalize a single sample).
        if inputs_source.size(0) == 1:
            continue
        iter_num += 1
        lr_scheduler(optimizer, iter_num=iter_num, max_iter=max_iter)
        inputs_source, labels_source = inputs_source.cuda(), labels_source.cuda()
        inputs_target_tr, labels_target_tr = inputs_target_tr.cuda(), labels_target_tr.cuda()
        # Supervised loss over source samples plus labeled target samples.
        _, outputs_source = netC(netF(inputs_source))
        _, outputs_target_tr = netC(netF(inputs_target_tr))
        outputs = tr.cat((outputs_source, outputs_target_tr), dim=0)
        labels = tr.cat((labels_source, labels_target_tr), dim=0)
        args.lamda = 0.1
        loss_classifier = nn.CrossEntropyLoss()(outputs, labels)
        # Entropy minimization on unlabeled target samples.
        inputs_target = inputs_target.cuda()
        feas_target = netF(inputs_target)
        _, outputs_target = netC(feas_target)
        loss_entropy = entropy(outputs_target, args.lamda)
        total_loss = loss_classifier + loss_entropy
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()
        # TODO: keep per-epoch train/val/test loss and accuracy for later
        # analysis (translated from the original note).
        if iter_num % interval_iter == 0 or iter_num == max_iter:
            netF.eval()
            netC.eval()
            acc_t_te, _ = utils.cal_acc_noimg(dset_loaders["Target"], netF, netC)
            log_str = 'Task: {}, Iter:{}/{}; Acc = {:.2f}%'.format(args.task_str, iter_num, max_iter, acc_t_te)
            args.log.record(log_str)
            print(log_str)
            netF.train()
            netC.train()
    return acc_t_te
if __name__ == '__main__':
    # Experiment configuration for the SEED EEG emotion dataset.
    data_name = 'SEED'
    # NOTE(review): chn/class_num/trial_num are only bound for 'SEED'; any
    # other data_name would raise NameError below — confirm intended.
    if data_name == 'SEED': chn, class_num, trial_num = 62, 3, 3394
    focus_domain_idx = [0, 1, 2]
    # focus_domain_idx = np.arange(15)
    domain_list = ['S' + str(i) for i in focus_domain_idx]
    num_domain = len(domain_list)
    # Hyper-parameters shared by all source->target transfer tasks.
    args = argparse.Namespace(bottleneck=64, lr=0.01, lr_decay1=0.1, lr_decay2=1.0,
                              epsilon=1e-05, layer='wn', smooth=0,
                              N=num_domain, chn=chn, class_num=class_num)
    args.dset = data_name
    args.method = 'ENT'
    args.backbone = 'ShallowNet'
    args.batch_size = 32  # 32
    args.max_epoch = 10  # 50 bad performance
    args.input_dim = 310
    args.norm = 'zscore'
    args.bz_tar_tr = args.batch_size
    args.bz_tar_te = args.batch_size * 2
    args.mdl_init_dir = 'outputs/mdl_init/' + args.dset + '/'
    args.noise_rate = 0
    dset_n = args.dset + '_' + str(args.noise_rate)
    args.tar_lbl_rate = 5  # [5, 10, ..., 50]/100
    # Hard-coded GPU selection and machine-specific working directory.
    os.environ["CUDA_VISIBLE_DEVICES"] = '3'
    args.data_env = 'gpu'  # 'local'
    args.seed = 2022
    fix_random_seed(args.seed)
    tr.backends.cudnn.deterministic = True
    print(dset_n, args.method)
    print(args)
    args.local_dir = r'/mnt/ssd2/wenz/NT-Benchmark/NT_SSDA/'
    args.result_dir = 'results/target/'
    my_log = LogRecord(args)
    my_log.log_init()
    my_log.record('=' * 50 + '\n' + os.path.basename(__file__) + '\n' + '=' * 50)
    # Run every ordered source->target pair among the selected subjects.
    acc_all = np.zeros(num_domain * (num_domain - 1))
    for s in range(num_domain):
        for t in range(num_domain):
            if s != t:
                # Flattened index over the (s, t) pairs with s != t.
                itr_idx = (num_domain - 1) * s + t
                if t > s: itr_idx -= 1
                info_str = '\n%s: %s --> %s' % (itr_idx, domain_list[s], domain_list[t])
                print(info_str)
                args.src, args.tar = focus_domain_idx[s], focus_domain_idx[t]
                args.task_str = domain_list[s] + '_' + domain_list[t]
                print(args)
                my_log.record(info_str)
                args.log = my_log
                acc_all[itr_idx] = train_target(args)
    # Summarize per-task and average accuracies and persist them to the log.
    print('\nSub acc: ', np.round(acc_all, 3))
    print('Avg acc: ', np.round(np.mean(acc_all), 3))
    acc_sub_str = str(np.round(acc_all, 3).tolist())
    acc_mean_str = str(np.round(np.mean(acc_all), 3).tolist())
    args.log.record("\n==========================================")
    args.log.record(acc_sub_str)
    args.log.record(acc_mean_str)
e66fee70797a514528d1a4491c7a2514a011c7eb | 149,331 | py | Python | keystone/tests/unit/test_v3_federation.py | himanshu-setia/myKeystone | 8a56c161ee29e34e70c6334b048881e8fbbd7514 | [
"Apache-2.0"
] | null | null | null | keystone/tests/unit/test_v3_federation.py | himanshu-setia/myKeystone | 8a56c161ee29e34e70c6334b048881e8fbbd7514 | [
"Apache-2.0"
] | null | null | null | keystone/tests/unit/test_v3_federation.py | himanshu-setia/myKeystone | 8a56c161ee29e34e70c6334b048881e8fbbd7514 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os
import random
from testtools import matchers
import uuid
import fixtures
from lxml import etree
import mock
from oslo_config import cfg
from oslo_log import versionutils
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslotest import mockpatch
import saml2
from saml2 import saml
from saml2 import sigver
from six.moves import http_client
from six.moves import range, urllib, zip
# saml2 moved its XML signature helpers between releases; try the bundled
# saml2.xmldsig module first and fall back to the standalone xmldsig package.
xmldsig = importutils.try_import("saml2.xmldsig")
if not xmldsig:
    xmldsig = importutils.try_import("xmldsig")
from keystone.auth import controllers as auth_controllers
from keystone.common import environment
from keystone.contrib.federation import routers
from keystone import exception
from keystone.federation import controllers as federation_controllers
from keystone.federation import idp as keystone_idp
from keystone import notifications
from keystone.tests import unit
from keystone.tests.unit import core
from keystone.tests.unit import federation_fixtures
from keystone.tests.unit import ksfixtures
from keystone.tests.unit import mapping_fixtures
from keystone.tests.unit import test_v3
from keystone.tests.unit import utils
from keystone.token.providers import common as token_common
# Environment-aware subprocess wrapper (eventlet-safe when patched).
subprocess = environment.subprocess
CONF = cfg.CONF
# Directory containing this test module and its SAML2 XML fixtures.
ROOTDIR = os.path.dirname(os.path.abspath(__file__))
XMLDIR = os.path.join(ROOTDIR, 'saml2/')
def dummy_validator(*args, **kwargs):
    """No-op stand-in used where a validator callable is required."""
    return None
class FederationTests(test_v3.RestfulTestCase):
    """Checks for the deprecated federation extension shim."""

    @mock.patch.object(versionutils, 'report_deprecated_feature')
    def test_exception_happens(self, mock_deprecator):
        # Instantiating the legacy extension router must emit exactly one
        # deprecation message telling operators to drop it from the paste
        # pipeline.
        routers.FederationExtension(mock.ANY)
        mock_deprecator.assert_called_once_with(mock.ANY, mock.ANY)
        call_args = mock_deprecator.call_args[0]
        self.assertIn("Remove federation_extension from", call_args[1])
class FederatedSetupMixin(object):
    """Shared fixtures and helpers for federated authentication tests.

    Provides canned IdP/protocol/mapping identifiers, token validation
    helpers, and ``load_federation_sample_data`` which builds the full
    sample topology of domains, projects, groups, roles, grants, mapping
    rules, identity providers and pre-issued unscoped tokens.
    """

    ACTION = 'authenticate'
    IDP = 'ORG_IDP'
    PROTOCOL = 'saml2'
    AUTH_METHOD = 'saml2'
    USER = 'user@ORGANIZATION'
    ASSERTION_PREFIX = 'PREFIX_'
    IDP_WITH_REMOTE = 'ORG_IDP_REMOTE'
    REMOTE_IDS = ['entityID_IDP1', 'entityID_IDP2']
    REMOTE_ID_ATTR = uuid.uuid4().hex
    # Canonical v3 auth request body for unscoped federated authentication.
    UNSCOPED_V3_SAML2_REQ = {
        "identity": {
            "methods": [AUTH_METHOD],
            AUTH_METHOD: {
                "identity_provider": IDP,
                "protocol": PROTOCOL
            }
        }
    }

    def _check_domains_are_valid(self, token):
        # Federated users are placed in the virtual 'Federated' domain.
        self.assertEqual('Federated', token['user']['domain']['id'])
        self.assertEqual('Federated', token['user']['domain']['name'])

    def _project(self, project):
        # Reduce a project dict to a comparable (id, name) tuple.
        return (project['id'], project['name'])

    def _roles(self, roles):
        # Reduce a role list to a comparable set of (id, name) tuples.
        return set([(r['id'], r['name']) for r in roles])

    def _check_projects_and_roles(self, token, roles, projects):
        """Check whether the projects and the roles match."""
        token_roles = token.get('roles')
        if token_roles is None:
            raise AssertionError('Roles not found in the token')
        token_roles = self._roles(token_roles)
        roles_ref = self._roles(roles)
        self.assertEqual(token_roles, roles_ref)
        token_projects = token.get('project')
        if token_projects is None:
            raise AssertionError('Projects not found in the token')
        token_projects = self._project(token_projects)
        projects_ref = self._project(projects)
        self.assertEqual(token_projects, projects_ref)

    def _check_scoped_token_attributes(self, token):
        # Any scoped federated token must carry these top-level keys plus
        # a complete OS-FEDERATION sub-document on the user.
        for obj in ('user', 'catalog', 'expires_at', 'issued_at',
                    'methods', 'roles'):
            self.assertIn(obj, token)
        os_federation = token['user']['OS-FEDERATION']
        self.assertIn('groups', os_federation)
        self.assertIn('identity_provider', os_federation)
        self.assertIn('protocol', os_federation)
        self.assertThat(os_federation, matchers.HasLength(3))
        self.assertEqual(self.IDP, os_federation['identity_provider']['id'])
        self.assertEqual(self.PROTOCOL, os_federation['protocol']['id'])

    def _check_project_scoped_token_attributes(self, token, project_id):
        self.assertEqual(project_id, token['project']['id'])
        self._check_scoped_token_attributes(token)

    def _check_domain_scoped_token_attributes(self, token, domain_id):
        self.assertEqual(domain_id, token['domain']['id'])
        self._check_scoped_token_attributes(token)

    def assertValidMappedUser(self, token):
        """Check if user object meets all the criteria."""
        user = token['user']
        self.assertIn('id', user)
        self.assertIn('name', user)
        self.assertIn('domain', user)
        self.assertIn('groups', user['OS-FEDERATION'])
        self.assertIn('identity_provider', user['OS-FEDERATION'])
        self.assertIn('protocol', user['OS-FEDERATION'])
        # Make sure user_id is url safe
        self.assertEqual(urllib.parse.quote(user['name']), user['id'])

    def _issue_unscoped_token(self,
                              idp=None,
                              assertion='EMPLOYEE_ASSERTION',
                              environment=None):
        # Drive the federation controller directly with a fake request
        # environment carrying the chosen mapping fixture assertion.
        api = federation_controllers.Auth()
        context = {'environment': environment or {}}
        self._inject_assertion(context, assertion)
        if idp is None:
            idp = self.IDP
        r = api.federated_authentication(context, idp, self.PROTOCOL)
        return r

    def idp_ref(self, id=None):
        # Minimal enabled IdP reference, random id unless provided.
        idp = {
            'id': id or uuid.uuid4().hex,
            'enabled': True,
            'description': uuid.uuid4().hex
        }
        return idp

    def proto_ref(self, mapping_id=None):
        # Minimal protocol reference bound to a (possibly random) mapping.
        proto = {
            'id': uuid.uuid4().hex,
            'mapping_id': mapping_id or uuid.uuid4().hex
        }
        return proto

    def mapping_ref(self, rules=None):
        # Defaults to the sample rules created by
        # load_federation_sample_data().
        return {
            'id': uuid.uuid4().hex,
            'rules': rules or self.rules['rules']
        }

    def _scope_request(self, unscoped_token_id, scope, scope_id):
        # Build a token-scoping auth request; scope is 'project' or 'domain'.
        return {
            'auth': {
                'identity': {
                    'methods': [
                        self.AUTH_METHOD
                    ],
                    self.AUTH_METHOD: {
                        'id': unscoped_token_id
                    }
                },
                'scope': {
                    scope: {
                        'id': scope_id
                    }
                }
            }
        }

    def _inject_assertion(self, context, variant, query_string=None):
        # Merge a named assertion fixture into the fake WSGI environment.
        assertion = getattr(mapping_fixtures, variant)
        context['environment'].update(assertion)
        context['query_string'] = query_string or []

    def load_federation_sample_data(self):
        """Inject additional data."""
        # Create and add domains
        self.domainA = unit.new_domain_ref()
        self.resource_api.create_domain(self.domainA['id'],
                                        self.domainA)
        self.domainB = unit.new_domain_ref()
        self.resource_api.create_domain(self.domainB['id'],
                                        self.domainB)
        self.domainC = unit.new_domain_ref()
        self.resource_api.create_domain(self.domainC['id'],
                                        self.domainC)
        self.domainD = unit.new_domain_ref()
        self.resource_api.create_domain(self.domainD['id'],
                                        self.domainD)
        # Create and add projects
        self.proj_employees = unit.new_project_ref(
            domain_id=self.domainA['id'])
        self.resource_api.create_project(self.proj_employees['id'],
                                         self.proj_employees)
        self.proj_customers = unit.new_project_ref(
            domain_id=self.domainA['id'])
        self.resource_api.create_project(self.proj_customers['id'],
                                         self.proj_customers)
        self.project_all = unit.new_project_ref(
            domain_id=self.domainA['id'])
        self.resource_api.create_project(self.project_all['id'],
                                         self.project_all)
        self.project_inherited = unit.new_project_ref(
            domain_id=self.domainD['id'])
        self.resource_api.create_project(self.project_inherited['id'],
                                         self.project_inherited)
        # Create and add groups
        self.group_employees = unit.new_group_ref(domain_id=self.domainA['id'])
        self.group_employees = (
            self.identity_api.create_group(self.group_employees))
        self.group_customers = unit.new_group_ref(domain_id=self.domainA['id'])
        self.group_customers = (
            self.identity_api.create_group(self.group_customers))
        self.group_admins = unit.new_group_ref(domain_id=self.domainA['id'])
        self.group_admins = self.identity_api.create_group(self.group_admins)
        # Create and add roles
        self.role_employee = unit.new_role_ref()
        self.role_api.create_role(self.role_employee['id'], self.role_employee)
        self.role_customer = unit.new_role_ref()
        self.role_api.create_role(self.role_customer['id'], self.role_customer)
        self.role_admin = unit.new_role_ref()
        self.role_api.create_role(self.role_admin['id'], self.role_admin)
        # Employees can access
        # * proj_employees
        # * project_all
        self.assignment_api.create_grant(self.role_employee['id'],
                                         group_id=self.group_employees['id'],
                                         project_id=self.proj_employees['id'])
        self.assignment_api.create_grant(self.role_employee['id'],
                                         group_id=self.group_employees['id'],
                                         project_id=self.project_all['id'])
        # Customers can access
        # * proj_customers
        self.assignment_api.create_grant(self.role_customer['id'],
                                         group_id=self.group_customers['id'],
                                         project_id=self.proj_customers['id'])
        # Admins can access:
        # * proj_customers
        # * proj_employees
        # * project_all
        self.assignment_api.create_grant(self.role_admin['id'],
                                         group_id=self.group_admins['id'],
                                         project_id=self.proj_customers['id'])
        self.assignment_api.create_grant(self.role_admin['id'],
                                         group_id=self.group_admins['id'],
                                         project_id=self.proj_employees['id'])
        self.assignment_api.create_grant(self.role_admin['id'],
                                         group_id=self.group_admins['id'],
                                         project_id=self.project_all['id'])
        # NOTE(review): this grant is repeated immediately below; it appears
        # redundant but is kept as-is.
        self.assignment_api.create_grant(self.role_customer['id'],
                                         group_id=self.group_customers['id'],
                                         domain_id=self.domainA['id'])
        # Customers can access:
        # * domain A
        self.assignment_api.create_grant(self.role_customer['id'],
                                         group_id=self.group_customers['id'],
                                         domain_id=self.domainA['id'])
        # Customers can access projects via inheritance:
        # * domain D
        self.assignment_api.create_grant(self.role_customer['id'],
                                         group_id=self.group_customers['id'],
                                         domain_id=self.domainD['id'],
                                         inherited_to_projects=True)
        # Employees can access:
        # * domain A
        # * domain B
        self.assignment_api.create_grant(self.role_employee['id'],
                                         group_id=self.group_employees['id'],
                                         domain_id=self.domainA['id'])
        self.assignment_api.create_grant(self.role_employee['id'],
                                         group_id=self.group_employees['id'],
                                         domain_id=self.domainB['id'])
        # Admins can access:
        # * domain A
        # * domain B
        # * domain C
        self.assignment_api.create_grant(self.role_admin['id'],
                                         group_id=self.group_admins['id'],
                                         domain_id=self.domainA['id'])
        self.assignment_api.create_grant(self.role_admin['id'],
                                         group_id=self.group_admins['id'],
                                         domain_id=self.domainB['id'])
        self.assignment_api.create_grant(self.role_admin['id'],
                                         group_id=self.group_admins['id'],
                                         domain_id=self.domainC['id'])
        # Mapping rules exercised by the tests: remote assertion attributes
        # (UserName/Email/orgPersonType/...) are mapped to local users and
        # group memberships; several rules intentionally reference
        # non-existent groups to test error paths.
        self.rules = {
            'rules': [
                {
                    'local': [
                        {
                            'group': {
                                'id': self.group_employees['id']
                            }
                        },
                        {
                            'user': {
                                'name': '{0}',
                                'id': '{1}'
                            }
                        }
                    ],
                    'remote': [
                        {
                            'type': 'UserName'
                        },
                        {
                            'type': 'Email',
                        },
                        {
                            'type': 'orgPersonType',
                            'any_one_of': [
                                'Employee'
                            ]
                        }
                    ]
                },
                {
                    'local': [
                        {
                            'group': {
                                'id': self.group_employees['id']
                            }
                        },
                        {
                            'user': {
                                'name': '{0}',
                                'id': '{1}'
                            }
                        }
                    ],
                    'remote': [
                        {
                            'type': self.ASSERTION_PREFIX + 'UserName'
                        },
                        {
                            'type': self.ASSERTION_PREFIX + 'Email',
                        },
                        {
                            'type': self.ASSERTION_PREFIX + 'orgPersonType',
                            'any_one_of': [
                                'SuperEmployee'
                            ]
                        }
                    ]
                },
                {
                    'local': [
                        {
                            'group': {
                                'id': self.group_customers['id']
                            }
                        },
                        {
                            'user': {
                                'name': '{0}',
                                'id': '{1}'
                            }
                        }
                    ],
                    'remote': [
                        {
                            'type': 'UserName'
                        },
                        {
                            'type': 'Email'
                        },
                        {
                            'type': 'orgPersonType',
                            'any_one_of': [
                                'Customer'
                            ]
                        }
                    ]
                },
                {
                    'local': [
                        {
                            'group': {
                                'id': self.group_admins['id']
                            }
                        },
                        {
                            'group': {
                                'id': self.group_employees['id']
                            }
                        },
                        {
                            'group': {
                                'id': self.group_customers['id']
                            }
                        },
                        {
                            'user': {
                                'name': '{0}',
                                'id': '{1}'
                            }
                        }
                    ],
                    'remote': [
                        {
                            'type': 'UserName'
                        },
                        {
                            'type': 'Email'
                        },
                        {
                            'type': 'orgPersonType',
                            'any_one_of': [
                                'Admin',
                                'Chief'
                            ]
                        }
                    ]
                },
                {
                    'local': [
                        {
                            'group': {
                                # Deliberately unknown group id.
                                'id': uuid.uuid4().hex
                            }
                        },
                        {
                            'group': {
                                'id': self.group_customers['id']
                            }
                        },
                        {
                            'user': {
                                'name': '{0}',
                                'id': '{1}'
                            }
                        }
                    ],
                    'remote': [
                        {
                            'type': 'UserName',
                        },
                        {
                            'type': 'Email',
                        },
                        {
                            'type': 'FirstName',
                            'any_one_of': [
                                'Jill'
                            ]
                        },
                        {
                            'type': 'LastName',
                            'any_one_of': [
                                'Smith'
                            ]
                        }
                    ]
                },
                {
                    'local': [
                        {
                            'group': {
                                # Deliberately invalid group id.
                                'id': 'this_group_no_longer_exists'
                            }
                        },
                        {
                            'user': {
                                'name': '{0}',
                                'id': '{1}'
                            }
                        }
                    ],
                    'remote': [
                        {
                            'type': 'UserName',
                        },
                        {
                            'type': 'Email',
                        },
                        {
                            'type': 'Email',
                            'any_one_of': [
                                'testacct@example.com'
                            ]
                        },
                        {
                            'type': 'orgPersonType',
                            'any_one_of': [
                                'Tester'
                            ]
                        }
                    ]
                },
                # rules with local group names
                {
                    "local": [
                        {
                            'user': {
                                'name': '{0}',
                                'id': '{1}'
                            }
                        },
                        {
                            "group": {
                                "name": self.group_customers['name'],
                                "domain": {
                                    "name": self.domainA['name']
                                }
                            }
                        }
                    ],
                    "remote": [
                        {
                            'type': 'UserName',
                        },
                        {
                            'type': 'Email',
                        },
                        {
                            "type": "orgPersonType",
                            "any_one_of": [
                                "CEO",
                                "CTO"
                            ],
                        }
                    ]
                },
                {
                    "local": [
                        {
                            'user': {
                                'name': '{0}',
                                'id': '{1}'
                            }
                        },
                        {
                            "group": {
                                "name": self.group_admins['name'],
                                "domain": {
                                    "id": self.domainA['id']
                                }
                            }
                        }
                    ],
                    "remote": [
                        {
                            "type": "UserName",
                        },
                        {
                            "type": "Email",
                        },
                        {
                            "type": "orgPersonType",
                            "any_one_of": [
                                "Managers"
                            ]
                        }
                    ]
                },
                {
                    "local": [
                        {
                            "user": {
                                "name": "{0}",
                                "id": "{1}"
                            }
                        },
                        {
                            "group": {
                                # Deliberately unknown group name.
                                "name": "NON_EXISTING",
                                "domain": {
                                    "id": self.domainA['id']
                                }
                            }
                        }
                    ],
                    "remote": [
                        {
                            "type": "UserName",
                        },
                        {
                            "type": "Email",
                        },
                        {
                            "type": "UserName",
                            "any_one_of": [
                                "IamTester"
                            ]
                        }
                    ]
                },
                {
                    "local": [
                        {
                            "user": {
                                "type": "local",
                                "name": self.user['name'],
                                "domain": {
                                    "id": self.user['domain_id']
                                }
                            }
                        },
                        {
                            "group": {
                                "id": self.group_customers['id']
                            }
                        }
                    ],
                    "remote": [
                        {
                            "type": "UserType",
                            "any_one_of": [
                                "random"
                            ]
                        }
                    ]
                },
                {
                    "local": [
                        {
                            "user": {
                                "type": "local",
                                "name": self.user['name'],
                                "domain": {
                                    # Deliberately unknown domain id.
                                    "id": uuid.uuid4().hex
                                }
                            }
                        }
                    ],
                    "remote": [
                        {
                            "type": "Position",
                            "any_one_of": [
                                "DirectorGeneral"
                            ]
                        }
                    ]
                }
            ]
        }
        # Add IDP
        self.idp = self.idp_ref(id=self.IDP)
        self.federation_api.create_idp(self.idp['id'],
                                       self.idp)
        # Add IDP with remote
        self.idp_with_remote = self.idp_ref(id=self.IDP_WITH_REMOTE)
        self.idp_with_remote['remote_ids'] = self.REMOTE_IDS
        self.federation_api.create_idp(self.idp_with_remote['id'],
                                       self.idp_with_remote)
        # Add a mapping
        self.mapping = self.mapping_ref()
        self.federation_api.create_mapping(self.mapping['id'],
                                           self.mapping)
        # Add protocols
        self.proto_saml = self.proto_ref(mapping_id=self.mapping['id'])
        self.proto_saml['id'] = self.PROTOCOL
        self.federation_api.create_protocol(self.idp['id'],
                                            self.proto_saml['id'],
                                            self.proto_saml)
        # Add protocols IDP with remote
        self.federation_api.create_protocol(self.idp_with_remote['id'],
                                            self.proto_saml['id'],
                                            self.proto_saml)
        # Generate fake tokens
        context = {'environment': {}}
        self.tokens = {}
        VARIANTS = ('EMPLOYEE_ASSERTION', 'CUSTOMER_ASSERTION',
                    'ADMIN_ASSERTION')
        api = auth_controllers.Auth()
        for variant in VARIANTS:
            self._inject_assertion(context, variant)
            r = api.authenticate_for_token(context, self.UNSCOPED_V3_SAML2_REQ)
            self.tokens[variant] = r.headers.get('X-Subject-Token')
        # Pre-built scoping requests used across test cases.
        self.TOKEN_SCOPE_PROJECT_FROM_NONEXISTENT_TOKEN = self._scope_request(
            uuid.uuid4().hex, 'project', self.proj_customers['id'])
        self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE = self._scope_request(
            self.tokens['EMPLOYEE_ASSERTION'], 'project',
            self.proj_employees['id'])
        self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_ADMIN = self._scope_request(
            self.tokens['ADMIN_ASSERTION'], 'project',
            self.proj_employees['id'])
        self.TOKEN_SCOPE_PROJECT_CUSTOMER_FROM_ADMIN = self._scope_request(
            self.tokens['ADMIN_ASSERTION'], 'project',
            self.proj_customers['id'])
        self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER = self._scope_request(
            self.tokens['CUSTOMER_ASSERTION'], 'project',
            self.proj_employees['id'])
        self.TOKEN_SCOPE_PROJECT_INHERITED_FROM_CUSTOMER = self._scope_request(
            self.tokens['CUSTOMER_ASSERTION'], 'project',
            self.project_inherited['id'])
        self.TOKEN_SCOPE_DOMAIN_A_FROM_CUSTOMER = self._scope_request(
            self.tokens['CUSTOMER_ASSERTION'], 'domain', self.domainA['id'])
        self.TOKEN_SCOPE_DOMAIN_B_FROM_CUSTOMER = self._scope_request(
            self.tokens['CUSTOMER_ASSERTION'], 'domain',
            self.domainB['id'])
        self.TOKEN_SCOPE_DOMAIN_D_FROM_CUSTOMER = self._scope_request(
            self.tokens['CUSTOMER_ASSERTION'], 'domain', self.domainD['id'])
        self.TOKEN_SCOPE_DOMAIN_A_FROM_ADMIN = self._scope_request(
            self.tokens['ADMIN_ASSERTION'], 'domain', self.domainA['id'])
        self.TOKEN_SCOPE_DOMAIN_B_FROM_ADMIN = self._scope_request(
            self.tokens['ADMIN_ASSERTION'], 'domain', self.domainB['id'])
        self.TOKEN_SCOPE_DOMAIN_C_FROM_ADMIN = self._scope_request(
            self.tokens['ADMIN_ASSERTION'], 'domain',
            self.domainC['id'])
class FederatedIdentityProviderTests(test_v3.RestfulTestCase):
    """A test class for Identity Providers."""
    # Attributes checked on every IdP response body.
    idp_keys = ['description', 'enabled']
    # Template request body used when creating an IdP.
    default_body = {'description': None, 'enabled': True}
    def base_url(self, suffix=None):
        # Build the OS-FEDERATION identity-providers URL; *suffix* may be an
        # IdP id or a '%(idp_id)s/protocols/%(protocol_id)s' template.
        if suffix is not None:
            return '/OS-FEDERATION/identity_providers/' + str(suffix)
        return '/OS-FEDERATION/identity_providers'
    def _fetch_attribute_from_response(self, resp, parameter,
                                       assert_is_not_none=True):
        """Fetch single attribute from TestResponse object."""
        result = resp.result.get(parameter)
        if assert_is_not_none:
            self.assertIsNotNone(result)
        return result
    def _create_and_decapsulate_response(self, body=None):
        """Create IdP and fetch its random id along with the entity."""
        default_resp = self._create_default_idp(body=body)
        idp = self._fetch_attribute_from_response(default_resp,
                                                  'identity_provider')
        self.assertIsNotNone(idp)
        idp_id = idp.get('id')
        return (idp_id, idp)
    def _get_idp(self, idp_id):
        """Fetch IdP entity based on its id."""
        url = self.base_url(suffix=idp_id)
        resp = self.get(url)
        return resp
    def _create_default_idp(self, body=None):
        """Create default IdP under a random id."""
        url = self.base_url(suffix=uuid.uuid4().hex)
        if body is None:
            body = self._http_idp_input()
        resp = self.put(url, body={'identity_provider': body},
                        expected_status=http_client.CREATED)
        return resp
    def _http_idp_input(self, **kwargs):
        """Create default input for IdP data."""
        body = None
        if 'body' not in kwargs:
            body = self.default_body.copy()
            body['description'] = uuid.uuid4().hex
        else:
            body = kwargs['body']
        return body
    def _assign_protocol_to_idp(self, idp_id=None, proto=None, url=None,
                                mapping_id=None, validate=True, **kwargs):
        # Assign a federation protocol to an IdP, generating random values
        # for every argument that was not supplied.  Returns a tuple of
        # (response, idp_id, protocol_id).
        if url is None:
            url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
        if idp_id is None:
            idp_id, _ = self._create_and_decapsulate_response()
        if proto is None:
            proto = uuid.uuid4().hex
        if mapping_id is None:
            mapping_id = uuid.uuid4().hex
        body = {'mapping_id': mapping_id}
        url = url % {'idp_id': idp_id, 'protocol_id': proto}
        resp = self.put(url, body={'protocol': body}, **kwargs)
        if validate:
            self.assertValidResponse(resp, 'protocol', dummy_validator,
                                     keys_to_check=['id', 'mapping_id'],
                                     ref={'id': proto,
                                          'mapping_id': mapping_id})
        return (resp, idp_id, proto)
    def _get_protocol(self, idp_id, protocol_id):
        # GET a single protocol resource tied to the given IdP.
        url = "%s/protocols/%s" % (idp_id, protocol_id)
        url = self.base_url(suffix=url)
        r = self.get(url)
        return r
    def test_create_idp(self):
        """Creates the IdentityProvider entity (no remote_ids)."""
        keys_to_check = list(self.idp_keys)
        body = self.default_body.copy()
        body['description'] = uuid.uuid4().hex
        resp = self._create_default_idp(body=body)
        self.assertValidResponse(resp, 'identity_provider', dummy_validator,
                                 keys_to_check=keys_to_check,
                                 ref=body)
    def test_create_idp_remote(self):
        """Creates the IdentityProvider entity associated to remote_ids."""
        keys_to_check = list(self.idp_keys)
        keys_to_check.append('remote_ids')
        body = self.default_body.copy()
        body['description'] = uuid.uuid4().hex
        body['remote_ids'] = [uuid.uuid4().hex,
                              uuid.uuid4().hex,
                              uuid.uuid4().hex]
        resp = self._create_default_idp(body=body)
        self.assertValidResponse(resp, 'identity_provider', dummy_validator,
                                 keys_to_check=keys_to_check,
                                 ref=body)
    def test_create_idp_remote_repeated(self):
        """Creates two IdentityProvider entities with some remote_ids.

        A remote_id is the same for both so the second IdP is not
        created because of the uniqueness of the remote_ids.

        Expect HTTP 409 Conflict code for the latter call.
        """
        body = self.default_body.copy()
        repeated_remote_id = uuid.uuid4().hex
        body['remote_ids'] = [uuid.uuid4().hex,
                              uuid.uuid4().hex,
                              uuid.uuid4().hex,
                              repeated_remote_id]
        self._create_default_idp(body=body)
        # The second IdP reuses one of the remote_ids already taken.
        url = self.base_url(suffix=uuid.uuid4().hex)
        body['remote_ids'] = [uuid.uuid4().hex,
                              repeated_remote_id]
        resp = self.put(url, body={'identity_provider': body},
                        expected_status=http_client.CONFLICT)
        resp_data = jsonutils.loads(resp.body)
        self.assertIn('Duplicate remote ID',
                      resp_data.get('error', {}).get('message'))
    def test_create_idp_remote_empty(self):
        """Creates an IdP with empty remote_ids."""
        keys_to_check = list(self.idp_keys)
        keys_to_check.append('remote_ids')
        body = self.default_body.copy()
        body['description'] = uuid.uuid4().hex
        body['remote_ids'] = []
        resp = self._create_default_idp(body=body)
        self.assertValidResponse(resp, 'identity_provider', dummy_validator,
                                 keys_to_check=keys_to_check,
                                 ref=body)
    def test_create_idp_remote_none(self):
        """Creates an IdP with a None remote_ids."""
        keys_to_check = list(self.idp_keys)
        keys_to_check.append('remote_ids')
        body = self.default_body.copy()
        body['description'] = uuid.uuid4().hex
        body['remote_ids'] = None
        resp = self._create_default_idp(body=body)
        expected = body.copy()
        # A None remote_ids is expected to come back as an empty list.
        expected['remote_ids'] = []
        self.assertValidResponse(resp, 'identity_provider', dummy_validator,
                                 keys_to_check=keys_to_check,
                                 ref=expected)
    def test_update_idp_remote_ids(self):
        """Update IdP's remote_ids parameter."""
        body = self.default_body.copy()
        body['remote_ids'] = [uuid.uuid4().hex]
        default_resp = self._create_default_idp(body=body)
        default_idp = self._fetch_attribute_from_response(default_resp,
                                                          'identity_provider')
        idp_id = default_idp.get('id')
        url = self.base_url(suffix=idp_id)
        self.assertIsNotNone(idp_id)
        body['remote_ids'] = [uuid.uuid4().hex, uuid.uuid4().hex]
        body = {'identity_provider': body}
        resp = self.patch(url, body=body)
        updated_idp = self._fetch_attribute_from_response(resp,
                                                          'identity_provider')
        body = body['identity_provider']
        # Check both the PATCH response and a subsequent GET.
        self.assertEqual(sorted(body['remote_ids']),
                         sorted(updated_idp.get('remote_ids')))
        resp = self.get(url)
        returned_idp = self._fetch_attribute_from_response(resp,
                                                           'identity_provider')
        self.assertEqual(sorted(body['remote_ids']),
                         sorted(returned_idp.get('remote_ids')))
    def test_update_idp_clean_remote_ids(self):
        """Update IdP's remote_ids parameter with an empty list."""
        body = self.default_body.copy()
        body['remote_ids'] = [uuid.uuid4().hex]
        default_resp = self._create_default_idp(body=body)
        default_idp = self._fetch_attribute_from_response(default_resp,
                                                          'identity_provider')
        idp_id = default_idp.get('id')
        url = self.base_url(suffix=idp_id)
        self.assertIsNotNone(idp_id)
        body['remote_ids'] = []
        body = {'identity_provider': body}
        resp = self.patch(url, body=body)
        updated_idp = self._fetch_attribute_from_response(resp,
                                                          'identity_provider')
        body = body['identity_provider']
        # Check both the PATCH response and a subsequent GET.
        self.assertEqual(sorted(body['remote_ids']),
                         sorted(updated_idp.get('remote_ids')))
        resp = self.get(url)
        returned_idp = self._fetch_attribute_from_response(resp,
                                                           'identity_provider')
        self.assertEqual(sorted(body['remote_ids']),
                         sorted(returned_idp.get('remote_ids')))
    def test_list_idps(self, iterations=5):
        """Lists all available IdentityProviders.

        This test collects ids of created IdPs and
        intersects it with the list of all available IdPs.
        List of all IdPs can be a superset of IdPs created in this test,
        because other tests also create IdPs.
        """
        def get_id(resp):
            r = self._fetch_attribute_from_response(resp,
                                                    'identity_provider')
            return r.get('id')
        ids = []
        for _ in range(iterations):
            # NOTE(review): `id` shadows the builtin; harmless here but
            # worth renaming in a behavioral change.
            id = get_id(self._create_default_idp())
            ids.append(id)
        ids = set(ids)
        keys_to_check = self.idp_keys
        url = self.base_url()
        resp = self.get(url)
        self.assertValidListResponse(resp, 'identity_providers',
                                     dummy_validator,
                                     keys_to_check=keys_to_check)
        entities = self._fetch_attribute_from_response(resp,
                                                       'identity_providers')
        entities_ids = set([e['id'] for e in entities])
        ids_intersection = entities_ids.intersection(ids)
        self.assertEqual(ids_intersection, ids)
    def test_filter_list_idp_by_id(self):
        """Filter the IdP collection by the ``id`` query parameter."""
        def get_id(resp):
            r = self._fetch_attribute_from_response(resp,
                                                    'identity_provider')
            return r.get('id')
        idp1_id = get_id(self._create_default_idp())
        idp2_id = get_id(self._create_default_idp())
        # list the IdP, should get two IdP.
        url = self.base_url()
        resp = self.get(url)
        entities = self._fetch_attribute_from_response(resp,
                                                       'identity_providers')
        entities_ids = [e['id'] for e in entities]
        self.assertItemsEqual(entities_ids, [idp1_id, idp2_id])
        # filter the IdP by ID.
        url = self.base_url() + '?id=' + idp1_id
        resp = self.get(url)
        filtered_service_list = resp.json['identity_providers']
        self.assertThat(filtered_service_list, matchers.HasLength(1))
        self.assertEqual(idp1_id, filtered_service_list[0].get('id'))
    def test_filter_list_idp_by_enabled(self):
        """Filter the IdP collection by the ``enabled`` query parameter."""
        def get_id(resp):
            r = self._fetch_attribute_from_response(resp,
                                                    'identity_provider')
            return r.get('id')
        idp1_id = get_id(self._create_default_idp())
        body = self.default_body.copy()
        body['enabled'] = False
        idp2_id = get_id(self._create_default_idp(body=body))
        # list the IdP, should get two IdP.
        url = self.base_url()
        resp = self.get(url)
        entities = self._fetch_attribute_from_response(resp,
                                                       'identity_providers')
        entities_ids = [e['id'] for e in entities]
        self.assertItemsEqual(entities_ids, [idp1_id, idp2_id])
        # filter the IdP by 'enabled'.
        url = self.base_url() + '?enabled=True'
        resp = self.get(url)
        filtered_service_list = resp.json['identity_providers']
        self.assertThat(filtered_service_list, matchers.HasLength(1))
        self.assertEqual(idp1_id, filtered_service_list[0].get('id'))
    def test_check_idp_uniqueness(self):
        """Add same IdP twice.

        Expect HTTP 409 Conflict code for the latter call.
        """
        url = self.base_url(suffix=uuid.uuid4().hex)
        body = self._http_idp_input()
        self.put(url, body={'identity_provider': body},
                 expected_status=http_client.CREATED)
        resp = self.put(url, body={'identity_provider': body},
                        expected_status=http_client.CONFLICT)
        resp_data = jsonutils.loads(resp.body)
        self.assertIn('Duplicate entry',
                      resp_data.get('error', {}).get('message'))
    def test_get_idp(self):
        """Create and later fetch IdP."""
        body = self._http_idp_input()
        default_resp = self._create_default_idp(body=body)
        default_idp = self._fetch_attribute_from_response(default_resp,
                                                          'identity_provider')
        idp_id = default_idp.get('id')
        url = self.base_url(suffix=idp_id)
        resp = self.get(url)
        self.assertValidResponse(resp, 'identity_provider',
                                 dummy_validator, keys_to_check=body.keys(),
                                 ref=body)
    def test_get_nonexisting_idp(self):
        """Fetch nonexisting IdP entity.

        Expected HTTP 404 Not Found status code.
        """
        idp_id = uuid.uuid4().hex
        self.assertIsNotNone(idp_id)
        url = self.base_url(suffix=idp_id)
        self.get(url, expected_status=http_client.NOT_FOUND)
    def test_delete_existing_idp(self):
        """Create and later delete IdP.

        Expect HTTP 404 Not Found for the GET IdP call.
        """
        default_resp = self._create_default_idp()
        default_idp = self._fetch_attribute_from_response(default_resp,
                                                          'identity_provider')
        idp_id = default_idp.get('id')
        self.assertIsNotNone(idp_id)
        url = self.base_url(suffix=idp_id)
        self.delete(url)
        self.get(url, expected_status=http_client.NOT_FOUND)
    def test_delete_idp_also_deletes_assigned_protocols(self):
        """Deleting an IdP will delete its assigned protocol."""
        # create default IdP
        default_resp = self._create_default_idp()
        default_idp = self._fetch_attribute_from_response(default_resp,
                                                          'identity_provider')
        idp_id = default_idp['id']
        protocol_id = uuid.uuid4().hex
        url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
        idp_url = self.base_url(suffix=idp_id)
        # assign protocol to IdP
        kwargs = {'expected_status': http_client.CREATED}
        resp, idp_id, proto = self._assign_protocol_to_idp(
            url=url,
            idp_id=idp_id,
            proto=protocol_id,
            **kwargs)
        # removing IdP will remove the assigned protocol as well
        self.assertEqual(1, len(self.federation_api.list_protocols(idp_id)))
        self.delete(idp_url)
        self.get(idp_url, expected_status=http_client.NOT_FOUND)
        self.assertEqual(0, len(self.federation_api.list_protocols(idp_id)))
    def test_delete_nonexisting_idp(self):
        """Delete nonexisting IdP.

        Expect HTTP 404 Not Found for the DELETE call.
        """
        idp_id = uuid.uuid4().hex
        url = self.base_url(suffix=idp_id)
        self.delete(url, expected_status=http_client.NOT_FOUND)
    def test_update_idp_mutable_attributes(self):
        """Update IdP's mutable parameters."""
        default_resp = self._create_default_idp()
        default_idp = self._fetch_attribute_from_response(default_resp,
                                                          'identity_provider')
        idp_id = default_idp.get('id')
        url = self.base_url(suffix=idp_id)
        self.assertIsNotNone(idp_id)
        _enabled = not default_idp.get('enabled')
        body = {'remote_ids': [uuid.uuid4().hex, uuid.uuid4().hex],
                'description': uuid.uuid4().hex,
                'enabled': _enabled}
        body = {'identity_provider': body}
        resp = self.patch(url, body=body)
        updated_idp = self._fetch_attribute_from_response(resp,
                                                          'identity_provider')
        body = body['identity_provider']
        # remote_ids ordering is not guaranteed, so compare list attributes
        # sorted; scalar attributes are compared directly.
        for key in body.keys():
            if isinstance(body[key], list):
                self.assertEqual(sorted(body[key]),
                                 sorted(updated_idp.get(key)))
            else:
                self.assertEqual(body[key], updated_idp.get(key))
        resp = self.get(url)
        updated_idp = self._fetch_attribute_from_response(resp,
                                                          'identity_provider')
        for key in body.keys():
            if isinstance(body[key], list):
                self.assertEqual(sorted(body[key]),
                                 sorted(updated_idp.get(key)))
            else:
                self.assertEqual(body[key], updated_idp.get(key))
    def test_update_idp_immutable_attributes(self):
        """Update IdP's immutable parameters.

        Expect HTTP BAD REQUEST.
        """
        default_resp = self._create_default_idp()
        default_idp = self._fetch_attribute_from_response(default_resp,
                                                          'identity_provider')
        idp_id = default_idp.get('id')
        self.assertIsNotNone(idp_id)
        body = self._http_idp_input()
        body['id'] = uuid.uuid4().hex
        body['protocols'] = [uuid.uuid4().hex, uuid.uuid4().hex]
        url = self.base_url(suffix=idp_id)
        self.patch(url, body={'identity_provider': body},
                   expected_status=http_client.BAD_REQUEST)
    def test_update_nonexistent_idp(self):
        """Update nonexistent IdP.

        Expect HTTP 404 Not Found code.
        """
        idp_id = uuid.uuid4().hex
        url = self.base_url(suffix=idp_id)
        body = self._http_idp_input()
        body['enabled'] = False
        body = {'identity_provider': body}
        self.patch(url, body=body, expected_status=http_client.NOT_FOUND)
    def test_assign_protocol_to_idp(self):
        """Assign a protocol to existing IdP."""
        self._assign_protocol_to_idp(expected_status=http_client.CREATED)
    def test_protocol_composite_pk(self):
        """Test that Keystone can add two entities.

        The entities have identical names, however, attached to different
        IdPs.

        1. Add IdP and assign it protocol with predefined name
        2. Add another IdP and assign it a protocol with same name.

        Expect HTTP 201 code
        """
        url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
        kwargs = {'expected_status': http_client.CREATED}
        self._assign_protocol_to_idp(proto='saml2',
                                     url=url, **kwargs)
        self._assign_protocol_to_idp(proto='saml2',
                                     url=url, **kwargs)
    def test_protocol_idp_pk_uniqueness(self):
        """Test whether Keystone checks for unique idp/protocol values.

        Add same protocol twice, expect Keystone to reject a latter call and
        return HTTP 409 Conflict code.
        """
        url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
        kwargs = {'expected_status': http_client.CREATED}
        resp, idp_id, proto = self._assign_protocol_to_idp(proto='saml2',
                                                           url=url, **kwargs)
        kwargs = {'expected_status': http_client.CONFLICT}
        resp, idp_id, proto = self._assign_protocol_to_idp(idp_id=idp_id,
                                                           proto='saml2',
                                                           validate=False,
                                                           url=url, **kwargs)
    def test_assign_protocol_to_nonexistent_idp(self):
        """Assign protocol to IdP that doesn't exist.

        Expect HTTP 404 Not Found code.
        """
        idp_id = uuid.uuid4().hex
        kwargs = {'expected_status': http_client.NOT_FOUND}
        self._assign_protocol_to_idp(proto='saml2',
                                     idp_id=idp_id,
                                     validate=False,
                                     **kwargs)
    def test_get_protocol(self):
        """Create and later fetch protocol tied to IdP."""
        resp, idp_id, proto = self._assign_protocol_to_idp(
            expected_status=http_client.CREATED)
        proto_id = self._fetch_attribute_from_response(resp, 'protocol')['id']
        url = "%s/protocols/%s" % (idp_id, proto_id)
        url = self.base_url(suffix=url)
        resp = self.get(url)
        reference = {'id': proto_id}
        self.assertValidResponse(resp, 'protocol',
                                 dummy_validator,
                                 keys_to_check=reference.keys(),
                                 ref=reference)
    def test_list_protocols(self):
        """Create set of protocols and later list them.

        Compare input and output id sets.
        """
        resp, idp_id, proto = self._assign_protocol_to_idp(
            expected_status=http_client.CREATED)
        iterations = random.randint(0, 16)
        protocol_ids = []
        for _ in range(iterations):
            resp, _, proto = self._assign_protocol_to_idp(
                idp_id=idp_id,
                expected_status=http_client.CREATED)
            proto_id = self._fetch_attribute_from_response(resp, 'protocol')
            proto_id = proto_id['id']
            protocol_ids.append(proto_id)
        url = "%s/protocols" % idp_id
        url = self.base_url(suffix=url)
        resp = self.get(url)
        self.assertValidListResponse(resp, 'protocols',
                                     dummy_validator,
                                     keys_to_check=['id'])
        entities = self._fetch_attribute_from_response(resp, 'protocols')
        entities = set([entity['id'] for entity in entities])
        protocols_intersection = entities.intersection(protocol_ids)
        self.assertEqual(protocols_intersection, set(protocol_ids))
    def test_update_protocols_attribute(self):
        """Update protocol's attribute."""
        resp, idp_id, proto = self._assign_protocol_to_idp(
            expected_status=http_client.CREATED)
        new_mapping_id = uuid.uuid4().hex
        url = "%s/protocols/%s" % (idp_id, proto)
        url = self.base_url(suffix=url)
        body = {'mapping_id': new_mapping_id}
        resp = self.patch(url, body={'protocol': body})
        self.assertValidResponse(resp, 'protocol', dummy_validator,
                                 keys_to_check=['id', 'mapping_id'],
                                 ref={'id': proto,
                                      'mapping_id': new_mapping_id}
                                 )
    def test_delete_protocol(self):
        """Delete protocol.

        Expect HTTP 404 Not Found code for the GET call after the protocol is
        deleted.
        """
        # NOTE(review): the leading '/' in the suffix yields a double slash
        # in the resulting URL; routing evidently tolerates it.
        url = self.base_url(suffix='/%(idp_id)s/'
                            'protocols/%(protocol_id)s')
        resp, idp_id, proto = self._assign_protocol_to_idp(
            expected_status=http_client.CREATED)
        url = url % {'idp_id': idp_id,
                     'protocol_id': proto}
        self.delete(url)
        self.get(url, expected_status=http_client.NOT_FOUND)
class MappingCRUDTests(test_v3.RestfulTestCase):
    """A class for testing CRUD operations for Mappings."""
    # Collection URL; individual mappings live at MAPPING_URL + <mapping_id>.
    MAPPING_URL = '/OS-FEDERATION/mappings/'
    def assertValidMappingListResponse(self, resp, *args, **kwargs):
        """Assert that *resp* carries a valid list of mappings."""
        return self.assertValidListResponse(
            resp,
            'mappings',
            self.assertValidMapping,
            keys_to_check=[],
            *args,
            **kwargs)
    def assertValidMappingResponse(self, resp, *args, **kwargs):
        """Assert that *resp* carries a single valid mapping."""
        return self.assertValidResponse(
            resp,
            'mapping',
            self.assertValidMapping,
            keys_to_check=[],
            *args,
            **kwargs)
    def assertValidMapping(self, entity, ref=None):
        """Validate a mapping entity, optionally against a reference."""
        self.assertIsNotNone(entity.get('id'))
        self.assertIsNotNone(entity.get('rules'))
        if ref:
            self.assertEqual(entity['rules'], ref['rules'])
        return entity
    def _create_default_mapping_entry(self):
        # PUT the large fixture mapping under a random mapping id.
        url = self.MAPPING_URL + uuid.uuid4().hex
        resp = self.put(url,
                        body={'mapping': mapping_fixtures.MAPPING_LARGE},
                        expected_status=http_client.CREATED)
        return resp
    def _get_id_from_response(self, resp):
        # Extract the server-assigned mapping id from a create response.
        r = resp.result.get('mapping')
        return r.get('id')
    def test_mapping_create(self):
        resp = self._create_default_mapping_entry()
        self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_LARGE)
    def test_mapping_list(self):
        url = self.MAPPING_URL
        self._create_default_mapping_entry()
        resp = self.get(url)
        entities = resp.result.get('mappings')
        self.assertIsNotNone(entities)
        self.assertResponseStatus(resp, http_client.OK)
        self.assertValidListLinks(resp.result.get('links'))
        self.assertEqual(1, len(entities))
    def test_mapping_delete(self):
        url = self.MAPPING_URL + '%(mapping_id)s'
        resp = self._create_default_mapping_entry()
        mapping_id = self._get_id_from_response(resp)
        url = url % {'mapping_id': str(mapping_id)}
        resp = self.delete(url)
        self.assertResponseStatus(resp, http_client.NO_CONTENT)
        # Fetching a deleted mapping must 404.
        self.get(url, expected_status=http_client.NOT_FOUND)
    def test_mapping_get(self):
        url = self.MAPPING_URL + '%(mapping_id)s'
        resp = self._create_default_mapping_entry()
        mapping_id = self._get_id_from_response(resp)
        url = url % {'mapping_id': mapping_id}
        resp = self.get(url)
        self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_LARGE)
    def test_mapping_update(self):
        url = self.MAPPING_URL + '%(mapping_id)s'
        resp = self._create_default_mapping_entry()
        mapping_id = self._get_id_from_response(resp)
        url = url % {'mapping_id': mapping_id}
        resp = self.patch(url,
                          body={'mapping': mapping_fixtures.MAPPING_SMALL})
        self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_SMALL)
        # The update must be visible on a subsequent GET as well.
        resp = self.get(url)
        self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_SMALL)
    def test_delete_mapping_dne(self):
        # 'dne' == does not exist.
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.delete(url, expected_status=http_client.NOT_FOUND)
    def test_get_mapping_dne(self):
        # 'dne' == does not exist.
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.get(url, expected_status=http_client.NOT_FOUND)
    def test_create_mapping_bad_requirements(self):
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.put(url, expected_status=http_client.BAD_REQUEST,
                 body={'mapping': mapping_fixtures.MAPPING_BAD_REQ})
    def test_create_mapping_no_rules(self):
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.put(url, expected_status=http_client.BAD_REQUEST,
                 body={'mapping': mapping_fixtures.MAPPING_NO_RULES})
    def test_create_mapping_no_remote_objects(self):
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.put(url, expected_status=http_client.BAD_REQUEST,
                 body={'mapping': mapping_fixtures.MAPPING_NO_REMOTE})
    def test_create_mapping_bad_value(self):
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.put(url, expected_status=http_client.BAD_REQUEST,
                 body={'mapping': mapping_fixtures.MAPPING_BAD_VALUE})
    def test_create_mapping_missing_local(self):
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.put(url, expected_status=http_client.BAD_REQUEST,
                 body={'mapping': mapping_fixtures.MAPPING_MISSING_LOCAL})
    def test_create_mapping_missing_type(self):
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.put(url, expected_status=http_client.BAD_REQUEST,
                 body={'mapping': mapping_fixtures.MAPPING_MISSING_TYPE})
    def test_create_mapping_wrong_type(self):
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.put(url, expected_status=http_client.BAD_REQUEST,
                 body={'mapping': mapping_fixtures.MAPPING_WRONG_TYPE})
    def test_create_mapping_extra_remote_properties_not_any_of(self):
        url = self.MAPPING_URL + uuid.uuid4().hex
        mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_NOT_ANY_OF
        self.put(url, expected_status=http_client.BAD_REQUEST,
                 body={'mapping': mapping})
    def test_create_mapping_extra_remote_properties_any_one_of(self):
        url = self.MAPPING_URL + uuid.uuid4().hex
        mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_ANY_ONE_OF
        self.put(url, expected_status=http_client.BAD_REQUEST,
                 body={'mapping': mapping})
    def test_create_mapping_extra_remote_properties_just_type(self):
        url = self.MAPPING_URL + uuid.uuid4().hex
        mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_JUST_TYPE
        self.put(url, expected_status=http_client.BAD_REQUEST,
                 body={'mapping': mapping})
    def test_create_mapping_empty_map(self):
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.put(url, expected_status=http_client.BAD_REQUEST,
                 body={'mapping': {}})
    def test_create_mapping_extra_rules_properties(self):
        url = self.MAPPING_URL + uuid.uuid4().hex
        self.put(url, expected_status=http_client.BAD_REQUEST,
                 body={'mapping': mapping_fixtures.MAPPING_EXTRA_RULES_PROPS})
    def test_create_mapping_with_blacklist_and_whitelist(self):
        """Test for adding whitelist and blacklist in the rule.

        Server should respond with HTTP 400 Bad Request error upon discovering
        both ``whitelist`` and ``blacklist`` keywords in the same rule.
        """
        url = self.MAPPING_URL + uuid.uuid4().hex
        mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST_AND_BLACKLIST
        self.put(url, expected_status=http_client.BAD_REQUEST,
                 body={'mapping': mapping})
    def test_create_mapping_with_local_user_and_local_domain(self):
        url = self.MAPPING_URL + uuid.uuid4().hex
        resp = self.put(
            url,
            body={
                'mapping': mapping_fixtures.MAPPING_LOCAL_USER_LOCAL_DOMAIN
            },
            expected_status=http_client.CREATED)
        self.assertValidMappingResponse(
            resp, mapping_fixtures.MAPPING_LOCAL_USER_LOCAL_DOMAIN)
    def test_create_mapping_with_ephemeral(self):
        url = self.MAPPING_URL + uuid.uuid4().hex
        resp = self.put(
            url,
            body={'mapping': mapping_fixtures.MAPPING_EPHEMERAL_USER},
            expected_status=http_client.CREATED)
        self.assertValidMappingResponse(
            resp, mapping_fixtures.MAPPING_EPHEMERAL_USER)
    def test_create_mapping_with_bad_user_type(self):
        url = self.MAPPING_URL + uuid.uuid4().hex
        # get a copy of a known good map
        bad_mapping = copy.deepcopy(mapping_fixtures.MAPPING_EPHEMERAL_USER)
        # now sabotage the user type
        bad_mapping['rules'][0]['local'][0]['user']['type'] = uuid.uuid4().hex
        self.put(url, expected_status=http_client.BAD_REQUEST,
                 body={'mapping': bad_mapping})
class FederatedTokenTests(test_v3.RestfulTestCase, FederatedSetupMixin):
    def auth_plugin_config_override(self):
        """Restrict the configured auth methods to the saml2 plugin."""
        methods = ['saml2']
        super(FederatedTokenTests, self).auth_plugin_config_override(methods)
    def setUp(self):
        super(FederatedTokenTests, self).setUp()
        # Audit notes recorded by the fake notifier below, newest last.
        self._notifications = []
        def fake_saml_notify(action, context, user_id, group_ids,
                             identity_provider, protocol, token_id, outcome):
            # Test double for notifications.send_saml_audit_notification:
            # records the arguments of interest instead of emitting a real
            # notification (context/group_ids/token_id/outcome are dropped).
            note = {
                'action': action,
                'user_id': user_id,
                'identity_provider': identity_provider,
                'protocol': protocol,
                'send_notification_called': True}
            self._notifications.append(note)
        self.useFixture(mockpatch.PatchObject(
            notifications,
            'send_saml_audit_notification',
            fake_saml_notify))
def _assert_last_notify(self, action, identity_provider, protocol,
user_id=None):
self.assertTrue(self._notifications)
note = self._notifications[-1]
if user_id:
self.assertEqual(note['user_id'], user_id)
self.assertEqual(note['action'], action)
self.assertEqual(note['identity_provider'], identity_provider)
self.assertEqual(note['protocol'], protocol)
self.assertTrue(note['send_notification_called'])
    def load_fixtures(self, fixtures):
        """Load the standard fixtures plus the federation sample data."""
        super(FederatedTokenTests, self).load_fixtures(fixtures)
        self.load_federation_sample_data()
    def test_issue_unscoped_token_notify(self):
        """Issuing an unscoped token emits a saml audit notification."""
        self._issue_unscoped_token()
        self._assert_last_notify(self.ACTION, self.IDP, self.PROTOCOL)
    def test_issue_unscoped_token(self):
        """An unscoped token is issued and carries a valid mapped user."""
        r = self._issue_unscoped_token()
        self.assertIsNotNone(r.headers.get('X-Subject-Token'))
        self.assertValidMappedUser(r.json['token'])
def test_issue_unscoped_token_disabled_idp(self):
"""Checks if authentication works with disabled identity providers.
Test plan:
1) Disable default IdP
2) Try issuing unscoped token for that IdP
3) Expect server to forbid authentication
"""
enabled_false = {'enabled': False}
self.federation_api.update_idp(self.IDP, enabled_false)
self.assertRaises(exception.Forbidden,
self._issue_unscoped_token)
def test_issue_unscoped_token_group_names_in_mapping(self):
r = self._issue_unscoped_token(assertion='ANOTHER_CUSTOMER_ASSERTION')
ref_groups = set([self.group_customers['id'], self.group_admins['id']])
token_resp = r.json_body
token_groups = token_resp['token']['user']['OS-FEDERATION']['groups']
token_groups = set([group['id'] for group in token_groups])
self.assertEqual(ref_groups, token_groups)
    def test_issue_unscoped_tokens_nonexisting_group(self):
        """An assertion mapping to no backend groups raises MissingGroups."""
        self.assertRaises(exception.MissingGroups,
                          self._issue_unscoped_token,
                          assertion='ANOTHER_TESTER_ASSERTION')
def test_issue_unscoped_token_with_remote_no_attribute(self):
r = self._issue_unscoped_token(idp=self.IDP_WITH_REMOTE,
environment={
self.REMOTE_ID_ATTR:
self.REMOTE_IDS[0]
})
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_issue_unscoped_token_with_remote(self):
self.config_fixture.config(group='federation',
remote_id_attribute=self.REMOTE_ID_ATTR)
r = self._issue_unscoped_token(idp=self.IDP_WITH_REMOTE,
environment={
self.REMOTE_ID_ATTR:
self.REMOTE_IDS[0]
})
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_issue_unscoped_token_with_saml2_remote(self):
self.config_fixture.config(group='saml2',
remote_id_attribute=self.REMOTE_ID_ATTR)
r = self._issue_unscoped_token(idp=self.IDP_WITH_REMOTE,
environment={
self.REMOTE_ID_ATTR:
self.REMOTE_IDS[0]
})
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_issue_unscoped_token_with_remote_different(self):
self.config_fixture.config(group='federation',
remote_id_attribute=self.REMOTE_ID_ATTR)
self.assertRaises(exception.Forbidden,
self._issue_unscoped_token,
idp=self.IDP_WITH_REMOTE,
environment={
self.REMOTE_ID_ATTR: uuid.uuid4().hex
})
def test_issue_unscoped_token_with_remote_default_overwritten(self):
"""Test that protocol remote_id_attribute has higher priority.
Make sure the parameter stored under ``protocol`` section has higher
priority over parameter from default ``federation`` configuration
section.
"""
self.config_fixture.config(group='saml2',
remote_id_attribute=self.REMOTE_ID_ATTR)
self.config_fixture.config(group='federation',
remote_id_attribute=uuid.uuid4().hex)
r = self._issue_unscoped_token(idp=self.IDP_WITH_REMOTE,
environment={
self.REMOTE_ID_ATTR:
self.REMOTE_IDS[0]
})
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_issue_unscoped_token_with_remote_unavailable(self):
self.config_fixture.config(group='federation',
remote_id_attribute=self.REMOTE_ID_ATTR)
self.assertRaises(exception.Unauthorized,
self._issue_unscoped_token,
idp=self.IDP_WITH_REMOTE,
environment={
uuid.uuid4().hex: uuid.uuid4().hex
})
    def test_issue_unscoped_token_with_remote_user_as_empty_string(self):
        # make sure that REMOTE_USER set as the empty string won't interfere
        r = self._issue_unscoped_token(environment={'REMOTE_USER': ''})
        self.assertIsNotNone(r.headers.get('X-Subject-Token'))
    def test_issue_unscoped_token_no_groups(self):
        """An assertion yielding no groups results in Unauthorized."""
        self.assertRaises(exception.Unauthorized,
                          self._issue_unscoped_token,
                          assertion='BAD_TESTER_ASSERTION')
def test_issue_unscoped_token_malformed_environment(self):
"""Test whether non string objects are filtered out.
Put non string objects into the environment, inject
correct assertion and try to get an unscoped token.
Expect server not to fail on using split() method on
non string objects and return token id in the HTTP header.
"""
api = auth_controllers.Auth()
context = {
'environment': {
'malformed_object': object(),
'another_bad_idea': tuple(range(10)),
'yet_another_bad_param': dict(zip(uuid.uuid4().hex,
range(32)))
}
}
self._inject_assertion(context, 'EMPLOYEE_ASSERTION')
r = api.authenticate_for_token(context, self.UNSCOPED_V3_SAML2_REQ)
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
    def test_scope_to_project_once_notify(self):
        """Scoping a token emits an audit note carrying the user id."""
        r = self.v3_create_token(
            self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE)
        user_id = r.json['token']['user']['id']
        self._assert_last_notify(self.ACTION, self.IDP, self.PROTOCOL, user_id)
def test_scope_to_project_once(self):
r = self.v3_create_token(
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE)
token_resp = r.result['token']
project_id = token_resp['project']['id']
self._check_project_scoped_token_attributes(token_resp, project_id)
roles_ref = [self.role_employee]
projects_ref = self.proj_employees
self._check_projects_and_roles(token_resp, roles_ref, projects_ref)
self.assertValidMappedUser(token_resp)
def test_scope_token_with_idp_disabled(self):
"""Scope token issued by disabled IdP.
Try scoping the token issued by an IdP which is disabled now. Expect
server to refuse scoping operation.
This test confirms correct behaviour when IdP was enabled and unscoped
token was issued, but disabled before user tries to scope the token.
Here we assume the unscoped token was already issued and start from
the moment where IdP is being disabled and unscoped token is being
used.
Test plan:
1) Disable IdP
2) Try scoping unscoped token
"""
enabled_false = {'enabled': False}
self.federation_api.update_idp(self.IDP, enabled_false)
self.v3_create_token(
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER,
expected_status=http_client.FORBIDDEN)
    def test_scope_to_bad_project(self):
        """Scope unscoped token with a project we don't have access to."""
        self.v3_create_token(
            self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER,
            expected_status=http_client.UNAUTHORIZED)
def test_scope_to_project_multiple_times(self):
"""Try to scope the unscoped token multiple times.
The new tokens should be scoped to:
* Customers' project
* Employees' project
"""
bodies = (self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_ADMIN,
self.TOKEN_SCOPE_PROJECT_CUSTOMER_FROM_ADMIN)
project_ids = (self.proj_employees['id'],
self.proj_customers['id'])
for body, project_id_ref in zip(bodies, project_ids):
r = self.v3_create_token(body)
token_resp = r.result['token']
self._check_project_scoped_token_attributes(token_resp,
project_id_ref)
    def test_scope_to_project_with_only_inherited_roles(self):
        """Try to scope token whose only roles are inherited."""
        # Inherited assignments only take effect with os_inherit enabled.
        self.config_fixture.config(group='os_inherit', enabled=True)
        r = self.v3_create_token(
            self.TOKEN_SCOPE_PROJECT_INHERITED_FROM_CUSTOMER)
        token_resp = r.result['token']
        self._check_project_scoped_token_attributes(
            token_resp, self.project_inherited['id'])
        # The customer role must be visible on the inherited project.
        roles_ref = [self.role_customer]
        projects_ref = self.project_inherited
        self._check_projects_and_roles(token_resp, roles_ref, projects_ref)
        self.assertValidMappedUser(token_resp)
def test_scope_token_from_nonexistent_unscoped_token(self):
"""Try to scope token from non-existent unscoped token."""
self.v3_create_token(
self.TOKEN_SCOPE_PROJECT_FROM_NONEXISTENT_TOKEN,
expected_status=http_client.NOT_FOUND)
    def test_issue_token_from_rules_without_user(self):
        """An assertion that maps to no user must not yield a token."""
        api = auth_controllers.Auth()
        # Build a minimal request context and inject an assertion that does
        # not satisfy any user-producing mapping rule.
        context = {'environment': {}}
        self._inject_assertion(context, 'BAD_TESTER_ASSERTION')
        self.assertRaises(exception.Unauthorized,
                          api.authenticate_for_token,
                          context, self.UNSCOPED_V3_SAML2_REQ)
    def test_issue_token_with_nonexistent_group(self):
        """Inject assertion that matches rule issuing bad group id.

        Expect server to find out that some groups are missing in the
        backend and raise exception.MappedGroupNotFound exception.
        """
        self.assertRaises(exception.MappedGroupNotFound,
                          self._issue_unscoped_token,
                          assertion='CONTRACTOR_ASSERTION')
def test_scope_to_domain_once(self):
r = self.v3_create_token(self.TOKEN_SCOPE_DOMAIN_A_FROM_CUSTOMER)
token_resp = r.result['token']
self._check_domain_scoped_token_attributes(token_resp,
self.domainA['id'])
    def test_scope_to_domain_multiple_tokens(self):
        """Issue multiple tokens scoping to different domains.

        The new tokens should be scoped to:
        * domainA
        * domainB
        * domainC
        """
        bodies = (self.TOKEN_SCOPE_DOMAIN_A_FROM_ADMIN,
                  self.TOKEN_SCOPE_DOMAIN_B_FROM_ADMIN,
                  self.TOKEN_SCOPE_DOMAIN_C_FROM_ADMIN)
        domain_ids = (self.domainA['id'],
                      self.domainB['id'],
                      self.domainC['id'])
        # Each request body is paired with the domain id it should scope to.
        for body, domain_id_ref in zip(bodies, domain_ids):
            r = self.v3_create_token(body)
            token_resp = r.result['token']
            self._check_domain_scoped_token_attributes(token_resp,
                                                       domain_id_ref)
def test_scope_to_domain_with_only_inherited_roles_fails(self):
"""Try to scope to a domain that has no direct roles."""
self.v3_create_token(
self.TOKEN_SCOPE_DOMAIN_D_FROM_CUSTOMER,
expected_status=http_client.UNAUTHORIZED)
def test_list_projects(self):
urls = ('/OS-FEDERATION/projects', '/auth/projects')
token = (self.tokens['CUSTOMER_ASSERTION'],
self.tokens['EMPLOYEE_ASSERTION'],
self.tokens['ADMIN_ASSERTION'])
self.config_fixture.config(group='os_inherit', enabled=True)
projects_refs = (set([self.proj_customers['id'],
self.project_inherited['id']]),
set([self.proj_employees['id'],
self.project_all['id']]),
set([self.proj_employees['id'],
self.project_all['id'],
self.proj_customers['id'],
self.project_inherited['id']]))
for token, projects_ref in zip(token, projects_refs):
for url in urls:
r = self.get(url, token=token)
projects_resp = r.result['projects']
projects = set(p['id'] for p in projects_resp)
self.assertEqual(projects_ref, projects,
'match failed for url %s' % url)
    # TODO(samueldmq): Create another test class for role inheritance tests.
    # The advantage would be to reduce the complexity of this test class and
    # have tests specific to this functionality grouped, easing readability
    # and maintainability.
    def test_list_projects_for_inherited_project_assignment(self):
        """List projects including one gained via an inherited assignment."""
        # Enable os_inherit extension
        self.config_fixture.config(group='os_inherit', enabled=True)
        # Create a subproject
        subproject_inherited = unit.new_project_ref(
            domain_id=self.domainD['id'],
            parent_id=self.project_inherited['id'])
        self.resource_api.create_project(subproject_inherited['id'],
                                         subproject_inherited)
        # Create an inherited role assignment
        self.assignment_api.create_grant(
            role_id=self.role_employee['id'],
            group_id=self.group_employees['id'],
            project_id=self.project_inherited['id'],
            inherited_to_projects=True)
        # Define expected projects from employee assertion, which contain
        # the created subproject
        expected_project_ids = [self.project_all['id'],
                                self.proj_employees['id'],
                                subproject_inherited['id']]
        # Assert expected projects for both available URLs
        for url in ('/OS-FEDERATION/projects', '/auth/projects'):
            r = self.get(url, token=self.tokens['EMPLOYEE_ASSERTION'])
            project_ids = [project['id'] for project in r.result['projects']]
            self.assertEqual(len(expected_project_ids), len(project_ids))
            for expected_project_id in expected_project_ids:
                self.assertIn(expected_project_id, project_ids,
                              'Projects match failed for url %s' % url)
    def test_list_domains(self):
        """List domains available to each federated user via both URLs."""
        urls = ('/OS-FEDERATION/domains', '/auth/domains')
        tokens = (self.tokens['CUSTOMER_ASSERTION'],
                  self.tokens['EMPLOYEE_ASSERTION'],
                  self.tokens['ADMIN_ASSERTION'])
        # NOTE(henry-nash): domain D does not appear in the expected results
        # since it only had inherited roles (which only apply to projects
        # within the domain)
        domain_refs = (set([self.domainA['id']]),
                       set([self.domainA['id'],
                            self.domainB['id']]),
                       set([self.domainA['id'],
                            self.domainB['id'],
                            self.domainC['id']]))
        for token, domains_ref in zip(tokens, domain_refs):
            for url in urls:
                r = self.get(url, token=token)
                domains_resp = r.result['domains']
                domains = set(p['id'] for p in domains_resp)
                self.assertEqual(domains_ref, domains,
                                 'match failed for url %s' % url)
@utils.wip('This will fail because of bug #1501032. The returned method'
'list should contain "saml2". This is documented in bug '
'1501032.')
def test_full_workflow(self):
"""Test 'standard' workflow for granting access tokens.
* Issue unscoped token
* List available projects based on groups
* Scope token to one of available projects
"""
r = self._issue_unscoped_token()
token_resp = r.json_body['token']
# NOTE(lbragstad): Ensure only 'saml2' is in the method list.
self.assertListEqual(['saml2'], token_resp['methods'])
self.assertValidMappedUser(token_resp)
employee_unscoped_token_id = r.headers.get('X-Subject-Token')
r = self.get('/auth/projects', token=employee_unscoped_token_id)
projects = r.result['projects']
random_project = random.randint(0, len(projects)) - 1
project = projects[random_project]
v3_scope_request = self._scope_request(employee_unscoped_token_id,
'project', project['id'])
r = self.v3_create_token(v3_scope_request)
token_resp = r.result['token']
# FIXME(lbragstad): 'token' should be in the list of methods returned
# but it isn't. This is documented in bug 1501032.
self.assertIn('token', token_resp['methods'])
self.assertIn('saml2', token_resp['methods'])
self._check_project_scoped_token_attributes(token_resp, project['id'])
    def test_workflow_with_groups_deletion(self):
        """Test full workflow with groups deletion before token scoping.

        The test scenario is as follows:
        - Create group ``group``
        - Create and assign roles to ``group`` and ``project_all``
        - Patch mapping rules for existing IdP so it issues group id
        - Issue unscoped token with ``group``'s id
        - Delete group ``group``
        - Scope token to ``project_all``
        - Expect HTTP 500 response
        """
        # create group and role
        group = unit.new_group_ref(domain_id=self.domainA['id'])
        group = self.identity_api.create_group(group)
        role = unit.new_role_ref()
        self.role_api.create_role(role['id'], role)
        # assign role to group and project_admins
        self.assignment_api.create_grant(role['id'],
                                         group_id=group['id'],
                                         project_id=self.project_all['id'])
        # Mapping: any user whose LastName is 'Account' is placed in the
        # freshly created group and gets an ephemeral user named after the
        # UserName attribute.
        rules = {
            'rules': [
                {
                    'local': [
                        {
                            'group': {
                                'id': group['id']
                            }
                        },
                        {
                            'user': {
                                'name': '{0}'
                            }
                        }
                    ],
                    'remote': [
                        {
                            'type': 'UserName'
                        },
                        {
                            'type': 'LastName',
                            'any_one_of': [
                                'Account'
                            ]
                        }
                    ]
                }
            ]
        }
        self.federation_api.update_mapping(self.mapping['id'], rules)
        r = self._issue_unscoped_token(assertion='TESTER_ASSERTION')
        token_id = r.headers.get('X-Subject-Token')
        # delete group
        self.identity_api.delete_group(group['id'])
        # scope token to project_all, expect HTTP 500
        scoped_token = self._scope_request(
            token_id, 'project',
            self.project_all['id'])
        self.v3_create_token(
            scoped_token, expected_status=http_client.INTERNAL_SERVER_ERROR)
def test_lists_with_missing_group_in_backend(self):
"""Test a mapping that points to a group that does not exist
For explicit mappings, we expect the group to exist in the backend,
but for lists, specifically blacklists, a missing group is expected
as many groups will be specified by the IdP that are not Keystone
groups.
The test scenario is as follows:
- Create group ``EXISTS``
- Set mapping rules for existing IdP with a blacklist
that passes through as REMOTE_USER_GROUPS
- Issue unscoped token with on group ``EXISTS`` id in it
"""
domain_id = self.domainA['id']
domain_name = self.domainA['name']
group = unit.new_group_ref(domain_id=domain_id, name='EXISTS')
group = self.identity_api.create_group(group)
rules = {
'rules': [
{
"local": [
{
"user": {
"name": "{0}",
"id": "{0}"
}
}
],
"remote": [
{
"type": "REMOTE_USER"
}
]
},
{
"local": [
{
"groups": "{0}",
"domain": {"name": domain_name}
}
],
"remote": [
{
"type": "REMOTE_USER_GROUPS",
}
]
}
]
}
self.federation_api.update_mapping(self.mapping['id'], rules)
    def test_empty_blacklist_passess_all_values(self):
        """Test a mapping with empty blacklist specified.

        Not adding a ``blacklist`` keyword to the mapping rules has the same
        effect as adding an empty ``blacklist``.
        In both cases, the mapping engine will not discard any groups that are
        associated with apache environment variables.
        This test checks scenario where an empty blacklist was specified.
        Expected result is to allow any value.
        The test scenario is as follows:
        - Create group ``EXISTS``
        - Create group ``NO_EXISTS``
        - Set mapping rules for existing IdP with a blacklist
          that passes through as REMOTE_USER_GROUPS
        - Issue unscoped token with groups ``EXISTS`` and ``NO_EXISTS``
          assigned
        """
        domain_id = self.domainA['id']
        domain_name = self.domainA['name']
        # Add a group "EXISTS"
        group_exists = unit.new_group_ref(domain_id=domain_id, name='EXISTS')
        group_exists = self.identity_api.create_group(group_exists)
        # Add a group "NO_EXISTS"
        group_no_exists = unit.new_group_ref(domain_id=domain_id,
                                             name='NO_EXISTS')
        group_no_exists = self.identity_api.create_group(group_no_exists)
        group_ids = set([group_exists['id'], group_no_exists['id']])
        # Mapping: ephemeral user named after REMOTE_USER; all groups from
        # REMOTE_USER_GROUPS pass through (empty blacklist filters nothing).
        rules = {
            'rules': [
                {
                    "local": [
                        {
                            "user": {
                                "name": "{0}",
                                "id": "{0}"
                            }
                        }
                    ],
                    "remote": [
                        {
                            "type": "REMOTE_USER"
                        }
                    ]
                },
                {
                    "local": [
                        {
                            "groups": "{0}",
                            "domain": {"name": domain_name}
                        }
                    ],
                    "remote": [
                        {
                            "type": "REMOTE_USER_GROUPS",
                            "blacklist": []
                        }
                    ]
                }
            ]
        }
        self.federation_api.update_mapping(self.mapping['id'], rules)
        r = self._issue_unscoped_token(assertion='UNMATCHED_GROUP_ASSERTION')
        assigned_group_ids = r.json['token']['user']['OS-FEDERATION']['groups']
        # Both groups must survive the (empty) blacklist filter.
        self.assertEqual(len(group_ids), len(assigned_group_ids))
        for group in assigned_group_ids:
            self.assertIn(group['id'], group_ids)
    def test_not_adding_blacklist_passess_all_values(self):
        """Test a mapping without blacklist specified.

        Not adding a ``blacklist`` keyword to the mapping rules has the same
        effect as adding an empty ``blacklist``. In both cases all values will
        be accepted and passed.
        This test checks scenario where an blacklist was not specified.
        Expected result is to allow any value.
        The test scenario is as follows:
        - Create group ``EXISTS``
        - Create group ``NO_EXISTS``
        - Set mapping rules for existing IdP with a blacklist
          that passes through as REMOTE_USER_GROUPS
        - Issue unscoped token with on groups ``EXISTS`` and ``NO_EXISTS``
          assigned
        """
        domain_id = self.domainA['id']
        domain_name = self.domainA['name']
        # Add a group "EXISTS"
        group_exists = unit.new_group_ref(domain_id=domain_id,
                                          name='EXISTS')
        group_exists = self.identity_api.create_group(group_exists)
        # Add a group "NO_EXISTS"
        group_no_exists = unit.new_group_ref(domain_id=domain_id,
                                             name='NO_EXISTS')
        group_no_exists = self.identity_api.create_group(group_no_exists)
        group_ids = set([group_exists['id'], group_no_exists['id']])
        # Mapping: same as the empty-blacklist case but with no blacklist
        # keyword at all -- behaviour must be identical.
        rules = {
            'rules': [
                {
                    "local": [
                        {
                            "user": {
                                "name": "{0}",
                                "id": "{0}"
                            }
                        }
                    ],
                    "remote": [
                        {
                            "type": "REMOTE_USER"
                        }
                    ]
                },
                {
                    "local": [
                        {
                            "groups": "{0}",
                            "domain": {"name": domain_name}
                        }
                    ],
                    "remote": [
                        {
                            "type": "REMOTE_USER_GROUPS",
                        }
                    ]
                }
            ]
        }
        self.federation_api.update_mapping(self.mapping['id'], rules)
        r = self._issue_unscoped_token(assertion='UNMATCHED_GROUP_ASSERTION')
        assigned_group_ids = r.json['token']['user']['OS-FEDERATION']['groups']
        # With no blacklist, every asserted group must be passed through.
        self.assertEqual(len(group_ids), len(assigned_group_ids))
        for group in assigned_group_ids:
            self.assertIn(group['id'], group_ids)
    def test_empty_whitelist_discards_all_values(self):
        """Test that empty whitelist blocks all the values.

        Not adding a ``whitelist`` keyword to the mapping value is different
        than adding empty whitelist. The former case will simply pass all the
        values, whereas the latter would discard all the values.
        This test checks scenario where an empty whitelist was specified.
        The expected result is that no groups are matched.
        The test scenario is as follows:
        - Create group ``EXISTS``
        - Set mapping rules for existing IdP with an empty whitelist
          that would discard any values from the assertion
        - Try issuing unscoped token, expect server to raise
          ``exception.MissingGroups`` as no groups were matched and ephemeral
          user does not have any group assigned.
        """
        domain_id = self.domainA['id']
        domain_name = self.domainA['name']
        group = unit.new_group_ref(domain_id=domain_id, name='EXISTS')
        group = self.identity_api.create_group(group)
        # Mapping: the empty whitelist on REMOTE_USER_GROUPS rejects every
        # asserted group, so the ephemeral user ends up with none.
        rules = {
            'rules': [
                {
                    "local": [
                        {
                            "user": {
                                "name": "{0}",
                                "id": "{0}"
                            }
                        }
                    ],
                    "remote": [
                        {
                            "type": "REMOTE_USER"
                        }
                    ]
                },
                {
                    "local": [
                        {
                            "groups": "{0}",
                            "domain": {"name": domain_name}
                        }
                    ],
                    "remote": [
                        {
                            "type": "REMOTE_USER_GROUPS",
                            "whitelist": []
                        }
                    ]
                }
            ]
        }
        self.federation_api.update_mapping(self.mapping['id'], rules)
        self.assertRaises(exception.MissingGroups,
                          self._issue_unscoped_token,
                          assertion='UNMATCHED_GROUP_ASSERTION')
    def test_not_setting_whitelist_accepts_all_values(self):
        """Test that not setting whitelist passes.

        Not adding a ``whitelist`` keyword to the mapping value is different
        than adding empty whitelist. The former case will simply pass all the
        values, whereas the latter would discard all the values.
        This test checks a scenario where a ``whitelist`` was not specified.
        Expected result is that no groups are ignored.
        The test scenario is as follows:
        - Create group ``EXISTS``
        - Set mapping rules for existing IdP with an empty whitelist
          that would discard any values from the assertion
        - Issue an unscoped token and make sure ephemeral user is a member of
          two groups.
        """
        domain_id = self.domainA['id']
        domain_name = self.domainA['name']
        # Add a group "EXISTS"
        group_exists = unit.new_group_ref(domain_id=domain_id,
                                          name='EXISTS')
        group_exists = self.identity_api.create_group(group_exists)
        # Add a group "NO_EXISTS"
        group_no_exists = unit.new_group_ref(domain_id=domain_id,
                                             name='NO_EXISTS')
        group_no_exists = self.identity_api.create_group(group_no_exists)
        group_ids = set([group_exists['id'], group_no_exists['id']])
        # Mapping: no whitelist keyword at all -- every asserted group must
        # be passed through to the ephemeral user.
        rules = {
            'rules': [
                {
                    "local": [
                        {
                            "user": {
                                "name": "{0}",
                                "id": "{0}"
                            }
                        }
                    ],
                    "remote": [
                        {
                            "type": "REMOTE_USER"
                        }
                    ]
                },
                {
                    "local": [
                        {
                            "groups": "{0}",
                            "domain": {"name": domain_name}
                        }
                    ],
                    "remote": [
                        {
                            "type": "REMOTE_USER_GROUPS",
                        }
                    ]
                }
            ]
        }
        self.federation_api.update_mapping(self.mapping['id'], rules)
        r = self._issue_unscoped_token(assertion='UNMATCHED_GROUP_ASSERTION')
        assigned_group_ids = r.json['token']['user']['OS-FEDERATION']['groups']
        self.assertEqual(len(group_ids), len(assigned_group_ids))
        for group in assigned_group_ids:
            self.assertIn(group['id'], group_ids)
def test_assertion_prefix_parameter(self):
"""Test parameters filtering based on the prefix.
With ``assertion_prefix`` set to fixed, non default value,
issue an unscoped token from assertion EMPLOYEE_ASSERTION_PREFIXED.
Expect server to return unscoped token.
"""
self.config_fixture.config(group='federation',
assertion_prefix=self.ASSERTION_PREFIX)
r = self._issue_unscoped_token(assertion='EMPLOYEE_ASSERTION_PREFIXED')
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
    def test_assertion_prefix_parameter_expect_fail(self):
        """Test parameters filtering based on the prefix.

        With ``assertion_prefix`` default value set to empty string
        issue an unscoped token from assertion EMPLOYEE_ASSERTION.
        Next, configure ``assertion_prefix`` to value ``UserName``.
        Try issuing unscoped token with EMPLOYEE_ASSERTION.
        Expect server to raise exception.Unauthorized exception.
        """
        r = self._issue_unscoped_token()
        self.assertIsNotNone(r.headers.get('X-Subject-Token'))
        # After changing the prefix, the same assertion no longer matches.
        self.config_fixture.config(group='federation',
                                   assertion_prefix='UserName')
        self.assertRaises(exception.Unauthorized,
                          self._issue_unscoped_token)
    def test_v2_auth_with_federation_token_fails(self):
        """Test that using a federation token with v2 auth fails.

        If an admin sets up a federated Keystone environment, and a user
        incorrectly configures a service (like Nova) to only use v2 auth, the
        returned message should be informative.
        """
        r = self._issue_unscoped_token()
        token_id = r.headers.get('X-Subject-Token')
        # v2 validation must reject federated (v3-only) tokens.
        self.assertRaises(exception.Unauthorized,
                          self.token_provider_api.validate_v2_token,
                          token_id=token_id)
def test_unscoped_token_has_user_domain(self):
r = self._issue_unscoped_token()
self._check_domains_are_valid(r.json_body['token'])
def test_scoped_token_has_user_domain(self):
r = self.v3_create_token(
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE)
self._check_domains_are_valid(r.result['token'])
def test_issue_unscoped_token_for_local_user(self):
r = self._issue_unscoped_token(assertion='LOCAL_USER_ASSERTION')
token_resp = r.json_body['token']
self.assertListEqual(['saml2'], token_resp['methods'])
self.assertEqual(self.user['id'], token_resp['user']['id'])
self.assertEqual(self.user['name'], token_resp['user']['name'])
self.assertEqual(self.domain['id'], token_resp['user']['domain']['id'])
# Make sure the token is not scoped
self.assertNotIn('project', token_resp)
self.assertNotIn('domain', token_resp)
def test_issue_token_for_local_user_user_not_found(self):
self.assertRaises(exception.Unauthorized,
self._issue_unscoped_token,
assertion='ANOTHER_LOCAL_USER_ASSERTION')
class FernetFederatedTokenTests(test_v3.RestfulTestCase, FederatedSetupMixin):
    """Federation scenarios exercised against the Fernet token provider."""

    AUTH_METHOD = 'token'

    def load_fixtures(self, fixtures):
        super(FernetFederatedTokenTests, self).load_fixtures(fixtures)
        self.load_federation_sample_data()

    def config_overrides(self):
        super(FernetFederatedTokenTests, self).config_overrides()
        self.config_fixture.config(group='token', provider='fernet')
        self.useFixture(ksfixtures.KeyRepository(self.config_fixture))

    def auth_plugin_config_override(self):
        methods = ['saml2', 'token', 'password']
        super(FernetFederatedTokenTests,
              self).auth_plugin_config_override(methods)

    def test_federated_unscoped_token(self):
        resp = self._issue_unscoped_token()
        # A federated unscoped Fernet token has a fixed, known length.
        self.assertEqual(204, len(resp.headers['X-Subject-Token']))
        self.assertValidMappedUser(resp.json_body['token'])

    def test_federated_unscoped_token_with_multiple_groups(self):
        assertion = 'ANOTHER_CUSTOMER_ASSERTION'
        resp = self._issue_unscoped_token(assertion=assertion)
        # Extra group references increase the payload, hence the length.
        self.assertEqual(226, len(resp.headers['X-Subject-Token']))
        self.assertValidMappedUser(resp.json_body['token'])

    def test_validate_federated_unscoped_token(self):
        resp = self._issue_unscoped_token()
        unscoped_token = resp.headers.get('X-Subject-Token')
        # assert that the token we received is valid
        self.get('/auth/tokens/', headers={'X-Subject-Token': unscoped_token})

    def test_fernet_full_workflow(self):
        """Test 'standard' workflow for granting Fernet access tokens.

        * Issue unscoped token
        * List available projects based on groups
        * Scope token to one of available projects
        """
        resp = self._issue_unscoped_token()
        self.assertValidMappedUser(resp.json_body['token'])
        unscoped_token = resp.headers.get('X-Subject-Token')
        resp = self.get('/auth/projects', token=unscoped_token)
        projects = resp.result['projects']
        # random.choice picks each project with equal probability; the old
        # `randint(0, len(projects)) - 1` index double-weighted the last one.
        project = random.choice(projects)
        v3_scope_request = self._scope_request(unscoped_token,
                                               'project', project['id'])
        resp = self.v3_create_token(v3_scope_request)
        token_resp = resp.result['token']
        self._check_project_scoped_token_attributes(token_resp, project['id'])
class FederatedTokenTestsMethodToken(FederatedTokenTests):
    """Test federation operation with unified scoping auth method.

    Test all the operations with auth method set to ``token`` as a new,
    unified way for scoping all the tokens.
    """

    AUTH_METHOD = 'token'

    def auth_plugin_config_override(self):
        methods = ['saml2', 'token']
        # NOTE: super() is deliberately anchored at FederatedTokenTests so
        # that the parent's own override is skipped and the grandparent
        # implementation receives this class's method list.
        super(FederatedTokenTests,
              self).auth_plugin_config_override(methods)

    @utils.wip('This will fail because of bug #1501032. The returned method'
               'list should contain "saml2". This is documented in bug '
               '1501032.')
    def test_full_workflow(self):
        """Test 'standard' workflow for granting access tokens.

        * Issue unscoped token
        * List available projects based on groups
        * Scope token to one of available projects
        """
        r = self._issue_unscoped_token()
        token_resp = r.json_body['token']
        # NOTE(lbragstad): Ensure only 'saml2' is in the method list.
        self.assertListEqual(['saml2'], token_resp['methods'])
        self.assertValidMappedUser(token_resp)
        employee_unscoped_token_id = r.headers.get('X-Subject-Token')
        r = self.get('/auth/projects', token=employee_unscoped_token_id)
        projects = r.result['projects']
        # Pick a project uniformly; the old `randint(0, len(projects)) - 1`
        # indexing double-weighted the last project.
        project = random.choice(projects)
        v3_scope_request = self._scope_request(employee_unscoped_token_id,
                                               'project', project['id'])
        # Use v3_create_token for consistency with the base class version;
        # it is the non-deprecated spelling of v3_authenticate_token.
        r = self.v3_create_token(v3_scope_request)
        token_resp = r.result['token']
        self.assertIn('token', token_resp['methods'])
        self.assertIn('saml2', token_resp['methods'])
        self._check_project_scoped_token_attributes(token_resp, project['id'])
class FederatedUserTests(test_v3.RestfulTestCase, FederatedSetupMixin):
    """Tests for federated users.

    Tests new shadow users functionality.
    """

    def auth_plugin_config_override(self):
        methods = ['saml2']
        super(FederatedUserTests, self).auth_plugin_config_override(methods)

    # NOTE: the previous no-op setUp override (which only called super) was
    # removed; the inherited behavior is identical.

    def load_fixtures(self, fixtures):
        super(FederatedUserTests, self).load_fixtures(fixtures)
        self.load_federation_sample_data()

    def test_user_id_persistense(self):
        """Ensure user_id is persisted for multiple federated authn calls."""
        r = self._issue_unscoped_token()
        user_id = r.json_body['token']['user']['id']
        # A second authentication with the same assertion must map to the
        # same shadow user id.
        r = self._issue_unscoped_token()
        user_id2 = r.json_body['token']['user']['id']
        self.assertEqual(user_id, user_id2)
class JsonHomeTests(test_v3.RestfulTestCase, test_v3.JsonHomeTestMixin):
    """Check OS-FEDERATION resources appear in the JSON home document."""
    # Expected JSON home entry for the identity_provider resource; the
    # JsonHomeTestMixin compares this against the server's response.
    JSON_HOME_DATA = {
        'http://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/'
        '1.0/rel/identity_provider': {
            'href-template': '/OS-FEDERATION/identity_providers/{idp_id}',
            'href-vars': {
                'idp_id': 'http://docs.openstack.org/api/openstack-identity/3/'
                'ext/OS-FEDERATION/1.0/param/idp_id'
            },
        },
    }
def _is_xmlsec1_installed():
p = subprocess.Popen(
['which', 'xmlsec1'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# invert the return code
return not bool(p.wait())
def _load_xml(filename):
    """Return the contents of an XML fixture file located under XMLDIR."""
    path = os.path.join(XMLDIR, filename)
    with open(path, 'r') as xml_file:
        return xml_file.read()
class SAMLGenerationTests(test_v3.RestfulTestCase):
    """Tests for generating SAML assertions from Keystone tokens."""
    # Auth endpoint of the remote Service Provider targeted by requests.
    SP_AUTH_URL = ('http://beta.com:5000/v3/OS-FEDERATION/identity_providers'
                   '/BETA/protocols/saml2/auth')
    # Pre-signed assertion fixture used to stub out real signing.
    ASSERTION_FILE = 'signed_saml2_assertion.xml'
    # The values of the following variables match the attributes values found
    # in ASSERTION_FILE
    ISSUER = 'https://acme.com/FIM/sps/openstack/saml20'
    RECIPIENT = 'http://beta.com/Shibboleth.sso/SAML2/POST'
    SUBJECT = 'test_user'
    SUBJECT_DOMAIN = 'user_domain'
    ROLES = ['admin', 'member']
    PROJECT = 'development'
    PROJECT_DOMAIN = 'project_domain'
    SAML_GENERATION_ROUTE = '/auth/OS-FEDERATION/saml2'
    ECP_GENERATION_ROUTE = '/auth/OS-FEDERATION/saml2/ecp'
    ASSERTION_VERSION = "2.0"
    # NOTE(review): name keeps the historical "PROVDIER" misspelling on
    # purpose; renaming would touch every test in this class.
    SERVICE_PROVDIER_ID = 'ACME'
def sp_ref(self):
ref = {
'auth_url': self.SP_AUTH_URL,
'enabled': True,
'description': uuid.uuid4().hex,
'sp_url': self.RECIPIENT,
'relay_state_prefix': CONF.saml.relay_state_prefix,
}
return ref
    def setUp(self):
        """Load the signed assertion fixture and register a test SP."""
        super(SAMLGenerationTests, self).setUp()
        # Parse the pre-signed assertion; it is returned by the mocked
        # _sign_assertion in the tests below.
        self.signed_assertion = saml2.create_class_from_xml_string(
            saml.Assertion, _load_xml(self.ASSERTION_FILE))
        self.sp = self.sp_ref()
        url = '/OS-FEDERATION/service_providers/' + self.SERVICE_PROVDIER_ID
        self.put(url, body={'service_provider': self.sp},
                 expected_status=http_client.CREATED)
    def test_samlize_token_values(self):
        """Test the SAML generator produces a SAML object.

        Test the SAML generator directly by passing known arguments, the result
        should be a SAML object that consistently includes attributes based on
        the known arguments that were passed in.
        """
        # Signing is stubbed out; only attribute generation is under test.
        with mock.patch.object(keystone_idp, '_sign_assertion',
                               return_value=self.signed_assertion):
            generator = keystone_idp.SAMLGenerator()
            response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
                                               self.SUBJECT,
                                               self.SUBJECT_DOMAIN,
                                               self.ROLES, self.PROJECT,
                                               self.PROJECT_DOMAIN)
        assertion = response.assertion
        self.assertIsNotNone(assertion)
        self.assertIsInstance(assertion, saml.Assertion)
        issuer = response.issuer
        self.assertEqual(self.RECIPIENT, response.destination)
        self.assertEqual(self.ISSUER, issuer.text)
        # Attributes appear in a fixed order: user, user domain, roles,
        # project, project domain.
        user_attribute = assertion.attribute_statement[0].attribute[0]
        self.assertEqual(self.SUBJECT, user_attribute.attribute_value[0].text)
        user_domain_attribute = (
            assertion.attribute_statement[0].attribute[1])
        self.assertEqual(self.SUBJECT_DOMAIN,
                         user_domain_attribute.attribute_value[0].text)
        role_attribute = assertion.attribute_statement[0].attribute[2]
        for attribute_value in role_attribute.attribute_value:
            self.assertIn(attribute_value.text, self.ROLES)
        project_attribute = assertion.attribute_statement[0].attribute[3]
        self.assertEqual(self.PROJECT,
                         project_attribute.attribute_value[0].text)
        project_domain_attribute = (
            assertion.attribute_statement[0].attribute[4])
        self.assertEqual(self.PROJECT_DOMAIN,
                         project_domain_attribute.attribute_value[0].text)
    def test_verify_assertion_object(self):
        """Test that the Assertion object is built properly.

        The Assertion doesn't need to be signed in this test, so
        _sign_assertion method is patched and doesn't alter the assertion.
        """
        # Identity side effect: the assertion passes through signing as-is.
        with mock.patch.object(keystone_idp, '_sign_assertion',
                               side_effect=lambda x: x):
            generator = keystone_idp.SAMLGenerator()
            response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
                                               self.SUBJECT,
                                               self.SUBJECT_DOMAIN,
                                               self.ROLES, self.PROJECT,
                                               self.PROJECT_DOMAIN)
        assertion = response.assertion
        self.assertEqual(self.ASSERTION_VERSION, assertion.version)
    def test_valid_saml_xml(self):
        """Test the generated SAML object can become valid XML.

        Test the generator directly by passing known arguments, the result
        should be a SAML object that consistently includes attributes based on
        the known arguments that were passed in.
        """
        with mock.patch.object(keystone_idp, '_sign_assertion',
                               return_value=self.signed_assertion):
            generator = keystone_idp.SAMLGenerator()
            response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
                                               self.SUBJECT,
                                               self.SUBJECT_DOMAIN,
                                               self.ROLES, self.PROJECT,
                                               self.PROJECT_DOMAIN)
        saml_str = response.to_string()
        # Round-trip through the XML serializer and inspect by position:
        # child [0] is the Issuer, child [2] the Assertion; within the
        # assertion, element [4] is the attribute statement.
        response = etree.fromstring(saml_str)
        issuer = response[0]
        assertion = response[2]
        self.assertEqual(self.RECIPIENT, response.get('Destination'))
        self.assertEqual(self.ISSUER, issuer.text)
        user_attribute = assertion[4][0]
        self.assertEqual(self.SUBJECT, user_attribute[0].text)
        user_domain_attribute = assertion[4][1]
        self.assertEqual(self.SUBJECT_DOMAIN, user_domain_attribute[0].text)
        role_attribute = assertion[4][2]
        for attribute_value in role_attribute:
            self.assertIn(attribute_value.text, self.ROLES)
        project_attribute = assertion[4][3]
        self.assertEqual(self.PROJECT, project_attribute[0].text)
        project_domain_attribute = assertion[4][4]
        self.assertEqual(self.PROJECT_DOMAIN, project_domain_attribute[0].text)
    def test_assertion_using_explicit_namespace_prefixes(self):
        """Serialized assertions must use explicit saml/xmldsig prefixes."""
        def mocked_subprocess_check_output(*popenargs, **kwargs):
            # the last option is the assertion file to be signed
            filename = popenargs[0][-1]
            with open(filename, 'r') as f:
                assertion_content = f.read()
            # since we are not testing the signature itself, we can return
            # the assertion as is without signing it
            return assertion_content
        with mock.patch.object(subprocess, 'check_output',
                               side_effect=mocked_subprocess_check_output):
            generator = keystone_idp.SAMLGenerator()
            response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
                                               self.SUBJECT,
                                               self.SUBJECT_DOMAIN,
                                               self.ROLES, self.PROJECT,
                                               self.PROJECT_DOMAIN)
            assertion_xml = response.assertion.to_string()
            # make sure we have the proper tag and prefix for the assertion
            # namespace
            self.assertIn('<saml:Assertion', assertion_xml)
            self.assertIn('xmlns:saml="' + saml2.NAMESPACE + '"',
                          assertion_xml)
            self.assertIn('xmlns:xmldsig="' + xmldsig.NAMESPACE + '"',
                          assertion_xml)
    def test_saml_signing(self):
        """Test that the SAML generator produces a SAML object.

        Test the SAML generator directly by passing known arguments, the result
        should be a SAML object that consistently includes attributes based on
        the known arguments that were passed in.
        """
        # This test performs real signing, so it needs the external binary.
        if not _is_xmlsec1_installed():
            self.skipTest('xmlsec1 is not installed')
        generator = keystone_idp.SAMLGenerator()
        response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
                                           self.SUBJECT, self.SUBJECT_DOMAIN,
                                           self.ROLES, self.PROJECT,
                                           self.PROJECT_DOMAIN)
        signature = response.assertion.signature
        self.assertIsNotNone(signature)
        self.assertIsInstance(signature, xmldsig.Signature)
        idp_public_key = sigver.read_cert_from_file(CONF.saml.certfile, 'pem')
        cert_text = signature.key_info.x509_data[0].x509_certificate.text
        # NOTE(stevemar): Rather than one line of text, the certificate is
        # printed with newlines for readability, we remove these so we can
        # match it with the key that we used.
        cert_text = cert_text.replace(os.linesep, '')
        self.assertEqual(idp_public_key, cert_text)
def _create_generate_saml_request(self, token_id, sp_id):
return {
"auth": {
"identity": {
"methods": [
"token"
],
"token": {
"id": token_id
}
},
"scope": {
"service_provider": {
"id": sp_id
}
}
}
}
def _fetch_valid_token(self):
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
resp = self.v3_create_token(auth_data)
token_id = resp.headers.get('X-Subject-Token')
return token_id
def _fetch_domain_scoped_token(self):
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
user_domain_id=self.domain['id'])
resp = self.v3_create_token(auth_data)
token_id = resp.headers.get('X-Subject-Token')
return token_id
    def test_not_project_scoped_token(self):
        """Ensure SAML generation fails when passing domain-scoped tokens.

        The server should return a 403 Forbidden Action.
        """
        self.config_fixture.config(group='saml', idp_entity_id=self.ISSUER)
        token_id = self._fetch_domain_scoped_token()
        body = self._create_generate_saml_request(token_id,
                                                  self.SERVICE_PROVDIER_ID)
        # Signing is stubbed out; only the scope check is under test here.
        with mock.patch.object(keystone_idp, '_sign_assertion',
                               return_value=self.signed_assertion):
            self.post(self.SAML_GENERATION_ROUTE, body=body,
                      expected_status=http_client.FORBIDDEN)
    def test_generate_saml_route(self):
        """Test that the SAML generation endpoint produces XML.

        The SAML endpoint /v3/auth/OS-FEDERATION/saml2 should take as input,
        a scoped token ID, and a Service Provider ID.
        The controller should fetch details about the user from the token,
        and details about the service provider from its ID.
        This should be enough information to invoke the SAML generator and
        provide a valid SAML (XML) document back.
        """
        self.config_fixture.config(group='saml', idp_entity_id=self.ISSUER)
        token_id = self._fetch_valid_token()
        body = self._create_generate_saml_request(token_id,
                                                  self.SERVICE_PROVDIER_ID)
        with mock.patch.object(keystone_idp, '_sign_assertion',
                               return_value=self.signed_assertion):
            http_response = self.post(self.SAML_GENERATION_ROUTE, body=body,
                                      response_content_type='text/xml',
                                      expected_status=http_client.OK)
        # Child [0] is the Issuer, child [2] the Assertion; element [4] of
        # the assertion is the attribute statement.
        response = etree.fromstring(http_response.result)
        issuer = response[0]
        assertion = response[2]
        self.assertEqual(self.RECIPIENT, response.get('Destination'))
        self.assertEqual(self.ISSUER, issuer.text)
        # NOTE(stevemar): We should test this against expected values,
        # but the self.xyz attribute names are uuids, and we mock out
        # the result. Ideally we should update the mocked result with
        # some known data, and create the roles/project/user before
        # these tests run.
        user_attribute = assertion[4][0]
        self.assertIsInstance(user_attribute[0].text, str)
        user_domain_attribute = assertion[4][1]
        self.assertIsInstance(user_domain_attribute[0].text, str)
        role_attribute = assertion[4][2]
        self.assertIsInstance(role_attribute[0].text, str)
        project_attribute = assertion[4][3]
        self.assertIsInstance(project_attribute[0].text, str)
        project_domain_attribute = assertion[4][4]
        self.assertIsInstance(project_domain_attribute[0].text, str)
def test_invalid_scope_body(self):
    """Test that missing the scope in request body raises an exception.

    Raises exception.SchemaValidationError() - error 400 Bad Request

    """
    request_body = self._create_generate_saml_request(
        uuid.uuid4().hex, self.SERVICE_PROVDIER_ID)
    # Strip the scope so schema validation must reject the request.
    request_body['auth'].pop('scope')
    self.post(self.SAML_GENERATION_ROUTE, body=request_body,
              expected_status=http_client.BAD_REQUEST)
def test_invalid_token_body(self):
    """Test that missing the token in request body raises an exception.

    Raises exception.SchemaValidationError() - error 400 Bad Request

    """
    request_body = self._create_generate_saml_request(
        uuid.uuid4().hex, self.SERVICE_PROVDIER_ID)
    # Strip the token so schema validation must reject the request.
    request_body['auth']['identity'].pop('token')
    self.post(self.SAML_GENERATION_ROUTE, body=request_body,
              expected_status=http_client.BAD_REQUEST)
def test_sp_not_found(self):
    """Test SAML generation with an invalid service provider ID.

    Raises exception.ServiceProviderNotFound() - error Not Found 404

    """
    unknown_sp_id = uuid.uuid4().hex
    request_body = self._create_generate_saml_request(
        self._fetch_valid_token(), unknown_sp_id)
    self.post(self.SAML_GENERATION_ROUTE, body=request_body,
              expected_status=http_client.NOT_FOUND)
def test_sp_disabled(self):
    """Try generating assertion for disabled Service Provider."""
    # Disable the service provider before requesting the assertion.
    self.federation_api.update_sp(self.SERVICE_PROVDIER_ID,
                                  {'enabled': False})
    request_body = self._create_generate_saml_request(
        self._fetch_valid_token(), self.SERVICE_PROVDIER_ID)
    self.post(self.SAML_GENERATION_ROUTE, body=request_body,
              expected_status=http_client.FORBIDDEN)
def test_token_not_found(self):
    """Test that an invalid token in the request body raises an exception.

    Raises exception.TokenNotFound() - error Not Found 404

    """
    bogus_token_id = uuid.uuid4().hex
    request_body = self._create_generate_saml_request(
        bogus_token_id, self.SERVICE_PROVDIER_ID)
    self.post(self.SAML_GENERATION_ROUTE, body=request_body,
              expected_status=http_client.NOT_FOUND)
def test_generate_ecp_route(self):
    """Test that the ECP generation endpoint produces XML.

    The ECP endpoint /v3/auth/OS-FEDERATION/saml2/ecp should take the same
    input as the SAML generation endpoint (scoped token ID + Service
    Provider ID).
    The controller should return a SAML assertion that is wrapped in a
    SOAP envelope.

    """
    self.config_fixture.config(group='saml', idp_entity_id=self.ISSUER)
    token_id = self._fetch_valid_token()
    request_body = self._create_generate_saml_request(
        token_id, self.SERVICE_PROVDIER_ID)

    with mock.patch.object(keystone_idp, '_sign_assertion',
                           return_value=self.signed_assertion):
        http_response = self.post(self.ECP_GENERATION_ROUTE,
                                  body=request_body,
                                  response_content_type='text/xml',
                                  expected_status=http_client.OK)

    envelope = etree.fromstring(http_response.result)
    header = envelope[0]

    # Verify the relay state starts with the configured prefix
    # (e.g. 'ss:mem').
    prefix = CONF.saml.relay_state_prefix
    self.assertThat(header[0].text, matchers.StartsWith(prefix))

    # The SOAP body wraps the same SAML response the plain SAML
    # endpoint produces; spot-check its structure.
    soap_body = envelope[1]
    saml_response = soap_body[0]
    issuer = saml_response[0]
    assertion = saml_response[2]

    self.assertEqual(self.RECIPIENT, saml_response.get('Destination'))
    self.assertEqual(self.ISSUER, issuer.text)

    # The attribute statement (assertion[4]) carries, in order: user,
    # user domain, roles, project, project domain.
    for attribute_index in range(5):
        attribute = assertion[4][attribute_index]
        self.assertIsInstance(attribute[0].text, str)
@mock.patch('saml2.create_class_from_xml_string')
@mock.patch('oslo_utils.fileutils.write_to_tempfile')
@mock.patch.object(subprocess, 'check_output')
def test__sign_assertion(self, check_output_mock,
                         write_to_tempfile_mock, create_class_mock):
    """The signed xmlsec1 output is handed back to pysaml2 for parsing."""
    # NOTE: mock decorators inject arguments bottom-up: check_output
    # first, then write_to_tempfile, then create_class_from_xml_string.
    write_to_tempfile_mock.return_value = 'tmp_path'
    check_output_mock.return_value = 'fakeoutput'
    keystone_idp._sign_assertion(self.signed_assertion)
    # The raw signing-tool output must be parsed into a saml.Assertion.
    create_class_mock.assert_called_with(saml.Assertion, 'fakeoutput')
@mock.patch('oslo_utils.fileutils.write_to_tempfile')
@mock.patch.object(subprocess, 'check_output')
def test__sign_assertion_exc(self, check_output_mock,
                             write_to_tempfile_mock):
    """A failing xmlsec1 call is logged and raised as SAMLSigningError."""
    # If the command fails the command output is logged.
    write_to_tempfile_mock.return_value = 'tmp_path'
    sample_returncode = 1
    sample_output = self.getUniqueString()
    # Simulate the external signing binary exiting non-zero.
    check_output_mock.side_effect = subprocess.CalledProcessError(
        returncode=sample_returncode, cmd=CONF.saml.xmlsec1_binary,
        output=sample_output)
    logger_fixture = self.useFixture(fixtures.LoggerFixture())
    self.assertRaises(exception.SAMLSigningError,
                      keystone_idp._sign_assertion,
                      self.signed_assertion)
    # The log record must include the failing binary, its exit status
    # and whatever the process printed.
    expected_log = (
        "Error when signing assertion, reason: Command '%s' returned "
        "non-zero exit status %s %s\n" %
        (CONF.saml.xmlsec1_binary, sample_returncode, sample_output))
    self.assertEqual(expected_log, logger_fixture.output)
@mock.patch('oslo_utils.fileutils.write_to_tempfile')
def test__sign_assertion_fileutils_exc(self, write_to_tempfile_mock):
    """Failure to create the temp file also surfaces as SAMLSigningError."""
    exception_msg = 'fake'
    write_to_tempfile_mock.side_effect = Exception(exception_msg)
    logger_fixture = self.useFixture(fixtures.LoggerFixture())
    self.assertRaises(exception.SAMLSigningError,
                      keystone_idp._sign_assertion,
                      self.signed_assertion)
    # The underlying exception message must be preserved in the log.
    expected_log = (
        'Error when signing assertion, reason: %s\n' % exception_msg)
    self.assertEqual(expected_log, logger_fixture.output)
class IdPMetadataGenerationTests(test_v3.RestfulTestCase):
    """A class for testing Identity Provider Metadata generation."""

    # REST endpoint serving the IdP's SAML2 metadata document.
    METADATA_URL = '/OS-FEDERATION/saml2/metadata'

    def setUp(self):
        super(IdPMetadataGenerationTests, self).setUp()
        self.generator = keystone_idp.MetadataGenerator()

    def config_overrides(self):
        # Populate every [saml] option consumed by the metadata
        # generator with known fixture values so the tests below can
        # compare generated output against them.
        super(IdPMetadataGenerationTests, self).config_overrides()
        self.config_fixture.config(
            group='saml',
            idp_entity_id=federation_fixtures.IDP_ENTITY_ID,
            idp_sso_endpoint=federation_fixtures.IDP_SSO_ENDPOINT,
            idp_organization_name=federation_fixtures.IDP_ORGANIZATION_NAME,
            idp_organization_display_name=(
                federation_fixtures.IDP_ORGANIZATION_DISPLAY_NAME),
            idp_organization_url=federation_fixtures.IDP_ORGANIZATION_URL,
            idp_contact_company=federation_fixtures.IDP_CONTACT_COMPANY,
            idp_contact_name=federation_fixtures.IDP_CONTACT_GIVEN_NAME,
            idp_contact_surname=federation_fixtures.IDP_CONTACT_SURNAME,
            idp_contact_email=federation_fixtures.IDP_CONTACT_EMAIL,
            idp_contact_telephone=(
                federation_fixtures.IDP_CONTACT_TELEPHONE_NUMBER),
            idp_contact_type=federation_fixtures.IDP_CONTACT_TYPE)

    def test_check_entity_id(self):
        """The configured entity id appears in the generated metadata."""
        metadata = self.generator.generate_metadata()
        self.assertEqual(federation_fixtures.IDP_ENTITY_ID, metadata.entity_id)

    def test_metadata_validity(self):
        """Call md.EntityDescriptor method that does internal verification."""
        self.generator.generate_metadata().verify()

    def test_serialize_metadata_object(self):
        """Check whether serialization doesn't raise any exceptions."""
        self.generator.generate_metadata().to_string()
        # TODO(marek-denis): Check values here

    def test_check_idp_sso(self):
        # Every configured SSO/organization/contact value must be
        # reflected in the IDPSSODescriptor element.
        metadata = self.generator.generate_metadata()
        idpsso_descriptor = metadata.idpsso_descriptor
        self.assertIsNotNone(metadata.idpsso_descriptor)
        self.assertEqual(federation_fixtures.IDP_SSO_ENDPOINT,
                         idpsso_descriptor.single_sign_on_service.location)

        self.assertIsNotNone(idpsso_descriptor.organization)
        organization = idpsso_descriptor.organization
        self.assertEqual(federation_fixtures.IDP_ORGANIZATION_DISPLAY_NAME,
                         organization.organization_display_name.text)
        self.assertEqual(federation_fixtures.IDP_ORGANIZATION_NAME,
                         organization.organization_name.text)
        self.assertEqual(federation_fixtures.IDP_ORGANIZATION_URL,
                         organization.organization_url.text)

        self.assertIsNotNone(idpsso_descriptor.contact_person)
        contact_person = idpsso_descriptor.contact_person

        self.assertEqual(federation_fixtures.IDP_CONTACT_GIVEN_NAME,
                         contact_person.given_name.text)
        self.assertEqual(federation_fixtures.IDP_CONTACT_SURNAME,
                         contact_person.sur_name.text)
        self.assertEqual(federation_fixtures.IDP_CONTACT_EMAIL,
                         contact_person.email_address.text)
        self.assertEqual(federation_fixtures.IDP_CONTACT_TELEPHONE_NUMBER,
                         contact_person.telephone_number.text)
        self.assertEqual(federation_fixtures.IDP_CONTACT_TYPE,
                         contact_person.contact_type)

    def test_metadata_no_organization(self):
        # Organization info is optional: with all three options unset the
        # element is omitted while contact info is still emitted.
        self.config_fixture.config(
            group='saml',
            idp_organization_display_name=None,
            idp_organization_url=None,
            idp_organization_name=None)
        metadata = self.generator.generate_metadata()
        idpsso_descriptor = metadata.idpsso_descriptor
        self.assertIsNotNone(metadata.idpsso_descriptor)
        self.assertIsNone(idpsso_descriptor.organization)
        self.assertIsNotNone(idpsso_descriptor.contact_person)

    def test_metadata_no_contact_person(self):
        # Contact info is optional: with the options unset the contact
        # list is empty while organization info is still emitted.
        self.config_fixture.config(
            group='saml',
            idp_contact_name=None,
            idp_contact_surname=None,
            idp_contact_email=None,
            idp_contact_telephone=None)
        metadata = self.generator.generate_metadata()
        idpsso_descriptor = metadata.idpsso_descriptor
        self.assertIsNotNone(metadata.idpsso_descriptor)
        self.assertIsNotNone(idpsso_descriptor.organization)
        self.assertEqual([], idpsso_descriptor.contact_person)

    def test_metadata_invalid_contact_type(self):
        # Only the SAML-defined contact types are accepted.
        self.config_fixture.config(
            group='saml',
            idp_contact_type="invalid")
        self.assertRaises(exception.ValidationError,
                          self.generator.generate_metadata)

    def test_metadata_invalid_idp_sso_endpoint(self):
        # The SSO endpoint is mandatory.
        self.config_fixture.config(
            group='saml',
            idp_sso_endpoint=None)
        self.assertRaises(exception.ValidationError,
                          self.generator.generate_metadata)

    def test_metadata_invalid_idp_entity_id(self):
        # The entity id is mandatory.
        self.config_fixture.config(
            group='saml',
            idp_entity_id=None)
        self.assertRaises(exception.ValidationError,
                          self.generator.generate_metadata)

    def test_get_metadata_with_no_metadata_file_configured(self):
        # Without idp_metadata_path set, the endpoint cannot serve
        # anything and reports a server error.
        self.get(self.METADATA_URL,
                 expected_status=http_client.INTERNAL_SERVER_ERROR)

    def test_get_metadata(self):
        # With a metadata file configured, the endpoint serves it
        # verbatim as text/xml.
        self.config_fixture.config(
            group='saml', idp_metadata_path=XMLDIR + '/idp_saml2_metadata.xml')
        r = self.get(self.METADATA_URL, response_content_type='text/xml')
        self.assertEqual('text/xml', r.headers.get('Content-Type'))
        reference_file = _load_xml('idp_saml2_metadata.xml')
        self.assertEqual(reference_file, r.result)
class ServiceProviderTests(test_v3.RestfulTestCase):
    """A test class for Service Providers."""

    MEMBER_NAME = 'service_provider'
    COLLECTION_NAME = 'service_providers'
    SERVICE_PROVIDER_ID = 'ACME'
    # Attributes every service provider representation must expose.
    SP_KEYS = ['auth_url', 'id', 'enabled', 'description',
               'relay_state_prefix', 'sp_url']

    def setUp(self):
        super(ServiceProviderTests, self).setUp()
        # Add a Service Provider
        url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
        self.SP_REF = self.sp_ref()
        self.SERVICE_PROVIDER = self.put(
            url, body={'service_provider': self.SP_REF},
            expected_status=http_client.CREATED).result

    def sp_ref(self):
        """Return a fresh, randomized service provider reference dict."""
        ref = {
            'auth_url': 'https://' + uuid.uuid4().hex + '.com',
            'enabled': True,
            'description': uuid.uuid4().hex,
            'sp_url': 'https://' + uuid.uuid4().hex + '.com',
            'relay_state_prefix': CONF.saml.relay_state_prefix
        }
        return ref

    def base_url(self, suffix=None):
        """Return the collection URL, or the member URL when given a suffix."""
        if suffix is not None:
            return '/OS-FEDERATION/service_providers/' + str(suffix)
        return '/OS-FEDERATION/service_providers'

    def _create_default_sp(self, body=None):
        """Create default Service Provider."""
        url = self.base_url(suffix=uuid.uuid4().hex)
        if body is None:
            body = self.sp_ref()
        resp = self.put(url, body={'service_provider': body},
                        expected_status=http_client.CREATED)
        return resp

    def test_get_service_provider(self):
        """Fetching the SP created in setUp returns a valid entity."""
        url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
        resp = self.get(url)
        self.assertValidEntity(resp.result['service_provider'],
                               keys_to_check=self.SP_KEYS)

    def test_get_service_provider_fail(self):
        """Fetching an unknown SP id returns 404."""
        url = self.base_url(suffix=uuid.uuid4().hex)
        self.get(url, expected_status=http_client.NOT_FOUND)

    def test_create_service_provider(self):
        """PUT with a full reference creates a valid SP."""
        url = self.base_url(suffix=uuid.uuid4().hex)
        sp = self.sp_ref()
        resp = self.put(url, body={'service_provider': sp},
                        expected_status=http_client.CREATED)
        self.assertValidEntity(resp.result['service_provider'],
                               keys_to_check=self.SP_KEYS)

    def test_create_sp_relay_state_default(self):
        """Create an SP without relay state, should default to `ss:mem`."""
        url = self.base_url(suffix=uuid.uuid4().hex)
        sp = self.sp_ref()
        del sp['relay_state_prefix']
        resp = self.put(url, body={'service_provider': sp},
                        expected_status=http_client.CREATED)
        sp_result = resp.result['service_provider']
        self.assertEqual(CONF.saml.relay_state_prefix,
                         sp_result['relay_state_prefix'])

    def test_create_sp_relay_state_non_default(self):
        """Create an SP with custom relay state."""
        url = self.base_url(suffix=uuid.uuid4().hex)
        sp = self.sp_ref()
        non_default_prefix = uuid.uuid4().hex
        sp['relay_state_prefix'] = non_default_prefix
        resp = self.put(url, body={'service_provider': sp},
                        expected_status=http_client.CREATED)
        sp_result = resp.result['service_provider']
        self.assertEqual(non_default_prefix,
                         sp_result['relay_state_prefix'])

    def test_create_service_provider_fail(self):
        """Try adding SP object with unallowed attribute."""
        url = self.base_url(suffix=uuid.uuid4().hex)
        sp = self.sp_ref()
        # Inject an attribute the schema does not allow.
        sp[uuid.uuid4().hex] = uuid.uuid4().hex
        self.put(url, body={'service_provider': sp},
                 expected_status=http_client.BAD_REQUEST)

    def test_list_service_providers(self):
        """Test listing of service provider objects.

        Add two new service providers. List all available service providers.
        Expect to get list of three service providers (one created by setUp())
        Test if attributes match.

        """
        ref_service_providers = {
            uuid.uuid4().hex: self.sp_ref(),
            uuid.uuid4().hex: self.sp_ref(),
        }
        for id, sp in ref_service_providers.items():
            url = self.base_url(suffix=id)
            self.put(url, body={'service_provider': sp},
                     expected_status=http_client.CREATED)

        # Insert ids into service provider object, we will compare it with
        # responses from server and those include 'id' attribute.

        ref_service_providers[self.SERVICE_PROVIDER_ID] = self.SP_REF
        for id, sp in ref_service_providers.items():
            sp['id'] = id

        url = self.base_url()
        resp = self.get(url)
        service_providers = resp.result
        for service_provider in service_providers['service_providers']:
            id = service_provider['id']
            self.assertValidEntity(
                service_provider, ref=ref_service_providers[id],
                keys_to_check=self.SP_KEYS)

    def test_update_service_provider(self):
        """Update existing service provider.

        Update default existing service provider and make sure it has been
        properly changed.

        """
        new_sp_ref = self.sp_ref()
        url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
        resp = self.patch(url, body={'service_provider': new_sp_ref})
        patch_result = resp.result
        new_sp_ref['id'] = self.SERVICE_PROVIDER_ID
        self.assertValidEntity(patch_result['service_provider'],
                               ref=new_sp_ref,
                               keys_to_check=self.SP_KEYS)

        # A subsequent GET must reflect exactly what PATCH returned.
        resp = self.get(url)
        get_result = resp.result

        self.assertDictEqual(patch_result['service_provider'],
                             get_result['service_provider'])

    def test_update_service_provider_immutable_parameters(self):
        """Update immutable attributes in service provider.

        In this particular case the test will try to change ``id`` attribute.
        The server should return an HTTP 403 Forbidden error code.

        """
        # NOTE(review): the docstring says 403 Forbidden but the test
        # asserts 400 Bad Request -- the docstring appears stale.
        new_sp_ref = {'id': uuid.uuid4().hex}
        url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
        self.patch(url, body={'service_provider': new_sp_ref},
                   expected_status=http_client.BAD_REQUEST)

    def test_update_service_provider_unknown_parameter(self):
        """Updating with an attribute outside the schema returns 400."""
        new_sp_ref = self.sp_ref()
        new_sp_ref[uuid.uuid4().hex] = uuid.uuid4().hex
        url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
        self.patch(url, body={'service_provider': new_sp_ref},
                   expected_status=http_client.BAD_REQUEST)

    def test_update_service_provider_returns_not_found(self):
        """Updating an unknown SP id returns 404."""
        new_sp_ref = self.sp_ref()
        new_sp_ref['description'] = uuid.uuid4().hex
        url = self.base_url(suffix=uuid.uuid4().hex)
        self.patch(url, body={'service_provider': new_sp_ref},
                   expected_status=http_client.NOT_FOUND)

    def test_update_sp_relay_state(self):
        """Update an SP with custom relay state."""
        new_sp_ref = self.sp_ref()
        non_default_prefix = uuid.uuid4().hex
        new_sp_ref['relay_state_prefix'] = non_default_prefix
        url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
        resp = self.patch(url, body={'service_provider': new_sp_ref})
        sp_result = resp.result['service_provider']
        self.assertEqual(non_default_prefix,
                         sp_result['relay_state_prefix'])

    def test_delete_service_provider(self):
        """Deleting the SP created in setUp succeeds."""
        url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
        self.delete(url)

    def test_delete_service_provider_returns_not_found(self):
        """Deleting an unknown SP id returns 404."""
        url = self.base_url(suffix=uuid.uuid4().hex)
        self.delete(url, expected_status=http_client.NOT_FOUND)

    def test_filter_list_sp_by_id(self):
        """Filtering the SP collection by ?id= returns only that SP."""
        def get_id(resp):
            sp = resp.result.get('service_provider')
            return sp.get('id')

        sp1_id = get_id(self._create_default_sp())
        sp2_id = get_id(self._create_default_sp())

        # list the SP, should get SPs.
        url = self.base_url()
        resp = self.get(url)
        sps = resp.result.get('service_providers')
        entities_ids = [e['id'] for e in sps]
        self.assertIn(sp1_id, entities_ids)
        self.assertIn(sp2_id, entities_ids)

        # filter the SP by 'id'. Only SP1 should appear.
        url = self.base_url() + '?id=' + sp1_id
        resp = self.get(url)
        sps = resp.result.get('service_providers')
        entities_ids = [e['id'] for e in sps]
        self.assertIn(sp1_id, entities_ids)
        self.assertNotIn(sp2_id, entities_ids)

    def test_filter_list_sp_by_enabled(self):
        """Filtering by ?enabled=True hides disabled service providers."""
        def get_id(resp):
            sp = resp.result.get('service_provider')
            return sp.get('id')

        sp1_id = get_id(self._create_default_sp())
        sp2_ref = self.sp_ref()
        sp2_ref['enabled'] = False
        sp2_id = get_id(self._create_default_sp(body=sp2_ref))

        # list the SP, should get two SPs.
        url = self.base_url()
        resp = self.get(url)
        sps = resp.result.get('service_providers')
        entities_ids = [e['id'] for e in sps]
        self.assertIn(sp1_id, entities_ids)
        self.assertIn(sp2_id, entities_ids)

        # filter the SP by 'enabled'. Only SP1 should appear.
        url = self.base_url() + '?enabled=True'
        resp = self.get(url)
        sps = resp.result.get('service_providers')
        entities_ids = [e['id'] for e in sps]
        self.assertIn(sp1_id, entities_ids)
        self.assertNotIn(sp2_id, entities_ids)
class WebSSOTests(FederatedTokenTests):
    """A class for testing Web SSO."""

    SSO_URL = '/auth/OS-FEDERATION/websso/'
    SSO_TEMPLATE_NAME = 'sso_callback_template.html'
    SSO_TEMPLATE_PATH = os.path.join(core.dirs.etc(), SSO_TEMPLATE_NAME)
    TRUSTED_DASHBOARD = 'http://horizon.com'
    # The origin query parameter arrives URL-encoded.
    ORIGIN = urllib.parse.quote_plus(TRUSTED_DASHBOARD)
    PROTOCOL_REMOTE_ID_ATTR = uuid.uuid4().hex

    def setUp(self):
        super(WebSSOTests, self).setUp()
        self.api = federation_controllers.Auth()

    def config_overrides(self):
        super(WebSSOTests, self).config_overrides()
        self.config_fixture.config(
            group='federation',
            trusted_dashboard=[self.TRUSTED_DASHBOARD],
            sso_callback_template=self.SSO_TEMPLATE_PATH,
            remote_id_attribute=self.REMOTE_ID_ATTR)

    def test_render_callback_template(self):
        """The HTML callback embeds both the token id and the dashboard."""
        token_id = uuid.uuid4().hex
        resp = self.api.render_html_response(self.TRUSTED_DASHBOARD, token_id)
        self.assertIn(token_id, resp.body)
        self.assertIn(self.TRUSTED_DASHBOARD, resp.body)

    def test_federated_sso_auth(self):
        """A known remote id plus a trusted origin yields the callback page."""
        environment = {self.REMOTE_ID_ATTR: self.REMOTE_IDS[0]}
        context = {'environment': environment}
        query_string = {'origin': self.ORIGIN}
        self._inject_assertion(context, 'EMPLOYEE_ASSERTION', query_string)
        resp = self.api.federated_sso_auth(context, self.PROTOCOL)
        self.assertIn(self.TRUSTED_DASHBOARD, resp.body)

    def test_get_sso_origin_host_case_insensitive(self):
        # test lowercase hostname in trusted_dashboard
        context = {
            'query_string': {
                'origin': "http://horizon.com",
            },
        }
        host = self.api._get_sso_origin_host(context)
        self.assertEqual("http://horizon.com", host)
        # test uppercase hostname in trusted_dashboard
        self.config_fixture.config(group='federation',
                                   trusted_dashboard=['http://Horizon.com'])
        host = self.api._get_sso_origin_host(context)
        self.assertEqual("http://horizon.com", host)

    def test_federated_sso_auth_with_protocol_specific_remote_id(self):
        # A per-protocol remote_id_attribute overrides the global one.
        self.config_fixture.config(
            group=self.PROTOCOL,
            remote_id_attribute=self.PROTOCOL_REMOTE_ID_ATTR)

        environment = {self.PROTOCOL_REMOTE_ID_ATTR: self.REMOTE_IDS[0]}
        context = {'environment': environment}
        query_string = {'origin': self.ORIGIN}
        self._inject_assertion(context, 'EMPLOYEE_ASSERTION', query_string)
        resp = self.api.federated_sso_auth(context, self.PROTOCOL)
        self.assertIn(self.TRUSTED_DASHBOARD, resp.body)

    def test_federated_sso_auth_bad_remote_id(self):
        # self.IDP is not one of the registered remote ids, so lookup of
        # the identity provider must fail.
        environment = {self.REMOTE_ID_ATTR: self.IDP}
        context = {'environment': environment}
        query_string = {'origin': self.ORIGIN}
        self._inject_assertion(context, 'EMPLOYEE_ASSERTION', query_string)
        self.assertRaises(exception.IdentityProviderNotFound,
                          self.api.federated_sso_auth,
                          context, self.PROTOCOL)

    def test_federated_sso_missing_query(self):
        # Without an 'origin' query parameter the request is invalid.
        environment = {self.REMOTE_ID_ATTR: self.REMOTE_IDS[0]}
        context = {'environment': environment}
        self._inject_assertion(context, 'EMPLOYEE_ASSERTION')
        self.assertRaises(exception.ValidationError,
                          self.api.federated_sso_auth,
                          context, self.PROTOCOL)

    def test_federated_sso_missing_query_bad_remote_id(self):
        # The missing query string is reported before the bad remote id.
        environment = {self.REMOTE_ID_ATTR: self.IDP}
        context = {'environment': environment}
        self._inject_assertion(context, 'EMPLOYEE_ASSERTION')
        self.assertRaises(exception.ValidationError,
                          self.api.federated_sso_auth,
                          context, self.PROTOCOL)

    def test_federated_sso_untrusted_dashboard(self):
        # A random origin is not in trusted_dashboard and is rejected.
        environment = {self.REMOTE_ID_ATTR: self.REMOTE_IDS[0]}
        context = {'environment': environment}
        query_string = {'origin': uuid.uuid4().hex}
        self._inject_assertion(context, 'EMPLOYEE_ASSERTION', query_string)
        self.assertRaises(exception.Unauthorized,
                          self.api.federated_sso_auth,
                          context, self.PROTOCOL)

    def test_federated_sso_untrusted_dashboard_bad_remote_id(self):
        # The untrusted dashboard is reported before the bad remote id.
        environment = {self.REMOTE_ID_ATTR: self.IDP}
        context = {'environment': environment}
        query_string = {'origin': uuid.uuid4().hex}
        self._inject_assertion(context, 'EMPLOYEE_ASSERTION', query_string)
        self.assertRaises(exception.Unauthorized,
                          self.api.federated_sso_auth,
                          context, self.PROTOCOL)

    def test_federated_sso_missing_remote_id(self):
        # An environment without the remote id attribute is unauthorized.
        context = {'environment': {}}
        query_string = {'origin': self.ORIGIN}
        self._inject_assertion(context, 'EMPLOYEE_ASSERTION', query_string)
        self.assertRaises(exception.Unauthorized,
                          self.api.federated_sso_auth,
                          context, self.PROTOCOL)

    def test_identity_provider_specific_federated_authentication(self):
        """The IdP-specific WebSSO endpoint also renders the callback."""
        environment = {self.REMOTE_ID_ATTR: self.REMOTE_IDS[0]}
        context = {'environment': environment}
        query_string = {'origin': self.ORIGIN}
        self._inject_assertion(context, 'EMPLOYEE_ASSERTION', query_string)
        resp = self.api.federated_idp_specific_sso_auth(context,
                                                        self.idp['id'],
                                                        self.PROTOCOL)
        self.assertIn(self.TRUSTED_DASHBOARD, resp.body)
class K2KServiceCatalogTests(test_v3.RestfulTestCase):
    """Tests for Keystone-to-Keystone service providers in the catalog."""

    SP1 = 'SP1'
    SP2 = 'SP2'
    SP3 = 'SP3'

    def setUp(self):
        super(K2KServiceCatalogTests, self).setUp()

        # Register three service providers and keep {id: ref} fixtures
        # around for later comparison against token contents.
        sp = self.sp_ref()
        self.federation_api.create_sp(self.SP1, sp)
        self.sp_alpha = {self.SP1: sp}

        sp = self.sp_ref()
        self.federation_api.create_sp(self.SP2, sp)
        self.sp_beta = {self.SP2: sp}

        sp = self.sp_ref()
        self.federation_api.create_sp(self.SP3, sp)
        self.sp_gamma = {self.SP3: sp}

        self.token_v3_helper = token_common.V3TokenDataHelper()

    def sp_response(self, id, ref):
        """Return the catalog representation of service provider ``ref``.

        Only ``auth_url`` and ``sp_url`` are exposed in the token's
        service catalog, together with the provider's ``id``.

        NOTE: this used to pop keys from ``ref`` in place, silently
        mutating the fixture dicts stored in setUp(); it now builds a
        new dict and leaves the input untouched.
        """
        hidden_keys = ('enabled', 'description', 'relay_state_prefix')
        response = {key: value for key, value in ref.items()
                    if key not in hidden_keys}
        response['id'] = id
        return response

    def sp_ref(self):
        """Return a fresh, randomized service provider reference dict."""
        ref = {
            'auth_url': uuid.uuid4().hex,
            'enabled': True,
            'description': uuid.uuid4().hex,
            'sp_url': uuid.uuid4().hex,
            'relay_state_prefix': CONF.saml.relay_state_prefix,
        }
        return ref

    def _validate_service_providers(self, token, ref):
        """Assert the token lists exactly the service providers in ``ref``.

        :param token: token data dict with a ``token`` key
        :param ref: mapping of service provider id to reference dict
        """
        token_data = token['token']
        self.assertIn('service_providers', token_data)
        self.assertIsNotNone(token_data['service_providers'])
        service_providers = token_data.get('service_providers')
        self.assertEqual(len(ref), len(service_providers))
        for entity in service_providers:
            sp_id = entity.get('id')
            ref_entity = self.sp_response(sp_id, ref.get(sp_id))
            self.assertDictEqual(entity, ref_entity)

    def test_service_providers_in_token(self):
        """Check if service providers are listed in service catalog."""
        token = self.token_v3_helper.get_token_data(self.user_id, ['password'])
        ref = {}
        for r in (self.sp_alpha, self.sp_beta, self.sp_gamma):
            ref.update(r)
        self._validate_service_providers(token, ref)

    def test_service_provides_in_token_disabled_sp(self):
        """Test behaviour with disabled service providers.

        Disabled service providers should not be listed in the service
        catalog.

        """
        # disable service provider ALPHA
        sp_ref = {'enabled': False}
        self.federation_api.update_sp(self.SP1, sp_ref)

        token = self.token_v3_helper.get_token_data(self.user_id, ['password'])
        ref = {}
        for r in (self.sp_beta, self.sp_gamma):
            ref.update(r)
        self._validate_service_providers(token, ref)

    def test_no_service_providers_in_token(self):
        """Test service catalog with disabled service providers.

        There should be no entry ``service_providers`` in the catalog.
        Test passes providing no attribute was raised.

        """
        sp_ref = {'enabled': False}
        for sp in (self.SP1, self.SP2, self.SP3):
            self.federation_api.update_sp(sp, sp_ref)
        token = self.token_v3_helper.get_token_data(self.user_id, ['password'])
        self.assertNotIn('service_providers', token['token'],
                         message=('Expected Service Catalog not to have '
                                  'service_providers'))
| 40.447183 | 79 | 0.558652 |
464daba507ffb985e5b91f13eb4bc6631730624c | 1,105 | py | Python | lib/adb_patch.py | uwol1116/FCS_IoT | 1bc589ef24aa6a9cf5e1eee3b884a9ae8574ca0c | [
"MIT"
] | null | null | null | lib/adb_patch.py | uwol1116/FCS_IoT | 1bc589ef24aa6a9cf5e1eee3b884a9ae8574ca0c | [
"MIT"
] | null | null | null | lib/adb_patch.py | uwol1116/FCS_IoT | 1bc589ef24aa6a9cf5e1eee3b884a9ae8574ca0c | [
"MIT"
] | null | null | null | import subprocess
import shutil
import os
def adb_devices():
    """Restart the local ADB server and list attached devices.

    Returns the text between the ``adb devices`` header line and the
    first occurrence of the word "device" -- i.e. the serial of the
    first listed device, or an empty string when nothing is attached.
    """
    # Pass argument lists instead of single strings: a plain string
    # without shell=True is only interpreted as "program + args" on
    # Windows; on POSIX it would look for a binary literally named
    # "adb kill-server".
    subprocess.run(["adb", "kill-server"])
    raw = subprocess.check_output(["adb", "devices"], text=True)
    # Drop the "List of devices attached" header line, then keep
    # everything before the first "device" state keyword.
    output = raw.split('\n', 1)[1].split("device")[0].strip()
    return output
def auth_check():
    """Return the output of ``id`` run as root on the device via adb.

    Raises subprocess.CalledProcessError when the command fails (e.g.
    the device has no working ``su``).
    """
    # Argument list instead of a single string: string commands without
    # shell=True only work on Windows.
    output = subprocess.check_output(["adb", "shell", "su", "-c", "id"],
                                     text=True)
    return output
def push_adbkey(path_key):
    """Install a public ADB key on a rooted device.

    Pushes *path_key* to /sdcard, copies it into the device's
    authorized key store (/data/misc/adb/adb_keys), fixes ownership and
    permissions, then removes the temporary copy.

    :param path_key: local path to the adbkey.pub file to install
    """
    # Argument lists rather than command strings: a plain string
    # without shell=True only works on Windows, and a path_key
    # containing spaces would previously have been split apart.
    commands = [
        ["adb", "push", path_key, "/sdcard"],
        ["adb", "shell", "su", "-c", "cp", "/sdcard/adbkey.pub",
         "/data/misc/adb/adb_keys"],
        ["adb", "shell", "su", "-c", "chown", "system",
         "/data/misc/adb/adb_keys"],
        ["adb", "shell", "su", "-c", "chmod", "440",
         "/data/misc/adb/adb_keys"],
        ["adb", "shell", "su", "-c", "rm", "/sdcard/adbkey.pub"],
    ]
    for command in commands:
        subprocess.run(command)
def backup_pri_key(pubkey_path):
    """Copy the private key paired with *pubkey_path* into ./backup.

    The private key is assumed to live next to the public key, under
    the same name minus the trailing ".pub" suffix.

    :param pubkey_path: path to the public key file (e.g. .../adbkey.pub)
    """
    # Strip ".pub" from the end only.  The original str.split(".pub", 1)
    # truncated at the FIRST occurrence anywhere in the path, breaking
    # paths such as "/home/x.pub/adbkey.pub".
    if pubkey_path.endswith(".pub"):
        prikey_path = pubkey_path[:-len(".pub")]
    else:
        prikey_path = pubkey_path
    # exist_ok avoids the check-then-create race of isdir() + mkdir().
    os.makedirs("./backup", exist_ok=True)
    shutil.copy(prikey_path, "./backup/")
| 31.571429 | 107 | 0.668778 |
851810bcee53833b5c7753d2d624b13595f65bca | 609 | py | Python | flypy/runtime/obj/noneobject.py | filmackay/flypy | d64e70959c5c8af9e914dcc3ce1068fb99859c3a | [
"BSD-2-Clause"
] | null | null | null | flypy/runtime/obj/noneobject.py | filmackay/flypy | d64e70959c5c8af9e914dcc3ce1068fb99859c3a | [
"BSD-2-Clause"
] | null | null | null | flypy/runtime/obj/noneobject.py | filmackay/flypy | d64e70959c5c8af9e914dcc3ce1068fb99859c3a | [
"BSD-2-Clause"
] | 1 | 2020-01-01T00:43:24.000Z | 2020-01-01T00:43:24.000Z | # -*- coding: utf-8 -*-
"""
Dummy None implementation.
"""
from __future__ import print_function, division, absolute_import
from flypy import overlay
from ... import jit, typeof
@jit
class NoneType(object):
    """flypy representation of Python's ``None`` singleton."""

    # No runtime fields: NoneType is a pure marker type.
    layout = []

    @jit('a -> bool')
    def __nonzero__(self):
        # None is always falsy.
        return False

    __bool__ = __nonzero__  # Python 3 spelling of the truthiness hook.

    @jit('a -> a -> bool')
    def __eq__(self, other):
        # None compared with another None value: always equal.
        return True

    # NOTE(review): this second ``__eq__`` rebinds the name in the class
    # body, so under plain Python semantics it shadows the overload
    # above.  Presumably flypy's @jit dispatches on the signature string
    # instead -- confirm against flypy's overload handling.
    @jit('a -> b -> bool')
    def __eq__(self, other):
        # None compared with a value of any other type: equal only when
        # that value is the None object itself.
        return other is None
# Module-level singleton mirroring Python's ``None`` value.
NoneValue = NoneType()
@typeof.case(type(None))
def typeof(pyval):
    """Map the Python ``None`` value to the flypy ``NoneType`` type."""
    return NoneType[()]
overlay(None, NoneValue) | 16.459459 | 64 | 0.622332 |
3999a10765d601b02013b65dc53300ddd77070f5 | 6,206 | py | Python | SyncNetInstance.py | KeithIMyers/syncnet_python | 543065a22272a8d936fdf2356ec425f88589e379 | [
"CC-BY-4.0"
] | null | null | null | SyncNetInstance.py | KeithIMyers/syncnet_python | 543065a22272a8d936fdf2356ec425f88589e379 | [
"CC-BY-4.0"
] | null | null | null | SyncNetInstance.py | KeithIMyers/syncnet_python | 543065a22272a8d936fdf2356ec425f88589e379 | [
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/python
#-*- coding: utf-8 -*-
# Video 25 FPS, Audio 16000HZ
import torch
import numpy
import time, pdb, argparse, subprocess, os
import cv2
import python_speech_features
from scipy import signal
from scipy.io import wavfile
from SyncNetModel import *
# ==================== Get OFFSET ====================
def calc_pdist(feat1, feat2, vshift=10):
    """Frame-wise distances between ``feat1`` and shifted ``feat2``.

    For every frame i of ``feat1``, computes the pairwise distances
    between that frame and the 2*vshift+1 frames of ``feat2`` centred
    on i (``feat2`` is zero-padded by ``vshift`` frames at both ends so
    every window is full length).

    :param feat1: (frames, dim) tensor of features
    :param feat2: (frames, dim) tensor of features
    :param vshift: maximum shift to evaluate in either direction
    :returns: list of 1-D tensors, one per frame, each of length
        2*vshift+1
    """
    window = 2 * vshift + 1
    # Zero-pad along the frame axis only ((0, 0) pads the feature dim).
    padded = torch.nn.functional.pad(feat2, (0, 0, vshift, vshift))
    return [
        torch.nn.functional.pairwise_distance(
            feat1[[idx], :].repeat(window, 1),
            padded[idx:idx + window, :])
        for idx in range(len(feat1))
    ]
# ==================== MAIN DEF ====================
class SyncNetInstance(torch.nn.Module):
def __init__(self, dropout = 0, num_layers_in_fc_layers = 1024):
super(SyncNetInstance, self).__init__();
self.__S__ = S(num_layers_in_fc_layers = num_layers_in_fc_layers).cuda();
def evaluate(self, opt, videofile):
self.__S__.eval();
# ========== ==========
# Load video
# ========== ==========
cap = cv2.VideoCapture(videofile)
frame_num = 1;
images = []
while frame_num:
frame_num += 1
ret, image = cap.read()
if ret == 0:
break
images.append(image)
im = numpy.stack(images,axis=3)
im = numpy.expand_dims(im,axis=0)
im = numpy.transpose(im,(0,3,4,1,2))
imtv = torch.autograd.Variable(torch.from_numpy(im.astype(float)).float())
# ========== ==========
# Load audio
# ========== ==========
audiotmp = os.path.join(opt.tmp_dir,'audio.wav')
command = ("ffmpeg -y -i %s -async 1 -ac 1 -vn -acodec pcm_s16le -ar 16000 %s" % (videofile,audiotmp))
output = subprocess.call(command, shell=True, stdout=None)
sample_rate, audio = wavfile.read(audiotmp)
mfcc = zip(*python_speech_features.mfcc(audio,sample_rate))
mfcc = numpy.stack([numpy.array(i) for i in mfcc])
cc = numpy.expand_dims(numpy.expand_dims(mfcc,axis=0),axis=0)
cct = torch.autograd.Variable(torch.from_numpy(cc.astype(float)).float())
# ========== ==========
# Check audio and video input length
# ========== ==========
if (float(len(audio))/16000) < (float(len(images))/25) :
print(" *** WARNING: The audio (%.4fs) is shorter than the video (%.4fs). Type 'cont' to continue. *** "%(float(len(audio))/16000,float(len(images))/25))
pdb.set_trace()
# ========== ==========
# Generate video and audio feats
# ========== ==========
lastframe = len(images)-6
im_feat = []
cc_feat = []
tS = time.time()
for i in range(0,lastframe,opt.batch_size):
im_batch = [ imtv[:,:,vframe:vframe+5,:,:] for vframe in range(i,min(lastframe,i+opt.batch_size)) ]
im_in = torch.cat(im_batch,0)
im_out = self.__S__.forward_lip(im_in.cuda());
im_feat.append(im_out.data.cpu())
cc_batch = [ cct[:,:,:,vframe*4:vframe*4+20] for vframe in range(i,min(lastframe,i+opt.batch_size)) ]
cc_in = torch.cat(cc_batch,0)
cc_out = self.__S__.forward_aud(cc_in.cuda())
cc_feat.append(cc_out.data.cpu())
im_feat = torch.cat(im_feat,0)
cc_feat = torch.cat(cc_feat,0)
# ========== ==========
# Compute offset
# ========== ==========
print('Compute time %.3f sec.' % (time.time()-tS))
dists = calc_pdist(im_feat,cc_feat,vshift=opt.vshift)
mdist = torch.mean(torch.stack(dists,1),1)
minval, minidx = torch.min(mdist,0)
offset = opt.vshift-minidx
conf = torch.median(mdist) - minval
fdist = numpy.stack([dist[minidx].numpy() for dist in dists])
# fdist = numpy.pad(fdist, (3,3), 'constant', constant_values=15)
fconf = torch.median(mdist).numpy() - fdist
fconfm = signal.medfilt(fconf,kernel_size=9)
numpy.set_printoptions(formatter={'float': '{: 0.3f}'.format})
print('Framewise conf: ')
print(fconfm)
print('AV offset: \t%d \nMin dist: \t%.3f\nConfidence: \t%.3f' % (offset,minval,conf))
dists_npy = numpy.array([ dist.numpy() for dist in dists ])
return offset.numpy(), conf.numpy(), dists_npy
    def extract_feature(self, opt, videofile):
        """Extract per-window lip features from *videofile*.

        Parameters
        ----------
        opt : options object; only ``opt.batch_size`` is read here.
        videofile : str, path to a video readable by OpenCV.

        Returns
        -------
        torch.Tensor of features from ``forward_lipfeat``, one row per
        5-frame window, concatenated over all batches (on CPU).
        """
        self.__S__.eval();
        # ========== ==========
        # Load video
        # ========== ==========
        cap = cv2.VideoCapture(videofile)
        frame_num = 1;
        images = []
        # NOTE(review): frame_num only increases, so the while condition is
        # always truthy -- the real exit is the ``ret == 0`` break below.
        while frame_num:
            frame_num += 1
            ret, image = cap.read()
            if ret == 0:
                break
            # OpenCV decodes frames as BGR; convert to RGB before stacking
            image_np = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            images.append(image_np)
        # (H, W, C) frames stacked on a new time axis -> (H, W, C, T),
        # then batched and transposed to (1, C, T, H, W) for the network
        im = numpy.stack(images,axis=3)
        im = numpy.expand_dims(im,axis=0)
        im = numpy.transpose(im,(0,3,4,1,2))
        imtv = torch.autograd.Variable(torch.from_numpy(im.astype(float)).float())
        # ========== ==========
        # Generate video feats
        # ========== ==========
        # each window spans 5 frames, hence the last usable start index
        lastframe = len(images)-4
        im_feat = []
        tS = time.time()
        for i in range(0,lastframe,opt.batch_size):
            # batch of overlapping 5-frame windows starting at i..i+batch_size
            im_batch = [ imtv[:,:,vframe:vframe+5,:,:] for vframe in range(i,min(lastframe,i+opt.batch_size)) ]
            im_in = torch.cat(im_batch,0)
            # assumes a CUDA device is available -- TODO confirm
            im_out = self.__S__.forward_lipfeat(im_in.cuda());
            im_feat.append(im_out.data.cpu())
        im_feat = torch.cat(im_feat,0)
        # ========== ==========
        # Compute offset
        # ========== ==========
        print('Compute time %.3f sec.' % (time.time()-tS))
        return im_feat
def loadParameters(self, path):
loaded_state = torch.load(path, map_location=lambda storage, loc: storage);
self_state = self.__S__.state_dict();
for name, param in loaded_state.items():
self_state[name].copy_(param);
| 30.875622 | 165 | 0.529649 |
cbe33c895ea29de6bb5e7c1dbc374d8493f9d248 | 10,460 | py | Python | tests/cases/resources/tests/field.py | rysdyk/serrano | 926d874b19efdd18e359d32bca601058b655b288 | [
"BSD-2-Clause"
] | null | null | null | tests/cases/resources/tests/field.py | rysdyk/serrano | 926d874b19efdd18e359d32bca601058b655b288 | [
"BSD-2-Clause"
] | null | null | null | tests/cases/resources/tests/field.py | rysdyk/serrano | 926d874b19efdd18e359d32bca601058b655b288 | [
"BSD-2-Clause"
] | 1 | 2020-01-16T15:26:37.000Z | 2020-01-16T15:26:37.000Z | import json
from django.test.utils import override_settings
from avocado.models import DataField
from avocado.events.models import Log
from .base import BaseTestCase
from tests.models import Project, Title
class FieldResourceTestCase(BaseTestCase):
    """Endpoint tests for the DataField REST resources.

    Covers listing and detail views (including orphaned-field handling via
    the SERRANO_CHECK_ORPHANED_FIELDS setting), value listing/search,
    value/label validation, summary stats and value distributions, plus the
    audit Log entries each endpoint is expected to write.
    """

    def test_get_all(self):
        response = self.client.get('/api/fields/',
                                   HTTP_ACCEPT='application/json')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(json.loads(response.content)), 5)

    @override_settings(SERRANO_CHECK_ORPHANED_FIELDS=True)
    def test_get_all_orphan(self):
        # Orphan one of the fields we are about to retrieve
        DataField.objects.filter(pk=2).update(field_name="XXX")

        response = self.client.get('/api/fields/',
                                   HTTP_ACCEPT='application/json')
        self.assertEqual(response.status_code, 200)
        # orphaned field is silently excluded from the listing
        self.assertEqual(len(json.loads(response.content)), 4)

    @override_settings(SERRANO_CHECK_ORPHANED_FIELDS=False)
    def test_get_all_orphan_check_off(self):
        # Orphan one of the fields we are about to retrieve
        DataField.objects.filter(pk=2).update(field_name="XXX")

        response = self.client.get('/api/fields/',
                                   HTTP_ACCEPT='application/json')
        self.assertEqual(response.status_code, 200)
        # with the check disabled the orphaned field is still listed
        self.assertEqual(len(json.loads(response.content)), 5)

    def test_get_one(self):
        # Not allowed to see
        response = self.client.get('/api/fields/1/',
                                   HTTP_ACCEPT='application/json')
        self.assertEqual(response.status_code, 404)

        response = self.client.get('/api/fields/2/',
                                   HTTP_ACCEPT='application/json')
        self.assertEqual(response.status_code, 200)
        self.assertTrue(json.loads(response.content))
        # detail reads are audited
        self.assertTrue(Log.objects.filter(event='read', object_id=2).exists())

    @override_settings(SERRANO_CHECK_ORPHANED_FIELDS=True)
    def test_get_one_orphan(self):
        # Orphan the field before we retrieve it
        DataField.objects.filter(pk=2).update(model_name="XXX")

        response = self.client.get('/api/fields/2/',
                                   HTTP_ACCEPT='application/json')
        # an orphaned field on the detail endpoint is a server error
        self.assertEqual(response.status_code, 500)

    @override_settings(SERRANO_CHECK_ORPHANED_FIELDS=False)
    def test_get_one_orphan_check_off(self):
        # Orphan one of the fields we are about to retrieve
        DataField.objects.filter(pk=2).update(field_name="XXX")

        response = self.client.get('/api/fields/2/',
                                   HTTP_ACCEPT='application/json')
        self.assertEqual(response.status_code, 200)

    def test_get_privileged(self):
        # Superuser sees everything
        self.client.login(username='root', password='password')

        response = self.client.get('/api/fields/?unpublished=1',
                                   HTTP_ACCEPT='application/json')
        self.assertEqual(len(json.loads(response.content)), 12)

        response = self.client.get('/api/fields/1/',
                                   HTTP_ACCEPT='application/json')
        self.assertEqual(response.status_code, 200)
        self.assertTrue(json.loads(response.content))

    def test_values(self):
        # title.name
        response = self.client.get('/api/fields/2/values/',
                                   HTTP_ACCEPT='application/json')
        self.assertEqual(response.status_code, 200)
        self.assertTrue(json.loads(response.content)['values'])

    def test_values_no_limit(self):
        # title.name
        response = self.client.get('/api/fields/2/values/?limit=0',
                                   HTTP_ACCEPT='application/json')
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertTrue(data['values'])
        # unlimited responses are not paginated
        self.assertFalse('previous' in data['_links'])
        self.assertFalse('next' in data['_links'])

    def test_values_random(self):
        # Random values
        response = self.client.get('/api/fields/2/values/?random=3',
                                   HTTP_ACCEPT='application/json')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(json.loads(response.content)), 3)

    def test_values_query(self):
        # Query values
        response = self.client.get('/api/fields/2/values/?query=a',
                                   HTTP_ACCEPT='application/json')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(json.loads(response.content)['values'], [
            {'label': 'Analyst', 'value': 'Analyst'},
            {'label': 'Guard', 'value': 'Guard'},
            {'label': 'Lawyer', 'value': 'Lawyer'},
            {'label': 'Programmer', 'value': 'Programmer'},
            {'label': 'QA', 'value': 'QA'},
        ])
        # the search query is recorded in the audit log
        message = Log.objects.get(event='values', object_id=2)
        self.assertEqual(message.data['query'], 'a')

    def test_values_validate(self):
        # Valid, single dict
        response = self.client.post('/api/fields/2/values/',
                                    data=json.dumps({'value': 'IT'}),
                                    content_type='application/json',
                                    HTTP_ACCEPT='application/json')
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        self.assertEqual(content, {
            'value': 'IT',
            'label': 'IT',
            'valid': True,
        })
        message = Log.objects.get(event='validate', object_id=2)
        self.assertEqual(message.data['count'], 1)

        # Invalid
        response = self.client.post('/api/fields/2/values/',
                                    data=json.dumps({'value': 'Bartender'}),
                                    content_type='application/json',
                                    HTTP_ACCEPT='application/json')
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        self.assertEqual(content, {
            'value': 'Bartender',
            'label': 'Bartender',
            'valid': False,
        })

        # Mixed, list
        response = self.client.post('/api/fields/2/values/',
                                    data=json.dumps([
                                        {'value': 'IT'},
                                        {'value': 'Bartender'},
                                        {'value': 'Programmer'}
                                    ]),
                                    content_type='application/json',
                                    HTTP_ACCEPT='application/json')
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        self.assertEqual(content, [
            {'value': 'IT', 'label': 'IT', 'valid': True},
            {'value': 'Bartender', 'label': 'Bartender', 'valid': False},
            {'value': 'Programmer', 'label': 'Programmer', 'valid': True},
        ])

        # Error - no value
        response = self.client.post('/api/fields/2/values/',
                                    data=json.dumps({}),
                                    content_type='application/json',
                                    HTTP_ACCEPT='application/json')
        self.assertEqual(response.status_code, 422)

        # Error - type
        response = self.client.post('/api/fields/2/values/',
                                    data=json.dumps(None),
                                    content_type='application/json',
                                    HTTP_ACCEPT='application/json')
        self.assertEqual(response.status_code, 422)

    def test_labels_validate(self):
        # Valid, single dict
        response = self.client.post('/api/fields/2/values/',
                                    data=json.dumps({'label': 'IT'}),
                                    content_type='application/json',
                                    HTTP_ACCEPT='application/json')
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        self.assertEqual(content, {
            'value': 'IT',
            'label': 'IT',
            'valid': True,
        })

    def test_mixed_validate(self):
        response = self.client.post('/api/fields/2/values/',
                                    data=json.dumps([
                                        {'label': 'IT'},
                                        {'label': 'Bartender'},
                                        {'value': 'Programmer'}
                                    ]),
                                    content_type='application/json',
                                    HTTP_ACCEPT='application/json')
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        self.assertEqual(content, [
            {'value': 'IT', 'label': 'IT', 'valid': True},
            {'value': 'Bartender', 'label': 'Bartender', 'valid': False},
            {'value': 'Programmer', 'label': 'Programmer', 'valid': True},
        ])

    def test_stats(self):
        # title.name
        response = self.client.get('/api/fields/2/stats/',
                                   HTTP_ACCEPT='application/json')
        self.assertEqual(response.status_code, 200)
        self.assertTrue(json.loads(response.content))
        self.assertTrue(Log.objects.filter(event='stats', object_id=2).exists())

        # title.salary
        response = self.client.get('/api/fields/3/stats/',
                                   HTTP_ACCEPT='application/json')
        self.assertEqual(response.status_code, 200)
        self.assertTrue(json.loads(response.content))
        self.assertTrue(Log.objects.filter(event='stats', object_id=3).exists())

        # project.due_date
        response = self.client.get('/api/fields/11/stats/',
                                   HTTP_ACCEPT='application/json')
        self.assertEqual(response.status_code, 200)
        stats = json.loads(response.content)
        self.assertTrue(stats)
        self.assertTrue(Log.objects.filter(event='stats', object_id=11).exists())
        # date stats serialize as ISO strings
        self.assertEqual(stats['min'], '2000-01-01')
        self.assertEqual(stats['max'], '2010-01-01')

    def test_empty_stats(self):
        # stats endpoint must still respond when the table is empty
        Title.objects.all().delete()

        response = self.client.get('/api/fields/2/stats/',
                                   HTTP_ACCEPT='application/json')
        self.assertEqual(response.status_code, 200)
        self.assertTrue(json.loads(response.content))
        self.assertTrue(Log.objects.filter(event='stats', object_id=2).exists())

    def test_dist(self):
        # title.salary
        response = self.client.get('/api/fields/3/dist/',
                                   HTTP_ACCEPT='application/json')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(json.loads(response.content), {
            u'size': 4,
            u'clustered': False,
            u'outliers': [],
            u'data': [{
                u'count': 3,
                u'values': [15000]
            }, {
                u'count': 1,
                u'values': [10000]
            }, {
                u'count': 1,
                u'values': [20000]
            }, {
                u'count': 1,
                u'values': [200000]
            }],
        })
        self.assertTrue(Log.objects.filter(event='dist', object_id=3).exists())
| 39.029851 | 81 | 0.597992 |
3b202814a1d46aa7c9f76ad4808d343b858887e2 | 441 | py | Python | src/image_capture.py | seandiacono/Nav-AI | 8e49914f3af5ed3dcd3f43725f355347b4894f56 | [
"MIT"
] | 1 | 2021-05-26T15:40:58.000Z | 2021-05-26T15:40:58.000Z | src/image_capture.py | seandiacono/Nav-AI | 8e49914f3af5ed3dcd3f43725f355347b4894f56 | [
"MIT"
] | null | null | null | src/image_capture.py | seandiacono/Nav-AI | 8e49914f3af5ed3dcd3f43725f355347b4894f56 | [
"MIT"
] | null | null | null | import airsim
import time
def image_capture(stop):
    """Poll the drone's bottom camera until *stop*() reports True.

    ``stop`` is a zero-argument callable checked after every capture; the
    raw depth images collected so far are returned once it becomes truthy.
    Captures roughly two frames per second.
    """
    client = airsim.MultirotorClient()
    client.confirmConnection()
    client.enableApiControl(True)

    captured = []
    count = 0
    while True:
        print("Image: " + str(count))
        frame = client.simGetImage(
            "bottom_center", airsim.ImageType.DepthVis)
        captured.append(frame)
        count += 1
        if stop():
            return captured
        time.sleep(0.5)
| 22.05 | 55 | 0.582766 |
465bb78a72b847d555fa3933db3c3729ac709d8c | 4,447 | py | Python | dakotathon/variables/uniform_uncertain.py | csdms/dakotathon | 6af575b0c21384b2a1ab51e26b6a08512313bd84 | [
"MIT"
] | 8 | 2019-09-11T12:59:57.000Z | 2021-08-11T16:31:58.000Z | dakotathon/variables/uniform_uncertain.py | csdms/dakota | 6af575b0c21384b2a1ab51e26b6a08512313bd84 | [
"MIT"
] | 66 | 2015-04-06T17:11:21.000Z | 2019-02-03T18:09:52.000Z | dakotathon/variables/uniform_uncertain.py | csdms/dakota | 6af575b0c21384b2a1ab51e26b6a08512313bd84 | [
"MIT"
] | 5 | 2015-03-24T22:39:34.000Z | 2018-04-21T12:14:05.000Z | """Implementation of a Dakota uniform uncertain variable."""
from .base import VariablesBase
from ..utils import to_iterable
classname = "UniformUncertain"
class UniformUncertain(VariablesBase):
    """Define attributes for Dakota uniform uncertain variables.

    The distribution lower and upper bounds are required
    specifications; the initial point is optional.

    """

    def __init__(
        self,
        descriptors=("x1", "x2"),
        lower_bounds=(-2.0, -2.0),
        upper_bounds=(2.0, 2.0),
        initial_point=None,
        **kwargs
    ):
        """Create the parameter set for a uniform uncertain variable.

        Parameters
        ----------
        descriptors : str or tuple or list of str, optional
            Labels for the variables.
        initial_point : tuple or list of numbers, optional
            Start points used by study variables.
        lower_bounds : tuple or list of numbers
            Minimum values used by the study variables.
        upper_bounds : tuple or list of numbers
            Maximum values used by the study variables.
        **kwargs
            Optional keyword arguments.

        Examples
        --------
        Create a default instance of UniformUncertain with:

        >>> v = UniformUncertain()

        """
        VariablesBase.__init__(self, **kwargs)
        # Dakota block keyword, taken from this module's file name
        # ("uniform_uncertain")
        self.variables = self.__module__.rsplit(".")[-1]
        # Assigned to the backing attributes directly; unlike the property
        # setters below, __init__ performs no type validation.
        self._descriptors = descriptors
        self._lower_bounds = lower_bounds
        self._upper_bounds = upper_bounds
        self._initial_point = initial_point

    @property
    def lower_bounds(self):
        """Minimum values of study variables."""
        return self._lower_bounds

    @lower_bounds.setter
    def lower_bounds(self, value):
        """Set minimum values of study variables.

        Parameters
        ----------
        value : list or tuple of numbers
            The minimum values.

        """
        if not isinstance(value, (tuple, list)):
            raise TypeError("Lower bounds must be a tuple or a list")
        self._lower_bounds = value

    @property
    def upper_bounds(self):
        """Maximum values of study variables."""
        return self._upper_bounds

    @upper_bounds.setter
    def upper_bounds(self, value):
        """Set maximum values of study variables.

        Parameters
        ----------
        value : list or tuple of numbers
            The maximum values.

        """
        if not isinstance(value, (tuple, list)):
            raise TypeError("Upper bounds must be a tuple or a list")
        self._upper_bounds = value

    @property
    def initial_point(self):
        """Start points used by study variables."""
        return self._initial_point

    @initial_point.setter
    def initial_point(self, value):
        """Set start points used by study variables.

        Parameters
        ----------
        value : list or tuple of numbers
            The new initial points.

        """
        if not isinstance(value, (tuple, list)):
            raise TypeError("Initial points must be a tuple or a list")
        self._initial_point = value

    def __str__(self):
        """Define the variables block for a uniform uncertain variable.

        Examples
        --------
        Display the variables block created by a default instance of
        UniformUncertain:

        >>> v = UniformUncertain()
        >>> print(v)
        variables
          uniform_uncertain = 2
            descriptors = 'x1' 'x2'
            lower_bounds = -2.0 -2.0
            upper_bounds = 2.0 2.0
        <BLANKLINE>
        <BLANKLINE>

        See Also
        --------
        dakotathon.variables.base.VariablesBase.__str__

        """
        # Base class emits the block header and descriptors; optional
        # keywords are appended only when set.
        s = VariablesBase.__str__(self)
        if self.lower_bounds is not None:
            lower_bounds = to_iterable(self.lower_bounds)
            s += "\n" + "      lower_bounds ="
            for b in lower_bounds:
                s += " {}".format(b)
        if self.upper_bounds is not None:
            upper_bounds = to_iterable(self.upper_bounds)
            s += "\n" + "      upper_bounds ="
            for b in upper_bounds:
                s += " {}".format(b)
        if self.initial_point is not None:
            initial_point = to_iterable(self.initial_point)
            s += "\n" + "      initial_point ="
            for pt in initial_point:
                s += " {}".format(pt)
        s += "\n\n"
        return s
| 28.876623 | 71 | 0.576119 |
be2e6a6a953cfd3f056fd0946074bc143c6b4ee7 | 259 | py | Python | transactions/admin.py | thoth-ky/currency_exhange_app | b1d9495057532c8a24b228e68af98929bb6abdf1 | [
"MIT"
] | null | null | null | transactions/admin.py | thoth-ky/currency_exhange_app | b1d9495057532c8a24b228e68af98929bb6abdf1 | [
"MIT"
] | null | null | null | transactions/admin.py | thoth-ky/currency_exhange_app | b1d9495057532c8a24b228e68af98929bb6abdf1 | [
"MIT"
] | null | null | null | from django.contrib import admin
from transactions.models import Transaction
class TransactionAdmin(admin.ModelAdmin):
    """Admin configuration for wallet-to-wallet transactions."""

    # Columns shown on the transaction change-list page
    list_display = ("id", "source_wallet", "target_wallet", "txn_amount", "status")


# Register the model so it appears in the Django admin
admin.site.register(Transaction, TransactionAdmin)
| 23.545455 | 83 | 0.783784 |
60aa039e53725fdd73291a6c509136aa121c0fdb | 1,936 | py | Python | mark-function.py | j-rheinheimer/Mark | 312c58a846e7129c44edccf703b2072e7453ce55 | [
"MIT"
] | null | null | null | mark-function.py | j-rheinheimer/Mark | 312c58a846e7129c44edccf703b2072e7453ce55 | [
"MIT"
] | null | null | null | mark-function.py | j-rheinheimer/Mark | 312c58a846e7129c44edccf703b2072e7453ce55 | [
"MIT"
] | null | null | null | """
MARK project:
Degree conversion analysis
FTIR data
"""
import matplotlib.pyplot as plt
import pandas as pd
import glob
wsl_path = '/mnt/c/Users/jprhe/Dropbox/Codes/Projects/mark/data/*'
linux_path = '/home/rheinheimer/Dropbox/Codes/Projects/mark/data/*'
windows_path = 'C:/Users/jprhe/Dropbox/Codes/Projects/mark/Data/*'
def conversion_degreee(n1, n2, path, time_step, separator, header):
    """
    Compute and plot the polymerization conversion degree from a series of
    FTIR spectra, one CSV file per time point.

    For each file (processed in sorted order), the degree is the ratio
    ``intensity[n1] / intensity[n2]``, i.e. the intensities at two fixed
    row positions of the spectrum.

    Parameters:
        n1 (int): Row index of the first tracked peak in each data file
        n2 (int): Row index of the second tracked peak in each data file
        path (string): Glob pattern matching the data files.
            Example: '/home/rheinheimer/Dropbox/Codes/Python/lab-mav/mark/data/*'
            Observation: The '*' is mandatory
        time_step (int/float): Time step between consecutive measurements
            Examples: 1, 0.2, 3.5
        separator (string): The separator between the data columns
            Examples: ';' ',' ' '
        header (string): Header argument forwarded to pandas.read_csv;
            pass None when the files have no header row

    Side effects:
        Overwrites 'degree_output.txt' and 'time_step_output.txt' in the
        current directory and opens a matplotlib window.
    """
    files = glob.glob(
        pathname=path
    )

    degree_vector = []
    time_vector = []
    cont = 0

    for file in sorted(files):
        cont = cont + time_step

        df = pd.read_csv(
            filepath_or_buffer=file,
            sep=separator,
            header=header,
            index_col=False,
            engine='python',
        )
        df.columns = ['wave_number', 'intensity']

        # Conversion degree as the ratio of the two tracked peak intensities
        degree = df.intensity[n1]/df.intensity[n2]
        degree_vector.append(degree)
        time_vector.append(cont)

    # Dump the raw Python-list representations for later inspection
    with open('degree_output.txt', 'w') as degree_output:
        degree_output.writelines(str(degree_vector))

    with open('time_step_output.txt', 'w') as time_step_output:
        time_step_output.writelines(str(time_vector))

    plt.figure()
    plt.xlabel('Time')
    plt.ylabel('Conversion degree')
    plt.plot(time_vector, degree_vector)
    plt.show()
| 26.520548 | 73 | 0.652376 |
03cc824e64947b5018720aba53a0983d9961b7c5 | 1,007 | py | Python | examples/8_graph_from_yaml/main.py | YoshikazuArimitsu/DataProcessingPipiline_template | 5f08e3a072b338e9898a8f889c3f7700ba965bb6 | [
"MIT"
] | null | null | null | examples/8_graph_from_yaml/main.py | YoshikazuArimitsu/DataProcessingPipiline_template | 5f08e3a072b338e9898a8f889c3f7700ba965bb6 | [
"MIT"
] | null | null | null | examples/8_graph_from_yaml/main.py | YoshikazuArimitsu/DataProcessingPipiline_template | 5f08e3a072b338e9898a8f889c3f7700ba965bb6 | [
"MIT"
] | null | null | null | import os
from pathlib import Path
from logging import DEBUG, basicConfig, getLogger
from aksdp.task import Task
from aksdp.util import PlantUML
from aksdp.util import graph_factory as gf
logger = getLogger(__name__)
class TaskA(Task):
    """Demo task that logs its parameters and forwards the dataset."""

    def __init__(self, params):
        super().__init__(params)

    def main(self, ds):
        """Log execution and return *ds* unchanged."""
        logger.info("run TaskA, params=%s", self.params)
        return ds
class TaskB(Task):
    """Demo task that logs its parameters and forwards the dataset."""

    def main(self, ds):
        """Log execution and return *ds* unchanged."""
        logger.info("run TaskB, params=%s", self.params)
        return ds
class TaskC(Task):
    """Demo task that logs its parameters and forwards the dataset."""

    def main(self, ds):
        """Log execution and return *ds* unchanged."""
        logger.info("run TaskC, params=%s", self.params)
        return ds
class TaskD(Task):
    """Demo task that logs its parameters and forwards the dataset."""

    def main(self, ds):
        """Log execution and return *ds* unchanged."""
        logger.info("run TaskD, params=%s", self.params)
        return ds
if __name__ == "__main__":
    basicConfig(level=DEBUG)
    # Build the task graph from the YAML definition next to this script
    config = Path(os.path.dirname(__file__)) / Path("graph.yml")
    graph = gf.create_from_file(config)
    graph.run()
    # Emit a PlantUML rendering URL for the executed graph
    print(f"PlantUML Diagraph : {PlantUML.graph_to_url(graph)}")
| 21.425532 | 64 | 0.66137 |
9932f08c05a7a021c7062fccaf100c6ac461d2b7 | 34,338 | py | Python | library_tools/assemble.py | shiwei23/ImageAnalysis3 | 1d2aa1721d188c96feb55b22fc6c9929d7073f49 | [
"MIT"
] | 3 | 2018-10-10T22:15:10.000Z | 2020-11-20T15:17:45.000Z | library_tools/assemble.py | shiwei23/ImageAnalysis3 | 1d2aa1721d188c96feb55b22fc6c9929d7073f49 | [
"MIT"
] | 2 | 2019-10-31T13:29:05.000Z | 2021-08-12T17:32:32.000Z | library_tools/assemble.py | shiwei23/ImageAnalysis3 | 1d2aa1721d188c96feb55b22fc6c9929d7073f49 | [
"MIT"
] | 2 | 2020-06-04T18:40:52.000Z | 2022-03-18T15:53:05.000Z | import os, glob, sys, time
import pickle
import numpy as np
# biopython
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Blast.Applications import NcbiblastnCommandline
from Bio.Blast import NCBIXML
# other packages
from . import LibraryDesigner as ld
from . import LibraryTools as lt
# shared variables
from . import _rand_seq_generator
from . import _primer_folder, _readout_folder, _genome_folder
from . import quality_check
# Load probes
def _load_probes_in_folder(report_folder, pb_postfix='.pbr', save_folder=None):
pb_dict = {}
report_files = glob.glob(os.path.join(report_folder, '*'+pb_postfix))
for _file in report_files:
reg_name = os.path.basename(_file).split(pb_postfix)[0]
pbde = ld.pb_reports_class()
pbde.load_pbr(_file)
if save_folder is not None:
_out_file = os.path.join(save_folder, os.path.basename(_file))
pbde.save_file = _out_file
pb_dict[reg_name] = pbde
return pb_dict
# Screen probes
def Screen_fasta_against_fasta(input_fasta, ref_fasta, word_size=17, allowed_hits=8,
                               check_rc=True, save=True, save_folder=None,
                               overwrite=False, return_kept_flag=False, verbose=True):
    """Function to screen a given fasta against another reference fasta

    NOTE(review): this function appears unfinished -- it loads the input
    records and builds the k-mer reference table but never filters, saves,
    or returns anything (``allowed_hits``, ``check_rc``, ``save``,
    ``overwrite`` and ``return_kept_flag`` are unused).  Compare with
    Screen_probe_against_fasta below for the completed workflow.
    """
    ## Check inputs
    if verbose:
        print(f"- Screen fasta:{input_fasta} against given fasta file:{ref_fasta}")
    if not os.path.isfile(input_fasta):
        raise IOError(f"Input fasta:{input_fasta} is not a file.")
    if not os.path.isfile(ref_fasta):
        raise IOError(f"Reference fasta:{ref_fasta} is not a file.")
    word_size = int(word_size)
    allowed_hits = int(allowed_hits)
    # default output folder sits next to the input fasta
    if save_folder is None:
        save_folder = os.path.dirname(input_fasta)+'_filtered'
    if not os.path.exists(save_folder):
        os.makedirs(save_folder)
        if verbose:
            print(f"-- create {save_folder} to store filter probes")
    ## load probes
    with open(input_fasta, 'r') as _handle:
        _input_records = []
        for _record in SeqIO.parse(_handle, "fasta"):
            _input_records.append(_record)
    # construct table for ref_fasta: k-mer lookup over the reference
    if verbose:
        print(f"-- constructing reference table for fasta file")
    _ref_names, _ref_seqs = ld.fastaread(ref_fasta, force_upper=True)
    _ref_table = ld.OTmap(_ref_seqs, word_size, use_kmer=True)
def Screen_probe_against_fasta(report_folder, ref_fasta, word_size=17, allowed_hits=8,
                               check_rc=True, save=True, save_folder=None,
                               overwrite=False, return_kept_flag=False, verbose=True):
    """Function to screen probes in one folder against a given fasta file,
    Inputs:
        report_folder: folder for probe reports, str of path
        ref_fasta: filename for reference fasta file to screen against, string of file path
        word_size: word_size used for probe screening, int (default: 17)
        allowed_hits: allowed hits for one probe in the fasta, int (default: 8)
        check_rc: whether check reverse-complement of the probe, bool (default: True)
        save: whether save result probe reports, bool (default: True)
        save_folder: folder to save selected probes, string of path (default: None, which means +'_filtered')
        overwrite: whether overwrite existing result probe reports, bool (default: False)
        return_kept_flag: whether return flags for whether keeping the record, bool (default:False)
        verbose: say something!, bool (default: True)
    Outputs:
        filtered probe dict (and, if return_kept_flag, a dict of boolean keep-masks per region)
    """
    ## Check inputs
    if verbose:
        print(f"- Screen probes against given fasta file:{ref_fasta}")
    if not os.path.exists(report_folder):
        raise IOError(f"Input report_folder:{report_folder} does not exist!")
    if not os.path.isfile(ref_fasta):
        raise IOError(f"Reference fasta:{ref_fasta} is not a file.")
    word_size = int(word_size)
    allowed_hits = int(allowed_hits)
    if save_folder is None:
        save_folder = report_folder+'_filtered'
    if not os.path.exists(save_folder):
        os.makedirs(save_folder)
        if verbose:
            print(f"-- create {save_folder} to store filter probes")
    ## load probes
    # BUGFIX: pass save_folder so that save_pbr()/save_csv() below write the
    # filtered reports into save_folder instead of overwriting the originals
    _pb_dict = _load_probes_in_folder(report_folder, save_folder=save_folder)
    # screen savefile: skip regions already filtered (unless overwriting).
    # BUGFIX: iterate over a snapshot -- deleting from a dict while
    # iterating it raises RuntimeError in Python 3.
    for _reg_name, _pb_obj in list(_pb_dict.items()):
        if not overwrite and os.path.exists(_pb_obj.save_file):
            del _pb_dict[_reg_name]
    if len(_pb_dict) == 0:
        if verbose:
            print(f"-- no probe loaded, exit.")
        return {}
    # construct table for ref_fasta: k-mer lookup over the reference
    if verbose:
        print(f"-- constructing reference table for fasta file")
    _ref_names, _ref_seqs = ld.fastaread(ref_fasta, force_upper=True)
    _ref_table = ld.OTmap(_ref_seqs, word_size, use_kmer=True)
    # filter probes (note: shallow copy -- probe objects are shared with _pb_dict)
    _filtered_pb_dict = {_reg_name: _pb_obj for _reg_name,
                         _pb_obj in _pb_dict.items()}
    _kept_flag_dict = {_reg_name: [] for _reg_name in _pb_dict}
    for _reg_name, _pb_obj in _filtered_pb_dict.items():
        # support both old ('pb_reports_keep') and new ('kept_probes') attribute names
        if hasattr(_pb_obj, 'pb_reports_keep'):
            _probe_dict = getattr(_pb_obj, 'pb_reports_keep')
        elif hasattr(_pb_obj, 'kept_probes'):
            _probe_dict = getattr(_pb_obj, 'kept_probes')
        else:
            raise AttributeError('No probe attribute exists.')
        _seqs = list(_probe_dict.keys())
        # count reference hits, optionally including the reverse complement
        if check_rc:
            _hits = [_ref_table.get(_seq, rc=True) +
                     _ref_table.get(_seq, rc=False)
                     for _seq in _seqs]
        else:
            _hits = [_ref_table.get(_seq, rc=False) for _seq in _seqs]
        _keep_filter = [_h <= allowed_hits for _h in _hits]
        # save kept probes back onto the report object
        _kept_pbs = {_s: _info for (_s, _info), _keep in zip(
            _probe_dict.items(), _keep_filter) if _keep}
        if hasattr(_pb_obj, 'pb_reports_keep'):
            _filtered_pb_dict[_reg_name].pb_reports_keep = _kept_pbs
        elif hasattr(_pb_obj, 'kept_probes'):
            _filtered_pb_dict[_reg_name].kept_probes = _kept_pbs
        # BUGFIX: np.bool alias was removed in NumPy >= 1.24; use builtin bool
        _kept_flag_dict[_reg_name] = np.array(_keep_filter, dtype=bool)
        if verbose:
            print(
                f"--- {len(_kept_pbs)} / {len(_keep_filter)} probes kept for {_reg_name}")
        if save:
            _filtered_pb_dict[_reg_name].save_pbr()
            _filtered_pb_dict[_reg_name].save_csv()
    if return_kept_flag:
        return _filtered_pb_dict, _kept_flag_dict
    else:
        return _filtered_pb_dict
# load
def load_readouts(_num_readouts, _type='NDB', _num_colors=3, _start_channel=0,
                  _readout_folder=_readout_folder, _start_id=0, _verbose=True):
    """Function to load readouts into a list
    Inputs:
        _num_readouts: total number of readouts to select, int
        _type: readout file prefix (e.g. 'NDB'), str
        _num_colors: number of color channels to draw readouts from, int
        _start_channel: index of the first channel to use, int
        _readout_folder: folder containing '<type>_*.fasta' files, str of path
        _start_id: first readout index to take within each file, int
        _verbose: currently unused, bool
    Output:
        list of SeqRecords, interleaved across the selected color channels
        (readout i comes from channel i % _num_colors)
    """
    # get target files
    # BUGFIX: sort the glob results -- glob order is filesystem-dependent,
    # and the [::-1] channel slicing below relies on a deterministic order
    _readout_files = sorted(glob.glob(os.path.join(
        _readout_folder, _type) + '_*.fasta'))
    if len(_readout_files) < _num_colors:
        raise IOError(
            "Not enough readout files in given readout folder compared to num-colors specified")
    # readouts drawn per color channel (rounded up)
    _num_per_color = int(np.ceil(_num_readouts / _num_colors))
    # load readouts for each selected channel (files taken in reverse order)
    _multi_readout_lists = []
    for _rd_fl in _readout_files[::-1][int(_start_channel):int(_start_channel+_num_colors)]:
        _readout_list = []
        with open(_rd_fl, 'r') as _rd_handle:
            for _readout in SeqIO.parse(_rd_handle, "fasta"):
                _readout_list.append(_readout)
        _multi_readout_lists.append(
            _readout_list[_start_id: _start_id+_num_per_color])
    # interleave channels round-robin until enough readouts are selected
    _selected_list = []
    while len(_selected_list) < _num_readouts:
        _selected_list.append(
            _multi_readout_lists[len(_selected_list) % _num_colors].pop(0))
    # return
    return _selected_list
def load_primers(_picked_sets, _primer_folder=_primer_folder,
                 _primer_file_tag='_keep.fasta', _verbose=True):
    """Pick one primer from each primer fasta file.

    Exactly two files matching ``*_primers<_primer_file_tag>`` are expected
    in *_primer_folder* (forward and reverse).  From each file the record
    whose id ends with the corresponding index in *_picked_sets* is kept.
    Returns the list of selected primer SeqRecords.
    """
    if not isinstance(_picked_sets, list):
        raise ValueError("kwd _picked_sets should be a list!")
    pattern = os.path.join(_primer_folder, '*_primers' + _primer_file_tag)
    primer_files = glob.glob(pattern)
    if len(primer_files) != 2:
        raise IOError("_primer_files have more or less two hits")
    selected = []
    for filename, picked_id in zip(primer_files, _picked_sets):
        with open(filename, 'r') as handle:
            for record in SeqIO.parse(handle, "fasta"):
                if int(record.id.split('_')[-1]) == int(picked_id):
                    if _verbose:
                        print("- Picked primer:", record)
                    selected.append(record)
    return selected
def _assemble_single_probe(_target, _readout_list, _fwd_primer, _rev_primer,
                           _primer_len=20, _readout_len=20, _target_len=42,
                           _add_rand_gap=0):
    """Assemble one probe sequence.

    All required inputs are SeqRecords; the probe layout is:
    fwd_primer + [rc(readout) + gap] * first half + target + gap +
    [rc(readout) (+ gap between)] * second half + rc(rev_primer),
    where each gap is ``_add_rand_gap`` random bases (empty by default).
    The reversal function is: quality_check._parsing_probe_sequence
    """
    # initialize final sequence with fwd primer
    _seq = _fwd_primer[-_primer_len:]
    # append half of readouts on 5' (reverse-complemented binding sites)
    for _i in range(int(len(_readout_list)/2)):
        _seq += _readout_list[_i][-_readout_len:].reverse_complement()
        _seq += _rand_seq_generator(_add_rand_gap)
    # append target region
    _seq += _target[-_target_len:]
    _seq += _rand_seq_generator(_add_rand_gap)
    # append other half of readouts on 3' (gaps only between readouts)
    for _i in range(int(len(_readout_list)/2), len(_readout_list)):
        _seq += _readout_list[_i][-_readout_len:].reverse_complement()
        if _i < len(_readout_list)-1:
            _seq += _rand_seq_generator(_add_rand_gap)
    # append reverse_complement of the reverse primer
    _seq += _rev_primer[-_primer_len:].reverse_complement()
    # clear description so the fasta header is just the id set by the caller
    _seq.description = ''
    return _seq
def _assemble_single_probename(_pb_info, _readout_name_list, _pb_id, _fwd_primer, _rev_primer):
"""Assemble one probe name by given probe info
_pb_info is one of values in pb_designer.pb_reports_keep or pb_designer.kept_probes"""
# region
_name = ['loc']
if 'loc_' in _pb_info['reg_name']:
_name.append( _pb_info['reg_name'].split('loc_')[1].split('_')[0] )
else:
_name.append(_pb_info['reg_name'].split('_')[0])
# gene
if 'gene_' in _pb_info['reg_name']:
_name.extend(['gene', _pb_info['reg_name'].split('gene_')[1].split('_')[0]])
# gene
elif 'reg_' in _pb_info['reg_name']:
_name.extend(['gene', _pb_info['reg_name'].split('reg_')[1].split('_')[0]])
# pb_index
_name.extend(['pb', str(_pb_id)])
# probe position
_name.extend(['pos', str(_pb_info['pb_index'])])
# if strand specified, add
if 'strand' in _pb_info:
_name.extend( ['strand', str(_pb_info['strand'])] )
# append readout
_name.extend( ['readouts', '[' + ','.join(_readout_name_list) + ']'] )
# append primer
_name.extend( ['primers', '[' + ','.join([_fwd_primer.id, _rev_primer.id]) + ']'] )
return '_'.join(_name)
# function to assemble probes in the whole library
def Assemble_probes(library_folder, probe_source, gene_readout_dict,
readout_dict, primers,
rc_targets=False, add_rand_gap=0,
primer_len=20, readout_len=20, target_len=42,
num_readout_per_probe=None, unique_readout_per_probe=False,
save=True, save_name='candidate_probes.fasta', save_folder=None,
overwrite=True, verbose=True):
"""Function to Assemble_probes by given probe_soruce, gene_readout_dict, readout_dict and primers,
Inputs:
library_folder: path to the library, str of path
probe_source: source for probes, str or list of SeqRecords
gene_readout_dict: dict of gene/region -> list of readouts used
readout_dict: dict of readout_type ('u'|'c'|'m') -> list of readout SeqRecords
primers: list of two SeqRecords for forward and reverse primers, list of SeqRecords
rc_targets: whether reverse-complement target sequences, bool (default: False)
add_rand_gap: number of random sequence added between readout biding sites, int (default: 0)
primer_len: length of primer sequence, int (default: 20)
readout_len: length of each readout-binding site, int (default: 20)
target_len: length of targeting region, int (default: 42)
num_readout_per_probe: number of readouts in individual probes, int (default: None, all readouts given in gene_readout_dict)
unique_readout_per_probe: whether individual probe has only one unique readout, bool (default: False)
save: whether save result probes as fasta, bool (default: True)
save_name: file basename of saved fasta file, str (default: 'candidate_probes.fasta')
save_folder: folder for saved fasta file, str (default: None, which means library_folder)
overwrite: whether overwrite existing file, bool (default: True)
verbose: say something!, bool (default: True)
Outputs:
cand_probes: list of probes that assembled, list of SeqRecords
readout_summary: summary dict of readout used in every region, dict of str -> list
"""
## Check inputs
if verbose:
print(f"- Assemble probes by given target sequences, readouts and primers.")
if not os.path.isdir(library_folder):
raise ValueError(
f"Wrong input :{library_folder}, should be path to library folder")
# probe source
if isinstance(probe_source, str):
if not os.path.isdir(probe_source):
report_folder = os.path.join(library_folder, probe_source)
else:
report_folder = probe_source
if not os.path.isdir(report_folder):
raise ValueError(
f"Wrong input :{report_folder}, should be path to probes")
# load probes
_pb_dict = _load_probes_in_folder(report_folder)
elif isinstance(probe_source, dict):
_pb_dict = probe_source
# gene_readout_dict and readout_dict
if not isinstance(gene_readout_dict, dict):
raise TypeError(f"Wrong input type for gene_readout_dict, \
should be dict but {type(gene_readout_dict)} is given")
if not isinstance(readout_dict, dict):
raise TypeError(f"Wrong input type for readout_dict, \
should be dict but {type(readout_dict)} is given")
# check readout types
_readout_types = []
for _reg_name, _readout_markers in gene_readout_dict.items():
for _mk in _readout_markers:
if _mk[0] not in _readout_types:
_readout_types.append(_mk[0])
if _mk[0] not in readout_dict:
raise KeyError(f"{_mk[0]} type readout is not included in readout_dict")
if verbose:
print(f"-- included readout types: {_readout_types}")
# check number of readouts_per_probe
if num_readout_per_probe is None:
_num_readouts = None
else:
_num_readouts = int(num_readout_per_probe)
_unique_readout_pb = bool(unique_readout_per_probe)
# check savefile
if save_folder is None:
save_folder = library_folder
if not os.path.exists(save_folder):
os.makedirs(save_folder)
if '.fasta' not in save_name:
save_name += '.fasta'
## start asembling probes
cand_probes = [] # initialize
readout_summary = {_t: {_reg_name: []
for _reg_name in _pb_dict} for _t in _readout_types}
# primers shared by the library
fwd_primer, rev_primer = primers
for _reg_name, _pb_obj in _pb_dict.items():
_reg_readout_info = gene_readout_dict[_reg_name]
_reg_readouts = []
_reg_readout_names = []
for _mk in _reg_readout_info:
_type = _mk[0]
_ind = int(_mk[1:])
_sel_readout = readout_dict[_type][_ind]
_reg_readouts.append(_sel_readout)
_reg_readout_names.append(_sel_readout.id + '_' + _type)
#print(readout_summary[_type][_reg_name], _sel_readout)
if len(readout_summary[_type][_reg_name])==0 \
or _sel_readout.id not in [_rd.id for _rd in readout_summary[_type][_reg_name]]:
readout_summary[_type][_reg_name].append(_sel_readout)
if isinstance(_pb_obj, ld.pb_reports_class):
# new version of probe_designer
if hasattr(_pb_obj, 'kept_probes'):
_probe_dict = getattr(_pb_obj, 'kept_probes')
# old version of probe_designer
elif hasattr(_pb_obj, 'pb_reports_keep'):
_probe_dict = getattr(_pb_obj, 'pb_reports_keep')
else:
raise AttributeError('No probe attribute exists.')
if verbose:
print(f"--- assemblying {len(_probe_dict)} probes in region: {_reg_name}")
# loop through probes
for _i, (_seq, _info) in enumerate(_probe_dict.items()):
if isinstance(_seq, bytes):
_seq = _seq.decode()
if rc_targets:
_target = SeqRecord(Seq(_seq), id=_info['name']).reverse_complement()
else:
_target = SeqRecord(Seq(_seq), id=_info['name'])
# decide readout used in this probe
if _num_readouts is None:
if _unique_readout_pb:
_pb_readouts = [_reg_readouts[_i%len(_reg_readouts)]] * len(_reg_readouts)
_pb_readout_names = [_reg_readout_names[_i%len(_reg_readouts)]] * len(_reg_readouts)
else:
_pb_readouts = _reg_readouts
_pb_readout_names = _reg_readout_names
else:
if _unique_readout_pb:
_pb_readouts = [_reg_readouts[_i%len(_reg_readouts)]] * _num_readouts
_pb_readout_names = [_reg_readout_names[_i%len(_reg_readouts)]] * _num_readouts
else:
_pb_readouts = [_rd for _j, _rd in enumerate(_reg_readouts) if (_j+_i)%len(_reg_readouts) < _num_readouts ]
_pb_readout_names = [_name for _j, _name in enumerate(_reg_readout_names) if (_j+_i)%len(_reg_readouts) < _num_readouts ]
# Assemble the probe
_probe = _assemble_single_probe(_target, _pb_readouts, fwd_primer, rev_primer,
_primer_len=primer_len, _readout_len=readout_len,
_target_len=target_len, _add_rand_gap=add_rand_gap)
_name = _assemble_single_probename(_info, _pb_readout_names, _i, fwd_primer, rev_primer)
_probe.id = _name
_probe.name, _probe.description = '', ''
cand_probes.append(_probe)
elif isinstance(_pb_obj, list):
if verbose:
print( f"--- assemblying {len(_pb_obj)} probes in region: {_reg_name}")
# case that a list is provided
for _i, _info in enumerate(_pb_obj):
# target
if rc_targets:
_target = SeqRecord(Seq(_info['sequence'])).reverse_complement()
else:
_target = SeqRecord(Seq(_info['sequence']))
# decide readout used in this probe
if _num_readouts is None:
if _unique_readout_pb:
_pb_readouts = [_reg_readouts[_i%len(_reg_readouts)]] * len(_reg_readouts)
_pb_readout_names = [_reg_readout_names[_i%len(_reg_readouts)]] * len(_reg_readouts)
else:
_pb_readouts = _reg_readouts
_pb_readout_names = _reg_readout_names
else:
if _unique_readout_pb:
_pb_readouts = [_reg_readouts[_i%len(_reg_readouts)]] * _num_readouts
_pb_readout_names = [_reg_readout_names[_i%len(_reg_readouts)]] * _num_readouts
else:
_pb_readouts = [_rd for _j, _rd in enumerate(_reg_readouts) if (_j+_i)%len(_reg_readouts) < _num_readouts ]
_pb_readout_names = [_name for _j, _name in enumerate(_reg_readout_names) if (_j+_i)%len(_reg_readouts) < _num_readouts ]
# Assemble the probe
_probe = _assemble_single_probe(_target, _pb_readouts, fwd_primer, rev_primer,
_primer_len=primer_len, _readout_len=readout_len,
_target_len=target_len, _add_rand_gap=add_rand_gap)
_name = _assemble_single_probename(_info, _pb_readout_names, _i, fwd_primer, rev_primer)
#_name = _info['region']+'_gene_'+_info['gene']+'_pb_'+str(_i) +\
# '_pos_'+str(_info['position'])+'_readouts_[' + \
# ','.join(_pb_readout_names) + ']'
_probe.id = _name
_probe.name, _probe.description = '', ''
cand_probes.append(_probe)
if verbose:
print(f"-- {len(cand_probes)} probes assembled in total.")
if save:
# save cand_probe
_save_filename = os.path.join(save_folder, save_name)
if not os.path.isfile(_save_filename) or overwrite:
if verbose:
print(
f"-- saving {len(cand_probes)} probes into file:{_save_filename}")
with open(_save_filename, 'w') as _output_handle:
SeqIO.write(cand_probes, _output_handle, "fasta")
# save readout_summary, coupled with save_filename
_readout_summary_filename = os.path.join(
save_folder, 'readout_summary.pkl')
if verbose:
print(
f"-- saving readout_summary into file:{_readout_summary_filename}")
pickle.dump(readout_summary, open(_readout_summary_filename, 'wb'))
else:
print(f"cand_probe file:{_save_filename} already exists, skip.")
return cand_probes, readout_summary
def Select_subset(input_probes, select_num=None, select_size=None,
                  region_marker='reg_', position_marker='pos_',
                  select_mode='mid', return_dict=False,
                  save=True, save_folder=None, save_name=None, verbose=True):
    """Function to select a subset of a given library.

    Args:
        input_probes: list of Bio.SeqRecord probes, or path to a probe fasta file.
        select_num: maximum number of probes kept per region (None = no cap).
        select_size: size (bp) of the window to keep probes within (None = no window).
        region_marker: tag in probe ids preceding the region name.
        position_marker: tag in probe ids preceding the probe coordinate.
        select_mode: 'mid' (keep probes closest to region center), 'left' or 'right'.
        return_dict: if True return {region_id: [probes]}, else a flat list.
        save: whether to write the selected probes into a fasta file.
        save_folder: output folder (defaults to the input file's folder).
        save_name: unused; kept for backward compatibility of the signature.
        verbose: print progress messages.
    Returns:
        dict of region_id -> probes if return_dict, else a flat list of SeqRecords.
    Raises:
        IOError, TypeError, ValueError on invalid inputs.
    """
    ## check inputs
    if verbose:
        print(f"- Start selecting sub_library, n={select_num},size={select_size}")
    if isinstance(input_probes, str):
        if os.path.isfile(input_probes):
            with open(input_probes, 'r') as _handle:
                _pb_records = []
                for _record in SeqIO.parse(_handle, "fasta"):
                    _pb_records.append(_record)
        else:
            raise IOError(f"Input file:{input_probes} not exists!")
    elif isinstance(input_probes, list) and isinstance(input_probes[0], SeqRecord):
        _pb_records = input_probes
    else:
        raise TypeError(f"Wrong input type of input_probes, should be list of SeqRecords or string of file-path")
    if select_num is None and select_size is None:
        raise ValueError(f"At least one of select_num and select_size should be given. ")
    # probe dict
    pb_dic = quality_check.split_probe_by_gene(_pb_records, species_marker=region_marker)
    # keep record of region length
    _reg_size_dic = {}
    # sort probes and also check region size
    for _reg_id, _pbs in pb_dic.items():
        _sorted_pbs = sorted(_pbs, key=lambda v:int(v.id.split(position_marker)[1].split('_')[0]))
        pb_dic[_reg_id] = _sorted_pbs
        # region span is encoded as 'start-end' after ':' in the probe id
        _start, _end = _pbs[0].id.split(':')[1].split('_')[0].split('-')
        _start, _end = int(_start), int(_end)
        _reg_size_dic[_reg_id] = abs(_end - _start)
    # select probes
    sel_pb_dic = {_reg_id:[] for _reg_id in pb_dic}
    for _reg_id, _pbs in pb_dic.items():
        _pb_coords = np.array([int(_p.id.split(position_marker)[1].split('_')[0]) for _p in _pbs])
        # distance metric per mode (smaller = preferred); the selection logic
        # below is identical for all modes, so compute the metric first.
        if select_mode == 'mid':
            _metric = np.abs(_pb_coords - _reg_size_dic[_reg_id]/2)
        elif select_mode == 'left':
            _metric = _pb_coords
        elif select_mode == 'right':
            _metric = _reg_size_dic[_reg_id] - _pb_coords
        else:
            raise ValueError(f"Wrong input select_mode. ")
        if select_size is not None:
            _sel_inds = np.where(_metric <= select_size/2)[0]
            # window yielded fewer probes than requested -> fall back to closest-N
            if select_num is not None and len(_sel_inds) < select_num:
                _sel_inds = np.argsort(_metric)[:select_num]
        elif select_num is not None:
            _sel_inds = np.argsort(_metric)[:select_num]
        # select probes.
        # BUG FIX: the old code used _sel_inds.astype(np.int); the np.int alias
        # was removed in NumPy 1.24 (AttributeError). Use builtin int and a set
        # for O(1) membership instead of scanning an array per probe.
        _sel_index_set = {int(_j) for _j in _sel_inds}
        sel_pb_dic[_reg_id] = [_p for _i, _p in enumerate(_pbs) if _i in _sel_index_set]
        if verbose:
            print(f"--- {len(sel_pb_dic[_reg_id])} probes kept for region:{_reg_id}")
    # summarize into list of pb_records
    _sel_pb_records = []
    for _reg_id, _pbs in sel_pb_dic.items():
        _sel_pb_records += _pbs
    # save
    if save:
        if isinstance(input_probes, str):
            save_basename = os.path.basename(input_probes)
        else:
            save_basename = 'sub_probes.fasta'
        if isinstance(input_probes, str) and save_folder is None:
            save_folder = os.path.dirname(input_probes)
        elif save_folder is not None:
            if not os.path.exists(save_folder):
                os.makedirs(save_folder)
        else:
            raise ValueError(f"save_folder is not given!")
        # add postfix describing the selection parameters
        if select_num is not None:
            save_basename = save_basename.replace('.fasta', f'_n_{select_num}.fasta')
        if select_size is not None:
            save_basename = save_basename.replace('.fasta', f'_size_{select_size}.fasta')
        save_filename = os.path.join(save_folder, save_basename)
        with open(save_filename, 'w') as _output_handle:
            if verbose:
                print(f"-- saving to file: {save_filename} with {len(_sel_pb_records)} probes")
            SeqIO.write(_sel_pb_records, _output_handle, "fasta")
    if return_dict:
        return sel_pb_dic
    else:
        return _sel_pb_records
def Replace_primers(input_probes, primers, primer_len=20, primer_nametag='primer_',
                    save=True, save_folder=None, save_name=None,
                    verbose=True):
    """Replace the forward/reverse primer sequences of every probe in a library.

    Args:
        input_probes: list of Bio.SeqRecord probes, or path to a probe fasta file.
        primers: 2-tuple (fwd_primer, rev_primer) of SeqRecords; the last
            primer_len bases of each are used.
        primer_len: length (bases) of each primer region within a probe.
        primer_nametag: tag inside probe ids that precedes the primer annotation.
        save, save_folder: fasta output control (folder defaults next to input).
        save_name: unused; kept for backward compatibility of the signature.
        verbose: print progress messages.
    Returns:
        list of updated SeqRecords.
    NOTE(review): records are updated IN PLACE (`_new_pb = _pb` aliases the
    input record), so callers passing a list will see their records mutated —
    confirm this is intended before copying-on-write is introduced.
    """
    if verbose:
        print(f"- Start replacing primers")
    if isinstance(input_probes, str):
        if os.path.isfile(input_probes):
            with open(input_probes, 'r') as _handle:
                _pb_records = []
                for _record in SeqIO.parse(_handle, "fasta"):
                    _pb_records.append(_record)
        else:
            raise IOError(f"Input file:{input_probes} not exists!")
    elif isinstance(input_probes, list) and isinstance(input_probes[0], SeqRecord):
        _pb_records = input_probes
    else:
        raise TypeError(f"Wrong input type of input_probes, should be list of SeqRecords or string of file-path")
    # get primers: last primer_len bases; reverse primer is reverse-complemented
    fwd_primer, rev_primer = primers
    fwd_seq = fwd_primer[-primer_len:].seq
    rev_seq = rev_primer[-primer_len:].reverse_complement().seq
    # update primers: splice the new primer ends around the probe interior
    _updated_records = []
    for _pb in _pb_records:
        _new_pb = _pb
        _new_pb.seq = fwd_seq + _pb.seq[primer_len:-primer_len] + rev_seq
        _new_pb.name, _new_pb.description = '',''
        # appended before the id rewrite below; same object, so the id change
        # is still reflected in _updated_records
        _updated_records.append(_new_pb)
        if primer_nametag in _pb.id:
            # rewrite the primer annotation inside the id: keep the prefix up to
            # the nametag, insert the new [fwd,rev] pair, keep the trailing part
            _new_pb.id = _pb.id.split(primer_nametag)[0]+primer_nametag+\
                f"[{fwd_primer.id.split('_')[-1]},{rev_primer.id.split('_')[-1]}]"+\
                _pb.id.split(_pb.id.split(primer_nametag)[1].split('_')[0])[1]
    if save:
        if isinstance(input_probes, str):
            save_basename = os.path.basename(input_probes)
        else:
            save_basename = 'probes_new.fasta'
        # encode the new primer pair into the output filename
        save_basename = save_basename.replace('.fasta', f"_primer_{fwd_primer.id.split('_')[-1]}_{rev_primer.id.split('_')[-1]}.fasta")
        if isinstance(input_probes, str) and save_folder is None:
            save_folder = os.path.dirname(input_probes)
        elif save_folder is not None:
            if not os.path.exists(save_folder):
                os.makedirs(save_folder)
        else:
            raise ValueError(f"save_folder is not given!")
        save_filename = os.path.join(save_folder, save_basename)
        with open(save_filename, 'w') as _output_handle:
            if verbose:
                print(f"-- saving to file: {save_filename} with {len(_updated_records)} probes")
            SeqIO.write(_updated_records, _output_handle, "fasta")
    return _updated_records
def replace_gene_specifc_readouts(probe_records, gene_2_readout_dict, swap_ratio,
                                  swap_position=0, region_pb_lower_limit=75,
                                  primer_len=20, readout_len=20, target_len=42, add_rand_gap=0,
                                  save=True, save_name='candidate_probes_swapped.fasta', save_folder=None,
                                  overwrite=True, verbose=True):
    """Function to swap in gene_specific readouts for assembled probes.

    Args:
        probe_records: list of Bio.SeqRecord probes, or path to a probe fasta file.
        gene_2_readout_dict: {gene_name: readout SeqRecord} to swap in.
        swap_ratio: fraction (0-1) of probes per region to rewrite.
        swap_position: index of the readout slot to replace in each probe.
        region_pb_lower_limit: regions with <= this many probes are skipped.
        primer_len, readout_len, target_len, add_rand_gap: probe layout parameters,
            forwarded to the parsing/assembly helpers.
        save, save_name, save_folder, overwrite: currently unused by this
            function; kept for signature compatibility with sibling functions.
        verbose: print progress messages.
    Returns:
        list of SeqRecords with the selected probes rewritten.
    Raises:
        IOError if probe_records is a path that does not exist;
        TypeError for any other unsupported probe_records type.
    """
    # load probes
    if isinstance(probe_records, str):
        if os.path.isfile(probe_records):
            with open(probe_records, 'r') as _handle:
                _pb_records = []
                for _record in SeqIO.parse(_handle, "fasta"):
                    _pb_records.append(_record)
        else:
            # BUG FIX: previously _pb_records was silently left undefined for a
            # missing file, causing a confusing NameError below. Fail fast,
            # consistent with Select_subset / Replace_primers.
            raise IOError(f"Input file:{probe_records} not exists!")
    elif isinstance(probe_records, list):
        _pb_records = [_r for _r in probe_records]
    else:
        raise TypeError(f"Wrong input type of probe_records, should be list of SeqRecords or string of file-path")
    # split probes by region
    _region_2_pbs = quality_check.split_probe_by_gene(_pb_records)
    _gene_2_readout_2_pbs = {}
    # split regions by gene: region ids look like '<gene>-<index>'
    for _reg, _pbs in _region_2_pbs.items():
        _ind = int(_reg.split('-')[-1])
        _gene = _reg[:-len(str(_ind))-1]
        # create if not exist
        if _gene not in _gene_2_readout_2_pbs:
            _gene_2_readout_2_pbs[_gene] = {}
        # append
        _gene_2_readout_2_pbs[_gene][_ind] = _pbs
    # initialize new probe list
    _new_pb_records = []
    # start
    for _gene, _readout_2_pbs in _gene_2_readout_2_pbs.items():
        # initialize
        _gene_readout = gene_2_readout_dict[_gene]
        _swapped_gene_pb_num = 0
        if verbose:
            print(f"- replace {swap_ratio} of {_gene} probes with readout: {_gene_readout.id}")
        for _ind in sorted(_readout_2_pbs):
            _pbs = _readout_2_pbs[_ind]
            if len(_pbs) > region_pb_lower_limit:
                _num_pb_swapped = int(len(_pbs) * swap_ratio)
                # if there is at least one probe to be replaced:
                if _num_pb_swapped > 0:
                    _swapped_gene_pb_num += _num_pb_swapped
                    # select swapped indices, evenly spread across the region
                    _swap_pb_inds = np.arange(0, len(_pbs), int(len(_pbs)/_num_pb_swapped))
                    if verbose:
                        print(f"-- {len(_swap_pb_inds)} probes in gene:{_gene}, reg:{_ind}")
                    for _pb_ind in _swap_pb_inds:
                        _pb = _pbs[_pb_ind]
                        # decompose the probe into target / readouts / primers
                        _target, _readouts, _fp, _rp = \
                            quality_check._parsing_probe_sequence(
                                _pb,
                                primer_len=primer_len, readout_len=readout_len,
                                target_len=target_len, add_rand_gap=add_rand_gap)
                        _info, _rd_names, _pid = \
                            quality_check._parse_probe_name(_pb)
                        # swap readouts
                        _readouts[swap_position] = _gene_readout.seq[-readout_len:]
                        _rd_names[swap_position] = _gene_readout.id + _rd_names[swap_position][-2:]
                        # reassemble
                        _new_pb = _assemble_single_probe(
                            _target, _readouts, _fp, _rp,
                            _primer_len=primer_len, _readout_len=readout_len,
                            _target_len=target_len, _add_rand_gap=add_rand_gap,)
                        _new_name = _assemble_single_probename(
                            _info, _rd_names, _pid, _fp, _rp)
                        _new_pb.id = _new_name
                        _new_pb.name = ''
                        _new_pb.description = ''
                        # replace
                        _pbs[_pb_ind] = _new_pb
                else:
                    if verbose:
                        print(f"-- skip gene:{_gene}, reg:{_ind} with {len(_pbs)} probes")
            else:
                # dont replace if this region don't have enough probes
                if verbose:
                    print(f"-- skip gene:{_gene}, reg:{_ind} with {len(_pbs)} probes")
            # add probes of this region into the new probe list
            _new_pb_records.extend(_pbs)
        if verbose:
            print(f"- {_swapped_gene_pb_num} probes for {_gene} replaced.")
    return _new_pb_records
| 46.402703 | 145 | 0.612907 |
5bc0ff8e3edfe0bb32ac885b8506fd56019920d7 | 24,122 | py | Python | src/snowflake/connector/converter.py | timgraham/snowflake-connector-python | a14ed5776f25fcca9fa13853bbdb171b4f146b76 | [
"Apache-2.0"
] | 311 | 2017-01-06T03:02:10.000Z | 2022-03-28T12:39:10.000Z | src/snowflake/connector/converter.py | timgraham/snowflake-connector-python | a14ed5776f25fcca9fa13853bbdb171b4f146b76 | [
"Apache-2.0"
] | 720 | 2017-01-12T19:05:15.000Z | 2022-03-31T12:24:36.000Z | src/snowflake/connector/converter.py | timgraham/snowflake-connector-python | a14ed5776f25fcca9fa13853bbdb171b4f146b76 | [
"Apache-2.0"
] | 289 | 2017-02-02T00:01:41.000Z | 2022-03-19T06:01:23.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2021 Snowflake Computing Inc. All rights reserved.
#
import binascii
import decimal
import time
from datetime import date, datetime
from datetime import time as dt_t
from datetime import timedelta
from logging import getLogger
from typing import Any, Callable, Dict, Optional, Tuple, Union
import pytz
from .compat import IS_BINARY, IS_NUMERIC
from .errorcode import ER_NOT_SUPPORT_DATA_TYPE
from .errors import ProgrammingError
from .sfbinaryformat import binary_to_python, binary_to_snowflake
from .sfdatetime import sfdatetime_total_seconds_from_timedelta
try:
import numpy
except ImportError:
numpy = None
try:
import tzlocal
except ImportError:
tzlocal = None
# NOTE(review): not referenced in this module; presumably the TZ-offset bit
# width piggybacked in TIMESTAMP_TZ wire values — confirm before removing.
BITS_FOR_TIMEZONE = 14
ZERO_TIMEDELTA = timedelta(seconds=0)
# Unix epoch as a date and as a naive UTC datetime; arithmetic origins below.
ZERO_EPOCH_DATE = date(1970, 1, 1)
ZERO_EPOCH = datetime.utcfromtimestamp(0)
# Padding used to extend sub-second fractions up to 9 digits (nanoseconds).
ZERO_FILL = "000000000"
logger = getLogger(__name__)
# Maps lowercased Python type names to the Snowflake type used when binding.
PYTHON_TO_SNOWFLAKE_TYPE = {
    "int": "FIXED",
    "long": "FIXED",
    "decimal": "FIXED",
    "float": "REAL",
    "str": "TEXT",
    "unicode": "TEXT",
    "bytes": "BINARY",
    "bytearray": "BINARY",
    "bool": "BOOLEAN",
    "bool_": "BOOLEAN",
    "nonetype": "ANY",
    "datetime": "TIMESTAMP_NTZ",
    "sfdatetime": "TIMESTAMP_NTZ",
    "date": "DATE",
    "time": "TIME",
    "struct_time": "TIMESTAMP_NTZ",
    "timedelta": "TIME",
    "list": "TEXT",
    "tuple": "TEXT",
    "int8": "FIXED",
    "int16": "FIXED",
    "int32": "FIXED",
    "int64": "FIXED",
    "uint8": "FIXED",
    "uint16": "FIXED",
    "uint32": "FIXED",
    "uint64": "FIXED",
    "float16": "REAL",
    "float32": "REAL",
    "float64": "REAL",
    "datetime64": "TIMESTAMP_NTZ",
    "quoted_name": "TEXT",
}
# Type alias: a column converter takes a raw value and returns a Python object.
SnowflakeConverterType = Callable[[Any], Any]
def convert_datetime_to_epoch(dt: datetime) -> float:
    """Return *dt* as seconds since the Unix epoch.

    Timezone-aware values are normalized to UTC first; naive values are
    assumed to already be UTC. (Kept instead of datetime.timestamp() for
    Python < 3.3 compatibility, per the original implementation note.)
    """
    if dt.tzinfo is None:
        naive_utc = dt
    else:
        naive_utc = dt.astimezone(pytz.UTC).replace(tzinfo=None)
    return (naive_utc - ZERO_EPOCH).total_seconds()
def _convert_datetime_to_epoch_nanoseconds(dt: datetime) -> str:
    """Render *dt* as a string of integer nanoseconds since the epoch."""
    # "{:f}" prints 6 fractional digits; removing the dot and appending "000"
    # yields a 9-digit (nanosecond) fraction.
    micro_text = "{:f}".format(convert_datetime_to_epoch(dt))
    return micro_text.replace(".", "") + "000"
def _convert_date_to_epoch_milliseconds(dt: datetime) -> str:
    """Render a date as a string of integer milliseconds since the epoch."""
    elapsed_seconds = (dt - ZERO_EPOCH_DATE).total_seconds()
    # Three fractional digits, then drop the dot -> integer milliseconds.
    return "{:.3f}".format(elapsed_seconds).replace(".", "")
def _convert_time_to_epoch_nanoseconds(tm: dt_t) -> str:
return (
str(tm.hour * 3600 + tm.minute * 60 + tm.second)
+ "{:06d}".format(tm.microsecond)
+ "000"
)
def _extract_timestamp(value, ctx):
    """Extracts timestamp from a raw data.

    Returns (epoch_seconds_to_microsecond_precision, fraction_of_nanoseconds)
    for a raw column value whose fractional part has ctx["scale"] digits.
    """
    scale = ctx["scale"]
    # keep at most 6 fractional digits for the float part (microseconds)
    microseconds = float(value[0 : -scale + 6]) if scale > 6 else float(value)
    fraction_of_nanoseconds = _adjust_fraction_of_nanoseconds(
        value, ctx["max_fraction"], scale
    )
    return microseconds, fraction_of_nanoseconds
def _adjust_fraction_of_nanoseconds(value, max_fraction, scale):
    """Return the fractional part of *value* scaled up to 9 digits (nanoseconds)."""
    if scale == 0:
        return 0
    pad = ZERO_FILL[: 9 - scale]
    if value[0] != "-":
        return int(value[-scale:] + pad)
    # Negative epoch values carry the fraction as a complement of max_fraction.
    frac = int(value[-scale:])
    return 0 if frac == 0 else int(str(max_fraction - frac) + pad)
def _generate_tzinfo_from_tzoffset(tzoffset_minutes: int) -> pytz._FixedOffset:
    """Generates tzinfo object from tzoffset (minutes east of UTC)."""
    return pytz.FixedOffset(tzoffset_minutes)
class SnowflakeConverter(object):
    def __init__(self, **kwargs):
        """Create a converter.

        Keyword Args:
            use_numpy: when True (and numpy is importable), prefer the
                numpy-producing column converters.
        """
        # Session parameters (e.g. TIMEZONE), pushed in via set_parameters().
        self._parameters: Dict[str, Union[str, int, bool]] = {}
        self._use_numpy = kwargs.get("use_numpy", False) and numpy is not None
        logger.debug("use_numpy: %s", self._use_numpy)
    def set_parameters(self, new_parameters: Dict) -> None:
        """Replace the whole session-parameter mapping."""
        self._parameters = new_parameters
    def set_parameter(self, param: Any, value: Any) -> None:
        """Set a single session parameter."""
        self._parameters[param] = value
    def get_parameters(self) -> Dict[str, Union[str, int, bool]]:
        """Return the session-parameter mapping."""
        return self._parameters
    def get_parameter(self, param: str) -> Optional[Union[str, int, bool]]:
        """Return one session parameter, or None when unset."""
        return self._parameters.get(param)
    def to_python_method(self, type_name, column) -> "SnowflakeConverterType":
        """FROM Snowflake to Python objects: return a converter callable.

        Looks up `_<type_name>_to_python` (trying the `_numpy_` variant first
        when numpy output was requested). Returns None when the column needs
        no conversion or when no converter exists for the type.
        """
        ctx = column.copy()
        if ctx.get("scale") is not None:
            # Pre-compute helpers used by the fraction-of-nanoseconds logic.
            ctx["max_fraction"] = int(10 ** ctx["scale"])
            ctx["zero_fill"] = "0" * (9 - ctx["scale"])
        converters = ["_{type_name}_to_python".format(type_name=type_name)]
        if self._use_numpy:
            converters.insert(
                0, "_{type_name}_numpy_to_python".format(type_name=type_name)
            )
        for conv in converters:
            try:
                return getattr(self, conv)(ctx)
            except AttributeError:
                pass
        logger.warning("No column converter found for type: %s", type_name)
        return None  # Skip conversion
def _FIXED_to_python(self, ctx):
return int if ctx["scale"] == 0 else decimal.Decimal
def _FIXED_numpy_to_python(self, ctx):
if ctx["scale"]:
return numpy.float64
else:
def conv(value):
try:
return numpy.int64(value)
except OverflowError:
return int(value)
return conv
    def _REAL_to_python(self, _):
        """REAL column converter: builtin float."""
        return float
    def _REAL_numpy_to_python(self, _):
        """REAL column converter producing numpy.float64."""
        return numpy.float64
    def _TEXT_to_python(self, _):
        """TEXT needs no conversion."""
        return None  # skip conv
    def _BINARY_to_python(self, _):
        """BINARY column converter (module-level sfbinaryformat helper)."""
        return binary_to_python
def _DATE_to_python(self, _):
"""Converts DATE to date."""
def conv(value: str) -> date:
try:
return datetime.utcfromtimestamp(int(value) * 86400).date()
except (OSError, ValueError) as e:
logger.debug("Failed to convert: %s", e)
ts = ZERO_EPOCH + timedelta(seconds=int(value) * (24 * 60 * 60))
return date(ts.year, ts.month, ts.day)
return conv
def _DATE_numpy_to_python(self, _):
"""Converts DATE to datetime.
No timezone is attached.
"""
return lambda x: numpy.datetime64(int(x), "D")
    def _TIMESTAMP_TZ_to_python(self, ctx):
        """Converts TIMESTAMP TZ to datetime.

        The timezone offset is piggybacked: the raw value is
        "<epoch_seconds> <offset+1440>", so int(tz) - 1440 yields minutes east.
        """
        scale = ctx["scale"]
        def conv0(encoded_value: str) -> datetime:
            # scale <= 6: float() already captures the full fraction
            value, tz = encoded_value.split()
            tzinfo = _generate_tzinfo_from_tzoffset(int(tz) - 1440)
            return datetime.fromtimestamp(float(value), tz=tzinfo)
        def conv(encoded_value: str) -> datetime:
            # scale > 6: truncate the fraction to microsecond precision first
            value, tz = encoded_value.split()
            microseconds = float(value[0 : -scale + 6])
            tzinfo = _generate_tzinfo_from_tzoffset(int(tz) - 1440)
            return datetime.fromtimestamp(microseconds, tz=tzinfo)
        return conv if scale > 6 else conv0
def _get_session_tz(self):
"""Gets the session timezone or use the local computer's timezone."""
try:
tz = self.get_parameter("TIMEZONE")
if not tz:
tz = "UTC"
return pytz.timezone(tz)
except pytz.exceptions.UnknownTimeZoneError:
logger.warning("converting to tzinfo failed")
if tzlocal is not None:
return tzlocal.get_localzone()
else:
return datetime.timezone.utc
    def _pre_TIMESTAMP_LTZ_to_python(self, value, ctx) -> datetime:
        """Converts TIMESTAMP LTZ to datetime.

        This takes consideration of the session parameter TIMEZONE if available. If not, tzlocal is used.
        NOTE(review): despite the annotation, this returns a 2-tuple of
        (datetime-or-struct_time, fraction_of_nanoseconds) — confirm callers.
        """
        microseconds, fraction_of_nanoseconds = _extract_timestamp(value, ctx)
        tzinfo_value = self._get_session_tz()
        try:
            t0 = ZERO_EPOCH + timedelta(seconds=microseconds)
            t = pytz.utc.localize(t0, is_dst=False).astimezone(tzinfo_value)
            return t, fraction_of_nanoseconds
        except OverflowError:
            # Epoch value outside the datetime range: fall back to struct_time.
            logger.debug(
                "OverflowError in converting from epoch time to "
                "timestamp_ltz: %s(ms). Falling back to use struct_time."
            )
            return time.localtime(microseconds), fraction_of_nanoseconds
    def _TIMESTAMP_LTZ_to_python(self, ctx):
        """TIMESTAMP_LTZ column converter: tz-aware datetime in the session TZ."""
        tzinfo = self._get_session_tz()
        scale = ctx["scale"]
        def conv0(value):
            return datetime.fromtimestamp(float(value), tz=tzinfo)
        def conv(value):
            # scale > 6: truncate the fraction to microsecond precision first
            microseconds = float(value[0 : -scale + 6])
            return datetime.fromtimestamp(microseconds, tz=tzinfo)
        return conv if scale > 6 else conv0
    # Plain TIMESTAMP defaults to the LTZ behavior.
    _TIMESTAMP_to_python = _TIMESTAMP_LTZ_to_python
def _TIMESTAMP_NTZ_to_python(self, ctx):
"""TIMESTAMP NTZ to datetime with no timezone info is attached."""
scale = ctx["scale"]
def conv0(value):
return datetime.utcfromtimestamp(float(value))
def conv(value: str) -> datetime:
microseconds = float(value[0 : -scale + 6])
return datetime.utcfromtimestamp(microseconds)
return conv if scale > 6 else conv0
def _TIMESTAMP_NTZ_numpy_to_python(self, ctx):
"""TIMESTAMP NTZ to datetime64 with no timezone info is attached."""
def conv(value: str) -> "numpy.datetime64":
nanoseconds = int(decimal.Decimal(value).scaleb(9))
return numpy.datetime64(nanoseconds, "ns")
return conv
def _TIME_to_python(self, ctx):
"""TIME to formatted string, SnowflakeDateTime, or datetime.time with no timezone attached."""
scale = ctx["scale"]
def conv0(value):
return datetime.utcfromtimestamp(float(value)).time()
def conv(value: str) -> dt_t:
microseconds = float(value[0 : -scale + 6])
return datetime.utcfromtimestamp(microseconds).time()
return conv if scale > 6 else conv0
    def _VARIANT_to_python(self, _):
        """VARIANT needs no conversion; the raw value passes through."""
        return None  # skip conv
    _OBJECT_to_python = _VARIANT_to_python
    _ARRAY_to_python = _VARIANT_to_python
    def _BOOLEAN_to_python(self, ctx):
        """BOOLEAN column converter: the server sends "1"/"TRUE" for true."""
        return lambda value: value in ("1", "TRUE")
    def snowflake_type(self, value):
        """Returns Snowflake data type for the value. This is used for qmark parameter style."""
        type_name = value.__class__.__name__.lower()
        return PYTHON_TO_SNOWFLAKE_TYPE.get(type_name)
    def to_snowflake_bindings(self, snowflake_type, value):
        """Converts Python data to snowflake data for qmark and numeric parameter style.

        The output is bound in a query in the server side.
        Dispatches on the value's class name to `_<type>_to_snowflake_bindings`.
        """
        type_name = value.__class__.__name__.lower()
        return getattr(self, f"_{type_name}_to_snowflake_bindings")(
            snowflake_type, value
        )
    def _str_to_snowflake_bindings(self, _, value: str) -> str:
        # NOTE: str type is always taken as a text data and never binary
        return str(value)
    # Scalar types that bind as their plain text representation.
    _int_to_snowflake_bindings = _str_to_snowflake_bindings
    _long_to_snowflake_bindings = _str_to_snowflake_bindings
    _float_to_snowflake_bindings = _str_to_snowflake_bindings
    _unicode_to_snowflake_bindings = _str_to_snowflake_bindings
    _decimal_to_snowflake_bindings = _str_to_snowflake_bindings
    def _bytes_to_snowflake_bindings(self, _, value: bytes) -> str:
        """Binary binds as its lowercase hex representation."""
        return binascii.hexlify(value).decode("utf-8")
    _bytearray_to_snowflake_bindings = _bytes_to_snowflake_bindings
    def _bool_to_snowflake_bindings(self, _, value: bool) -> str:
        """Booleans bind as "true"/"false"."""
        return str(value).lower()
    def _nonetype_to_snowflake_bindings(self, *_) -> None:
        """None binds as SQL NULL."""
        return None
    def _date_to_snowflake_bindings(self, _, value: date) -> str:
        # milliseconds
        return _convert_date_to_epoch_milliseconds(value)
    def _time_to_snowflake_bindings(self, _, value: dt_t) -> str:
        # nanoseconds
        return _convert_time_to_epoch_nanoseconds(value)
    def _datetime_to_snowflake_bindings(
        self, snowflake_type: str, value: datetime
    ) -> str:
        """Bind a datetime for a TIMESTAMP_* target type (qmark style).

        Raises ProgrammingError for any other Snowflake type.
        """
        snowflake_type = snowflake_type.upper()
        if snowflake_type == "TIMESTAMP_LTZ":
            _, t = self._derive_offset_timestamp(value)
            return _convert_datetime_to_epoch_nanoseconds(t)
        elif snowflake_type == "TIMESTAMP_NTZ":
            # nanoseconds
            return _convert_datetime_to_epoch_nanoseconds(value)
        elif snowflake_type == "TIMESTAMP_TZ":
            # epoch nanoseconds plus a 4-digit offset (minutes + 1440)
            offset, t = self._derive_offset_timestamp(value, is_utc=True)
            return _convert_datetime_to_epoch_nanoseconds(t) + " {:04d}".format(
                int(offset)
            )
        else:
            raise ProgrammingError(
                msg="Binding datetime object with Snowflake data type {} is "
                "not supported.".format(snowflake_type),
                errno=ER_NOT_SUPPORT_DATA_TYPE,
            )
    def _derive_offset_timestamp(
        self, value: datetime, is_utc: bool = False
    ) -> Tuple[float, datetime]:
        """Derives TZ offset and timestamp from the datetime objects.

        The offset is returned in minutes, biased by +1440 (server convention).
        """
        tzinfo = value.tzinfo
        if tzinfo is None:
            # If no tzinfo is attached, use local timezone.
            tzinfo = self._get_session_tz() if not is_utc else pytz.UTC
            t = pytz.utc.localize(value, is_dst=False).astimezone(tzinfo)
        else:
            # if tzinfo is attached, just convert to epoch time
            # as the server expects it in UTC anyway
            t = value
        offset = tzinfo.utcoffset(t.replace(tzinfo=None)).total_seconds() / 60 + 1440
        return offset, t
    def _struct_time_to_snowflake_bindings(
        self, snowflake_type: str, value: time.struct_time
    ) -> str:
        """Bind a struct_time by converting it to a local datetime first."""
        return self._datetime_to_snowflake_bindings(
            snowflake_type, datetime.fromtimestamp(time.mktime(value))
        )
def _timedelta_to_snowflake_bindings(
self, snowflake_type: str, value: timedelta
) -> str:
snowflake_type = snowflake_type.upper()
if snowflake_type != "TIME":
raise ProgrammingError(
msg="Binding timedelta object with Snowflake data type {} is "
"not supported.".format(snowflake_type),
errno=ER_NOT_SUPPORT_DATA_TYPE,
)
(hours, r) = divmod(value.seconds, 3600)
(mins, secs) = divmod(r, 60)
hours += value.days * 24
return (
str(hours * 3600 + mins * 60 + secs)
+ "{:06d}".format(value.microseconds)
+ "000"
)
    def to_snowflake(self, value: Any) -> Any:
        """Converts Python data to Snowflake data for pyformat/format style.

        The output is bound in a query in the client side.
        Dispatches on the value's class name to `_<type>_to_snowflake`.
        """
        type_name = value.__class__.__name__.lower()
        return getattr(self, f"_{type_name}_to_snowflake")(value)
    def _int_to_snowflake(self, value: int) -> int:
        return int(value)
    def _long_to_snowflake(self, value):
        # NOTE(review): Python 2 remnant; `long` is undefined on Python 3,
        # so this path would raise NameError if ever reached.
        return long(value)  # noqa: F821
    def _float_to_snowflake(self, value: float) -> float:
        return float(value)
    def _str_to_snowflake(self, value: str) -> str:
        return str(value)
    _unicode_to_snowflake = _str_to_snowflake
    def _bytes_to_snowflake(self, value: bytes) -> bytes:
        """Binary values go through the sfbinaryformat helper."""
        return binary_to_snowflake(value)
    _bytearray_to_snowflake = _bytes_to_snowflake
    def _bool_to_snowflake(self, value: bool) -> bool:
        return value
    def _bool__to_snowflake(self, value) -> bool:
        # numpy.bool_ values are coerced to the builtin bool
        return bool(value)
    def _nonetype_to_snowflake(self, _):
        return None
    def _total_seconds_from_timedelta(self, td: timedelta) -> int:
        """Delegate to the sfdatetime helper for total seconds of a timedelta."""
        return sfdatetime_total_seconds_from_timedelta(td)
    def _datetime_to_snowflake(self, value: datetime) -> str:
        """Render a datetime literal for client-side binding.

        Produces 'YYYY-MM-DD HH:MM:SS[.ffffff][+HH:MM|-HH:MM]': the offset is
        included only when tzinfo is attached, the fraction only when
        microsecond is non-zero.
        """
        tzinfo_value = value.tzinfo
        if tzinfo_value:
            if pytz.utc != tzinfo_value:
                try:
                    td = tzinfo_value.utcoffset(value)
                except pytz.exceptions.AmbiguousTimeError:
                    # DST fold: resolve the ambiguity to standard time.
                    td = tzinfo_value.utcoffset(value, is_dst=False)
            else:
                td = ZERO_TIMEDELTA
            sign = "+" if td >= ZERO_TIMEDELTA else "-"
            td_secs = sfdatetime_total_seconds_from_timedelta(td)
            h, m = divmod(abs(td_secs // 60), 60)
            if value.microsecond:
                return (
                    "{year:d}-{month:02d}-{day:02d} "
                    "{hour:02d}:{minute:02d}:{second:02d}."
                    "{microsecond:06d}{sign}{tzh:02d}:{tzm:02d}"
                ).format(
                    year=value.year,
                    month=value.month,
                    day=value.day,
                    hour=value.hour,
                    minute=value.minute,
                    second=value.second,
                    microsecond=value.microsecond,
                    sign=sign,
                    tzh=h,
                    tzm=m,
                )
            return (
                "{year:d}-{month:02d}-{day:02d} "
                "{hour:02d}:{minute:02d}:{second:02d}"
                "{sign}{tzh:02d}:{tzm:02d}"
            ).format(
                year=value.year,
                month=value.month,
                day=value.day,
                hour=value.hour,
                minute=value.minute,
                second=value.second,
                sign=sign,
                tzh=h,
                tzm=m,
            )
        else:
            if value.microsecond:
                return (
                    "{year:d}-{month:02d}-{day:02d} "
                    "{hour:02d}:{minute:02d}:{second:02d}."
                    "{microsecond:06d}"
                ).format(
                    year=value.year,
                    month=value.month,
                    day=value.day,
                    hour=value.hour,
                    minute=value.minute,
                    second=value.second,
                    microsecond=value.microsecond,
                )
            return (
                "{year:d}-{month:02d}-{day:02d} " "{hour:02d}:{minute:02d}:{second:02d}"
            ).format(
                year=value.year,
                month=value.month,
                day=value.day,
                hour=value.hour,
                minute=value.minute,
                second=value.second,
            )
def _date_to_snowflake(self, value: date) -> str:
"""Converts Date object to Snowflake object."""
return "{year:d}-{month:02d}-{day:02d}".format(
year=value.year, month=value.month, day=value.day
)
def _time_to_snowflake(self, value: dt_t) -> str:
if value.microsecond:
return value.strftime("%H:%M:%S.%%06d") % value.microsecond
return value.strftime("%H:%M:%S")
    def _struct_time_to_snowflake(self, value: time.struct_time) -> str:
        """Render a struct_time via _datetime_to_snowflake with an offset
        derived from time.timezone.

        NOTE(review): time.timezone is seconds *west* of UTC and ignores DST —
        confirm the intended sign/DST behavior of this conversion.
        """
        tzinfo_value = _generate_tzinfo_from_tzoffset(time.timezone // 60)
        t = datetime.fromtimestamp(time.mktime(value))
        if pytz.utc != tzinfo_value:
            t += tzinfo_value.utcoffset(t)
        t = t.replace(tzinfo=tzinfo_value)
        return self._datetime_to_snowflake(t)
def _timedelta_to_snowflake(self, value: timedelta) -> str:
(hours, r) = divmod(value.seconds, 3600)
(mins, secs) = divmod(r, 60)
hours += value.days * 24
if value.microseconds:
return ("{hour:02d}:{minute:02d}:{second:02d}." "{microsecond:06d}").format(
hour=hours, minute=mins, second=secs, microsecond=value.microseconds
)
return "{hour:02d}:{minute:02d}:{second:02d}".format(
hour=hours, minute=mins, second=secs
)
def _decimal_to_snowflake(self, value: decimal.Decimal) -> Optional[str]:
if isinstance(value, decimal.Decimal):
return str(value)
return None
    def _list_to_snowflake(self, value: list) -> list:
        """Escape then quote every element, yielding a list of SQL literals."""
        return [
            SnowflakeConverter.quote(v0)
            for v0 in [SnowflakeConverter.escape(v) for v in value]
        ]
    _tuple_to_snowflake = _list_to_snowflake
    def __numpy_to_snowflake(self, value):
        """Numpy scalar values pass through unchanged."""
        return value
    _int8_to_snowflake = __numpy_to_snowflake
    _int16_to_snowflake = __numpy_to_snowflake
    _int32_to_snowflake = __numpy_to_snowflake
    _int64_to_snowflake = __numpy_to_snowflake
    _uint8_to_snowflake = __numpy_to_snowflake
    _uint16_to_snowflake = __numpy_to_snowflake
    _uint32_to_snowflake = __numpy_to_snowflake
    _uint64_to_snowflake = __numpy_to_snowflake
    _float16_to_snowflake = __numpy_to_snowflake
    _float32_to_snowflake = __numpy_to_snowflake
    _float64_to_snowflake = __numpy_to_snowflake
    def _datetime64_to_snowflake(self, value) -> str:
        """numpy.datetime64 renders naive; annotate the literal as UTC."""
        return str(value) + "+00:00"
    def _quoted_name_to_snowflake(self, value) -> str:
        # presumably SQLAlchemy's quoted_name, bound as its plain string form
        # — confirm against the caller
        return str(value)
    def __getattr__(self, item):
        """Raise a descriptive ProgrammingError for unsupported bind types."""
        if item.endswith("_to_snowflake"):
            raise ProgrammingError(
                msg="Binding data in type ({}) is not supported.".format(
                    item[1 : item.find("_to_snowflake")]
                ),
                errno=ER_NOT_SUPPORT_DATA_TYPE,
            )
        elif item.endswith("to_snowflake_bindings"):
            raise ProgrammingError(
                msg="Binding data in type ({}) is not supported.".format(
                    item[1 : item.find("_to_snowflake_bindings")]
                ),
                errno=ER_NOT_SUPPORT_DATA_TYPE,
            )
        raise AttributeError("No method is available: {}".format(item))
    def to_csv_bindings(self, value: Union[Tuple[str, Any], Any]) -> Union[str, None]:
        """Convert value to a string representation in CSV-escaped format to INSERT INTO.

        Accepts either a plain value or a ("TYPE", value) pair; the pair form
        routes TIMESTAMP_TZ/TIME through the client-side converters.
        """
        if isinstance(value, tuple) and len(value) == 2:
            _type, val = value
            if _type in ["TIMESTAMP_TZ", "TIME"]:
                # unspecified timezone is considered utc
                if getattr(val, "tzinfo", 1) is None:
                    val = self.to_snowflake(pytz.utc.localize(val))
                else:
                    val = self.to_snowflake(val)
            else:
                val = self.to_snowflake_bindings(_type, val)
        else:
            if isinstance(value, (dt_t, timedelta)):
                val = self.to_snowflake(value)
            else:
                _type = self.snowflake_type(value)
                val = self.to_snowflake_bindings(_type, value)
        return self.escape_for_csv(val)
    @staticmethod
    def escape(value):
        """Backslash-escape a string for embedding in a single-quoted literal;
        non-strings pass through unchanged."""
        if isinstance(value, list):
            return value
        if value is None or IS_NUMERIC(value) or IS_BINARY(value):
            return value
        res = value
        res = res.replace("\\", "\\\\")
        res = res.replace("\n", "\\n")
        res = res.replace("\r", "\\r")
        res = res.replace("\047", "\134\047")  # single quotes
        return res
    @staticmethod
    def quote(value) -> str:
        """Render an (already-escaped) value as a SQL literal."""
        if isinstance(value, list):
            return ",".join(value)
        if value is None:
            return "NULL"
        elif isinstance(value, bool):
            return "TRUE" if value else "FALSE"
        elif IS_NUMERIC(value):
            return str(repr(value))
        elif IS_BINARY(value):
            # Binary literal syntax
            return "X'{}'".format(value.decode("ascii"))
        return "'{}'".format(value)
@staticmethod
def escape_for_csv(value: str) -> str:
if value is None: # NULL
return ""
elif not value: # Empty string
return '""'
if (
value.find('"') >= 0
or value.find("\n") >= 0
or value.find(",") >= 0
or value.find("\\") >= 0
):
# replace single quote with double quotes
value = value.replace('"', '""')
return f'"{value}"'
else:
return value
| 34.312945 | 105 | 0.597421 |
87cc9d59ea3d5fd2d7a93fc0ddffe7344df9319a | 2,443 | py | Python | present/markdown.py | legendofmiracles/present | 0c53ec66f81611e9e811741e0be7c927fd996d24 | [
"Apache-2.0"
] | null | null | null | present/markdown.py | legendofmiracles/present | 0c53ec66f81611e9e811741e0be7c927fd996d24 | [
"Apache-2.0"
] | null | null | null | present/markdown.py | legendofmiracles/present | 0c53ec66f81611e9e811741e0be7c927fd996d24 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import warnings
import yaml
from mistune import markdown
from .slide import (
Slide,
Heading,
Paragraph,
Text,
Strong,
Codespan,
Emphasis,
Link,
List,
Image,
Codio,
BlockCode,
BlockHtml,
BlockQuote,
)
class Markdown(object):
    """Parse and traverse through the markdown abstract syntax tree."""

    def __init__(self, filename):
        # Image paths inside the document are resolved relative to the
        # directory containing the markdown file.
        self.filename = filename
        self.dirname = os.path.dirname(os.path.realpath(filename))

    def parse(self):
        """Parse the markdown file into a list of :class:`Slide` objects.

        Slides are separated by thematic breaks (``---``).  Paragraph images
        are split out as standalone elements; an image whose alt text is
        ``codio`` is loaded as a codio YAML definition instead.

        :returns: list of ``Slide`` instances, in document order.
        """
        with open(self.filename, "r") as f:
            text = f.read()

        slides = []
        ast = markdown(text, renderer="ast")

        sliden = 0
        buffer = []
        for i, obj in enumerate(ast):
            if obj["type"] == "newline":
                continue

            # A horizontal rule closes the current slide (if it has content).
            if obj["type"] == "thematic_break" and buffer:
                slides.append(Slide(elements=buffer))
                sliden += 1
                buffer = []
                continue

            try:
                if obj["type"] == "paragraph":
                    self._parse_paragraph(obj, buffer)
                else:
                    # Map the AST node type to an element class by name,
                    # e.g. "block_code" -> BlockCode.
                    element_name = obj["type"].title().replace("_", "")
                    # NOTE(review): eval() on a constructed name — input comes
                    # from mistune's fixed node-type vocabulary, but a lookup
                    # table of the imported element classes would be safer.
                    Element = eval(element_name)
                    buffer.append(Element(obj=obj))
            except NameError:
                warnings.warn(
                    f"(Slide {sliden + 1}) {element_name} is not supported")

            if i == len(ast) - 1:
                slides.append(Slide(elements=buffer))
                sliden += 1

        return slides

    def _parse_paragraph(self, obj, buffer):
        """Split a paragraph node into image/codio elements plus residual text."""
        images = [c for c in obj["children"] if c["type"] == "image"]
        not_images = [c for c in obj["children"] if c["type"] != "image"]

        for image in images:
            image["src"] = os.path.join(
                self.dirname, os.path.expanduser(image["src"]))

            if image["alt"] == "codio":
                with open(image["src"], "r") as f:
                    codio = yaml.load(f, Loader=yaml.Loader)
                buffer.append(Codio(obj=codio))
            else:
                buffer.append(Image(obj=image))

        # Keep only the non-image children in the paragraph itself.
        obj["children"] = not_images
        buffer.append(Paragraph(obj=obj))
| 28.08046 | 76 | 0.453131 |
1446a4a212bc53ce7bb208360a29ee5a4002119d | 10,664 | py | Python | python/paddle/fluid/contrib/mixed_precision/decorator.py | JesseyXujin/Paddle | 843bdbaae1253d6dc964e6beddce239a88add113 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/contrib/mixed_precision/decorator.py | JesseyXujin/Paddle | 843bdbaae1253d6dc964e6beddce239a88add113 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/contrib/mixed_precision/decorator.py | JesseyXujin/Paddle | 843bdbaae1253d6dc964e6beddce239a88add113 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ... import default_main_program
from ... import default_startup_program
from ... import layers
from ... import unique_name
from . import fp16_utils
from .fp16_utils import update_loss_scaling, rewrite_program
from .fp16_utils import update_role_var_grad
from .fp16_lists import AutoMixedPrecisionLists
__all__ = ["decorate"]
class OptimizerWithMixedPrecison(object):
    """
    Optimizer with mixed-precision (MP) training. This is a wrapper of a common
    optimizer, plus the support of mixed-precision pretraining. The object
    of this class almost has the same behavior as the common optimizer, with the
    methods `minimize()`, `backward()`, `apply_gradients()` implemented.
    Additionally, it enables the MP training automatically, i.e, the creation
    and maintenance of master parameters, scaling of loss, etc.
    Args:
        optimizer (Optimizer): A common Optimizer object.
        amp_lists (AutoMixedPrecisionLists): An AutoMixedPrecisionLists object.
        init_loss_scaling (float): The initial loss scaling factor.
        use_dynamic_loss_scaling (bool): Whether to use dynamic loss scaling.
        incr_every_n_steps(int): Increases loss scaling every n consecutive
                                 steps with finite gradients.
        decr_every_n_nan_or_inf(int): Decreases loss scaling every n
                                      accumulated steps with nan or
                                      inf gradients.
        incr_ratio(float): The multiplier to use when increasing the loss
                           scaling.
        decr_ratio(float): The less-than-one-multiplier to use when decreasing
                           the loss scaling.
    """
    def __init__(self, optimizer, amp_lists, init_loss_scaling,
                 use_dynamic_loss_scaling, incr_every_n_steps,
                 decr_every_n_nan_or_inf, incr_ratio, decr_ratio):
        self._optimizer = optimizer
        self._amp_lists = amp_lists
        self._param_grads = None
        self._train_program = default_main_program()
        self._startup_prog = default_startup_program()
        # The loss-scaling factor lives in the program as a persistable
        # global variable so its value survives across iterations.
        self._loss_scaling = layers.create_global_var(
            name=unique_name.generate("loss_scaling"),
            shape=[1],
            value=init_loss_scaling,
            dtype='float32',
            persistable=True)
        self._use_dynamic_loss_scaling = use_dynamic_loss_scaling
        if self._use_dynamic_loss_scaling:
            self._incr_every_n_steps = layers.fill_constant(
                shape=[1], dtype='int32', value=incr_every_n_steps)
            self._decr_every_n_nan_or_inf = layers.fill_constant(
                shape=[1], dtype='int32', value=decr_every_n_nan_or_inf)
            self._incr_ratio = incr_ratio
            self._decr_ratio = decr_ratio
            # Step counters backing the dynamic loss-scaling policy.
            self._num_good_steps = layers.create_global_var(
                name=unique_name.generate("num_good_steps"),
                shape=[1],
                value=0,
                dtype='int32',
                persistable=True)
            self._num_bad_steps = layers.create_global_var(
                name=unique_name.generate("num_bad_steps"),
                shape=[1],
                value=0,
                dtype='int32',
                persistable=True)
        # Ensure the data type of learning rate vars is float32 (same as the
        # master parameter dtype)
        if isinstance(optimizer._learning_rate, float):
            optimizer._learning_rate_map[default_main_program()] = \
                layers.create_global_var(
                    name=unique_name.generate("learning_rate"),
                    shape=[1],
                    value=float(optimizer._learning_rate),
                    dtype='float32',
                    persistable=True)
    def get_loss_scaling(self):
        """Return the real-time loss scaling factor.
        """
        return self._loss_scaling
    def backward(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None,
                 callbacks=None):
        """
        Backward propogation or auto differentiation for gradients' computation.
        Args:
            loss (Variable): The loss Variable to minimize.
            startup_program (Program|None): The startup Program for initializing
                                       parameters in `parameter_list`.
            parameter_list (list|None): A list of Variables to update.
            no_grad_set (set|None): A set of Variables should be ignored.
            callbacks (list|None): A list of callables to run when appending
                                   backward operator for one parameter.
        Returns:
            A list of (param, grad), which is a tuple of a parameter and its
            gradient respectively, and the scaled loss.
        """
        rewrite_program(self._train_program, self._amp_lists)
        # Scale the loss so small fp16 gradients do not underflow; the
        # gradients are un-scaled again below before being returned.
        scaled_loss = loss * self._loss_scaling
        self._params_grads = self._optimizer.backward(
            scaled_loss, startup_program, parameter_list, no_grad_set,
            callbacks)
        update_role_var_grad(self._train_program, self._params_grads)
        scaled_params_grads = []
        for p, g in self._params_grads:
            with self._train_program._optimized_guard([p, g]):
                scaled_g = g / self._loss_scaling
                scaled_params_grads.append([p, scaled_g])
        return scaled_params_grads
    def apply_gradients(self, scaled_params_grads):
        """
        Check scaled gradients to determine whether to update loss scaling and update
        parameters by their scaled gradients,
        Args:
            scaled_params_grads (list): A list of params and scaled grads.
        Returns:
            A list of optimize operators.
        """
        if self._use_dynamic_loss_scaling:
            # Sum every gradient; a nan/inf anywhere makes the total non-finite.
            grads = [layers.reduce_sum(g) for [_, g] in scaled_params_grads]
            all_grads = layers.concat(grads)
            all_grads_sum = layers.reduce_sum(all_grads)
            is_overall_finite = layers.isfinite(all_grads_sum)
            update_loss_scaling(is_overall_finite, self._loss_scaling,
                                self._num_good_steps, self._num_bad_steps,
                                self._incr_every_n_steps,
                                self._decr_every_n_nan_or_inf, self._incr_ratio,
                                self._decr_ratio)
            # apply_gradient append all ops in global block, thus we shouldn't
            # apply gradient in the switch branch.
            with layers.Switch() as switch:
                with switch.case(is_overall_finite):
                    pass
                with switch.default():
                    # Non-finite gradients: zero them out so this step is a no-op.
                    for _, g in scaled_params_grads:
                        layers.assign(layers.zeros_like(g), g)
        optimize_ops = self._optimizer.apply_gradients(scaled_params_grads)
        return optimize_ops
    def minimize(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None):
        """
        Perform optimization by minimizing the given loss.
        Args:
            loss (Variable): The loss Variable.
            startup_program (Program): startup_program for initializing parameters
                in `parameter_list`.
            parameter_list (list): list of Variables to update.
            no_grad_set (set|None): set of Variables should be ignored.
        Returns:
            The scaled loss by scaling factor, the list of optimize ops, and a
            list of scaled parameters and gradients.
        """
        scaled_params_grads = self.backward(
            loss,
            startup_program=startup_program,
            parameter_list=parameter_list,
            no_grad_set=no_grad_set)
        optimize_ops = self.apply_gradients(scaled_params_grads)
        return optimize_ops, scaled_params_grads
def decorate(optimizer,
             amp_lists=None,
             init_loss_scaling=1.0,
             incr_every_n_steps=1000,
             decr_every_n_nan_or_inf=2,
             incr_ratio=2.0,
             decr_ratio=0.8,
             use_dynamic_loss_scaling=True):
    """
    Decorate the given optimizer to adapt to the mixed-precision training.
    Args:
        optimizer(Optimizer): A common Optimizer.
        amp_lists (AutoMixedPrecisionLists): An AutoMixedPrecisionLists object;
                                             a default one is built when None.
        init_loss_scaling(float): The initial loss scaling factor.
        incr_every_n_steps(int): Increases loss scaling every n consecutive
                                 steps with finite gradients.
        decr_every_n_nan_or_inf(int): Decreases loss scaling every n
                                      accumulated steps with nan or
                                      inf gradients.
        incr_ratio(float): The multiplier to use when increasing the loss
                           scaling.
        decr_ratio(float): The less-than-one-multiplier to use when decreasing
                           the loss scaling.
        use_dynamic_loss_scaling(bool): Whether to use dynamic loss scaling.
    Returns:
        An optimizer acting like a normal one but with mixed-precision training
        enabled.
    Examples:
        .. code-block:: python
            loss = network()
            optimizer = fluid.optimizer.Adam(learning_rate=0.001)
            mp_optimizer = fluid.contrib.mixed_precision.decorate(
                      optimizer=optimizer, init_loss_scaling=8.0)
            ops, param_grads = mp_optimizer.minimize(loss)
            scaled_loss = mp_optimizer.get_loss_scaling()
    """
    effective_amp_lists = amp_lists if amp_lists is not None \
        else AutoMixedPrecisionLists()
    return OptimizerWithMixedPrecison(
        optimizer,
        effective_amp_lists,
        init_loss_scaling,
        use_dynamic_loss_scaling,
        incr_every_n_steps,
        decr_every_n_nan_or_inf,
        incr_ratio,
        decr_ratio)
| 41.494163 | 86 | 0.62078 |
a99c1966896d513e88240a085f2e39cb0af773b1 | 633 | py | Python | app/spoonderful/main.py | jsilke/spoonderful | 46b5a36bb190f9af23885d214fa490e5c869433c | [
"MIT"
] | 2 | 2022-02-25T02:28:23.000Z | 2022-03-04T17:17:58.000Z | app/spoonderful/main.py | jsilke/spoonderful | 46b5a36bb190f9af23885d214fa490e5c869433c | [
"MIT"
] | null | null | null | app/spoonderful/main.py | jsilke/spoonderful | 46b5a36bb190f9af23885d214fa490e5c869433c | [
"MIT"
] | null | null | null | from fastapi import FastAPI
from .data import models
from .data.database import engine
from .routes import (
register,
user,
login,
vote,
recommendation,
)
# Create tables in the database from the ORM models if they do not exist.
models.Base.metadata.create_all(bind=engine)
app = FastAPI()
# Register every route module on the application.
app.include_router(register.router)
app.include_router(user.router)
app.include_router(login.router)
app.include_router(vote.router)
app.include_router(recommendation.router)
@app.get("/")
def root():
    """Landing endpoint that points users at the interactive API docs."""
    welcome = (
        "Visit https://savefood.xyz/docs to view documentation "
        "and try out the API!"
    )
    return {"message": welcome}
| 21.1 | 95 | 0.731438 |
b6fc1a519f501031fb7f6526c8795fc31874af73 | 1,133 | py | Python | p354h/max_envelopes.py | l33tdaima/l33tdaima | 0a7a9573dc6b79e22dcb54357493ebaaf5e0aa90 | [
"MIT"
] | 1 | 2020-02-20T12:04:46.000Z | 2020-02-20T12:04:46.000Z | p354h/max_envelopes.py | l33tdaima/l33tdaima | 0a7a9573dc6b79e22dcb54357493ebaaf5e0aa90 | [
"MIT"
] | null | null | null | p354h/max_envelopes.py | l33tdaima/l33tdaima | 0a7a9573dc6b79e22dcb54357493ebaaf5e0aa90 | [
"MIT"
] | null | null | null | from typing import List
from bisect import bisect_left
class Solution:
    def maxEnvelopesN2(self, envelopes: List[List[int]]) -> int:
        """Longest chain of strictly nested envelopes, O(n^2) DP.

        Sorting by width ascending and height descending reduces the problem
        to the longest strictly increasing subsequence of heights.
        """
        heights = [
            pair[1] for pair in sorted(envelopes, key=lambda p: (p[0], -p[1]))
        ]
        best_ending_at = [1] * len(heights)
        longest = 1
        for idx in range(1, len(heights)):
            candidates = [
                best_ending_at[j]
                for j in range(idx)
                if heights[j] < heights[idx]
            ]
            best_ending_at[idx] = (max(candidates) if candidates else 0) + 1
            longest = max(longest, best_ending_at[idx])
        return longest

    def maxEnvelopesNLogN(self, envelopes: List[List[int]]) -> int:
        """Same answer via patience sorting (LIS), O(n log n)."""
        heights = [
            pair[1] for pair in sorted(envelopes, key=lambda p: (p[0], -p[1]))
        ]
        tails = []
        for height in heights:
            pos = bisect_left(tails, height)
            if pos == len(tails):
                tails.append(height)
            else:
                tails[pos] = height
        return len(tails)
# TESTS
cases = [
    ([[5, 4], [6, 4], [6, 7], [2, 3]], 3),
    ([[1, 1], [1, 1], [1, 1]], 1),
]
for envelopes, expected in cases:
    solver = Solution()
    result = solver.maxEnvelopesN2(envelopes)
    print("The maximum # of envelopes from", envelopes, "->", result)
    assert result == expected
    # Both implementations must agree with the expected answer.
    assert expected == solver.maxEnvelopesNLogN(envelopes)
| 30.621622 | 88 | 0.529568 |
34e0a81c4fc429a0d0c3b77d2758b24e8205f52e | 11,543 | py | Python | spanner/google/cloud/spanner/client.py | alercunha/google-cloud-python | b28a4eb667ae08c3f4dcf9af891ed4931884989c | [
"Apache-2.0"
] | 1 | 2021-01-04T11:40:17.000Z | 2021-01-04T11:40:17.000Z | spanner/google/cloud/spanner/client.py | jujudubai/google-cloud-python | b28a4eb667ae08c3f4dcf9af891ed4931884989c | [
"Apache-2.0"
] | null | null | null | spanner/google/cloud/spanner/client.py | jujudubai/google-cloud-python | b28a4eb667ae08c3f4dcf9af891ed4931884989c | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parent client for calling the Cloud Spanner API.
This is the base from which all interactions with the API occur.
In the hierarchy of API concepts
* a :class:`~google.cloud.spanner.client.Client` owns an
:class:`~google.cloud.spanner.instance.Instance`
* a :class:`~google.cloud.spanner.instance.Instance` owns a
:class:`~google.cloud.spanner.database.Database`
"""
from google.api.core import page_iterator
from google.gax import INITIAL_PAGE
# pylint: disable=line-too-long
from google.cloud.gapic.spanner_admin_database.v1.database_admin_client import ( # noqa
DatabaseAdminClient)
from google.cloud.gapic.spanner_admin_instance.v1.instance_admin_client import ( # noqa
InstanceAdminClient)
# pylint: enable=line-too-long
from google.cloud._http import DEFAULT_USER_AGENT
from google.cloud.client import ClientWithProject
from google.cloud.spanner import __version__
from google.cloud.spanner._helpers import _options_with_prefix
from google.cloud.spanner.instance import DEFAULT_NODE_COUNT
from google.cloud.spanner.instance import Instance
# OAuth2 scope required for all Cloud Spanner admin operations.
SPANNER_ADMIN_SCOPE = 'https://www.googleapis.com/auth/spanner.admin'
class InstanceConfig(object):
    """Named configurations for Spanner instances.

    :type name: str
    :param name: ID of the instance configuration

    :type display_name: str
    :param display_name: Name of the instance configuration
    """

    def __init__(self, name, display_name):
        self.name = name
        self.display_name = display_name

    @classmethod
    def from_pb(cls, config_pb):
        """Construct an instance config from the equivalent protobuf.

        :type config_pb:
            :class:`~google.spanner.v1.spanner_instance_admin_pb2.InstanceConfig`
        :param config_pb: the protobuf to parse

        :rtype: :class:`InstanceConfig`
        :returns: an instance of this class
        """
        return cls(name=config_pb.name, display_name=config_pb.display_name)
class Client(ClientWithProject):
    """Client for interacting with Cloud Spanner API.
    .. note::
        Since the Cloud Spanner API requires the gRPC transport, no
        ``_http`` argument is accepted by this class.
    :type project: :class:`str` or :func:`unicode <unicode>`
    :param project: (Optional) The ID of the project which owns the
                    instances, tables and data. If not provided, will
                    attempt to determine from the environment.
    :type credentials:
        :class:`OAuth2Credentials <oauth2client.client.OAuth2Credentials>` or
        :data:`NoneType <types.NoneType>`
    :param credentials: (Optional) The OAuth2 Credentials to use for this
                        client. If not provided, defaults to the Google
                        Application Default Credentials.
    :type user_agent: str
    :param user_agent: (Optional) The user agent to be used with API request.
                       Defaults to :const:`DEFAULT_USER_AGENT`.
    """
    # Lazily-created GAPIC admin clients (see the properties below).
    _instance_admin_api = None
    _database_admin_api = None
    _SET_PROJECT = True  # Used by from_service_account_json()
    SCOPE = (SPANNER_ADMIN_SCOPE,)
    """The scopes required for Google Cloud Spanner."""
    def __init__(self, project=None, credentials=None,
                 user_agent=DEFAULT_USER_AGENT):
        # NOTE: This API has no use for the _http argument, but sending it
        # will have no impact since the _http() @property only lazily
        # creates a working HTTP object.
        super(Client, self).__init__(
            project=project, credentials=credentials, _http=None)
        self.user_agent = user_agent
    @property
    def credentials(self):
        """Getter for client's credentials.
        :rtype:
            :class:`OAuth2Credentials <oauth2client.client.OAuth2Credentials>`
        :returns: The credentials stored on the client.
        """
        return self._credentials
    @property
    def project_name(self):
        """Project name to be used with Spanner APIs.
        .. note::
            This property will not change if ``project`` does not, but the
            return value is not cached.
        The project name is of the form
            ``"projects/{project}"``
        :rtype: str
        :returns: The project name to be used with the Cloud Spanner Admin
                  API RPC service.
        """
        return 'projects/' + self.project
    @property
    def instance_admin_api(self):
        """Helper for session-related API calls."""
        # Create the GAPIC client on first access and cache it.
        if self._instance_admin_api is None:
            self._instance_admin_api = InstanceAdminClient(
                credentials=self.credentials,
                lib_name='gccl',
                lib_version=__version__,
            )
        return self._instance_admin_api
    @property
    def database_admin_api(self):
        """Helper for session-related API calls."""
        # Create the GAPIC client on first access and cache it.
        if self._database_admin_api is None:
            self._database_admin_api = DatabaseAdminClient(
                credentials=self.credentials,
                lib_name='gccl',
                lib_version=__version__,
            )
        return self._database_admin_api
    def copy(self):
        """Make a copy of this client.
        Copies the local data stored as simple types but does not copy the
        current state of any open connections with the Cloud Bigtable API.
        :rtype: :class:`.Client`
        :returns: A copy of the current client.
        """
        return self.__class__(
            project=self.project,
            credentials=self._credentials,
            user_agent=self.user_agent,
        )
    def list_instance_configs(self, page_size=None, page_token=None):
        """List available instance configurations for the client's project.
        .. _RPC docs: https://cloud.google.com/spanner/docs/reference/rpc/\
                      google.spanner.admin.instance.v1#google.spanner.admin.\
                      instance.v1.InstanceAdmin.ListInstanceConfigs
        See `RPC docs`_.
        :type page_size: int
        :param page_size: (Optional) Maximum number of results to return.
        :type page_token: str
        :param page_token: (Optional) Token for fetching next page of results.
        :rtype: :class:`~google.api.core.page_iterator.Iterator`
        :returns:
            Iterator of
            :class:`~google.cloud.spanner.instance.InstanceConfig`
            resources within the client's project.
        """
        if page_token is None:
            page_token = INITIAL_PAGE
        options = _options_with_prefix(self.project_name,
                                       page_token=page_token)
        path = 'projects/%s' % (self.project,)
        page_iter = self.instance_admin_api.list_instance_configs(
            path, page_size=page_size, options=options)
        return page_iterator._GAXIterator(
            self, page_iter, _item_to_instance_config)
    def instance(self, instance_id,
                 configuration_name=None,
                 display_name=None,
                 node_count=DEFAULT_NODE_COUNT):
        """Factory to create a instance associated with this client.
        :type instance_id: str
        :param instance_id: The ID of the instance.
        :type configuration_name: string
        :param configuration_name:
            (Optional) Name of the instance configuration used to set up the
            instance's cluster, in the form:
            ``projects/<project>/instanceConfigs/<config>``.
            **Required** for instances which do not yet exist.
        :type display_name: str
        :param display_name: (Optional) The display name for the instance in
                             the Cloud Console UI. (Must be between 4 and 30
                             characters.) If this value is not set in the
                             constructor, will fall back to the instance ID.
        :type node_count: int
        :param node_count: (Optional) The number of nodes in the instance's
                            cluster; used to set up the instance's cluster.
        :rtype: :class:`~google.cloud.spanner.instance.Instance`
        :returns: an instance owned by this client.
        """
        # Purely local factory: no API request is made here.
        return Instance(
            instance_id, self, configuration_name, node_count, display_name)
    def list_instances(self, filter_='', page_size=None, page_token=None):
        """List instances for the client's project.
        See
        https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.InstanceAdmin.ListInstances
        :type filter_: string
        :param filter_: (Optional) Filter to select instances listed.  See
                        the ``ListInstancesRequest`` docs above for examples.
        :type page_size: int
        :param page_size: (Optional) Maximum number of results to return.
        :type page_token: str
        :param page_token: (Optional) Token for fetching next page of results.
        :rtype: :class:`~google.api.core.page_iterator.Iterator`
        :returns:
            Iterator of :class:`~google.cloud.spanner.instance.Instance`
            resources within the client's project.
        """
        if page_token is None:
            page_token = INITIAL_PAGE
        options = _options_with_prefix(self.project_name,
                                       page_token=page_token)
        path = 'projects/%s' % (self.project,)
        page_iter = self.instance_admin_api.list_instances(
            path, filter_=filter_, page_size=page_size, options=options)
        return page_iterator._GAXIterator(
            self, page_iter, _item_to_instance)
# Item-transformer callback for page_iterator._GAXIterator.
def _item_to_instance_config(
        iterator, config_pb):  # pylint: disable=unused-argument
    """Convert an instance config protobuf to the native object.
    :type iterator: :class:`~google.api.core.page_iterator.Iterator`
    :param iterator: The iterator that is currently in use.
    :type config_pb:
        :class:`~google.spanner.admin.instance.v1.InstanceConfig`
    :param config_pb: An instance config returned from the API.
    :rtype: :class:`~google.cloud.spanner.instance.InstanceConfig`
    :returns: The next instance config in the page.
    """
    return InstanceConfig.from_pb(config_pb)
# Item-transformer callback for page_iterator._GAXIterator; the iterator is
# used here to recover the owning client.
def _item_to_instance(iterator, instance_pb):
    """Convert an instance protobuf to the native object.
    :type iterator: :class:`~google.api.core.page_iterator.Iterator`
    :param iterator: The iterator that is currently in use.
    :type instance_pb: :class:`~google.spanner.admin.instance.v1.Instance`
    :param instance_pb: An instance returned from the API.
    :rtype: :class:`~google.cloud.spanner.instance.Instance`
    :returns: The next instance in the page.
    """
    return Instance.from_pb(instance_pb, iterator.client)
| 37.599349 | 148 | 0.659101 |
1aa2e5368ac4dd79b41873f12231768b6630e444 | 2,194 | py | Python | molecule/default/tests/test_default.py | bodsch/ansible-json-exporter | d5c47d90b280c980f5520d7fe30c82c8906dcff1 | [
"Apache-2.0"
] | null | null | null | molecule/default/tests/test_default.py | bodsch/ansible-json-exporter | d5c47d90b280c980f5520d7fe30c82c8906dcff1 | [
"Apache-2.0"
] | null | null | null | molecule/default/tests/test_default.py | bodsch/ansible-json-exporter | d5c47d90b280c980f5520d7fe30c82c8906dcff1 | [
"Apache-2.0"
] | null | null | null |
from ansible.parsing.dataloader import DataLoader
from ansible.template import Templar
import json
import pytest
import os
import testinfra.utils.ansible_runner
# Run these tests against every host in the molecule inventory.
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def pp_json(json_thing, sort=True, indents=2):
    """Pretty-print *json_thing* to stdout.

    Accepts either a JSON document as a string (parsed first) or any
    JSON-serializable Python object.

    :param json_thing: JSON string or serializable object.
    :param sort: sort object keys in the output.
    :param indents: indentation width.
    :returns: ``None`` — output goes to stdout only.
    """
    # isinstance (rather than an exact type() check) also accepts str
    # subclasses, which previously fell through and were dumped as plain
    # strings instead of being parsed.
    if isinstance(json_thing, str):
        json_thing = json.loads(json_thing)
    print(json.dumps(json_thing, sort_keys=sort, indent=indents))
    return None
def base_directory():
    """Return ``(role_dir, molecule_dir)`` relative to the working directory.

    When invoked from the role root (detected by the presence of a
    ``group_vars`` entry) the role is two levels up; otherwise the molecule
    scenario directory is derived from ``MOLECULE_SCENARIO_NAME``.
    """
    cwd = os.getcwd()
    if 'group_vars' in os.listdir(cwd):
        return "../..", "."
    scenario = os.environ.get('MOLECULE_SCENARIO_NAME')
    return ".", "molecule/{}".format(scenario)
@pytest.fixture()
def get_vars(host):
    """Merge role defaults, role vars and molecule group vars, then render
    any Jinja2 templating they contain.
    """
    base_dir, molecule_dir = base_directory()

    def load(path, name):
        # host.ansible("include_vars", ...) returns the loaded file under
        # "ansible_facts" keyed by the registered name.
        args = "file={} name={}".format(path, name)
        return host.ansible("include_vars", args).get("ansible_facts").get(name)

    merged = load("{}/defaults/main.yml".format(base_dir), "role_defaults")
    merged.update(load("{}/vars/main.yml".format(base_dir), "role_vars"))
    merged.update(
        load("{}/group_vars/all/vars.yml".format(molecule_dir), "test_vars"))

    templar = Templar(loader=DataLoader(), variables=merged)
    return templar.template(merged, fail_on_undefined=False)
@pytest.mark.parametrize("files", [
    "/usr/bin/json_exporter",
    "/etc/json_exporter/config.yml",
    "/etc/systemd/system/multi-user.target.wants/json_exporter.service"
])
def test_files(host, files):
    """Every deployed artefact must exist on the target host."""
    assert host.file(files).exists
@pytest.mark.parametrize("sockets", [
    "tcp://127.0.0.1:7979",
])
def test_socket(host, sockets):
    """The exporter must be listening on its configured address."""
    assert host.socket(sockets).is_listening
| 28.493506 | 105 | 0.712397 |
e1f3ab1285dfd98990e78603fa98094d56e02546 | 14,485 | py | Python | rqalpha/__main__.py | HaidongHe/rqalpha | bb824178425909e051c456f6062a6c5bdc816421 | [
"Apache-2.0"
] | 1 | 2020-11-10T05:44:39.000Z | 2020-11-10T05:44:39.000Z | rqalpha/__main__.py | HaidongHe/rqalpha | bb824178425909e051c456f6062a6c5bdc816421 | [
"Apache-2.0"
] | null | null | null | rqalpha/__main__.py | HaidongHe/rqalpha | bb824178425909e051c456f6062a6c5bdc816421 | [
"Apache-2.0"
] | 1 | 2020-03-05T05:06:45.000Z | 2020-03-05T05:06:45.000Z | # -*- coding: utf-8 -*-
# 版权所有 2019 深圳米筐科技有限公司(下称“米筐科技”)
#
# 除非遵守当前许可,否则不得使用本软件。
#
# * 非商业用途(非商业用途指个人出于非商业目的使用本软件,或者高校、研究所等非营利机构出于教育、科研等目的使用本软件):
# 遵守 Apache License 2.0(下称“Apache 2.0 许可”),您可以在以下位置获得 Apache 2.0 许可的副本:http://www.apache.org/licenses/LICENSE-2.0。
# 除非法律有要求或以书面形式达成协议,否则本软件分发时需保持当前许可“原样”不变,且不得附加任何条件。
#
# * 商业用途(商业用途指个人出于任何商业目的使用本软件,或者法人或其他组织出于任何目的使用本软件):
# 未经米筐科技授权,任何个人不得出于任何商业目的使用本软件(包括但不限于向第三方提供、销售、出租、出借、转让本软件、本软件的衍生产品、引用或借鉴了本软件功能或源代码的产品或服务),任何法人或其他组织不得出于任何目的使用本软件,否则米筐科技有权追究相应的知识产权侵权责任。
# 在此前提下,对本软件的使用同样需要遵守 Apache 2.0 许可,Apache 2.0 许可与本许可冲突之处,以本许可为准。
# 详细的授权流程,请联系 public@ricequant.com 获取。
import errno
import sys
import os
import shutil
import six
import click
from importlib import import_module
from rqalpha.utils.click_helper import Date
from rqalpha.utils.config import parse_config, dump_config
# Default option values shared by the click sub-commands; the "run" entry is
# currently empty (presumably kept as an extension point for overrides).
CONTEXT_SETTINGS = {
    'default_map': {
        'run': {
        }
    }
}
@click.group(context_settings=CONTEXT_SETTINGS)
@click.option('-v', '--verbose', count=True)
@click.help_option('-h', '--help')
@click.pass_context
def cli(ctx, verbose):
    # Root command group: stash the verbosity count for sub-commands to read.
    ctx.obj["VERBOSE"] = verbose
def inject_mod_commands():
    """Import every enabled mod so the CLI commands they register become
    available on the ``rqalpha`` entry point.

    Import failures are deliberately ignored: a broken or uninstalled mod
    must not prevent the rest of the CLI from working.
    """
    from rqalpha.utils.config import get_mod_conf
    from rqalpha.mod import SYSTEM_MOD_LIST
    from rqalpha.utils.package_helper import import_mod
    mod_config = get_mod_conf()

    for mod_name, config in six.iteritems(mod_config['mod']):
        # A mod may override its import name via the "lib" key.
        if 'lib' in config:
            lib_name = config["lib"]
        else:
            lib_name = "rqalpha_mod_{}".format(mod_name)
        if not config['enabled']:
            continue
        try:
            if mod_name in SYSTEM_MOD_LIST:
                # inject system mod
                import_mod("rqalpha.mod." + lib_name)
            else:
                # inject third part mod
                import_mod(lib_name)
        except Exception:
            # Best-effort: skip mods that fail to import.
            pass
def entry_point():
    # Console-script entry: make mod-provided commands available, then
    # dispatch to the click group with a fresh context object.
    inject_mod_commands()
    cli(obj={})
@cli.command()
@click.option('-d', '--data-bundle-path', default=os.path.expanduser('~/.rqalpha'), type=click.Path(file_okay=False))
@click.option('--locale', 'locale', type=click.STRING, default="zh_Hans_CN")
def update_bundle(data_bundle_path, locale):
    """
    Sync Data Bundle
    """
    # Imported lazily, presumably to keep CLI startup fast — confirm.
    import rqalpha.utils.bundle_helper
    rqalpha.utils.bundle_helper.update_bundle(data_bundle_path, locale)
@cli.command()
@click.help_option('-h', '--help')
# -- Base Configuration
# Option destinations use the "<section>__<key>" convention so parse_config()
# can fold them into the nested config structure.
@click.option('-d', '--data-bundle-path', 'base__data_bundle_path', type=click.Path(exists=True))
@click.option('-f', '--strategy-file', 'base__strategy_file', type=click.Path(exists=True))
@click.option('-s', '--start-date', 'base__start_date', type=Date())
@click.option('-e', '--end-date', 'base__end_date', type=Date())
@click.option('-mm', '--margin-multiplier', 'base__margin_multiplier', type=click.FLOAT)
@click.option('-a', '--account', 'base__accounts', nargs=2, multiple=True, help="set account type with starting cash")
@click.option('--position', 'base__init_positions', type=click.STRING, help="set init position")
@click.option('-fq', '--frequency', 'base__frequency', type=click.Choice(['1d', '1m', 'tick']))
@click.option('-rt', '--run-type', 'base__run_type', type=click.Choice(['b', 'p', 'r']), default="b")
@click.option('-rp', '--round-price', 'base__round_price', is_flag=True)
@click.option('-mk', '--market', 'base__market', type=click.Choice(['cn', 'hk']), default=None)
@click.option('--source-code', 'base__source_code')
# -- Extra Configuration
@click.option('-l', '--log-level', 'extra__log_level', type=click.Choice(['verbose', 'debug', 'info', 'error', 'none']))
@click.option('--disable-user-system-log', 'extra__user_system_log_disabled', is_flag=True, help='disable user system log stdout')
@click.option('--disable-user-log', 'extra__user_log_disabled', is_flag=True, help='disable user log stdout')
@click.option('--logger', 'extra__logger', nargs=2, multiple=True, help='config logger, e.g. --logger system_log debug')
@click.option('--locale', 'extra__locale', type=click.Choice(['cn', 'en']), default="cn")
@click.option('--extra-vars', 'extra__context_vars', type=click.STRING, help="override context vars")
@click.option("--enable-profiler", "extra__enable_profiler", is_flag=True, help="add line profiler to profile your strategy")
@click.option('--config', 'config_path', type=click.STRING, help="config file path")
# -- Mod Configuration
@click.option('-mc', '--mod-config', 'mod_configs', nargs=2, multiple=True, type=click.STRING, help="mod extra config")
# for compatible
@click.option('--resume', 'base__resume_mode', is_flag=True, help="[DEPRECATED] --resume is deprecated")
def run(**kwargs):
    """
    Start to run a strategy
    """
    # An explicit --config path overrides the default config lookup.
    config_path = kwargs.get('config_path', None)
    if config_path is not None:
        config_path = os.path.abspath(config_path)
        kwargs.pop('config_path')
    if not kwargs.get('base__securities', None):
        kwargs.pop('base__securities', None)
    from rqalpha import main
    source_code = kwargs.get("base__source_code")
    cfg = parse_config(kwargs, config_path=config_path, click_type=True, source_code=source_code)
    source_code = cfg.base.source_code
    results = main.run(cfg, source_code=source_code)
    # store results into ipython when running in ipython
    from rqalpha.utils import is_run_from_ipython
    if results is not None and is_run_from_ipython():
        import IPython
        from rqalpha.utils import RqAttrDict
        ipy = IPython.get_ipython()
        report = results.get("sys_analyser", {})
        ipy.user_global_ns["results"] = results
        ipy.user_global_ns["report"] = RqAttrDict(report)
    if results is None:
        # Propagate the failure to the shell.
        sys.exit(1)
@cli.command()
@click.option('-d', '--directory', default="./", type=click.Path(), required=True)
def examples(directory):
    """
    Generate example strategies to target folder
    """
    # The bundled examples live next to this module; copy them into
    # <directory>/examples.
    source_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "examples")
    try:
        shutil.copytree(source_dir, os.path.join(directory, "examples"))
    except OSError as e:
        # A pre-existing target folder is the one expected failure and gets a
        # friendly message; every other OS error (permissions, disk full, ...)
        # must not be swallowed silently — re-raise it.
        if e.errno != errno.EEXIST:
            raise
        six.print_("Folder examples already exists.")
@cli.command()
@click.option('-v', '--verbose', is_flag=True)
def version(**kwargs):
    """
    Output Version Info
    """
    # Imported lazily so that `rqalpha version` starts without pulling in the
    # whole framework at module import time.
    from rqalpha import version_info
    six.print_("Current Version: ", version_info)
@cli.command()
@click.option('-d', '--directory', default="./", type=click.Path(), required=True)
def generate_config(directory):
    """
    Generate default config file
    """
    # The template config.yml ships alongside this module; copy it into the
    # requested directory and report the absolute destination path.
    package_dir = os.path.dirname(os.path.realpath(__file__))
    source_path = os.path.join(package_dir, "config.yml")
    destination = os.path.abspath(os.path.join(directory, 'config.yml'))
    shutil.copy(source_path, destination)
    six.print_("Config file has been generated in", destination)
# For Mod Cli: `rqalpha mod <cmd> [params...]` management sub-command.
@cli.command(context_settings=dict(
    ignore_unknown_options=True,
))
@click.help_option('-h', '--help')
@click.argument('cmd', nargs=1, type=click.Choice(['list', 'enable', 'disable', 'install', 'uninstall']))
@click.argument('params', nargs=-1)
def mod(cmd, params):
    """
    Mod management command
    rqalpha mod list \n
    rqalpha mod install xxx \n
    rqalpha mod uninstall xxx \n
    rqalpha mod enable xxx \n
    rqalpha mod disable xxx \n
    """
    # The handlers below are nested so the final `locals()[cmd](params)` line
    # can dispatch on `cmd`; click's Choice() already guarantees `cmd` is one
    # of the five handler names.
    # NOTE: this nested function intentionally shadows the builtin `list`
    # within the body of `mod` only.
    def list(params):
        """
        List all mod configuration
        """
        from tabulate import tabulate
        from rqalpha.utils.config import get_mod_conf
        mod_config = get_mod_conf()
        table = []
        # NOTE: loop variable `mod` shadows the enclosing `mod` command
        # function; harmless since the function object is not used again here.
        for mod_name, mod in six.iteritems(mod_config['mod']):
            table.append([
                mod_name,
                ("enabled" if mod['enabled'] else "disabled")
            ])
        headers = [
            "name",
            "status"
        ]
        six.print_(tabulate(table, headers=headers, tablefmt="psql"))
        six.print_("You can use `rqalpha mod list/install/uninstall/enable/disable` to manage your mods")
    def install(params):
        """
        Install third-party Mod
        """
        # pip >= 10 moved its implementation under pip._internal; fall back to
        # the pre-10 layout on ImportError.
        try:
            from pip._internal import main as pip_main
            from pip._internal.commands.install import InstallCommand
        except ImportError:
            from pip import main as pip_main
            from pip.commands.install import InstallCommand
        # Newer pip requires explicit name/summary for command objects and
        # exposes the entry point as `pip_main.main` rather than a callable.
        if hasattr(InstallCommand, "name"):
            install_command = InstallCommand()
        else:
            install_command = InstallCommand(name='install', summary='Install packages.')
            pip_main = pip_main.main
        params = [param for param in params]
        # Reuse pip's own option parser to separate flags from package names.
        options, mod_list = install_command.parse_args(params)
        mod_list = [mod_name for mod_name in mod_list if mod_name != "."]
        params = ["install"] + params
        for mod_name in mod_list:
            mod_name_index = params.index(mod_name)
            if mod_name.startswith("rqalpha_mod_sys_"):
                six.print_('System Mod can not be installed or uninstalled')
                return
            # Accept both the short name ("xxx") and the full package name
            # ("rqalpha_mod_xxx"); pip always receives the full name.
            if "rqalpha_mod_" in mod_name:
                lib_name = mod_name
            else:
                lib_name = "rqalpha_mod_" + mod_name
            params[mod_name_index] = lib_name
        # Install Mod
        installed_result = pip_main(params)
        # Export config
        from rqalpha.utils.config import load_yaml, user_mod_conf_path
        user_conf = load_yaml(user_mod_conf_path()) if os.path.exists(user_mod_conf_path()) else {'mod': {}}
        if installed_result == 0:
            # pip exit status 0 means the install succeeded
            if len(mod_list) == 0:
                # The verbatim bare string below documents (in Chinese) the
                # `rqalpha mod install -e .` local-development workflow: the
                # command must run in the custom mod's root directory, a
                # setup.py must exist there, and the package must follow the
                # rqalpha-mod-* / rqalpha_mod_* naming convention.
                """
                主要是方便 `pip install -e .` 这种方式 本地调试 Mod 使用,需要满足以下条件:
                1. `rqalpha mod install -e .` 命令是在对应 自定义 Mod 的根目录下
                2. 该 Mod 必须包含 `setup.py` 文件(否则也不能正常的 `pip install -e .` 来安装)
                3. 该 Mod 包名必须按照 RQAlpha 的规范来命名,具体规则如下
                    * 必须以 `rqalpha-mod-` 来开头,比如 `rqalpha-mod-xxx-yyy`
                    * 对应import的库名必须要 `rqalpha_mod_` 来开头,并且需要和包名后半部分一致,但是 `-` 需要替换为 `_`, 比如 `rqalpha_mod_xxx_yyy`
                """
                mod_name = _detect_package_name_from_dir(params)
                mod_name = mod_name.replace("-", "_").replace("rqalpha_mod_", "")
                mod_list.append(mod_name)
            for mod_name in mod_list:
                if "rqalpha_mod_" in mod_name:
                    mod_name = mod_name.replace("rqalpha_mod_", "")
                # Strip any version pin ("mod==1.0") before using as conf key.
                if "==" in mod_name:
                    mod_name = mod_name.split('==')[0]
                # Freshly installed mods start disabled; `rqalpha mod enable`
                # flips the flag.
                user_conf['mod'][mod_name] = {}
                user_conf['mod'][mod_name]['enabled'] = False
            dump_config(user_mod_conf_path(), user_conf)
        return installed_result
    def uninstall(params):
        """
        Uninstall third-party Mod
        """
        try:
            from pip._internal import main as pip_main
            from pip._internal.commands.uninstall import UninstallCommand
        except ImportError:
            # be compatible with pip < 10.0
            from pip import main as pip_main
            from pip.commands.uninstall import UninstallCommand
        # Same pip-version compatibility handling as in install() above.
        if hasattr(UninstallCommand, "name"):
            uninstall_command = UninstallCommand()
        else:
            uninstall_command = UninstallCommand(name='uninstall', summary='Uninstall packages.')
            pip_main = pip_main.main
        params = [param for param in params]
        options, mod_list = uninstall_command.parse_args(params)
        params = ["uninstall"] + params
        for mod_name in mod_list:
            mod_name_index = params.index(mod_name)
            if mod_name.startswith("rqalpha_mod_sys_"):
                six.print_('System Mod can not be installed or uninstalled')
                return
            if "rqalpha_mod_" in mod_name:
                lib_name = mod_name
            else:
                lib_name = "rqalpha_mod_" + mod_name
            params[mod_name_index] = lib_name
        # Uninstall Mod
        uninstalled_result = pip_main(params)
        # Remove Mod Config
        from rqalpha.utils.config import user_mod_conf_path, load_yaml
        user_conf = load_yaml(user_mod_conf_path()) if os.path.exists(user_mod_conf_path()) else {'mod': {}}
        for mod_name in mod_list:
            if "rqalpha_mod_" in mod_name:
                mod_name = mod_name.replace("rqalpha_mod_", "")
            # NOTE(review): raises KeyError if the mod was never recorded in
            # the user config — confirm whether that is intended.
            del user_conf['mod'][mod_name]
        dump_config(user_mod_conf_path(), user_conf)
        return uninstalled_result
    def enable(params):
        """
        enable mod
        """
        mod_name = params[0]
        if "rqalpha_mod_" in mod_name:
            mod_name = mod_name.replace("rqalpha_mod_", "")
        # check whether is installed
        module_name = "rqalpha_mod_" + mod_name
        if module_name.startswith("rqalpha_mod_sys_"):
            module_name = "rqalpha.mod." + module_name
        try:
            import_module(module_name)
        except ImportError:
            # Not importable -> try to install it first; bail out on failure.
            installed_result = install([module_name])
            if installed_result != 0:
                return
        from rqalpha.utils.config import user_mod_conf_path, load_yaml
        user_conf = load_yaml(user_mod_conf_path()) if os.path.exists(user_mod_conf_path()) else {'mod': {}}
        try:
            user_conf['mod'][mod_name]['enabled'] = True
        except KeyError:
            user_conf['mod'][mod_name] = {'enabled': True}
        dump_config(user_mod_conf_path(), user_conf)
    def disable(params):
        """
        disable mod
        """
        mod_name = params[0]
        if "rqalpha_mod_" in mod_name:
            mod_name = mod_name.replace("rqalpha_mod_", "")
        from rqalpha.utils.config import user_mod_conf_path, load_yaml
        user_conf = load_yaml(user_mod_conf_path()) if os.path.exists(user_mod_conf_path()) else {'mod': {}}
        try:
            user_conf['mod'][mod_name]['enabled'] = False
        except KeyError:
            user_conf['mod'][mod_name] = {'enabled': False}
        dump_config(user_mod_conf_path(), user_conf)
    # Dispatch to the nested handler whose name matches `cmd`; the nested
    # defs are entries of this function's local namespace snapshot.
    locals()[cmd](params)
def _detect_package_name_from_dir(params):
setup_path = os.path.join(os.path.abspath(params[-1]), 'setup.py')
if not os.path.exists(setup_path):
return None
return os.path.split(os.path.dirname(setup_path))[1]
if __name__ == '__main__':
    # Running this module directly launches the CLI; `entry_point` is
    # presumably the click entry defined earlier in this file — confirm.
    entry_point()
| 36.032338 | 144 | 0.633207 |
a0598ecf3d82fd733959829849b04bed77687e1f | 6,646 | py | Python | ppocr/modeling/backbones/kie_unet_sdmgr.py | pop123123123/PaddleOCR | c7692276ab675a687941debbd86ada179cd790a1 | [
"Apache-2.0"
] | null | null | null | ppocr/modeling/backbones/kie_unet_sdmgr.py | pop123123123/PaddleOCR | c7692276ab675a687941debbd86ada179cd790a1 | [
"Apache-2.0"
] | null | null | null | ppocr/modeling/backbones/kie_unet_sdmgr.py | pop123123123/PaddleOCR | c7692276ab675a687941debbd86ada179cd790a1 | [
"Apache-2.0"
] | null | null | null | # copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
from paddle import nn
import numpy as np
import cv2
__all__ = ["Kie_backbone"]
class Encoder(nn.Layer):
    """Down-sampling stage: two 3x3 conv + BatchNorm(ReLU) layers, then a
    stride-2 max pool.

    ``forward`` returns both the pre-pool feature map (later reused as a
    skip connection) and the pooled map (input to the next stage).
    """
    def __init__(self, num_channels, num_filters):
        super(Encoder, self).__init__()
        self.conv1 = nn.Conv2D(
            num_channels, num_filters,
            kernel_size=3, stride=1, padding=1, bias_attr=False)
        self.bn1 = nn.BatchNorm(num_filters, act='relu')
        self.conv2 = nn.Conv2D(
            num_filters, num_filters,
            kernel_size=3, stride=1, padding=1, bias_attr=False)
        self.bn2 = nn.BatchNorm(num_filters, act='relu')
        self.pool = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)
    def forward(self, inputs):
        features = self.bn1(self.conv1(inputs))
        features = self.bn2(self.conv2(features))
        return features, self.pool(features)
class Decoder(nn.Layer):
    """Up-sampling stage: 1x1 conv + 2x bilinear upsample of the deep map,
    channel-concat with the matching encoder map, then two 3x3 conv +
    BatchNorm(ReLU) layers.
    """
    def __init__(self, num_channels, num_filters):
        super(Decoder, self).__init__()
        self.conv1 = nn.Conv2D(
            num_channels, num_filters,
            kernel_size=3, stride=1, padding=1, bias_attr=False)
        self.bn1 = nn.BatchNorm(num_filters, act='relu')
        self.conv2 = nn.Conv2D(
            num_filters, num_filters,
            kernel_size=3, stride=1, padding=1, bias_attr=False)
        self.bn2 = nn.BatchNorm(num_filters, act='relu')
        self.conv0 = nn.Conv2D(
            num_channels, num_filters,
            kernel_size=1, stride=1, padding=0, bias_attr=False)
        self.bn0 = nn.BatchNorm(num_filters, act='relu')
    def forward(self, inputs_prev, inputs):
        # Project + upsample the deeper feature map, ...
        upsampled = self.bn0(self.conv0(inputs))
        upsampled = paddle.nn.functional.interpolate(
            upsampled, scale_factor=2, mode='bilinear', align_corners=False)
        # ... fuse it with the shallower map, then refine.
        merged = paddle.concat([inputs_prev, upsampled], axis=1)
        merged = self.bn1(self.conv1(merged))
        return self.bn2(self.conv2(merged))
class UNet(nn.Layer):
    """Small 5-level UNet that maps a 3-channel image to a 16-channel
    feature map."""
    def __init__(self):
        super(UNet, self).__init__()
        # Encoder path: 3 -> 16 -> 32 -> 64 -> 128 -> 256 channels.
        self.down1 = Encoder(num_channels=3, num_filters=16)
        self.down2 = Encoder(num_channels=16, num_filters=32)
        self.down3 = Encoder(num_channels=32, num_filters=64)
        self.down4 = Encoder(num_channels=64, num_filters=128)
        self.down5 = Encoder(num_channels=128, num_filters=256)
        # Decoder path mirrors the encoder back down to 16 channels.
        self.up1 = Decoder(32, 16)
        self.up2 = Decoder(64, 32)
        self.up3 = Decoder(128, 64)
        self.up4 = Decoder(256, 128)
        self.out_channels = 16
    def forward(self, inputs):
        # NOTE(review): stage 1 feeds its *pre-pool* map into stage 2, while
        # later stages consume pooled maps — reproduced exactly as the
        # original wiring; confirm against the reference implementation.
        skip1, _ = self.down1(inputs)
        _, pooled2 = self.down2(skip1)
        _, pooled3 = self.down3(pooled2)
        _, pooled4 = self.down4(pooled3)
        _, pooled5 = self.down5(pooled4)
        out = self.up4(pooled4, pooled5)
        out = self.up3(pooled3, out)
        out = self.up2(pooled2, out)
        out = self.up1(skip1, out)
        return out
class Kie_backbone(nn.Layer):
    """SDMGR backbone for key information extraction.

    Runs a small UNet over the input image and pools one 16-dim visual
    feature vector per text box via RoI Align, returning those features
    together with the relation and text tensors (passed through unchanged).
    """
    def __init__(self, in_channels, **kwargs):
        # `in_channels` is accepted for interface compatibility but unused:
        # the UNet below is hard-wired to 3 input channels.
        super(Kie_backbone, self).__init__()
        self.out_channels = 16
        self.img_feat = UNet()
        # 7x7 pool collapses each RoI-aligned 7x7 patch to a single vector.
        self.maxpool = nn.MaxPool2D(kernel_size=7)
    def bbox2roi(self, bbox_list):
        # Concatenate per-image bbox tensors into one RoI tensor and record
        # how many boxes belong to each image (required by roi_align).
        rois_list = []
        rois_num = []
        for img_id, bboxes in enumerate(bbox_list):
            rois_num.append(bboxes.shape[0])
            rois_list.append(bboxes)
        rois = paddle.concat(rois_list, 0)
        rois_num = paddle.to_tensor(rois_num, dtype='int32')
        return rois, rois_num
    def pre_process(self, img, relations, texts, gt_bboxes, tag, img_size):
        """Trim batch padding and re-wrap numpy-backed inputs as tensors.

        `tag[i]` appears to hold (num_valid_boxes, recorder_len) per sample
        — TODO confirm against the dataset side — and is used to slice the
        padded relations/texts/gt_bboxes arrays back to their true sizes.
        """
        img, relations, texts, gt_bboxes, tag, img_size = img.numpy(
        ), relations.numpy(), texts.numpy(), gt_bboxes.numpy(), tag.numpy(
        ).tolist(), img_size.numpy()
        temp_relations, temp_texts, temp_gt_bboxes = [], [], []
        # Crop the batch to the largest real (unpadded) height/width; the
        # [:, :, :h, :w] slice implies an NCHW layout.
        h, w = int(np.max(img_size[:, 0])), int(np.max(img_size[:, 1]))
        img = paddle.to_tensor(img[:, :, :h, :w])
        batch = len(tag)
        for i in range(batch):
            num, recoder_len = tag[i][0], tag[i][1]
            temp_relations.append(
                paddle.to_tensor(
                    relations[i, :num, :num, :], dtype='float32'))
            temp_texts.append(
                paddle.to_tensor(
                    texts[i, :num, :recoder_len], dtype='float32'))
            temp_gt_bboxes.append(
                paddle.to_tensor(
                    gt_bboxes[i, :num, ...], dtype='float32'))
        return img, temp_relations, temp_texts, temp_gt_bboxes
    def _preprocess(self, inputs):
        # Unpack a raw dataloader batch; mirrors the training branch of
        # forward() but keeps only the first sample's relations/texts.
        img = inputs[0]
        relations, texts, gt_bboxes, tag, img_size = inputs[1], inputs[
            2], inputs[3], inputs[5], inputs[-1]
        img, relations, texts, gt_bboxes = self.pre_process(
            img, relations, texts, gt_bboxes, tag, img_size)
        boxes, rois_num = self.bbox2roi(gt_bboxes)
        return img, relations[0], texts[0], boxes, rois_num
    def forward(self, inputs):
        if self.training:
            # Training: inputs is a raw padded batch that must be trimmed.
            img = inputs[0]
            relations, texts, gt_bboxes, tag, img_size = inputs[1], inputs[
                2], inputs[3], inputs[5], inputs[-1]
            img, relations, texts, gt_bboxes = self.pre_process(
                img, relations, texts, gt_bboxes, tag, img_size)
            boxes, rois_num = self.bbox2roi(gt_bboxes)
        else:
            # Inference: inputs has already been unpacked (see _preprocess).
            img, relations, texts, boxes, rois_num = inputs
        x = self.img_feat(img)
        # One 7x7 aligned patch per box from the UNet feature map, then the
        # 7x7 max pool reduces each patch to a single 16-dim vector.
        feats = paddle.fluid.layers.roi_align(
            x,
            boxes,
            spatial_scale=1.0,
            pooled_height=7,
            pooled_width=7,
            rois_num=rois_num)
        feats = self.maxpool(feats).squeeze(-1).squeeze(-1)
        return [relations, texts, feats]
| 32.738916 | 75 | 0.577189 |
ca897f2412005125d933155ced4b2c25ecaea403 | 78,297 | py | Python | iot/google/cloud/iot_v1/proto/resources_pb2.py | q-logic/google-cloud-python | a65065c89c059bc564bbdd79288a48970907c399 | [
"Apache-2.0"
] | null | null | null | iot/google/cloud/iot_v1/proto/resources_pb2.py | q-logic/google-cloud-python | a65065c89c059bc564bbdd79288a48970907c399 | [
"Apache-2.0"
] | 40 | 2019-07-16T10:04:48.000Z | 2020-01-20T09:04:59.000Z | iot/google/cloud/iot_v1/proto/resources_pb2.py | q-logic/google-cloud-python | a65065c89c059bc564bbdd79288a48970907c399 | [
"Apache-2.0"
] | 2 | 2019-07-18T00:05:31.000Z | 2019-11-27T14:17:22.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/iot_v1/proto/resources.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/iot_v1/proto/resources.proto",
package="google.cloud.iot.v1",
syntax="proto3",
serialized_options=_b(
"\n\027com.google.cloud.iot.v1B\016ResourcesProtoP\001Z6google.golang.org/genproto/googleapis/cloud/iot/v1;iot\370\001\001"
),
serialized_pb=_b(
'\n)google/cloud/iot_v1/proto/resources.proto\x12\x13google.cloud.iot.v1\x1a\x19google/api/resource.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\xb0\x07\n\x06\x44\x65vice\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0e\n\x06num_id\x18\x03 \x01(\x04\x12:\n\x0b\x63redentials\x18\x0c \x03(\x0b\x32%.google.cloud.iot.v1.DeviceCredential\x12\x37\n\x13last_heartbeat_time\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x33\n\x0flast_event_time\x18\x08 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x33\n\x0flast_state_time\x18\x14 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x38\n\x14last_config_ack_time\x18\x0e \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x39\n\x15last_config_send_time\x18\x12 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07\x62locked\x18\x13 \x01(\x08\x12\x33\n\x0flast_error_time\x18\n \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12-\n\x11last_error_status\x18\x0b \x01(\x0b\x32\x12.google.rpc.Status\x12\x31\n\x06\x63onfig\x18\r \x01(\x0b\x32!.google.cloud.iot.v1.DeviceConfig\x12/\n\x05state\x18\x10 \x01(\x0b\x32 .google.cloud.iot.v1.DeviceState\x12\x30\n\tlog_level\x18\x15 \x01(\x0e\x32\x1d.google.cloud.iot.v1.LogLevel\x12;\n\x08metadata\x18\x11 \x03(\x0b\x32).google.cloud.iot.v1.Device.MetadataEntry\x12:\n\x0egateway_config\x18\x18 \x01(\x0b\x32".google.cloud.iot.v1.GatewayConfig\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01:s\xea\x41p\n\x1e\x63loudiot.googleapis.com/Device\x12Nprojects/{project}/locations/{location}/registries/{registry}/devices/{device}"\xee\x01\n\rGatewayConfig\x12\x36\n\x0cgateway_type\x18\x01 \x01(\x0e\x32 .google.cloud.iot.v1.GatewayType\x12\x43\n\x13gateway_auth_method\x18\x02 \x01(\x0e\x32&.google.cloud.iot.v1.GatewayAuthMethod\x12 \n\x18last_accessed_gateway_id\x18\x03 \x01(\t\x12>\n\x1alast_accessed_gateway_time\x18\x04 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp"\x8f\x04\n\x0e\x44\x65viceRegistry\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12P\n\x1a\x65vent_notification_configs\x18\n \x03(\x0b\x32,.google.cloud.iot.v1.EventNotificationConfig\x12O\n\x19state_notification_config\x18\x07 \x01(\x0b\x32,.google.cloud.iot.v1.StateNotificationConfig\x12\x34\n\x0bmqtt_config\x18\x04 \x01(\x0b\x32\x1f.google.cloud.iot.v1.MqttConfig\x12\x34\n\x0bhttp_config\x18\t \x01(\x0b\x32\x1f.google.cloud.iot.v1.HttpConfig\x12\x30\n\tlog_level\x18\x0b \x01(\x0e\x32\x1d.google.cloud.iot.v1.LogLevel\x12<\n\x0b\x63redentials\x18\x08 \x03(\x0b\x32\'.google.cloud.iot.v1.RegistryCredential:d\xea\x41\x61\n cloudiot.googleapis.com/Registry\x12=projects/{project}/locations/{location}/registries/{registry}"H\n\nMqttConfig\x12:\n\x12mqtt_enabled_state\x18\x01 \x01(\x0e\x32\x1e.google.cloud.iot.v1.MqttState"H\n\nHttpConfig\x12:\n\x12http_enabled_state\x18\x01 \x01(\x0e\x32\x1e.google.cloud.iot.v1.HttpState"O\n\x17\x45ventNotificationConfig\x12\x19\n\x11subfolder_matches\x18\x02 \x01(\t\x12\x19\n\x11pubsub_topic_name\x18\x01 \x01(\t"4\n\x17StateNotificationConfig\x12\x19\n\x11pubsub_topic_name\x18\x01 \x01(\t"o\n\x12RegistryCredential\x12K\n\x16public_key_certificate\x18\x01 \x01(\x0b\x32).google.cloud.iot.v1.PublicKeyCertificateH\x00\x42\x0c\n\ncredential"\xd0\x01\n\x16X509CertificateDetails\x12\x0e\n\x06issuer\x18\x01 \x01(\t\x12\x0f\n\x07subject\x18\x02 \x01(\t\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x65xpiry_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1b\n\x13signature_algorithm\x18\x05 \x01(\t\x12\x17\n\x0fpublic_key_type\x18\x06 \x01(\t"\xaf\x01\n\x14PublicKeyCertificate\x12?\n\x06\x66ormat\x18\x01 \x01(\x0e\x32/.google.cloud.iot.v1.PublicKeyCertificateFormat\x12\x13\n\x0b\x63\x65rtificate\x18\x02 \x01(\t\x12\x41\n\x0cx509_details\x18\x03 
\x01(\x0b\x32+.google.cloud.iot.v1.X509CertificateDetails"\x95\x01\n\x10\x44\x65viceCredential\x12>\n\npublic_key\x18\x02 \x01(\x0b\x32(.google.cloud.iot.v1.PublicKeyCredentialH\x00\x12\x33\n\x0f\x65xpiration_time\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x0c\n\ncredential"X\n\x13PublicKeyCredential\x12\x34\n\x06\x66ormat\x18\x01 \x01(\x0e\x32$.google.cloud.iot.v1.PublicKeyFormat\x12\x0b\n\x03key\x18\x02 \x01(\t"\xa0\x01\n\x0c\x44\x65viceConfig\x12\x0f\n\x07version\x18\x01 \x01(\x03\x12\x35\n\x11\x63loud_update_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x33\n\x0f\x64\x65vice_ack_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x13\n\x0b\x62inary_data\x18\x04 \x01(\x0c"S\n\x0b\x44\x65viceState\x12/\n\x0bupdate_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x13\n\x0b\x62inary_data\x18\x02 \x01(\x0c*L\n\tMqttState\x12\x1a\n\x16MQTT_STATE_UNSPECIFIED\x10\x00\x12\x10\n\x0cMQTT_ENABLED\x10\x01\x12\x11\n\rMQTT_DISABLED\x10\x02*L\n\tHttpState\x12\x1a\n\x16HTTP_STATE_UNSPECIFIED\x10\x00\x12\x10\n\x0cHTTP_ENABLED\x10\x01\x12\x11\n\rHTTP_DISABLED\x10\x02*O\n\x08LogLevel\x12\x19\n\x15LOG_LEVEL_UNSPECIFIED\x10\x00\x12\x08\n\x04NONE\x10\n\x12\t\n\x05\x45RROR\x10\x14\x12\x08\n\x04INFO\x10\x1e\x12\t\n\x05\x44\x45\x42UG\x10(*I\n\x0bGatewayType\x12\x1c\n\x18GATEWAY_TYPE_UNSPECIFIED\x10\x00\x12\x0b\n\x07GATEWAY\x10\x01\x12\x0f\n\x0bNON_GATEWAY\x10\x02*\x91\x01\n\x11GatewayAuthMethod\x12#\n\x1fGATEWAY_AUTH_METHOD_UNSPECIFIED\x10\x00\x12\x14\n\x10\x41SSOCIATION_ONLY\x10\x01\x12\x1a\n\x16\x44\x45VICE_AUTH_TOKEN_ONLY\x10\x02\x12%\n!ASSOCIATION_AND_DEVICE_AUTH_TOKEN\x10\x03*e\n\x1aPublicKeyCertificateFormat\x12-\n)UNSPECIFIED_PUBLIC_KEY_CERTIFICATE_FORMAT\x10\x00\x12\x18\n\x14X509_CERTIFICATE_PEM\x10\x01*v\n\x0fPublicKeyFormat\x12!\n\x1dUNSPECIFIED_PUBLIC_KEY_FORMAT\x10\x00\x12\x0b\n\x07RSA_PEM\x10\x03\x12\x10\n\x0cRSA_X509_PEM\x10\x01\x12\r\n\tES256_PEM\x10\x02\x12\x12\n\x0e\x45S256_X509_PEM\x10\x04\x42\x66\n\x17\x63om.google.cloud.io
t.v1B\x0eResourcesProtoP\x01Z6google.golang.org/genproto/googleapis/cloud/iot/v1;iot\xf8\x01\x01\x62\x06proto3'
),
dependencies=[
google_dot_api_dot_resource__pb2.DESCRIPTOR,
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
google_dot_rpc_dot_status__pb2.DESCRIPTOR,
],
)
_MQTTSTATE = _descriptor.EnumDescriptor(
name="MqttState",
full_name="google.cloud.iot.v1.MqttState",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="MQTT_STATE_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="MQTT_ENABLED", index=1, number=1, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="MQTT_DISABLED", index=2, number=2, serialized_options=None, type=None
),
],
containing_type=None,
serialized_options=None,
serialized_start=3144,
serialized_end=3220,
)
_sym_db.RegisterEnumDescriptor(_MQTTSTATE)
MqttState = enum_type_wrapper.EnumTypeWrapper(_MQTTSTATE)
_HTTPSTATE = _descriptor.EnumDescriptor(
name="HttpState",
full_name="google.cloud.iot.v1.HttpState",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="HTTP_STATE_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="HTTP_ENABLED", index=1, number=1, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="HTTP_DISABLED", index=2, number=2, serialized_options=None, type=None
),
],
containing_type=None,
serialized_options=None,
serialized_start=3222,
serialized_end=3298,
)
_sym_db.RegisterEnumDescriptor(_HTTPSTATE)
HttpState = enum_type_wrapper.EnumTypeWrapper(_HTTPSTATE)
_LOGLEVEL = _descriptor.EnumDescriptor(
name="LogLevel",
full_name="google.cloud.iot.v1.LogLevel",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="LOG_LEVEL_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="NONE", index=1, number=10, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ERROR", index=2, number=20, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="INFO", index=3, number=30, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="DEBUG", index=4, number=40, serialized_options=None, type=None
),
],
containing_type=None,
serialized_options=None,
serialized_start=3300,
serialized_end=3379,
)
_sym_db.RegisterEnumDescriptor(_LOGLEVEL)
LogLevel = enum_type_wrapper.EnumTypeWrapper(_LOGLEVEL)
_GATEWAYTYPE = _descriptor.EnumDescriptor(
name="GatewayType",
full_name="google.cloud.iot.v1.GatewayType",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="GATEWAY_TYPE_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="GATEWAY", index=1, number=1, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="NON_GATEWAY", index=2, number=2, serialized_options=None, type=None
),
],
containing_type=None,
serialized_options=None,
serialized_start=3381,
serialized_end=3454,
)
_sym_db.RegisterEnumDescriptor(_GATEWAYTYPE)
GatewayType = enum_type_wrapper.EnumTypeWrapper(_GATEWAYTYPE)
_GATEWAYAUTHMETHOD = _descriptor.EnumDescriptor(
name="GatewayAuthMethod",
full_name="google.cloud.iot.v1.GatewayAuthMethod",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="GATEWAY_AUTH_METHOD_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="ASSOCIATION_ONLY",
index=1,
number=1,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="DEVICE_AUTH_TOKEN_ONLY",
index=2,
number=2,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="ASSOCIATION_AND_DEVICE_AUTH_TOKEN",
index=3,
number=3,
serialized_options=None,
type=None,
),
],
containing_type=None,
serialized_options=None,
serialized_start=3457,
serialized_end=3602,
)
_sym_db.RegisterEnumDescriptor(_GATEWAYAUTHMETHOD)
GatewayAuthMethod = enum_type_wrapper.EnumTypeWrapper(_GATEWAYAUTHMETHOD)
_PUBLICKEYCERTIFICATEFORMAT = _descriptor.EnumDescriptor(
name="PublicKeyCertificateFormat",
full_name="google.cloud.iot.v1.PublicKeyCertificateFormat",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="UNSPECIFIED_PUBLIC_KEY_CERTIFICATE_FORMAT",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="X509_CERTIFICATE_PEM",
index=1,
number=1,
serialized_options=None,
type=None,
),
],
containing_type=None,
serialized_options=None,
serialized_start=3604,
serialized_end=3705,
)
_sym_db.RegisterEnumDescriptor(_PUBLICKEYCERTIFICATEFORMAT)
PublicKeyCertificateFormat = enum_type_wrapper.EnumTypeWrapper(
_PUBLICKEYCERTIFICATEFORMAT
)
_PUBLICKEYFORMAT = _descriptor.EnumDescriptor(
name="PublicKeyFormat",
full_name="google.cloud.iot.v1.PublicKeyFormat",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="UNSPECIFIED_PUBLIC_KEY_FORMAT",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="RSA_PEM", index=1, number=3, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="RSA_X509_PEM", index=2, number=1, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ES256_PEM", index=3, number=2, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ES256_X509_PEM", index=4, number=4, serialized_options=None, type=None
),
],
containing_type=None,
serialized_options=None,
serialized_start=3707,
serialized_end=3825,
)
_sym_db.RegisterEnumDescriptor(_PUBLICKEYFORMAT)
PublicKeyFormat = enum_type_wrapper.EnumTypeWrapper(_PUBLICKEYFORMAT)
MQTT_STATE_UNSPECIFIED = 0
MQTT_ENABLED = 1
MQTT_DISABLED = 2
HTTP_STATE_UNSPECIFIED = 0
HTTP_ENABLED = 1
HTTP_DISABLED = 2
LOG_LEVEL_UNSPECIFIED = 0
NONE = 10
ERROR = 20
INFO = 30
DEBUG = 40
GATEWAY_TYPE_UNSPECIFIED = 0
GATEWAY = 1
NON_GATEWAY = 2
GATEWAY_AUTH_METHOD_UNSPECIFIED = 0
ASSOCIATION_ONLY = 1
DEVICE_AUTH_TOKEN_ONLY = 2
ASSOCIATION_AND_DEVICE_AUTH_TOKEN = 3
UNSPECIFIED_PUBLIC_KEY_CERTIFICATE_FORMAT = 0
X509_CERTIFICATE_PEM = 1
UNSPECIFIED_PUBLIC_KEY_FORMAT = 0
RSA_PEM = 3
RSA_X509_PEM = 1
ES256_PEM = 2
ES256_X509_PEM = 4
_DEVICE_METADATAENTRY = _descriptor.Descriptor(
name="MetadataEntry",
full_name="google.cloud.iot.v1.Device.MetadataEntry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="google.cloud.iot.v1.Device.MetadataEntry.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="value",
full_name="google.cloud.iot.v1.Device.MetadataEntry.value",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=_b("8\001"),
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=932,
serialized_end=979,
)
_DEVICE = _descriptor.Descriptor(
name="Device",
full_name="google.cloud.iot.v1.Device",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="id",
full_name="google.cloud.iot.v1.Device.id",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="name",
full_name="google.cloud.iot.v1.Device.name",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="num_id",
full_name="google.cloud.iot.v1.Device.num_id",
index=2,
number=3,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="credentials",
full_name="google.cloud.iot.v1.Device.credentials",
index=3,
number=12,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="last_heartbeat_time",
full_name="google.cloud.iot.v1.Device.last_heartbeat_time",
index=4,
number=7,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="last_event_time",
full_name="google.cloud.iot.v1.Device.last_event_time",
index=5,
number=8,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="last_state_time",
full_name="google.cloud.iot.v1.Device.last_state_time",
index=6,
number=20,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="last_config_ack_time",
full_name="google.cloud.iot.v1.Device.last_config_ack_time",
index=7,
number=14,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="last_config_send_time",
full_name="google.cloud.iot.v1.Device.last_config_send_time",
index=8,
number=18,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="blocked",
full_name="google.cloud.iot.v1.Device.blocked",
index=9,
number=19,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="last_error_time",
full_name="google.cloud.iot.v1.Device.last_error_time",
index=10,
number=10,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="last_error_status",
full_name="google.cloud.iot.v1.Device.last_error_status",
index=11,
number=11,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="config",
full_name="google.cloud.iot.v1.Device.config",
index=12,
number=13,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="state",
full_name="google.cloud.iot.v1.Device.state",
index=13,
number=16,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="log_level",
full_name="google.cloud.iot.v1.Device.log_level",
index=14,
number=21,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="metadata",
full_name="google.cloud.iot.v1.Device.metadata",
index=15,
number=17,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="gateway_config",
full_name="google.cloud.iot.v1.Device.gateway_config",
index=16,
number=24,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_DEVICE_METADATAENTRY],
enum_types=[],
serialized_options=_b(
"\352Ap\n\036cloudiot.googleapis.com/Device\022Nprojects/{project}/locations/{location}/registries/{registry}/devices/{device}"
),
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=152,
serialized_end=1096,
)
# ---------------------------------------------------------------------------
# Message descriptors, emitted by the protocol buffer compiler.  Each
# _descriptor.Descriptor below mirrors one message declared in
# google/cloud/iot/v1/resources.proto; do NOT edit these by hand — regenerate
# from the .proto file instead.  Note that message_type / enum_type are left
# as None at construction time and are resolved after all descriptors exist
# (see the fields_by_name assignments later in this module).
# ---------------------------------------------------------------------------

# google.cloud.iot.v1.GatewayConfig — gateway type/auth enums plus the
# last-accessed gateway id and timestamp.
_GATEWAYCONFIG = _descriptor.Descriptor(
    name="GatewayConfig",
    full_name="google.cloud.iot.v1.GatewayConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="gateway_type",
            full_name="google.cloud.iot.v1.GatewayConfig.gateway_type",
            index=0,
            number=1,
            type=14,
            cpp_type=8,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="gateway_auth_method",
            full_name="google.cloud.iot.v1.GatewayConfig.gateway_auth_method",
            index=1,
            number=2,
            type=14,
            cpp_type=8,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="last_accessed_gateway_id",
            full_name="google.cloud.iot.v1.GatewayConfig.last_accessed_gateway_id",
            index=2,
            number=3,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="last_accessed_gateway_time",
            full_name="google.cloud.iot.v1.GatewayConfig.last_accessed_gateway_time",
            index=3,
            number=4,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=1099,
    serialized_end=1337,
)
# google.cloud.iot.v1.DeviceRegistry — a registry of devices with its
# notification, MQTT/HTTP and credential configuration.  serialized_options
# carries the encoded google.api.resource annotation (resource name pattern).
_DEVICEREGISTRY = _descriptor.Descriptor(
    name="DeviceRegistry",
    full_name="google.cloud.iot.v1.DeviceRegistry",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="id",
            full_name="google.cloud.iot.v1.DeviceRegistry.id",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="name",
            full_name="google.cloud.iot.v1.DeviceRegistry.name",
            index=1,
            number=2,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="event_notification_configs",
            full_name="google.cloud.iot.v1.DeviceRegistry.event_notification_configs",
            index=2,
            number=10,
            type=11,
            cpp_type=10,
            label=3,
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="state_notification_config",
            full_name="google.cloud.iot.v1.DeviceRegistry.state_notification_config",
            index=3,
            number=7,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="mqtt_config",
            full_name="google.cloud.iot.v1.DeviceRegistry.mqtt_config",
            index=4,
            number=4,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="http_config",
            full_name="google.cloud.iot.v1.DeviceRegistry.http_config",
            index=5,
            number=9,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="log_level",
            full_name="google.cloud.iot.v1.DeviceRegistry.log_level",
            index=6,
            number=11,
            type=14,
            cpp_type=8,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="credentials",
            full_name="google.cloud.iot.v1.DeviceRegistry.credentials",
            index=7,
            number=8,
            type=11,
            cpp_type=10,
            label=3,
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=_b(
        "\352Aa\n cloudiot.googleapis.com/Registry\022=projects/{project}/locations/{location}/registries/{registry}"
    ),
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=1340,
    serialized_end=1867,
)
# google.cloud.iot.v1.MqttConfig — single enum field enabling/disabling MQTT.
_MQTTCONFIG = _descriptor.Descriptor(
    name="MqttConfig",
    full_name="google.cloud.iot.v1.MqttConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="mqtt_enabled_state",
            full_name="google.cloud.iot.v1.MqttConfig.mqtt_enabled_state",
            index=0,
            number=1,
            type=14,
            cpp_type=8,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        )
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=1869,
    serialized_end=1941,
)
# google.cloud.iot.v1.HttpConfig — single enum field enabling/disabling the
# HTTP bridge.
_HTTPCONFIG = _descriptor.Descriptor(
    name="HttpConfig",
    full_name="google.cloud.iot.v1.HttpConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="http_enabled_state",
            full_name="google.cloud.iot.v1.HttpConfig.http_enabled_state",
            index=0,
            number=1,
            type=14,
            cpp_type=8,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        )
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=1943,
    serialized_end=2015,
)
# google.cloud.iot.v1.EventNotificationConfig — subfolder matcher and the
# Pub/Sub topic telemetry events are forwarded to.
_EVENTNOTIFICATIONCONFIG = _descriptor.Descriptor(
    name="EventNotificationConfig",
    full_name="google.cloud.iot.v1.EventNotificationConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="subfolder_matches",
            full_name="google.cloud.iot.v1.EventNotificationConfig.subfolder_matches",
            index=0,
            number=2,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="pubsub_topic_name",
            full_name="google.cloud.iot.v1.EventNotificationConfig.pubsub_topic_name",
            index=1,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2017,
    serialized_end=2096,
)
# google.cloud.iot.v1.StateNotificationConfig — Pub/Sub topic for device
# state updates.
_STATENOTIFICATIONCONFIG = _descriptor.Descriptor(
    name="StateNotificationConfig",
    full_name="google.cloud.iot.v1.StateNotificationConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="pubsub_topic_name",
            full_name="google.cloud.iot.v1.StateNotificationConfig.pubsub_topic_name",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        )
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2098,
    serialized_end=2150,
)
# google.cloud.iot.v1.RegistryCredential — a oneof "credential" that
# currently holds a single public_key_certificate alternative.
_REGISTRYCREDENTIAL = _descriptor.Descriptor(
    name="RegistryCredential",
    full_name="google.cloud.iot.v1.RegistryCredential",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="public_key_certificate",
            full_name="google.cloud.iot.v1.RegistryCredential.public_key_certificate",
            index=0,
            number=1,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        )
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[
        _descriptor.OneofDescriptor(
            name="credential",
            full_name="google.cloud.iot.v1.RegistryCredential.credential",
            index=0,
            containing_type=None,
            fields=[],
        )
    ],
    serialized_start=2152,
    serialized_end=2263,
)
# google.cloud.iot.v1.X509CertificateDetails — parsed X.509 certificate
# metadata (issuer/subject, validity window, algorithms).
_X509CERTIFICATEDETAILS = _descriptor.Descriptor(
    name="X509CertificateDetails",
    full_name="google.cloud.iot.v1.X509CertificateDetails",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="issuer",
            full_name="google.cloud.iot.v1.X509CertificateDetails.issuer",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="subject",
            full_name="google.cloud.iot.v1.X509CertificateDetails.subject",
            index=1,
            number=2,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="start_time",
            full_name="google.cloud.iot.v1.X509CertificateDetails.start_time",
            index=2,
            number=3,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="expiry_time",
            full_name="google.cloud.iot.v1.X509CertificateDetails.expiry_time",
            index=3,
            number=4,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="signature_algorithm",
            full_name="google.cloud.iot.v1.X509CertificateDetails.signature_algorithm",
            index=4,
            number=5,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="public_key_type",
            full_name="google.cloud.iot.v1.X509CertificateDetails.public_key_type",
            index=5,
            number=6,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2266,
    serialized_end=2474,
)
# google.cloud.iot.v1.PublicKeyCertificate — certificate format/payload plus
# the parsed X509 details message.
_PUBLICKEYCERTIFICATE = _descriptor.Descriptor(
    name="PublicKeyCertificate",
    full_name="google.cloud.iot.v1.PublicKeyCertificate",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="format",
            full_name="google.cloud.iot.v1.PublicKeyCertificate.format",
            index=0,
            number=1,
            type=14,
            cpp_type=8,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="certificate",
            full_name="google.cloud.iot.v1.PublicKeyCertificate.certificate",
            index=1,
            number=2,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="x509_details",
            full_name="google.cloud.iot.v1.PublicKeyCertificate.x509_details",
            index=2,
            number=3,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2477,
    serialized_end=2652,
)
# google.cloud.iot.v1.DeviceCredential — a oneof "credential" holding a
# public_key alternative, plus an expiration timestamp.
_DEVICECREDENTIAL = _descriptor.Descriptor(
    name="DeviceCredential",
    full_name="google.cloud.iot.v1.DeviceCredential",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="public_key",
            full_name="google.cloud.iot.v1.DeviceCredential.public_key",
            index=0,
            number=2,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="expiration_time",
            full_name="google.cloud.iot.v1.DeviceCredential.expiration_time",
            index=1,
            number=6,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[
        _descriptor.OneofDescriptor(
            name="credential",
            full_name="google.cloud.iot.v1.DeviceCredential.credential",
            index=0,
            containing_type=None,
            fields=[],
        )
    ],
    serialized_start=2655,
    serialized_end=2804,
)
# google.cloud.iot.v1.PublicKeyCredential — key format enum plus the key
# material as a string.
_PUBLICKEYCREDENTIAL = _descriptor.Descriptor(
    name="PublicKeyCredential",
    full_name="google.cloud.iot.v1.PublicKeyCredential",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="format",
            full_name="google.cloud.iot.v1.PublicKeyCredential.format",
            index=0,
            number=1,
            type=14,
            cpp_type=8,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="key",
            full_name="google.cloud.iot.v1.PublicKeyCredential.key",
            index=1,
            number=2,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2806,
    serialized_end=2894,
)
# google.cloud.iot.v1.DeviceConfig — versioned device configuration with
# cloud/device timestamps and the raw binary payload.
_DEVICECONFIG = _descriptor.Descriptor(
    name="DeviceConfig",
    full_name="google.cloud.iot.v1.DeviceConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="version",
            full_name="google.cloud.iot.v1.DeviceConfig.version",
            index=0,
            number=1,
            type=3,
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="cloud_update_time",
            full_name="google.cloud.iot.v1.DeviceConfig.cloud_update_time",
            index=1,
            number=2,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="device_ack_time",
            full_name="google.cloud.iot.v1.DeviceConfig.device_ack_time",
            index=2,
            number=3,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="binary_data",
            full_name="google.cloud.iot.v1.DeviceConfig.binary_data",
            index=3,
            number=4,
            type=12,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b(""),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2897,
    serialized_end=3057,
)
# google.cloud.iot.v1.DeviceState — a state update timestamp plus the raw
# binary state payload.
_DEVICESTATE = _descriptor.Descriptor(
    name="DeviceState",
    full_name="google.cloud.iot.v1.DeviceState",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="update_time",
            full_name="google.cloud.iot.v1.DeviceState.update_time",
            index=0,
            number=1,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="binary_data",
            full_name="google.cloud.iot.v1.DeviceState.binary_data",
            index=1,
            number=2,
            type=12,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b(""),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=3059,
    serialized_end=3142,
)
# ---------------------------------------------------------------------------
# Generated descriptor cross-linking: now that every Descriptor object
# exists, resolve the message_type / enum_type references that were left as
# None above, attach oneof memberships, and register all message and enum
# types on the file-level DESCRIPTOR.  The statement order matters; do not
# edit by hand.
# ---------------------------------------------------------------------------
_DEVICE_METADATAENTRY.containing_type = _DEVICE
_DEVICE.fields_by_name["credentials"].message_type = _DEVICECREDENTIAL
# Device timestamp fields all resolve to google.protobuf.Timestamp.
_DEVICE.fields_by_name[
    "last_heartbeat_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_DEVICE.fields_by_name[
    "last_event_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_DEVICE.fields_by_name[
    "last_state_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_DEVICE.fields_by_name[
    "last_config_ack_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_DEVICE.fields_by_name[
    "last_config_send_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_DEVICE.fields_by_name[
    "last_error_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_DEVICE.fields_by_name[
    "last_error_status"
].message_type = google_dot_rpc_dot_status__pb2._STATUS
_DEVICE.fields_by_name["config"].message_type = _DEVICECONFIG
_DEVICE.fields_by_name["state"].message_type = _DEVICESTATE
_DEVICE.fields_by_name["log_level"].enum_type = _LOGLEVEL
_DEVICE.fields_by_name["metadata"].message_type = _DEVICE_METADATAENTRY
_DEVICE.fields_by_name["gateway_config"].message_type = _GATEWAYCONFIG
_GATEWAYCONFIG.fields_by_name["gateway_type"].enum_type = _GATEWAYTYPE
_GATEWAYCONFIG.fields_by_name["gateway_auth_method"].enum_type = _GATEWAYAUTHMETHOD
_GATEWAYCONFIG.fields_by_name[
    "last_accessed_gateway_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_DEVICEREGISTRY.fields_by_name[
    "event_notification_configs"
].message_type = _EVENTNOTIFICATIONCONFIG
_DEVICEREGISTRY.fields_by_name[
    "state_notification_config"
].message_type = _STATENOTIFICATIONCONFIG
_DEVICEREGISTRY.fields_by_name["mqtt_config"].message_type = _MQTTCONFIG
_DEVICEREGISTRY.fields_by_name["http_config"].message_type = _HTTPCONFIG
_DEVICEREGISTRY.fields_by_name["log_level"].enum_type = _LOGLEVEL
_DEVICEREGISTRY.fields_by_name["credentials"].message_type = _REGISTRYCREDENTIAL
_MQTTCONFIG.fields_by_name["mqtt_enabled_state"].enum_type = _MQTTSTATE
_HTTPCONFIG.fields_by_name["http_enabled_state"].enum_type = _HTTPSTATE
_REGISTRYCREDENTIAL.fields_by_name[
    "public_key_certificate"
].message_type = _PUBLICKEYCERTIFICATE
# Wire the RegistryCredential "credential" oneof membership in both
# directions (oneof -> field and field -> containing oneof).
_REGISTRYCREDENTIAL.oneofs_by_name["credential"].fields.append(
    _REGISTRYCREDENTIAL.fields_by_name["public_key_certificate"]
)
_REGISTRYCREDENTIAL.fields_by_name[
    "public_key_certificate"
].containing_oneof = _REGISTRYCREDENTIAL.oneofs_by_name["credential"]
_X509CERTIFICATEDETAILS.fields_by_name[
    "start_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_X509CERTIFICATEDETAILS.fields_by_name[
    "expiry_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_PUBLICKEYCERTIFICATE.fields_by_name["format"].enum_type = _PUBLICKEYCERTIFICATEFORMAT
_PUBLICKEYCERTIFICATE.fields_by_name[
    "x509_details"
].message_type = _X509CERTIFICATEDETAILS
_DEVICECREDENTIAL.fields_by_name["public_key"].message_type = _PUBLICKEYCREDENTIAL
_DEVICECREDENTIAL.fields_by_name[
    "expiration_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
# Wire the DeviceCredential "credential" oneof membership in both directions.
_DEVICECREDENTIAL.oneofs_by_name["credential"].fields.append(
    _DEVICECREDENTIAL.fields_by_name["public_key"]
)
_DEVICECREDENTIAL.fields_by_name[
    "public_key"
].containing_oneof = _DEVICECREDENTIAL.oneofs_by_name["credential"]
_PUBLICKEYCREDENTIAL.fields_by_name["format"].enum_type = _PUBLICKEYFORMAT
_DEVICECONFIG.fields_by_name[
    "cloud_update_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_DEVICECONFIG.fields_by_name[
    "device_ack_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_DEVICESTATE.fields_by_name[
    "update_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
# Register every top-level message and enum descriptor on the file
# descriptor, then publish the file to the default symbol database.
DESCRIPTOR.message_types_by_name["Device"] = _DEVICE
DESCRIPTOR.message_types_by_name["GatewayConfig"] = _GATEWAYCONFIG
DESCRIPTOR.message_types_by_name["DeviceRegistry"] = _DEVICEREGISTRY
DESCRIPTOR.message_types_by_name["MqttConfig"] = _MQTTCONFIG
DESCRIPTOR.message_types_by_name["HttpConfig"] = _HTTPCONFIG
DESCRIPTOR.message_types_by_name["EventNotificationConfig"] = _EVENTNOTIFICATIONCONFIG
DESCRIPTOR.message_types_by_name["StateNotificationConfig"] = _STATENOTIFICATIONCONFIG
DESCRIPTOR.message_types_by_name["RegistryCredential"] = _REGISTRYCREDENTIAL
DESCRIPTOR.message_types_by_name["X509CertificateDetails"] = _X509CERTIFICATEDETAILS
DESCRIPTOR.message_types_by_name["PublicKeyCertificate"] = _PUBLICKEYCERTIFICATE
DESCRIPTOR.message_types_by_name["DeviceCredential"] = _DEVICECREDENTIAL
DESCRIPTOR.message_types_by_name["PublicKeyCredential"] = _PUBLICKEYCREDENTIAL
DESCRIPTOR.message_types_by_name["DeviceConfig"] = _DEVICECONFIG
DESCRIPTOR.message_types_by_name["DeviceState"] = _DEVICESTATE
DESCRIPTOR.enum_types_by_name["MqttState"] = _MQTTSTATE
DESCRIPTOR.enum_types_by_name["HttpState"] = _HTTPSTATE
DESCRIPTOR.enum_types_by_name["LogLevel"] = _LOGLEVEL
DESCRIPTOR.enum_types_by_name["GatewayType"] = _GATEWAYTYPE
DESCRIPTOR.enum_types_by_name["GatewayAuthMethod"] = _GATEWAYAUTHMETHOD
DESCRIPTOR.enum_types_by_name[
    "PublicKeyCertificateFormat"
] = _PUBLICKEYCERTIFICATEFORMAT
DESCRIPTOR.enum_types_by_name["PublicKeyFormat"] = _PUBLICKEYFORMAT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Device = _reflection.GeneratedProtocolMessageType(
"Device",
(_message.Message,),
dict(
MetadataEntry=_reflection.GeneratedProtocolMessageType(
"MetadataEntry",
(_message.Message,),
dict(
DESCRIPTOR=_DEVICE_METADATAENTRY,
__module__="google.cloud.iot_v1.proto.resources_pb2"
# @@protoc_insertion_point(class_scope:google.cloud.iot.v1.Device.MetadataEntry)
),
),
DESCRIPTOR=_DEVICE,
__module__="google.cloud.iot_v1.proto.resources_pb2",
__doc__="""The device resource.
Attributes:
id:
The user-defined device identifier. The device ID must be
unique within a device registry.
name:
The resource path name. For example,
``projects/p1/locations/us-
central1/registries/registry0/devices/dev0`` or
``projects/p1/locations/us-
central1/registries/registry0/devices/{num_id}``. When
``name`` is populated as a response from the service, it
always ends in the device numeric ID.
num_id:
[Output only] A server-defined unique numeric ID for the
device. This is a more compact way to identify devices, and it
is globally unique.
credentials:
The credentials used to authenticate this device. To allow
credential rotation without interruption, multiple device
credentials can be bound to this device. No more than 3
credentials can be bound to a single device at a time. When
new credentials are added to a device, they are verified
against the registry credentials. For details, see the
description of the ``DeviceRegistry.credentials`` field.
last_heartbeat_time:
[Output only] The last time an MQTT ``PINGREQ`` was received.
This field applies only to devices connecting through MQTT.
MQTT clients usually only send ``PINGREQ`` messages if the
connection is idle, and no other messages have been sent.
Timestamps are periodically collected and written to storage;
they may be stale by a few minutes.
last_event_time:
[Output only] The last time a telemetry event was received.
Timestamps are periodically collected and written to storage;
they may be stale by a few minutes.
last_state_time:
[Output only] The last time a state event was received.
Timestamps are periodically collected and written to storage;
they may be stale by a few minutes.
last_config_ack_time:
[Output only] The last time a cloud-to-device config version
acknowledgment was received from the device. This field is
only for configurations sent through MQTT.
last_config_send_time:
[Output only] The last time a cloud-to-device config version
was sent to the device.
blocked:
If a device is blocked, connections or requests from this
device will fail. Can be used to temporarily prevent the
device from connecting if, for example, the sensor is
generating bad data and needs maintenance.
last_error_time:
[Output only] The time the most recent error occurred, such as
a failure to publish to Cloud Pub/Sub. This field is the
timestamp of 'last\_error\_status'.
last_error_status:
[Output only] The error message of the most recent error, such
as a failure to publish to Cloud Pub/Sub. 'last\_error\_time'
is the timestamp of this field. If no errors have occurred,
this field has an empty message and the status code 0 == OK.
Otherwise, this field is expected to have a status code other
than OK.
config:
The most recent device configuration, which is eventually sent
from Cloud IoT Core to the device. If not present on creation,
the configuration will be initialized with an empty payload
and version value of ``1``. To update this field after
creation, use the ``DeviceManager.ModifyCloudToDeviceConfig``
method.
state:
[Output only] The state most recently received from the
device. If no state has been reported, this field is not
present.
log_level:
\ **Beta Feature** The logging verbosity for device activity.
If unspecified, DeviceRegistry.log\_level will be used.
metadata:
The metadata key-value pairs assigned to the device. This
metadata is not interpreted or indexed by Cloud IoT Core. It
can be used to add contextual information for the device.
Keys must conform to the regular expression
[a-zA-Z][a-zA-Z0-9-\_.+~%]+ and be less than 128 bytes in
length. Values are free-form strings. Each value must be less
than or equal to 32 KB in size. The total size of all keys
and values must be less than 256 KB, and the maximum number of
key-value pairs is 500.
gateway_config:
Gateway-related configuration and state.
""",
# @@protoc_insertion_point(class_scope:google.cloud.iot.v1.Device)
),
)
_sym_db.RegisterMessage(Device)
_sym_db.RegisterMessage(Device.MetadataEntry)
GatewayConfig = _reflection.GeneratedProtocolMessageType(
"GatewayConfig",
(_message.Message,),
dict(
DESCRIPTOR=_GATEWAYCONFIG,
__module__="google.cloud.iot_v1.proto.resources_pb2",
__doc__="""Gateway-related configuration and state.
Attributes:
gateway_type:
Indicates whether the device is a gateway.
gateway_auth_method:
Indicates how to authorize and/or authenticate devices to
access the gateway.
last_accessed_gateway_id:
[Output only] The ID of the gateway the device accessed most
recently.
last_accessed_gateway_time:
[Output only] The most recent time at which the device
accessed the gateway specified in ``last_accessed_gateway``.
""",
# @@protoc_insertion_point(class_scope:google.cloud.iot.v1.GatewayConfig)
),
)
_sym_db.RegisterMessage(GatewayConfig)
DeviceRegistry = _reflection.GeneratedProtocolMessageType(
"DeviceRegistry",
(_message.Message,),
dict(
DESCRIPTOR=_DEVICEREGISTRY,
__module__="google.cloud.iot_v1.proto.resources_pb2",
__doc__="""A container for a group of devices.
Attributes:
id:
The identifier of this device registry. For example,
``myRegistry``.
name:
The resource path name. For example, ``projects/example-
project/locations/us-central1/registries/my-registry``.
event_notification_configs:
The configuration for notification of telemetry events
received from the device. All telemetry events that were
successfully published by the device and acknowledged by Cloud
IoT Core are guaranteed to be delivered to Cloud Pub/Sub. If
multiple configurations match a message, only the first
matching configuration is used. If you try to publish a device
telemetry event using MQTT without specifying a Cloud Pub/Sub
topic for the device's registry, the connection closes
automatically. If you try to do so using an HTTP connection,
an error is returned. Up to 10 configurations may be provided.
state_notification_config:
The configuration for notification of new states received from
the device. State updates are guaranteed to be stored in the
state history, but notifications to Cloud Pub/Sub are not
guaranteed. For example, if permissions are misconfigured or
the specified topic doesn't exist, no notification will be
published but the state will still be stored in Cloud IoT
Core.
mqtt_config:
The MQTT configuration for this device registry.
http_config:
The DeviceService (HTTP) configuration for this device
registry.
log_level:
\ **Beta Feature** The default logging verbosity for activity
from devices in this registry. The verbosity level can be
overridden by Device.log\_level.
credentials:
The credentials used to verify the device credentials. No more
than 10 credentials can be bound to a single registry at a
time. The verification process occurs at the time of device
creation or update. If this field is empty, no verification is
performed. Otherwise, the credentials of a newly created
device or added credentials of an updated device should be
signed with one of these registry credentials. Note, however,
that existing devices will never be affected by modifications
to this list of credentials: after a device has been
successfully created in a registry, it should be able to
connect even if its registry credentials are revoked, deleted,
or modified.
""",
# @@protoc_insertion_point(class_scope:google.cloud.iot.v1.DeviceRegistry)
),
)
_sym_db.RegisterMessage(DeviceRegistry)
MqttConfig = _reflection.GeneratedProtocolMessageType(
"MqttConfig",
(_message.Message,),
dict(
DESCRIPTOR=_MQTTCONFIG,
__module__="google.cloud.iot_v1.proto.resources_pb2",
__doc__="""The configuration of MQTT for a device registry.
Attributes:
mqtt_enabled_state:
If enabled, allows connections using the MQTT protocol.
Otherwise, MQTT connections to this registry will fail.
""",
# @@protoc_insertion_point(class_scope:google.cloud.iot.v1.MqttConfig)
),
)
_sym_db.RegisterMessage(MqttConfig)
HttpConfig = _reflection.GeneratedProtocolMessageType(
"HttpConfig",
(_message.Message,),
dict(
DESCRIPTOR=_HTTPCONFIG,
__module__="google.cloud.iot_v1.proto.resources_pb2",
__doc__="""The configuration of the HTTP bridge for a device
registry.
Attributes:
http_enabled_state:
If enabled, allows devices to use DeviceService via the HTTP
protocol. Otherwise, any requests to DeviceService will fail
for this registry.
""",
# @@protoc_insertion_point(class_scope:google.cloud.iot.v1.HttpConfig)
),
)
_sym_db.RegisterMessage(HttpConfig)
EventNotificationConfig = _reflection.GeneratedProtocolMessageType(
"EventNotificationConfig",
(_message.Message,),
dict(
DESCRIPTOR=_EVENTNOTIFICATIONCONFIG,
__module__="google.cloud.iot_v1.proto.resources_pb2",
__doc__="""The configuration for forwarding telemetry events.
Attributes:
subfolder_matches:
If the subfolder name matches this string exactly, this
configuration will be used. The string must not include the
leading '/' character. If empty, all strings are matched. This
field is used only for telemetry events; subfolders are not
supported for state changes.
pubsub_topic_name:
A Cloud Pub/Sub topic name. For example,
``projects/myProject/topics/deviceEvents``.
""",
# @@protoc_insertion_point(class_scope:google.cloud.iot.v1.EventNotificationConfig)
),
)
_sym_db.RegisterMessage(EventNotificationConfig)
StateNotificationConfig = _reflection.GeneratedProtocolMessageType(
"StateNotificationConfig",
(_message.Message,),
dict(
DESCRIPTOR=_STATENOTIFICATIONCONFIG,
__module__="google.cloud.iot_v1.proto.resources_pb2",
__doc__="""The configuration for notification of new states received
from the device.
Attributes:
pubsub_topic_name:
A Cloud Pub/Sub topic name. For example,
``projects/myProject/topics/deviceEvents``.
""",
# @@protoc_insertion_point(class_scope:google.cloud.iot.v1.StateNotificationConfig)
),
)
_sym_db.RegisterMessage(StateNotificationConfig)
RegistryCredential = _reflection.GeneratedProtocolMessageType(
"RegistryCredential",
(_message.Message,),
dict(
DESCRIPTOR=_REGISTRYCREDENTIAL,
__module__="google.cloud.iot_v1.proto.resources_pb2",
__doc__="""A server-stored registry credential used to validate
device credentials.
Attributes:
credential:
The credential data. Reserved for expansion in the future.
public_key_certificate:
A public key certificate used to verify the device
credentials.
""",
# @@protoc_insertion_point(class_scope:google.cloud.iot.v1.RegistryCredential)
),
)
_sym_db.RegisterMessage(RegistryCredential)
X509CertificateDetails = _reflection.GeneratedProtocolMessageType(
"X509CertificateDetails",
(_message.Message,),
dict(
DESCRIPTOR=_X509CERTIFICATEDETAILS,
__module__="google.cloud.iot_v1.proto.resources_pb2",
__doc__="""Details of an X.509 certificate. For informational
purposes only.
Attributes:
issuer:
The entity that signed the certificate.
subject:
The entity the certificate and public key belong to.
start_time:
The time the certificate becomes valid.
expiry_time:
The time the certificate becomes invalid.
signature_algorithm:
The algorithm used to sign the certificate.
public_key_type:
The type of public key in the certificate.
""",
# @@protoc_insertion_point(class_scope:google.cloud.iot.v1.X509CertificateDetails)
),
)
_sym_db.RegisterMessage(X509CertificateDetails)
PublicKeyCertificate = _reflection.GeneratedProtocolMessageType(
"PublicKeyCertificate",
(_message.Message,),
dict(
DESCRIPTOR=_PUBLICKEYCERTIFICATE,
__module__="google.cloud.iot_v1.proto.resources_pb2",
__doc__="""A public key certificate format and data.
Attributes:
format:
The certificate format.
certificate:
The certificate data.
x509_details:
[Output only] The certificate details. Used only for X.509
certificates.
""",
# @@protoc_insertion_point(class_scope:google.cloud.iot.v1.PublicKeyCertificate)
),
)
_sym_db.RegisterMessage(PublicKeyCertificate)
DeviceCredential = _reflection.GeneratedProtocolMessageType(
"DeviceCredential",
(_message.Message,),
dict(
DESCRIPTOR=_DEVICECREDENTIAL,
__module__="google.cloud.iot_v1.proto.resources_pb2",
__doc__="""A server-stored device credential used for authentication.
Attributes:
credential:
The credential data. Reserved for expansion in the future.
public_key:
A public key used to verify the signature of JSON Web Tokens
(JWTs). When adding a new device credential, either via device
creation or via modifications, this public key credential may
be required to be signed by one of the registry level
certificates. More specifically, if the registry contains at
least one certificate, any new device credential must be
signed by one of the registry certificates. As a result, when
the registry contains certificates, only X.509 certificates
are accepted as device credentials. However, if the registry
does not contain a certificate, self-signed certificates and
public keys will be accepted. New device credentials must be
different from every registry-level certificate.
expiration_time:
[Optional] The time at which this credential becomes invalid.
This credential will be ignored for new client authentication
requests after this timestamp; however, it will not be
automatically deleted.
""",
# @@protoc_insertion_point(class_scope:google.cloud.iot.v1.DeviceCredential)
),
)
_sym_db.RegisterMessage(DeviceCredential)
PublicKeyCredential = _reflection.GeneratedProtocolMessageType(
"PublicKeyCredential",
(_message.Message,),
dict(
DESCRIPTOR=_PUBLICKEYCREDENTIAL,
__module__="google.cloud.iot_v1.proto.resources_pb2",
__doc__="""A public key format and data.
Attributes:
format:
The format of the key.
key:
The key data.
""",
# @@protoc_insertion_point(class_scope:google.cloud.iot.v1.PublicKeyCredential)
),
)
_sym_db.RegisterMessage(PublicKeyCredential)
DeviceConfig = _reflection.GeneratedProtocolMessageType(
"DeviceConfig",
(_message.Message,),
dict(
DESCRIPTOR=_DEVICECONFIG,
__module__="google.cloud.iot_v1.proto.resources_pb2",
__doc__="""The device configuration. Eventually delivered to devices.
Attributes:
version:
[Output only] The version of this update. The version number
is assigned by the server, and is always greater than 0 after
device creation. The version must be 0 on the ``CreateDevice``
request if a ``config`` is specified; the response of
``CreateDevice`` will always have a value of 1.
cloud_update_time:
[Output only] The time at which this configuration version was
updated in Cloud IoT Core. This timestamp is set by the
server.
device_ack_time:
[Output only] The time at which Cloud IoT Core received the
acknowledgment from the device, indicating that the device has
received this configuration version. If this field is not
present, the device has not yet acknowledged that it received
this version. Note that when the config was sent to the
device, many config versions may have been available in Cloud
IoT Core while the device was disconnected, and on connection,
only the latest version is sent to the device. Some versions
may never be sent to the device, and therefore are never
acknowledged. This timestamp is set by Cloud IoT Core.
binary_data:
The device configuration data.
""",
# @@protoc_insertion_point(class_scope:google.cloud.iot.v1.DeviceConfig)
),
)
_sym_db.RegisterMessage(DeviceConfig)
DeviceState = _reflection.GeneratedProtocolMessageType(
"DeviceState",
(_message.Message,),
dict(
DESCRIPTOR=_DEVICESTATE,
__module__="google.cloud.iot_v1.proto.resources_pb2",
__doc__="""The device state, as reported by the device.
Attributes:
update_time:
[Output only] The time at which this state version was updated
in Cloud IoT Core.
binary_data:
The device state data.
""",
# @@protoc_insertion_point(class_scope:google.cloud.iot.v1.DeviceState)
),
)
_sym_db.RegisterMessage(DeviceState)
DESCRIPTOR._options = None
_DEVICE_METADATAENTRY._options = None
_DEVICE._options = None
_DEVICEREGISTRY._options = None
# @@protoc_insertion_point(module_scope)
| 35.14228 | 5,950 | 0.631646 |
cae02678d281642787e348abb61dd143a3f06275 | 7,166 | py | Python | src/python/pants/bin/remote_pants_runner.py | jakubbujny/pants | e7fe73eaa3bc196d6d976e9f362bf60b69da17b3 | [
"Apache-2.0"
] | null | null | null | src/python/pants/bin/remote_pants_runner.py | jakubbujny/pants | e7fe73eaa3bc196d6d976e9f362bf60b69da17b3 | [
"Apache-2.0"
] | null | null | null | src/python/pants/bin/remote_pants_runner.py | jakubbujny/pants | e7fe73eaa3bc196d6d976e9f362bf60b69da17b3 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import signal
import sys
import time
from builtins import object, str
from contextlib import contextmanager
from future.utils import raise_with_traceback
from pants.base.exception_sink import ExceptionSink
from pants.console.stty_utils import STTYSettings
from pants.java.nailgun_client import NailgunClient
from pants.java.nailgun_protocol import NailgunProtocol
from pants.pantsd.pants_daemon import PantsDaemon
from pants.util.collections import combined_dict
from pants.util.dirutil import maybe_read_file
logger = logging.getLogger(__name__)
class RemotePantsRunner(object):
  """A thin client variant of PantsRunner.

  Connects to a running pantsd process over the Pailgun (nailgun) protocol and
  executes the requested pants command remotely, retrying (and restarting the
  daemon) on recoverable connection failures.
  """

  class Fallback(Exception):
    """Raised when fallback to an alternate execution mode is requested."""

  class Terminated(Exception):
    """Raised when an active run is terminated mid-flight."""

  PANTS_COMMAND = 'pants'

  # Errors that indicate the daemon was unreachable or the run never started -
  # these are safe to retry (and, eventually, to fall back on).
  RECOVERABLE_EXCEPTIONS = (
    NailgunClient.NailgunConnectionError,
    NailgunClient.NailgunExecutionError
  )

  def __init__(self, exiter, args, env, bootstrap_options, stdin=None, stdout=None, stderr=None):
    """
    :param Exiter exiter: The Exiter instance to use for this run.
    :param list args: The arguments (e.g. sys.argv) for this run.
    :param dict env: The environment (e.g. os.environ) for this run.
    :param Options bootstrap_options: The Options bag containing the bootstrap options.
    :param file stdin: The stream representing stdin.
    :param file stdout: The stream representing stdout.
    :param file stderr: The stream representing stderr.
    """
    self._start_time = time.time()
    self._exiter = exiter
    self._args = args
    self._env = env
    self._bootstrap_options = bootstrap_options
    self._stdin = stdin or sys.stdin
    self._stdout = stdout or sys.stdout
    self._stderr = stderr or sys.stderr

  @contextmanager
  def _trapped_signals(self, client):
    # Raw docstring: "control-\" would otherwise be an invalid escape sequence.
    r"""A contextmanager that overrides the SIGINT (control-c) and SIGQUIT (control-\) handlers
    and handles them remotely."""
    def handle_control_c(signum, frame):
      client.send_control_c()

    existing_sigint_handler = signal.signal(signal.SIGINT, handle_control_c)
    # N.B. SIGQUIT will abruptly kill the pantsd-runner, which will shut down the other end
    # of the Pailgun connection - so we send a gentler SIGINT here instead.
    existing_sigquit_handler = signal.signal(signal.SIGQUIT, handle_control_c)

    # Retry interrupted system calls.
    signal.siginterrupt(signal.SIGINT, False)
    signal.siginterrupt(signal.SIGQUIT, False)
    try:
      yield
    finally:
      # Scope the overrides to this run by restoring the previous handlers.
      signal.signal(signal.SIGINT, existing_sigint_handler)
      signal.signal(signal.SIGQUIT, existing_sigquit_handler)

  def _setup_logging(self):
    """Sets up basic stdio logging for the thin client."""
    log_level = logging.getLevelName(self._bootstrap_options.for_global_scope().level.upper())

    formatter = logging.Formatter('%(levelname)s] %(message)s')
    handler = logging.StreamHandler(sys.stderr)
    handler.setLevel(log_level)
    handler.setFormatter(formatter)

    root = logging.getLogger()
    root.setLevel(log_level)
    root.addHandler(handler)

  @staticmethod
  def _backoff(attempt):
    """Minimal backoff strategy for daemon restarts: sleep 2*attempt - 1 seconds."""
    time.sleep(attempt + (attempt - 1))

  def _run_pants_with_retry(self, pantsd_handle, retries=3):
    """Runs pants remotely with retry and recovery for nascent executions.

    :param PantsDaemon.Handle pantsd_handle: A Handle for the daemon to connect to.
    :raises Fallback: if the daemon stays unreachable after all retries.
    :raises Terminated: if an established connection is lost mid-run.
    """
    attempt = 1
    while True:
      logger.debug(
        'connecting to pantsd on port {} (attempt {}/{})'
        .format(pantsd_handle.port, attempt, retries)
      )
      try:
        return self._connect_and_execute(pantsd_handle.port)
      except self.RECOVERABLE_EXCEPTIONS as e:
        if attempt > retries:
          raise self.Fallback(e)

        self._backoff(attempt)
        # logger.warn is a deprecated alias of logger.warning.
        logger.warning(
          'pantsd was unresponsive on port {}, retrying ({}/{})'
          .format(pantsd_handle.port, attempt, retries)
        )

        # One possible cause of the daemon being non-responsive during an attempt might be if
        # another lifecycle operation is happening concurrently (incl teardown). To account for
        # this, we won't begin attempting restarts until at least 1 second has passed (1 attempt).
        if attempt > 1:
          pantsd_handle = self._restart_pantsd()
        attempt += 1
      except NailgunClient.NailgunError as e:
        # The connection was established but then lost mid-run: surface the remote
        # exception log (if any) instead of the raw socket error.
        # Ensure a newline.
        logger.fatal('')
        logger.fatal('lost active connection to pantsd!')
        raise_with_traceback(self._extract_remote_exception(pantsd_handle.pid, e))

  def _connect_and_execute(self, port):
    """Runs a single remote pants invocation against the pailgun on the given port."""
    # Merge the nailgun TTY capability environment variables with the passed environment dict.
    ng_env = NailgunProtocol.isatty_to_env(self._stdin, self._stdout, self._stderr)
    modified_env = combined_dict(self._env, ng_env)
    modified_env['PANTSD_RUNTRACKER_CLIENT_START_TIME'] = str(self._start_time)

    assert isinstance(port, int), 'port {} is not an integer!'.format(port)

    # Instantiate a NailgunClient.
    client = NailgunClient(port=port,
                           ins=self._stdin,
                           out=self._stdout,
                           err=self._stderr,
                           exit_on_broken_pipe=True,
                           expects_pid=True)

    with self._trapped_signals(client), STTYSettings.preserved():
      # Execute the command on the pailgun.
      result = client.execute(self.PANTS_COMMAND, *self._args, **modified_env)

    # Exit.
    self._exiter.exit(result)

  def _extract_remote_exception(self, pantsd_pid, nailgun_error):
    """Given a NailgunError, returns a Terminated exception with additional info (where possible).

    This method will include the entire exception log for either the `pid` in the NailgunError, or
    failing that, the `pid` of the pantsd instance.
    """
    sources = [pantsd_pid]
    if nailgun_error.pid is not None:
      # Prefer the runner's own exception log over the daemon's.
      sources = [abs(nailgun_error.pid)] + sources

    exception_text = None
    for source in sources:
      log_path = ExceptionSink.exceptions_log_path(for_pid=source)
      exception_text = maybe_read_file(log_path, binary_mode=False)
      if exception_text:
        break

    exception_suffix = '\nRemote exception:\n{}'.format(exception_text) if exception_text else ''
    return self.Terminated('abruptly lost active connection to pantsd runner: {!r}{}'.format(
      nailgun_error, exception_suffix))

  def _restart_pantsd(self):
    """Restarts the daemon and returns a fresh PantsDaemon.Handle."""
    return PantsDaemon.Factory.restart(bootstrap_options=self._bootstrap_options)

  def _maybe_launch_pantsd(self):
    """Launches the daemon if it isn't already running; returns its Handle."""
    return PantsDaemon.Factory.maybe_launch(bootstrap_options=self._bootstrap_options)

  def run(self, args=None):
    """Entry point: set up logging, ensure pantsd is up, and execute the run remotely."""
    self._setup_logging()
    pantsd_handle = self._maybe_launch_pantsd()
    self._run_pants_with_retry(pantsd_handle)
| 37.715789 | 98 | 0.716578 |
cfdaf20807af4afb766ebdf962754b72658410cf | 2,496 | py | Python | pynaoqi-python2.7-2.5.5.5-linux64/lib/python2.7/site-packages/allog.py | applejenny66/docker_pepper | 2469cc4db6585161a31ac44c8fcf2605d71318b1 | [
"MIT"
] | null | null | null | pynaoqi-python2.7-2.5.5.5-linux64/lib/python2.7/site-packages/allog.py | applejenny66/docker_pepper | 2469cc4db6585161a31ac44c8fcf2605d71318b1 | [
"MIT"
] | null | null | null | pynaoqi-python2.7-2.5.5.5-linux64/lib/python2.7/site-packages/allog.py | applejenny66/docker_pepper | 2469cc4db6585161a31ac44c8fcf2605d71318b1 | [
"MIT"
] | null | null | null | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.11
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_allog', [dirname(__file__)])
except ImportError:
import _allog
return _allog
if fp is not None:
try:
_mod = imp.load_module('_allog', fp, pathname, description)
finally:
fp.close()
return _mod
_allog = swig_import_helper()
del swig_import_helper
else:
import _allog
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
def debug(*args):
return _allog.debug(*args)
debug = _allog.debug
def info(*args):
return _allog.info(*args)
info = _allog.info
def warning(*args):
return _allog.warning(*args)
warning = _allog.warning
def error(*args):
return _allog.error(*args)
error = _allog.error
def fatal(*args):
return _allog.fatal(*args)
fatal = _allog.fatal
# This file is compatible with both classic and new-style classes.
| 26.273684 | 90 | 0.660657 |
5e0d3388e82c6825323e5d68ea6db9849d9dcce3 | 10,189 | py | Python | trestle/core/resolver/prune.py | deenine/compliance-trestle | 79b2f61baac7adbf69e1474a5e218d664bec65ff | [
"Apache-2.0"
] | null | null | null | trestle/core/resolver/prune.py | deenine/compliance-trestle | 79b2f61baac7adbf69e1474a5e218d664bec65ff | [
"Apache-2.0"
] | null | null | null | trestle/core/resolver/prune.py | deenine/compliance-trestle | 79b2f61baac7adbf69e1474a5e218d664bec65ff | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2022 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create resolved catalog from profile."""
import logging
import re
from typing import Dict, Iterator, List, Optional, Set
from uuid import uuid4
import trestle.oscal.catalog as cat
import trestle.oscal.profile as prof
from trestle.core.catalog_interface import CatalogInterface
from trestle.core.const import MARKDOWN_URL_REGEX, UUID_REGEX
from trestle.core.err import TrestleError
from trestle.core.pipeline import Pipeline
from trestle.core.utils import none_if_empty
from trestle.oscal import common
logger = logging.getLogger(__name__)
class Prune(Pipeline.Filter):
    """Prune the catalog based on the import include rule."""

    def __init__(self, import_: prof.Import, profile: prof.Profile) -> None:
        """
        Inject the import.

        This needs to be created prior to knowing the catalog.
        The profile itself is only needed for debug messages.
        The import is one of possibly several imports in that profile.
        """
        self._import = import_
        self._profile = profile
        # Both are populated later via _set_catalog(), once the catalog is known.
        self._catalog_interface: Optional[CatalogInterface] = None
        self._catalog: Optional[cat.Catalog] = None

    def _set_catalog(self, catalog: cat.Catalog) -> None:
        """Set the catalog used by the catalog interface."""
        self._catalog_interface = CatalogInterface(catalog)
        self._catalog = catalog

    def _find_uuid_refs(self, control_id: str) -> Set[str]:
        """
        Find all needed resource refs buried in control links and prose.

        For any controls retained in the resolved profile catalog, if any
        prose references a document by uuid, that reference needs to be in backmatter.
        """
        control = self._catalog_interface.get_control(control_id)
        refs = set()
        if control.links is not None:
            # link hrefs are of the form '#uuid'; strip the leading '#'
            for link in control.links:
                uuid_str = link.href.replace('#', '')
                refs.add(uuid_str)
        if control.parts is not None:
            for part in control.parts:
                if part.prose is not None:
                    # find the two parts, label and ref, in each markdown url
                    # expecting form [label](#uuid)
                    # but if it is a control ref it may be e.g. [CM-7](#cm-7)
                    # for now label is not used
                    # the ref may be a uuid or control id
                    # currently only uuids are used to confirm needed presence in backmatter
                    # note that prose may be multi-line but findall searches all lines
                    matches = re.findall(MARKDOWN_URL_REGEX, part.prose)
                    for match in matches:
                        ref = match[1]
                        if len(ref) > 1 and ref[0] == '#':
                            uuid_match = re.findall(UUID_REGEX, ref[1:])
                            # there should be only one uuid in the parens
                            if uuid_match:
                                refs.add(uuid_match[0])
        if control.controls is not None:
            # recurse into child controls so their references are captured too
            for sub_control in control.controls:
                refs.update(self._find_uuid_refs(sub_control.id))
        return refs

    def _find_all_uuid_refs(self, needed_control_ids: List[str]) -> Set[str]:
        """Find all references needed by controls."""
        refs = set()
        for control_id in needed_control_ids:
            refs.update(self._find_uuid_refs(control_id))
        return refs

    def _controls_selected(self, select_list: Optional[List[prof.SelectControlById]]) -> List[str]:
        """Return the flat list of control ids selected by the given SelectControlById list."""
        control_ids: List[str] = []
        if select_list is not None:
            for select_control in select_list:
                if select_control.matching is not None:
                    raise TrestleError('Profiles with SelectControlById based on matching are not supported.')
                include_children = select_control.with_child_controls == prof.WithChildControls.yes
                if select_control.with_ids:
                    new_ids = [withid.__root__ for withid in select_control.with_ids]
                    for id_ in new_ids:
                        control_ids.append(id_)
                        # optionally pull in the selected control's descendants as well
                        if include_children:
                            control_ids.extend(self._catalog_interface.get_dependent_control_ids(id_))
        return control_ids

    def _find_needed_control_ids(self) -> List[str]:
        """Get list of control_ids needed by profile and corresponding groups."""
        if self._import.include_controls is not None:
            include_ids = self._controls_selected(self._import.include_controls)
        else:
            if self._import.include_all is None:
                logger.warning('Profile does not specify include-controls, so including all.')
            include_ids = self._catalog_interface.get_control_ids()

        exclude_ids = self._controls_selected(self._import.exclude_controls)
        if not set(include_ids).issuperset(set(exclude_ids)):
            # not fatal: excluded ids that were never included are simply ignored below
            logger.debug(f'include_ids is not a superset of exclude_ids in import {self._import.href}')
        return [id_ for id_ in include_ids if id_ not in exclude_ids]

    def _prune_control(self, needed_ids: List[str], control: cat.Control, exclude_ids: List[str]) -> cat.Control:
        """
        Prune the control based on the Import requirements.

        This is only called if the control is needed
        Some or all of its sub_controls may not be needed
        This always returns the original control, possibly with fewer subcontrols

        NOTE: exclude_ids is mutated in place - it accumulates ids already
        emitted so each sub-control appears at most once across the tree.
        """
        if control.controls is None:
            return control
        controls = []
        for sub_control in control.controls:
            if sub_control.id in needed_ids and sub_control.id not in exclude_ids:
                controls.append(self._prune_control(needed_ids, sub_control, exclude_ids))
                exclude_ids.append(sub_control.id)
        control.controls = none_if_empty(controls)
        return control

    def _prune_controls(self, needed_ids: List[str]) -> List[str]:
        """Prune each needed control in place and return the de-duplicated list of top-level ids."""
        loaded_ids = []
        final_ids: List[str] = []
        for control_id in needed_ids:
            if control_id not in loaded_ids:
                control = self._catalog_interface.get_control(control_id)
                if control is None:
                    msg = (
                        f'Profile titled "{self._profile.metadata.title}" references control {control_id} '
                        f'but it is not in catalog titled "{self._catalog.metadata.title}"'
                    )
                    raise TrestleError(msg)
                # _prune_control appends handled sub-control ids to loaded_ids,
                # so nested controls are not emitted again at the top level
                control = self._prune_control(needed_ids, control, loaded_ids)
                self._catalog_interface.replace_control(control)
                loaded_ids.append(control_id)
                final_ids.append(control_id)
        return final_ids

    def _prune_catalog(self) -> cat.Catalog:
        """Prune the controls in the current catalog."""
        if self._import is None:
            return self._catalog

        needed_ids = self._find_needed_control_ids()

        # if a control includes controls - only include those that we know are needed
        final_control_ids = self._prune_controls(needed_ids)

        cat_controls = []

        # build the needed groups of controls
        group_dict: Dict[str, cat.Group] = {}
        for control_id in final_control_ids:
            control = self._catalog_interface.get_control(control_id)
            group_id, group_title, group_class = self._catalog_interface.get_group_info_by_control(control_id)
            if not group_id:
                # control is not in any group: keep it at catalog level
                cat_controls.append(control)
                continue
            group = group_dict.get(group_id)
            if group is None:
                group = cat.Group(id=group_id, title=group_title, class_=group_class, controls=[control])
                group_dict[group_id] = group
            else:
                group_dict[group_id].controls.append(control)

        # find all referenced uuids - they should be 1:1 with those in backmatter
        needed_uuid_refs: Set[str] = self._find_all_uuid_refs(final_control_ids)

        # prune the list of resources to only those that are needed
        new_resources: Optional[List[common.Resource]] = []
        if self._catalog.back_matter is not None and self._catalog.back_matter.resources is not None:
            new_resources = [res for res in self._catalog.back_matter.resources if res.uuid in needed_uuid_refs]

        new_groups: Optional[List[cat.Group]] = list(group_dict.values())

        # should avoid empty lists so set to None if empty
        new_resources = none_if_empty(new_resources)
        new_groups = none_if_empty(new_groups)
        cat_controls = none_if_empty(cat_controls)

        new_cat = cat.Catalog(
            uuid=str(uuid4()),
            metadata=self._catalog.metadata,
            back_matter=common.BackMatter(resources=new_resources),
            controls=cat_controls,
            groups=new_groups
        )

        return new_cat

    def process(self, catalog_iter: Iterator[cat.Catalog]) -> Iterator[cat.Catalog]:
        """
        Prune the catalog based on the include rule in the import_.

        This only processes the one catalog yielded by the one import in this pipeline.
        It must yield in order to have the merge filter loop over available imported catalogs.
        """
        self._set_catalog(next(catalog_iter))
        logger.debug(f'prune yielding catalog {self._catalog.metadata.title} with import {self._import.href}')
        yield self._prune_catalog()
6cd20a68ab40e7cbe1f7b78275a1298bdd961248 | 935 | py | Python | selfdrive/loggerd/config.py | GratefulJinx77/openpilot-1 | f9af3ddf4c819127119943cafec6e46fe75a49af | [
"MIT"
] | 10 | 2021-10-09T15:32:24.000Z | 2022-02-14T08:31:56.000Z | selfdrive/loggerd/config.py | skyhanil1004/hanilpilot | 102738e4e0dd1702ab01b849e444bd1dfc2ddd1c | [
"MIT"
] | 51 | 2021-11-17T16:13:39.000Z | 2022-03-22T00:14:00.000Z | selfdrive/loggerd/config.py | skyhanil1004/hanilpilot | 102738e4e0dd1702ab01b849e444bd1dfc2ddd1c | [
"MIT"
] | 10 | 2021-11-29T20:45:56.000Z | 2022-03-28T20:15:41.000Z | import os
from pathlib import Path
from selfdrive.hardware import PC
# Root directory for recorded log segments; LOG_ROOT env var overrides (e.g. for tests),
# otherwise a per-user dir on PC or the fixed device path.
if os.environ.get('LOG_ROOT', False):
  ROOT = os.environ['LOG_ROOT']
elif PC:
  ROOT = os.path.join(str(Path.home()), ".comma", "media", "0", "realdata")
else:
  ROOT = '/data/media/0/realdata/'

CAMERA_FPS = 20  # camera frame rate, frames per second
SEGMENT_LENGTH = 60  # length of each recorded log segment, in seconds

STATS_DIR_FILE_LIMIT = 10000  # max number of files kept in the stats directory
STATS_SOCKET = "ipc:///tmp/stats"  # IPC endpoint for the stats service
if PC:
  STATS_DIR = os.path.join(str(Path.home()), ".comma", "stats")
else:
  STATS_DIR = "/data/stats/"
STATS_FLUSH_TIME_S = 60  # how often accumulated stats are flushed, in seconds
def get_available_percent(default=None):
  """Return the percentage of free disk space under ROOT, or `default` on failure.

  :param default: value returned when the filesystem cannot be queried.
  :return: float percentage in [0, 100], or `default`.
  """
  try:
    statvfs = os.statvfs(ROOT)
    # f_bavail: blocks available to unprivileged users; f_blocks: total blocks.
    available_percent = 100.0 * statvfs.f_bavail / statvfs.f_blocks
  except (OSError, ZeroDivisionError):
    # OSError: ROOT missing/inaccessible. ZeroDivisionError: some pseudo
    # filesystems report f_blocks == 0, which previously crashed this helper.
    available_percent = default
  return available_percent
def get_available_bytes(default=None):
  """Return the free disk space under ROOT in bytes, or `default` on failure."""
  try:
    fs_stats = os.statvfs(ROOT)
  except OSError:
    # ROOT missing or inaccessible - caller decides what the fallback means.
    return default
  # Bytes available to unprivileged processes: free blocks * fragment size.
  return fs_stats.f_bavail * fs_stats.f_frsize
| 22.261905 | 75 | 0.716578 |
fc3a097177104ca2d7b564dd204debd1b8401f4a | 10,284 | py | Python | virt/ansible-latest/lib/python2.7/site-packages/ansible/modules/crypto/acme/acme_certificate_revoke.py | lakhlaifi/RedHat-Ansible | 27c5077cced9d416081fcd5d69ea44bca0317fa4 | [
"Apache-2.0"
] | 1 | 2020-03-29T18:41:01.000Z | 2020-03-29T18:41:01.000Z | ansible/ansible/modules/crypto/acme/acme_certificate_revoke.py | SergeyCherepanov/ansible | 875711cd2fd6b783c812241c2ed7a954bf6f670f | [
"MIT"
] | 7 | 2020-09-07T17:27:56.000Z | 2022-03-02T06:25:46.000Z | ansible/ansible/modules/crypto/acme/acme_certificate_revoke.py | SergeyCherepanov/ansible | 875711cd2fd6b783c812241c2ed7a954bf6f670f | [
"MIT"
] | 1 | 2020-10-30T12:48:24.000Z | 2020-10-30T12:48:24.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016 Michael Gruener <michael.gruener@chaosmoon.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: acme_certificate_revoke
author: "Felix Fontein (@felixfontein)"
version_added: "2.7"
short_description: Revoke certificates with the ACME protocol
description:
- "Allows to revoke certificates issued by a CA supporting the
L(ACME protocol,https://tools.ietf.org/html/rfc8555),
such as L(Let's Encrypt,https://letsencrypt.org/)."
notes:
- "Exactly one of C(account_key_src), C(account_key_content),
C(private_key_src) or C(private_key_content) must be specified."
- "Trying to revoke an already revoked certificate
should result in an unchanged status, even if the revocation reason
was different than the one specified here. Also, depending on the
server, it can happen that some other error is returned if the
certificate has already been revoked."
seealso:
- name: The Let's Encrypt documentation
description: Documentation for the Let's Encrypt Certification Authority.
Provides useful information for example on rate limits.
link: https://letsencrypt.org/docs/
- name: Automatic Certificate Management Environment (ACME)
description: The specification of the ACME protocol (RFC 8555).
link: https://tools.ietf.org/html/rfc8555
- module: acme_inspect
description: Allows to debug problems.
extends_documentation_fragment:
- acme
options:
certificate:
description:
- "Path to the certificate to revoke."
type: path
required: yes
account_key_src:
description:
- "Path to a file containing the ACME account RSA or Elliptic Curve
key."
- "RSA keys can be created with C(openssl rsa ...). Elliptic curve keys can
be created with C(openssl ecparam -genkey ...). Any other tool creating
private keys in PEM format can be used as well."
- "Mutually exclusive with C(account_key_content)."
- "Required if C(account_key_content) is not used."
type: path
account_key_content:
description:
- "Content of the ACME account RSA or Elliptic Curve key."
- "Note that exactly one of C(account_key_src), C(account_key_content),
C(private_key_src) or C(private_key_content) must be specified."
- "I(Warning): the content will be written into a temporary file, which will
be deleted by Ansible when the module completes. Since this is an
important private key — it can be used to change the account key,
or to revoke your certificates without knowing their private keys
—, this might not be acceptable."
- "In case C(cryptography) is used, the content is not written into a
temporary file. It can still happen that it is written to disk by
Ansible in the process of moving the module with its argument to
the node where it is executed."
type: str
private_key_src:
description:
- "Path to the certificate's private key."
- "Note that exactly one of C(account_key_src), C(account_key_content),
C(private_key_src) or C(private_key_content) must be specified."
type: path
private_key_content:
description:
- "Content of the certificate's private key."
- "Note that exactly one of C(account_key_src), C(account_key_content),
C(private_key_src) or C(private_key_content) must be specified."
- "I(Warning): the content will be written into a temporary file, which will
be deleted by Ansible when the module completes. Since this is an
important private key — it can be used to change the account key,
or to revoke your certificates without knowing their private keys
—, this might not be acceptable."
- "In case C(cryptography) is used, the content is not written into a
temporary file. It can still happen that it is written to disk by
Ansible in the process of moving the module with its argument to
the node where it is executed."
type: str
revoke_reason:
description:
- "One of the revocation reasonCodes defined in
L(https://tools.ietf.org/html/rfc5280#section-5.3.1, Section 5.3.1 of RFC5280)."
- "Possible values are C(0) (unspecified), C(1) (keyCompromise),
C(2) (cACompromise), C(3) (affiliationChanged), C(4) (superseded),
C(5) (cessationOfOperation), C(6) (certificateHold),
C(8) (removeFromCRL), C(9) (privilegeWithdrawn),
C(10) (aACompromise)"
type: int
'''
EXAMPLES = '''
- name: Revoke certificate with account key
acme_certificate_revoke:
account_key_src: /etc/pki/cert/private/account.key
certificate: /etc/httpd/ssl/sample.com.crt
- name: Revoke certificate with certificate's private key
acme_certificate_revoke:
private_key_src: /etc/httpd/ssl/sample.com.key
certificate: /etc/httpd/ssl/sample.com.crt
'''
RETURN = '''
'''
from ansible.module_utils.acme import (
ModuleFailException, ACMEAccount, nopad_b64, pem_to_der, set_crypto_backend,
)
from ansible.module_utils.basic import AnsibleModule
def main():
    """Module entry point: revoke a certificate via the ACME protocol.

    The revocation request is signed either with the certificate's own
    private key (when ``private_key_src``/``private_key_content`` is given)
    or with the ACME account key. Exits via ``module.exit_json`` on success
    and ``module.fail_json`` (through ``ModuleFailException.do_fail``) on
    error.
    """
    module = AnsibleModule(
        argument_spec=dict(
            account_key_src=dict(type='path', aliases=['account_key']),
            account_key_content=dict(type='str', no_log=True),
            account_uri=dict(type='str'),
            acme_directory=dict(type='str', default='https://acme-staging.api.letsencrypt.org/directory'),
            acme_version=dict(type='int', default=1, choices=[1, 2]),
            validate_certs=dict(type='bool', default=True),
            private_key_src=dict(type='path'),
            private_key_content=dict(type='str', no_log=True),
            certificate=dict(type='path', required=True),
            revoke_reason=dict(type='int'),
            select_crypto_backend=dict(type='str', default='auto', choices=['auto', 'openssl', 'cryptography']),
        ),
        # Exactly one signing-key source must be provided (see DOCUMENTATION notes).
        required_one_of=(
            ['account_key_src', 'account_key_content', 'private_key_src', 'private_key_content'],
        ),
        mutually_exclusive=(
            ['account_key_src', 'account_key_content', 'private_key_src', 'private_key_content'],
        ),
        supports_check_mode=False,
    )
    set_crypto_backend(module)
    if not module.params.get('validate_certs'):
        module.warn(warning='Disabling certificate validation for communications with ACME endpoint. ' +
                            'This should only be done for testing against a local ACME server for ' +
                            'development purposes, but *never* for production purposes.')
    try:
        account = ACMEAccount(module)
        # Load certificate and base64url-encode its DER form for the payload
        certificate = pem_to_der(module.params.get('certificate'))
        certificate = nopad_b64(certificate)
        # Construct payload
        payload = {
            'certificate': certificate
        }
        if module.params.get('revoke_reason') is not None:
            payload['reason'] = module.params.get('revoke_reason')
        # Determine endpoint: ACME v1 and v2 (RFC 8555) name it differently
        if module.params.get('acme_version') == 1:
            endpoint = account.directory['revoke-cert']
            payload['resource'] = 'revoke-cert'
        else:
            endpoint = account.directory['revokeCert']
        # Get hold of private key (if available) and make sure it comes from disk
        private_key = module.params.get('private_key_src')
        private_key_content = module.params.get('private_key_content')
        # Revoke certificate
        if private_key or private_key_content:
            # Step 1: load and parse private key
            error, private_key_data = account.parse_key(private_key, private_key_content)
            if error:
                raise ModuleFailException("error while parsing private key: %s" % error)
            # Step 2: sign revokation request with private key
            jws_header = {
                "alg": private_key_data['alg'],
                "jwk": private_key_data['jwk'],
            }
            result, info = account.send_signed_request(endpoint, payload, key_data=private_key_data, jws_header=jws_header)
        else:
            # Step 1: get hold of account URI (never create a new account here)
            created, account_data = account.setup_account(allow_creation=False)
            if created:
                raise AssertionError('Unwanted account creation')
            if account_data is None:
                raise ModuleFailException(msg='Account does not exist or is deactivated.')
            # Step 2: sign revokation request with account key
            result, info = account.send_signed_request(endpoint, payload)
        if info['status'] != 200:
            already_revoked = False
            # Standarized error from draft 14 on (https://tools.ietf.org/html/rfc8555#section-7.6)
            if result.get('type') == 'urn:ietf:params:acme:error:alreadyRevoked':
                already_revoked = True
            else:
                # Hack for Boulder errors
                if module.params.get('acme_version') == 1:
                    error_type = 'urn:acme:error:malformed'
                else:
                    error_type = 'urn:ietf:params:acme:error:malformed'
                if result.get('type') == error_type and result.get('detail') == 'Certificate already revoked':
                    # Fallback: boulder returns this in case the certificate was already revoked.
                    already_revoked = True
            # If we know the certificate was already revoked, we don't fail,
            # but successfully terminate while indicating no change
            if already_revoked:
                module.exit_json(changed=False)
            raise ModuleFailException('Error revoking certificate: {0} {1}'.format(info['status'], result))
        module.exit_json(changed=True)
    except ModuleFailException as e:
        e.do_fail(module)
if __name__ == '__main__':
main()
| 44.908297 | 123 | 0.655873 |
b8ffd72827931da4a759f7940e79201fd6001c1d | 5,447 | py | Python | colour/adaptation/tests/test_cie1994.py | rift-labs-developer/colour | 15112dbe824aab0f21447e0db4a046a28a06f43a | [
"BSD-3-Clause"
] | 1,380 | 2015-01-10T12:30:33.000Z | 2022-03-30T10:19:57.000Z | colour/adaptation/tests/test_cie1994.py | rift-labs-developer/colour | 15112dbe824aab0f21447e0db4a046a28a06f43a | [
"BSD-3-Clause"
] | 638 | 2015-01-02T10:49:05.000Z | 2022-03-29T10:16:22.000Z | colour/adaptation/tests/test_cie1994.py | rift-labs-developer/colour | 15112dbe824aab0f21447e0db4a046a28a06f43a | [
"BSD-3-Clause"
] | 250 | 2015-01-21T15:27:19.000Z | 2022-03-30T10:23:58.000Z | # !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Defines the unit tests for the :mod:`colour.adaptation.cie1994` module.
"""
import numpy as np
import unittest
from itertools import permutations
from colour.adaptation import chromatic_adaptation_CIE1994
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = ['TestChromaticAdaptationCIE1994']
class TestChromaticAdaptationCIE1994(unittest.TestCase):
    """
    Defines :func:`colour.adaptation.cie1994.chromatic_adaptation_CIE1994`
    definition unit tests methods.
    """
    def test_chromatic_adaptation_CIE1994(self):
        """
        Tests :func:`colour.adaptation.cie1994.chromatic_adaptation_CIE1994`
        definition.
        """
        # Pinned regression values, checked to 7 decimal places.
        np.testing.assert_almost_equal(
            chromatic_adaptation_CIE1994(
                XYZ_1=np.array([28.00, 21.26, 5.27]),
                xy_o1=np.array([0.44760, 0.40740]),
                xy_o2=np.array([0.31270, 0.32900]),
                Y_o=20,
                E_o1=1000,
                E_o2=1000),
            np.array([24.03379521, 21.15621214, 17.64301199]),
            decimal=7)
        np.testing.assert_almost_equal(
            chromatic_adaptation_CIE1994(
                XYZ_1=np.array([21.77, 19.18, 16.73]),
                xy_o1=np.array([0.31270, 0.32900]),
                xy_o2=np.array([0.31270, 0.32900]),
                Y_o=50,
                E_o1=100,
                E_o2=1000),
            np.array([21.12891746, 19.42980532, 19.49577765]),
            decimal=7)
        np.testing.assert_almost_equal(
            chromatic_adaptation_CIE1994(
                XYZ_1=np.array([0.07818780, 0.06157201, 0.28099326]) * 100,
                xy_o1=np.array([0.31270, 0.32900]),
                xy_o2=np.array([0.37208, 0.37529]),
                Y_o=20,
                E_o1=100,
                E_o2=1000),
            np.array([9.14287406, 9.35843355, 15.95753504]),
            decimal=7)
    def test_n_dimensional_chromatic_adaptation_CIE1994(self):
        """
        Tests :func:`colour.adaptation.cie1994.chromatic_adaptation_CIE1994`
        definition n-dimensional arrays support.
        """
        # Reference result computed from 1-D inputs.
        XYZ_1 = np.array([28.00, 21.26, 5.27])
        xy_o1 = np.array([0.44760, 0.40740])
        xy_o2 = np.array([0.31270, 0.32900])
        Y_o = 20
        E_o1 = 1000
        E_o2 = 1000
        XYZ_2 = chromatic_adaptation_CIE1994(XYZ_1, xy_o1, xy_o2, Y_o, E_o1,
                                             E_o2)
        # 2-D: only the XYZ arrays are tiled; scalars broadcast.
        XYZ_1 = np.tile(XYZ_1, (6, 1))
        XYZ_2 = np.tile(XYZ_2, (6, 1))
        np.testing.assert_almost_equal(
            chromatic_adaptation_CIE1994(XYZ_1, xy_o1, xy_o2, Y_o, E_o1, E_o2),
            XYZ_2,
            decimal=7)
        # 2-D with every argument tiled to matching leading dimensions.
        xy_o1 = np.tile(xy_o1, (6, 1))
        xy_o2 = np.tile(xy_o2, (6, 1))
        Y_o = np.tile(Y_o, 6)
        E_o1 = np.tile(E_o1, 6)
        E_o2 = np.tile(E_o2, 6)
        np.testing.assert_almost_equal(
            chromatic_adaptation_CIE1994(XYZ_1, xy_o1, xy_o2, Y_o, E_o1, E_o2),
            XYZ_2,
            decimal=7)
        # 3-D: same data reshaped; result must follow the input shape.
        XYZ_1 = np.reshape(XYZ_1, (2, 3, 3))
        xy_o1 = np.reshape(xy_o1, (2, 3, 2))
        xy_o2 = np.reshape(xy_o2, (2, 3, 2))
        Y_o = np.reshape(Y_o, (2, 3))
        E_o1 = np.reshape(E_o1, (2, 3))
        E_o2 = np.reshape(E_o2, (2, 3))
        XYZ_2 = np.reshape(XYZ_2, (2, 3, 3))
        np.testing.assert_almost_equal(
            chromatic_adaptation_CIE1994(XYZ_1, xy_o1, xy_o2, Y_o, E_o1, E_o2),
            XYZ_2,
            decimal=7)
    def test_domain_range_scale_chromatic_adaptation_CIE1994(self):
        """
        Tests :func:`colour.adaptation.cie1994.chromatic_adaptation_CIE1994`
        definition domain and range scale support.
        """
        XYZ_1 = np.array([28.00, 21.26, 5.27])
        xy_o1 = np.array([0.44760, 0.40740])
        xy_o2 = np.array([0.31270, 0.32900])
        Y_o = 20
        E_o1 = 1000
        E_o2 = 1000
        XYZ_2 = chromatic_adaptation_CIE1994(XYZ_1, xy_o1, xy_o2, Y_o, E_o1,
                                             E_o2)
        # (scale name, multiplicative factor applied to XYZ and Y_o).
        d_r = (('reference', 1), (1, 0.01), (100, 1))
        for scale, factor in d_r:
            with domain_range_scale(scale):
                np.testing.assert_almost_equal(
                    chromatic_adaptation_CIE1994(XYZ_1 * factor, xy_o1, xy_o2,
                                                 Y_o * factor, E_o1, E_o2),
                    XYZ_2 * factor,
                    decimal=7)
    @ignore_numpy_errors
    def test_nan_chromatic_adaptation_CIE1994(self):
        """
        Tests :func:`colour.adaptation.cie1994.chromatic_adaptation_CIE1994`
        definition nan support.
        """
        # Exercise every 3-permutation of extreme/ill-defined values; the
        # call must not raise (numpy errors are suppressed by the decorator).
        cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        cases = set(permutations(cases * 3, r=3))
        for case in cases:
            XYZ_1 = np.array(case)
            xy_o1 = np.array(case[0:2])
            xy_o2 = np.array(case[0:2])
            Y_o = case[0]
            E_o1 = case[0]
            E_o2 = case[0]
            chromatic_adaptation_CIE1994(XYZ_1, xy_o1, xy_o2, Y_o, E_o1, E_o2)
if __name__ == '__main__':
unittest.main()
| 34.474684 | 79 | 0.562695 |
3c3a626496112fb58dd3a16ff08ee5763a6c5fe4 | 20,652 | py | Python | chrisapp/base.py | FNNDSC/chrisapp | dcc6e4493b1592f2dff09e316847fe7e02e7dd42 | [
"MIT"
] | 1 | 2018-03-23T19:26:53.000Z | 2018-03-23T19:26:53.000Z | chrisapp/base.py | FNNDSC/chrisapp | dcc6e4493b1592f2dff09e316847fe7e02e7dd42 | [
"MIT"
] | 9 | 2018-03-16T18:56:03.000Z | 2021-04-15T03:40:09.000Z | chrisapp/base.py | FNNDSC/chrisapp | dcc6e4493b1592f2dff09e316847fe7e02e7dd42 | [
"MIT"
] | 5 | 2018-03-03T00:09:19.000Z | 2020-10-19T16:33:01.000Z | """
/**
*
* sSSs .S S. .S_sSSs .S sSSs
* d%%SP .SS SS. .SS~YS%%b .SS d%%SP
* d%S' S%S S%S S%S `S%b S%S d%S'
* S%S S%S S%S S%S S%S S%S S%|
* S&S S%S SSSS%S S%S d* S S&S S&S
* S&S S&S SSS&S S&S .S* S S&S Y&Ss
* S&S S&S S&S S&S_sdSSS S&S `S&&S
* S&S S&S S&S S&S~YSY%b S&S `S*S
* S*b S*S S*S S*S `S%b S*S l*S
* S*S. S*S S*S S*S S%S S*S .S*P
* SSSbs S*S S*S S*S S&S S*S sSS*S
* YSSP SSS S*S S*S SSS S*S YSS'
* SP SP SP
* Y Y Y
*
* U L T R O N
*
* (c) 2016-2021 Fetal-Neonatal Neuroimaging & Developmental Science Center
* Boston Children's Hospital
*
* http://childrenshospital.org/FNNDSC/
* dev@babyMRI.org
*
*/
"""
import os
import sys
import shutil
from argparse import Action, ArgumentParser, ArgumentTypeError
import json
try:
# since python>=3.8, importlib.metadata is part of the standard library
from importlib.metadata import Distribution
except ModuleNotFoundError:
# for python<=3.7, a backport is used
from importlib_metadata import Distribution
class NoArgAction(Action):
    """
    Base class for argparse action classes that take no arguments (flags).

    Forces ``nargs=0`` so subclasses are triggered by the mere presence of
    the flag, without consuming any command-line values.
    """
    def __init__(self, *args, **kwargs):
        # Flag-style option: never consume a value from the command line.
        kwargs['nargs'] = 0
        super().__init__(*args, **kwargs)
class JsonAction(NoArgAction):
    """
    Custom action class to bypass required positional arguments when printing the app's
    JSON representation.
    """
    def __call__(self, parser, namespace, values, option_string=None):
        # 'parser' is the ChrisApp instance itself (ChrisApp extends
        # ArgumentParser), so it provides get_json_representation().
        print(json.dumps(parser.get_json_representation()))
        parser.exit()
class SaveJsonAction(Action):
    """
    Custom action class to bypass required positional arguments when saving the app's JSON
    representation to a file.
    """
    def __call__(self, parser, namespace, values, option_string=None):
        # 'values' holds the DIR argument passed to --savejson.
        parser.save_json_representation(values)
        parser.exit()
class ManPageAction(NoArgAction):
    """
    Custom action class to bypass required positional arguments when showing the app's
    man page.
    """
    def __call__(self, parser, namespace, values, option_string=None):
        # Delegates to the ChrisApp instance (subclasses override show_man_page).
        parser.show_man_page()
        parser.exit()
class AppMetaDataAction(NoArgAction):
    """
    Custom action class to bypass required positional arguments when printing the app's
    meta data.
    """
    def __call__(self, parser, namespace, values, option_string=None):
        # Prints every upper-case class attribute of the ChrisApp instance.
        parser.print_app_meta_data()
        parser.exit()
class BaseClassAttrEnforcer(type):
    """
    Meta class to enforce class variables in subclasses.

    If a subclass declares a ``PACKAGE`` attribute, meta data (authors,
    description, license, documentation, version, and the exec info) is
    auto-filled from the installed distribution's metadata and console
    scripts. Afterwards the required string attributes and
    ``OUTPUT_META_DICT`` are validated for every subclass.
    """
    def __init__(cls, name, bases, d):
        if 'PACKAGE' in d:
            # interrogate setup.py to automatically fill in some
            # class attributes for the subclass
            autofill = ['AUTHORS', 'DESCRIPTION', 'LICENSE', 'DOCUMENTATION', 'VERSION']
            for attr in autofill:
                if attr in d:
                    # auto-filled attributes must not be set manually
                    raise ValueError(
                        'Do not manually set value for '
                        f'"{attr}" when "PACKAGE={d["PACKAGE"]}" is declared')
            pkg = Distribution.from_name(d['PACKAGE'])
            setup = pkg.metadata
            # TITLE may still be overridden manually; default to the dist name
            if 'TITLE' not in d:
                cls.TITLE = setup['name']
                d['TITLE'] = cls.TITLE
            cls.AUTHORS = f'{setup["author"]} <{setup["author-email"]}>'
            d['AUTHORS'] = cls.AUTHORS
            cls.DESCRIPTION = setup['summary']
            d['DESCRIPTION'] = cls.DESCRIPTION
            cls.LICENSE = setup['license']
            d['LICENSE'] = cls.LICENSE
            cls.DOCUMENTATION = setup['home-page']
            d['DOCUMENTATION'] = cls.DOCUMENTATION
            cls.VERSION = setup['version']
            d['VERSION'] = cls.VERSION
            if 'SELFEXEC' not in d:
                # discover the executable from the console_scripts entry points
                eps = [ep for ep in pkg.entry_points if ep.group == 'console_scripts']
                if eps:
                    if len(eps) > 1:
                        # multiple console_scripts found but maybe
                        # they're just the same thing
                        different_scripts = [ep for ep in eps if ep.value != eps[0].value]
                        if different_scripts:
                            raise ValueError(
                                'SELFEXEC not defined and more than one '
                                'console_scripts found')
                    cls.SELFEXEC = eps[0].name
                    d['SELFEXEC'] = cls.SELFEXEC
            cls.EXECSHELL = sys.executable
            d['EXECSHELL'] = cls.EXECSHELL
            # resolve and sanity-check the script's location on disk
            script_location = shutil.which(cls.SELFEXEC)
            if not script_location:
                raise EnvironmentError(cls.SELFEXEC + ' not found in PATH - check your SELFEXEC')
            cls.SELFPATH = os.path.dirname(script_location)
            d['SELFPATH'] = cls.SELFPATH
            script_location = os.path.join(cls.SELFPATH, cls.SELFEXEC)
            if not os.path.isfile(script_location):
                raise EnvironmentError(script_location + ' not found - check your SELFPATH, SELFEXEC')
        # class variables to be enforced in the subclasses
        attrs = [
            'DESCRIPTION', 'TYPE', 'TITLE', 'LICENSE', 'AUTHORS', 'VERSION',
            'SELFPATH', 'SELFEXEC', 'EXECSHELL'
        ]
        for attr in attrs:
            if attr not in d:
                raise ValueError(f"Class {name} doesn't define {attr} class variable")
            if type(d[attr]) is not str:
                # BUGFIX: report the type of the attribute's *value*
                # (previously printed type(attr), which is always str)
                raise ValueError(f'{attr} ({type(d[attr])}) must be a string')
        if 'OUTPUT_META_DICT' not in d:
            raise ValueError(f"Class {name} doesn't define OUTPUT_META_DICT")
        if type(d['OUTPUT_META_DICT']) is not dict:
            raise ValueError('OUTPUT_META_DICT must be dict')
        type.__init__(cls, name, bases, d)
class ChrisApp(ArgumentParser, metaclass=BaseClassAttrEnforcer):
    """
    The superclass for all ChRIS plugin apps.
    Meta-information about the subclass must be given as class attributes.
    This is enforced by a metaclass.
    Subclasses should manually define
    AUTHORS, TITLE, DESCRIPTION, LICENSE, DOCUMENTATION, VERSION,
    SELFPATH, SELFEXEC, EXECSHELL
    Or, the metaclass can interrogate setup.py to discover the information
    automatically. Enable this feature by setting
    PACKAGE = __package__
    The following class variables *must* be supplied, as they cannot
    be discovered from setup.py
    TYPE, OUTPUT_META_DICT
    """
    AUTHORS = 'FNNDSC (dev@babyMRI.org)'
    TITLE = ''
    CATEGORY = ''
    # plugin type; in this class 'ds' plugins take an input directory and
    # 'ts' plugins get extra topological options (see __init__)
    TYPE = 'ds'
    ICON = ''
    SELFPATH = ''
    SELFEXEC = ''
    EXECSHELL = ''
    DESCRIPTION = ''
    DOCUMENTATION = ''
    LICENSE = ''
    VERSION = ''
    MAX_NUMBER_OF_WORKERS = 1
    """Integer value"""
    MIN_NUMBER_OF_WORKERS = 1
    """Integer value"""
    MAX_CPU_LIMIT = ''
    """millicore value as string, e.g. '2000m'"""
    MIN_CPU_LIMIT = ''
    """millicore value as string, e.g. '2000m'"""
    MAX_MEMORY_LIMIT = ''
    """string, e.g. '1Gi', '2000Mi'"""
    MIN_MEMORY_LIMIT = ''
    """string, e.g. '1Gi', '2000Mi'"""
    MIN_GPU_LIMIT = 0
    """number of GPUs"""
    MAX_GPU_LIMIT = 0
    """number of GPUs"""
    OUTPUT_META_DICT = {}
    def __init__(self):
        """
        The constructor of this app.

        Registers the standard flags shared by all ChRIS plugins (--json,
        --savejson, --inputmeta, --saveinputmeta, --saveoutputmeta,
        --version, --meta, --man), the positional input/output directory
        arguments and, for 'ts' plugins, the topological options; finally
        calls define_parameters() so subclasses can add their own.
        """
        ArgumentParser.__init__(self, description=self.DESCRIPTION)
        # the custom parameter list
        self._parameters = []
        ArgumentParser.add_argument(self, '--json', action=JsonAction, dest='json',
                                    default=False,
                                    help='show json representation of app and exit')
        ArgumentParser.add_argument(self, '--savejson', action=SaveJsonAction,
                                    type=ChrisApp.path, dest='savejson', metavar='DIR',
                                    help='save json representation file to DIR and exit')
        ArgumentParser.add_argument(self, '--inputmeta', action='store', dest='inputmeta',
                                    help='meta data file containing the arguments passed '
                                         'to this app')
        ArgumentParser.add_argument(self, '--saveinputmeta', action='store_true',
                                    dest='saveinputmeta',
                                    help='save arguments to a JSON file')
        ArgumentParser.add_argument(self, '--saveoutputmeta', action='store_true',
                                    dest='saveoutputmeta',
                                    help='save output meta data to a JSON file')
        ArgumentParser.add_argument(self, '--version', action='version',
                                    version=self.get_version())
        ArgumentParser.add_argument(self, '--meta', action=AppMetaDataAction,
                                    dest='meta', default=False,
                                    help='print app meta data and exit')
        ArgumentParser.add_argument(self, '--man', action=ManPageAction,
                                    dest='man', default=False,
                                    help="show the app's man page and exit")
        # 'ds' plugins require an input directory positional argument
        if self.TYPE == 'ds':
            ArgumentParser.add_argument(self, 'inputdir', action='store', type=str,
                                        help='directory containing the input files')
        # all plugins require an output directory positional argument
        ArgumentParser.add_argument(self, 'outputdir', action='store', type=str,
                                    help='directory containing the output files/folders')
        # topological plugin's especial parameters
        if self.TYPE == 'ts':
            self.add_argument('--plugininstances', dest='plugininstances', type=str,
                              optional=True, default='',
                              help='string representing a comma-separated list of plugin '
                                   'instance ids')
            self.add_argument('-f', '--filter', dest='filter', type=str, optional=True,
                              default='',
                              help="string representing a comma-separated list of "
                                   "regular expressions to filter each of the input "
                                   "plugin instances' output dirs")
            self.add_argument('-g', '--groupByInstance', dest='groupByInstance',
                              type=bool, optional=True, default=False,
                              help="if set then an output directory is created for each "
                                   "input plugin instance within this plugin app's "
                                   "output path")
        self.define_parameters()
    @staticmethod
    def path(string):
        """
        Define the 'path' data type that can be used by apps.
        It's a string representing a list of paths separated by commas.

        Raises ArgumentTypeError if any of the paths does not exist.
        """
        path_list = [s.strip() for s in string.split(',')]
        for path in path_list:
            if not os.path.exists(path):
                raise ArgumentTypeError("Path %s not found!" % path)
        return ','.join(path_list)
    @staticmethod
    def unextpath(string):
        """
        Define the 'unextpath' data type that can be used by apps.
        It's a string representing a list of paths separated by commas. Unlike the
        'path' data type this type means that files won't be extracted from object
        storage.
        """
        # No existence check here: the paths are not expected on local disk.
        path_list = [s.strip() for s in string.split(',')]
        return ','.join(path_list)
    def show_man_page(self):
        """
        Show the app's man page (abstract method in this class).
        """
        pass
    def define_parameters(self):
        """
        Define the parameters used by this app (abstract method in this class).
        """
        raise NotImplementedError("ChrisApp.define_parameters(self)")
    def run(self, options):
        """
        Execute this app (abstract method in this class).
        """
        raise NotImplementedError("ChrisApp.run(self, options)")
    def add_argument(self, *args, **kwargs):
        """
        Overriden to add a new parameter to this app.

        Accepts the usual ArgumentParser options plus the custom 'optional'
        (required) and 'ui_exposed' keyword arguments; the parameter is also
        recorded internally so it appears in the app's JSON representation.
        """
        if not (('action' in kwargs) and (kwargs['action'] == 'help')):
            self.validate_argument_options(**kwargs)
            # set required, default, ui_exposed and help values
            optional = kwargs['optional']
            if 'required' not in kwargs:
                kwargs['required'] = not optional
            default = kwargs['default'] if 'default' in kwargs else None
            param_help = kwargs['help'] if 'help' in kwargs else ''
            ui_exposed = kwargs['ui_exposed'] if 'ui_exposed' in kwargs else True
            # set the ArgumentParser's action
            param_type = kwargs['type']
            action = 'store'
            if param_type == bool:
                action = 'store_false' if default else 'store_true'
                # 'default' and 'type' options not allowed for boolean actions
                if 'default' in kwargs:
                    del kwargs['default']
                del kwargs['type']
            kwargs['action'] = action
            # set the flag
            short_flag = flag = args[0]
            if len(args) > 1:
                if args[0].startswith('--'):
                    short_flag = args[1]
                else:
                    flag = args[1]
            # store the parameter internally
            # use param_type.__name__ instead of param_type to enable json serialization
            name = kwargs['dest']
            param = {'name': name, 'type': param_type.__name__, 'optional': optional,
                     'flag': flag, 'short_flag': short_flag, 'action': action,
                     'help': param_help, 'default': default, 'ui_exposed': ui_exposed}
            self._parameters.append(param)
            # remove custom options before calling superclass method
            del kwargs['optional']
            if 'ui_exposed' in kwargs:
                del kwargs['ui_exposed']
        ArgumentParser.add_argument(self, *args, **kwargs)
    def validate_argument_options(self, **kwargs):
        """
        Validate argument's options passed as kwargs.

        Raises KeyError or ValueError when the custom option contract
        ('optional', 'type', 'default', 'ui_exposed') is violated.
        """
        # make sure required parameter options are defined
        try:
            name = kwargs['dest']
            optional = kwargs['optional']
            param_type = kwargs['type']
        except KeyError as e:
            raise KeyError("%s option required." % e)
        # 'optional' (our custom flag) and 'required' (from argparse) should agree
        if ('required' in kwargs) and (kwargs['required'] == optional):
            raise KeyError("Values for 'required' and 'optional' contradict for "
                           "parameter %s." % name)
        if param_type not in (str, int, float, bool, ChrisApp.path,
                              ChrisApp.unextpath):
            raise ValueError("Unsupported type: '%s'" % param_type)
        if optional:
            if param_type in (ChrisApp.path, ChrisApp.unextpath):
                raise ValueError("Parameters of type 'path' or 'unextpath' cannot "
                                 "be optional.")
            if 'default' not in kwargs:
                raise KeyError("A default value is required for optional parameter"
                               " %s." % name)
            if kwargs['default'] is None:
                raise ValueError("Default value cannot be 'None' for optional "
                                 "parameter %s." % name)
        ui_exposed = kwargs['ui_exposed'] if 'ui_exposed' in kwargs else True
        if not ui_exposed and not optional:
            raise ValueError("Parameter %s is not optional and therefore must be "
                             "exposed to the UI." % name)
    def get_json_representation(self):
        """
        Return a JSON object with a representation of this app (type and parameters).
        """
        representation = {'type': self.TYPE,
                          'parameters': self._parameters,
                          'icon': self.ICON,
                          'authors': self.AUTHORS,
                          'title': self.TITLE,
                          'category': self.CATEGORY,
                          'description': self.DESCRIPTION,
                          'documentation': self.DOCUMENTATION,
                          'license': self.LICENSE,
                          'version': self.VERSION,
                          'selfpath': self.SELFPATH,
                          'selfexec': self.SELFEXEC,
                          'execshell': self.EXECSHELL,
                          'max_number_of_workers': self.MAX_NUMBER_OF_WORKERS,
                          'min_number_of_workers': self.MIN_NUMBER_OF_WORKERS,
                          'max_memory_limit': self.MAX_MEMORY_LIMIT,
                          'min_memory_limit': self.MIN_MEMORY_LIMIT,
                          'max_cpu_limit': self.MAX_CPU_LIMIT,
                          'min_cpu_limit': self.MIN_CPU_LIMIT,
                          'max_gpu_limit':self.MAX_GPU_LIMIT,
                          'min_gpu_limit': self.MIN_GPU_LIMIT
                          }
        return representation
    def save_json_representation(self, dir_path):
        """
        Save the app's JSON representation object to a JSON file.

        The file is named after the concrete app class, e.g. MyApp.json.
        """
        file_name = self.__class__.__name__+ '.json'
        file_path = os.path.join(dir_path, file_name)
        with open(file_path, 'w') as outfile:
            json.dump(self.get_json_representation(), outfile)
    def launch(self, args=None):
        """
        Trigger the parsing of arguments.

        Parses the command line (or 'args'), optionally saves/loads the
        input meta data, runs the app and optionally saves the output meta
        data afterwards.
        """
        self.options = self.parse_args(args)
        if self.options.saveinputmeta:
            # save original input options
            self.save_input_meta()
        if self.options.inputmeta:
            # read new options from JSON file
            self.options = self.get_options_from_file(self.options.inputmeta)
        self.run(self.options)
        # if required save meta data for the output after running the plugin app
        if self.options.saveoutputmeta:
            self.save_output_meta()
    def get_options_from_file(self, file_path):
        """
        Return the options parsed from a JSON file.
        """
        # read options JSON file
        with open(file_path) as options_file:
            options_dict = json.load(options_file)
        # NOTE(review): values are handed straight to parse_args(), which
        # expects strings -- confirm the JSON file only stores string values.
        options = []
        for opt_name in options_dict:
            options.append(opt_name)
            options.append(options_dict[opt_name])
        return self.parse_args(options)
    def save_input_meta(self):
        """
        Save the input meta data (options passed to the app) to a JSON file.
        """
        options = self.options
        file_path = os.path.join(options.outputdir, 'input.meta.json')
        with open(file_path, 'w') as outfile:
            json.dump(vars(options), outfile)
    def save_output_meta(self):
        """
        Save descriptive output meta data to a JSON file.
        """
        options = self.options
        file_path = os.path.join(options.outputdir, 'output.meta.json')
        with open(file_path, 'w') as outfile:
            json.dump(self.OUTPUT_META_DICT, outfile)
    def load_output_meta(self):
        """
        Load descriptive output meta data from a JSON file in the input directory.
        """
        options = self.options
        file_path = os.path.join(options.inputdir, 'output.meta.json')
        with open(file_path) as infile:
            return json.load(infile)
    def get_version(self) -> str:
        """
        Return the app's version.
        """
        return self.VERSION
    def print_app_meta_data(self):
        """
        Print the app's meta data.

        Every upper-case attribute of the class is considered meta data.
        """
        meta_data = dir(self)
        class_var = [x for x in meta_data if x.isupper()]
        for str_var in class_var:
            str_val = getattr(self, str_var)
            print("%20s: %s" % (str_var, str_val))
| 40.178988 | 105 | 0.545661 |
3bc203e3fb2cb26e26026816ba058d013d2d350b | 5,791 | py | Python | tests/test_swf.py | clbarnes/imageio | 73dadda9d93f9e2c4724095f91a2acd4ecd13c4a | [
"BSD-2-Clause"
] | 1 | 2022-03-24T22:40:13.000Z | 2022-03-24T22:40:13.000Z | tests/test_swf.py | clbarnes/imageio | 73dadda9d93f9e2c4724095f91a2acd4ecd13c4a | [
"BSD-2-Clause"
] | null | null | null | tests/test_swf.py | clbarnes/imageio | 73dadda9d93f9e2c4724095f91a2acd4ecd13c4a | [
"BSD-2-Clause"
] | 1 | 2018-12-29T22:19:50.000Z | 2018-12-29T22:19:50.000Z | """ Tests for the shockwave flash plugin
"""
import os
import numpy as np
from pytest import raises
from imageio.testing import run_tests_if_main, get_test_dir, need_internet
import imageio
from imageio import core
from imageio.core import get_remote_file
test_dir = get_test_dir()
def mean(x):
    """Return the mean of array ``x``.

    Computed as ``x.sum() / x.size`` instead of ``x.mean()`` for pypy
    compatibility (PEP 8 also discourages assigning a lambda to a name).
    """
    return x.sum() / x.size
# We use need_internet; don't ship the swf image: its rather big and a
# rather specific format
def test_format_selection():
    """The SWF format is selected by name, extension, reader and writer."""
    need_internet()

    stent_path = get_remote_file('images/stent.swf', test_dir)
    out_path = stent_path[:-4] + '.out.swf'

    swf_format = imageio.formats['swf']
    assert swf_format.name == 'SWF'
    assert imageio.formats['.swf'] is swf_format

    # Reader and writer objects must both resolve to the same format
    assert imageio.read(stent_path).format is swf_format
    assert imageio.save(out_path).format is swf_format
def test_reading_saving():
    """Round-trip stent.swf through the SWF plugin.

    Covers reading/seeking, loop mode, (ignored) meta data, writing plain,
    HTML-embedded and compressed output, and the conventional low-level
    read/write API. SWF is lossless, so round-trips are compared exactly.
    """
    need_internet()
    fname1 = get_remote_file('images/stent.swf', test_dir)
    fname2 = fname1[:-4] + '.out.swf'
    fname3 = fname1[:-4] + '.compressed.swf'
    fname4 = fname1[:-4] + '.out2.swf'
    # Read
    R = imageio.read(fname1)
    assert len(R) == 10
    assert R.get_meta_data() == {}  # always empty dict
    ims1 = []
    for im in R:
        assert im.shape == (657, 451, 4)
        assert mean(im) > 0
        ims1.append(im)
    # Seek
    assert (R.get_data(3) == ims1[3]).all()
    # Fails
    raises(IndexError, R.get_data, -1)  # No negative index
    raises(IndexError, R.get_data, 10)  # Out of bounds
    R.close()
    # Test loop: with loop=True the index wraps around
    R = imageio.read(fname1, loop=True)
    assert (R.get_data(10) == ims1[0]).all()
    # setting meta data is ignored
    W = imageio.save(fname2)
    W.set_meta_data({'foo': 3})
    W.close()
    # Write and re-read, now without loop, and with html page
    imageio.mimsave(fname2, ims1, loop=False, html=True)
    ims2 = imageio.mimread(fname2)
    # Check images. We can expect exact match, since
    # SWF is lossless.
    assert len(ims1) == len(ims2)
    for im1, im2 in zip(ims1, ims2):
        assert (im1 == im2).all()
    # Test compressed
    imageio.mimsave(fname3, ims2, compress=True)
    ims3 = imageio.mimread(fname3)
    assert len(ims1) == len(ims3)
    for im1, im3 in zip(ims1, ims3):
        assert (im1 == im3).all()
    # Test conventional, Bonus, we don't officially support this.
    _swf = imageio.plugins.swf.load_lib()
    _swf.write_swf(fname4, ims1)
    ims4 = _swf.read_swf(fname4)
    assert len(ims1) == len(ims4)
    for im1, im4 in zip(ims1, ims4):
        assert (im1 == im4).all()
    # We want to manually validate that this file plays in 3d party tools
    # So we write a small HTML5 doc that we can load
    html = """<!DOCTYPE html>
            <html>
            <body>
            Original:
            <embed src="%s">
            <br ><br >
            Written:
            <embed src="%s">
            <br ><br >
            Compressed:
            <embed src="%s">
            <br ><br >
            Written 2:
            <embed src="%s">
            </body>
            </html>
            """ % (fname1, fname2, fname3, fname4)
    with open(os.path.join(test_dir, 'test_swf.html'), 'wb') as f:
        for line in html.splitlines():
            f.write(line.strip().encode('utf-8') + b'\n')
def test_read_from_url():
    """Reading the SWF sample straight from a URL yields all ten frames."""
    need_internet()

    base = 'https://raw.githubusercontent.com/imageio/imageio-binaries/master/'
    frames = imageio.mimread(base + 'images/stent.swf')
    assert len(frames) == 10
def test_invalid():
    """Empty and bogus files must be rejected by detection and reading."""
    need_internet()

    swf_path = get_remote_file('images/stent.swf', test_dir)
    broken_path = swf_path[:-4] + '.invalid.swf'

    def check_rejected(path):
        # Neither format detection nor the SWF reader may accept this file
        assert not imageio.formats.search_read_format(core.Request(path, 'rI'))
        raises(IOError, imageio.mimread, path, 'swf')

    # Completely empty file
    open(broken_path, 'wb').close()
    check_rejected(broken_path)

    # File filled with bogus bytes
    with open(broken_path, 'wb') as fh:
        fh.write(b'x' * 100)
    check_rejected(broken_path)
def test_lowlevel():
    """Exercise internals of the SWF helper lib not covered via the plugin."""
    need_internet()
    # Some tests from low level implementation that is not covered
    # by using the plugin itself.
    _swf = imageio.plugins.swf.load_lib()
    tag = _swf.Tag()
    # The base Tag is abstract: processing must raise
    raises(NotImplementedError, tag.process_tag)
    # Matrix records: default is the 8-zero bit string; each transform
    # component just needs to produce a truthy (non-empty) record
    assert tag.make_matrix_record() == '00000000'
    assert tag.make_matrix_record(scale_xy=(1, 1))
    assert tag.make_matrix_record(rot_xy=(1, 1))
    assert tag.make_matrix_record(trans_xy=(1, 1))
    # Background color accepts separate components or a single tuple
    SetBackgroundTag = _swf.SetBackgroundTag
    assert SetBackgroundTag(1, 2, 3).rgb == SetBackgroundTag((1, 2, 3)).rgb
    tag = _swf.ShapeTag(0, (0, 0), (1, 1))
    assert tag.make_style_change_record(1, 1, (1, 1))
    assert tag.make_style_change_record()
    # Edge records also accept separate components or a single tuple
    assert (tag.make_straight_edge_record(2, 3).tobytes() ==
            tag.make_straight_edge_record((2, 3)).tobytes())
def test_types():
    """Writing any numeric dtype to SWF must round-trip to uint8 RGBA frames."""
    need_internet()
    fname1 = get_remote_file('images/stent.swf', test_dir)
    fname2 = fname1[:-4] + '.out3.swf'
    for dtype in [np.uint8, np.uint16, np.uint32, np.uint64,
                  np.int8, np.int16, np.int32, np.int64,
                  np.float16, np.float32, np.float64]:
        for shape in [(100, 1), (100, 3)]:
            # Repeats an identity matrix, just for testing
            # (result is a (100, 100, shape[1]) array of the given dtype).
            im1 = np.dstack((np.identity(shape[0], dtype=dtype), )*shape[1])
            imageio.mimsave(fname2, [im1], 'swf')
            im2 = imageio.mimread(fname2, 'swf')[0]
            # SWF always comes back as 8-bit RGBA regardless of input dtype.
            assert im2.shape == (100, 100, 4)
            assert im2.dtype == np.uint8
            # NOTE(review): len(shape) == 3 can never be true for the 2-tuples
            # iterated above, so this pixel-equality check is dead code —
            # possibly shape[1] == 3 was intended. TODO confirm before changing.
            if len(shape) == 3 and dtype == np.uint8:
                assert (im1[:, :, 0] == im2[:, :, 0]).all()
# Run all test_* functions in this module when executed as a script.
run_tests_if_main()
| 28.668317 | 79 | 0.594371 |
e09d660dedaaeb950a7a12d73b8431768c2a5ac8 | 1,785 | py | Python | Quest-3/Challenge-3/main.py | MatejMecka/Stellar-Quest-Python | c6911c6f144e878af354ca0af121372cb3824b8a | [
"MIT"
] | 11 | 2020-10-22T10:49:07.000Z | 2022-03-18T21:09:02.000Z | Quest-3/Challenge-3/main.py | MatejMecka/Stellar-Quest-Python | c6911c6f144e878af354ca0af121372cb3824b8a | [
"MIT"
] | null | null | null | Quest-3/Challenge-3/main.py | MatejMecka/Stellar-Quest-Python | c6911c6f144e878af354ca0af121372cb3824b8a | [
"MIT"
] | 5 | 2021-01-06T12:42:41.000Z | 2021-11-18T05:05:46.000Z | """
Challenge 3: Submit a hash signed transaction
"""
from stellar_sdk import Server, Keypair, TransactionBuilder, Network
import requests
import hashlib
import binascii

# 1. Load keys for the quest account.
server = Server("https://horizon-testnet.stellar.org")
stellar_quest_keypair = Keypair.from_secret("Shhhhhh")
quest_account_pub_key = stellar_quest_keypair.public_key
quest_account_priv_key = stellar_quest_keypair.secret

# The hash(x) preimage must be bytes: hashlib.sha256() raises TypeError for a
# str argument in Python 3, and Transaction.sign_hashx() below also expects
# the raw preimage bytes.
key_value = b"Figure out the hint ;)"
h = hashlib.sha256(key_value)
hash_X = h.hexdigest()

# 2. Fund the account via friendbot (testnet only).
print("Funding account...")
url = 'https://friendbot.stellar.org'
response = requests.get(url, params={'addr': stellar_quest_keypair.public_key})
print(f"Friendbot responded with {response}")

print("Building Transaction...")
base_fee = server.fetch_base_fee()
stellar_account = server.load_account(quest_account_pub_key)

# 3. Add a HashX signer with weight 2 to the account, authorised by the
# account's own secret key.
transaction = (
    TransactionBuilder(
        source_account=stellar_account,
        network_passphrase=Network.TESTNET_NETWORK_PASSPHRASE,
        base_fee=base_fee,
    ).append_hashx_signer(
        hash_X,
        2
    )
)
transaction = transaction.build()
print('Signing Transaction...')
transaction.sign(quest_account_priv_key)
response = server.submit_transaction(transaction)
print(f"This is the response from adding the signer: {response}")

# 4. Remove the HashX signer again (weight 0 deletes it), this time signing
# with the preimage itself to prove knowledge of x. The same Account object
# is reused; TransactionBuilder.build() already bumped its sequence number.
transaction = (
    TransactionBuilder(
        source_account=stellar_account,
        network_passphrase=Network.TESTNET_NETWORK_PASSPHRASE,
        base_fee=base_fee,
    ).append_hashx_signer(
        hash_X,
        0
    )
)
transaction = transaction.build()
print('Signing Transaction...')
transaction.sign_hashx(key_value)
response = server.submit_transaction(transaction)
print(f"This is the final response: {response}")
63739a274111a2f58d6f946bd1c35919ec4a22dd | 489 | py | Python | testing/registry/thread_counter/lambda_func.py | open-lambda/s19-lambda | 0c210d5fcb50e87ac8a037c81092f7dd64b5c022 | [
"Apache-2.0"
] | 3 | 2019-04-09T20:52:48.000Z | 2020-12-07T01:52:22.000Z | testing/registry/thread_counter/lambda_func.py | open-lambda/s19-lambda | 0c210d5fcb50e87ac8a037c81092f7dd64b5c022 | [
"Apache-2.0"
] | 3 | 2019-03-27T08:41:31.000Z | 2019-06-04T02:06:09.000Z | testing/registry/thread_counter/lambda_func.py | open-lambda/s19-lambda | 0c210d5fcb50e87ac8a037c81092f7dd64b5c022 | [
"Apache-2.0"
] | 1 | 2019-03-28T03:26:10.000Z | 2019-03-28T03:26:10.000Z | from threading import *
import time, sys
# Module-level handle to the singleton background thread (None until started).
t = None


def worker():
    # Background loop: print and flush an ever-increasing counter at ~1 kHz.
    # Never returns; lives for the remainder of the process.
    counter = 0
    while True:
        print 'counter=%d' % counter
        sys.stdout.flush()
        counter += 1
        time.sleep(0.001)
def handler(event):
    # Lambda entry point: lazily start the background worker thread on the
    # first invocation, then report success. The ``event`` payload is unused.
    # NOTE(review): main() below invokes this with two arguments, which does
    # not match this one-argument signature — confirm the intended signature.
    global t
    if t == None:
        print 'Init worker thread'
        t = Thread(target=worker)
        t.start()
        # Give the worker a moment to start before replying.
        time.sleep(0.1)
    return 'Background thread started'
def main():
print handler(None, None)
# Allow the lambda to be exercised directly from the command line.
if __name__ == '__main__':
    main()
| 17.464286 | 38 | 0.576687 |
bdb96a0464fe81a4e00a569b5f0fa318f48af51d | 11,520 | py | Python | isi_sdk_8_2_1/isi_sdk_8_2_1/models/node_state_readonly_node.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_2_1/isi_sdk_8_2_1/models/node_state_readonly_node.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_2_1/isi_sdk_8_2_1/models/node_state_readonly_node.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 8
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class NodeStateReadonlyNode(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared swagger type for each model attribute (consumed by to_dict()).
    swagger_types = {
        'allowed': 'bool',
        'enabled': 'bool',
        'error': 'str',
        'id': 'int',
        'lnn': 'int',
        'mode': 'bool',
        'status': 'str',
        'valid': 'bool',
        'value': 'int'
    }

    # Maps each python attribute name to the JSON key used on the wire.
    attribute_map = {
        'allowed': 'allowed',
        'enabled': 'enabled',
        'error': 'error',
        'id': 'id',
        'lnn': 'lnn',
        'mode': 'mode',
        'status': 'status',
        'valid': 'valid',
        'value': 'value'
    }

    def __init__(self, allowed=None, enabled=None, error=None, id=None, lnn=None, mode=None, status=None, valid=None, value=None):  # noqa: E501
        """NodeStateReadonlyNode - a model defined in Swagger"""  # noqa: E501
        self._allowed = None
        self._enabled = None
        self._error = None
        self._id = None
        self._lnn = None
        self._mode = None
        self._status = None
        self._valid = None
        self._value = None
        self.discriminator = None
        # Route only explicitly-supplied values through the property setters
        # so their validation runs; omitted fields stay None.
        if allowed is not None:
            self.allowed = allowed
        if enabled is not None:
            self.enabled = enabled
        if error is not None:
            self.error = error
        if id is not None:
            self.id = id
        if lnn is not None:
            self.lnn = lnn
        if mode is not None:
            self.mode = mode
        if status is not None:
            self.status = status
        if valid is not None:
            self.valid = valid
        if value is not None:
            self.value = value

    @property
    def allowed(self):
        """Gets the allowed of this NodeStateReadonlyNode.  # noqa: E501

        The current read-only mode allowed status for the node.  # noqa: E501

        :return: The allowed of this NodeStateReadonlyNode.  # noqa: E501
        :rtype: bool
        """
        return self._allowed

    @allowed.setter
    def allowed(self, allowed):
        """Sets the allowed of this NodeStateReadonlyNode.

        The current read-only mode allowed status for the node.  # noqa: E501

        :param allowed: The allowed of this NodeStateReadonlyNode.  # noqa: E501
        :type: bool
        """
        self._allowed = allowed

    @property
    def enabled(self):
        """Gets the enabled of this NodeStateReadonlyNode.  # noqa: E501

        The current read-only user mode status for the node. NOTE: If read-only mode is currently disallowed for this node, it will remain read/write until read-only mode is allowed again. This value only sets or clears any user-specified requests for read-only mode. If the node has been placed into read-only mode by the system, it will remain in read-only mode until the system conditions which triggered read-only mode have cleared.  # noqa: E501

        :return: The enabled of this NodeStateReadonlyNode.  # noqa: E501
        :rtype: bool
        """
        return self._enabled

    @enabled.setter
    def enabled(self, enabled):
        """Sets the enabled of this NodeStateReadonlyNode.

        The current read-only user mode status for the node. NOTE: If read-only mode is currently disallowed for this node, it will remain read/write until read-only mode is allowed again. This value only sets or clears any user-specified requests for read-only mode. If the node has been placed into read-only mode by the system, it will remain in read-only mode until the system conditions which triggered read-only mode have cleared.  # noqa: E501

        :param enabled: The enabled of this NodeStateReadonlyNode.  # noqa: E501
        :type: bool
        """
        self._enabled = enabled

    @property
    def error(self):
        """Gets the error of this NodeStateReadonlyNode.  # noqa: E501

        Error message, if the HTTP status returned from this node was not 200.  # noqa: E501

        :return: The error of this NodeStateReadonlyNode.  # noqa: E501
        :rtype: str
        """
        return self._error

    @error.setter
    def error(self, error):
        """Sets the error of this NodeStateReadonlyNode.

        Error message, if the HTTP status returned from this node was not 200.  # noqa: E501

        :param error: The error of this NodeStateReadonlyNode.  # noqa: E501
        :type: str
        """
        if error is not None and len(error) > 8192:
            raise ValueError("Invalid value for `error`, length must be less than or equal to `8192`")  # noqa: E501
        # NOTE(review): this check can never fire (len() is always >= 0);
        # generated from a minLength of 0 in the API definition.
        if error is not None and len(error) < 0:
            raise ValueError("Invalid value for `error`, length must be greater than or equal to `0`")  # noqa: E501
        self._error = error

    @property
    def id(self):
        """Gets the id of this NodeStateReadonlyNode.  # noqa: E501

        Node ID (Device Number) of a node.  # noqa: E501

        :return: The id of this NodeStateReadonlyNode.  # noqa: E501
        :rtype: int
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this NodeStateReadonlyNode.

        Node ID (Device Number) of a node.  # noqa: E501

        :param id: The id of this NodeStateReadonlyNode.  # noqa: E501
        :type: int
        """
        if id is not None and id > 2147483647:  # noqa: E501
            raise ValueError("Invalid value for `id`, must be a value less than or equal to `2147483647`")  # noqa: E501
        if id is not None and id < 0:  # noqa: E501
            raise ValueError("Invalid value for `id`, must be a value greater than or equal to `0`")  # noqa: E501
        self._id = id

    @property
    def lnn(self):
        """Gets the lnn of this NodeStateReadonlyNode.  # noqa: E501

        Logical Node Number (LNN) of a node.  # noqa: E501

        :return: The lnn of this NodeStateReadonlyNode.  # noqa: E501
        :rtype: int
        """
        return self._lnn

    @lnn.setter
    def lnn(self, lnn):
        """Sets the lnn of this NodeStateReadonlyNode.

        Logical Node Number (LNN) of a node.  # noqa: E501

        :param lnn: The lnn of this NodeStateReadonlyNode.  # noqa: E501
        :type: int
        """
        if lnn is not None and lnn > 65535:  # noqa: E501
            raise ValueError("Invalid value for `lnn`, must be a value less than or equal to `65535`")  # noqa: E501
        if lnn is not None and lnn < 1:  # noqa: E501
            raise ValueError("Invalid value for `lnn`, must be a value greater than or equal to `1`")  # noqa: E501
        self._lnn = lnn

    @property
    def mode(self):
        """Gets the mode of this NodeStateReadonlyNode.  # noqa: E501

        The current read-only mode status for the node.  # noqa: E501

        :return: The mode of this NodeStateReadonlyNode.  # noqa: E501
        :rtype: bool
        """
        return self._mode

    @mode.setter
    def mode(self, mode):
        """Sets the mode of this NodeStateReadonlyNode.

        The current read-only mode status for the node.  # noqa: E501

        :param mode: The mode of this NodeStateReadonlyNode.  # noqa: E501
        :type: bool
        """
        self._mode = mode

    @property
    def status(self):
        """Gets the status of this NodeStateReadonlyNode.  # noqa: E501

        The current read-only mode status description for the node.  # noqa: E501

        :return: The status of this NodeStateReadonlyNode.  # noqa: E501
        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        """Sets the status of this NodeStateReadonlyNode.

        The current read-only mode status description for the node.  # noqa: E501

        :param status: The status of this NodeStateReadonlyNode.  # noqa: E501
        :type: str
        """
        self._status = status

    @property
    def valid(self):
        """Gets the valid of this NodeStateReadonlyNode.  # noqa: E501

        The read-only state values are valid (False = Error).  # noqa: E501

        :return: The valid of this NodeStateReadonlyNode.  # noqa: E501
        :rtype: bool
        """
        return self._valid

    @valid.setter
    def valid(self, valid):
        """Sets the valid of this NodeStateReadonlyNode.

        The read-only state values are valid (False = Error).  # noqa: E501

        :param valid: The valid of this NodeStateReadonlyNode.  # noqa: E501
        :type: bool
        """
        self._valid = valid

    @property
    def value(self):
        """Gets the value of this NodeStateReadonlyNode.  # noqa: E501

        The current read-only value (enumerated bitfield) for the node.  # noqa: E501

        :return: The value of this NodeStateReadonlyNode.  # noqa: E501
        :rtype: int
        """
        return self._value

    @value.setter
    def value(self, value):
        """Sets the value of this NodeStateReadonlyNode.

        The current read-only value (enumerated bitfield) for the node.  # noqa: E501

        :param value: The value of this NodeStateReadonlyNode.  # noqa: E501
        :type: int
        """
        if value is not None and value > 9223372036854775807:  # noqa: E501
            raise ValueError("Invalid value for `value`, must be a value less than or equal to `9223372036854775807`")  # noqa: E501
        if value is not None and value < 0:  # noqa: E501
            raise ValueError("Invalid value for `value`, must be a value greater than or equal to `0`")  # noqa: E501
        self._value = value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialise: lists and dicts of models are converted
        # element-by-element; nested models via their own to_dict().
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, NodeStateReadonlyNode):
            return False
        # Attribute-wise comparison of the two models.
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 32.450704 | 450 | 0.593229 |
254a672523f11606bab30f0265bc21aeb4ad2bb3 | 7,104 | py | Python | Code/Components/Synthesis/testdata/current/simulation/synthregression/synthprogrunner.py | rtobar/askapsoft | 6bae06071d7d24f41abe3f2b7f9ee06cb0a9445e | [
"BSL-1.0",
"Apache-2.0",
"OpenSSL"
] | null | null | null | Code/Components/Synthesis/testdata/current/simulation/synthregression/synthprogrunner.py | rtobar/askapsoft | 6bae06071d7d24f41abe3f2b7f9ee06cb0a9445e | [
"BSL-1.0",
"Apache-2.0",
"OpenSSL"
] | null | null | null | Code/Components/Synthesis/testdata/current/simulation/synthregression/synthprogrunner.py | rtobar/askapsoft | 6bae06071d7d24f41abe3f2b7f9ee06cb0a9445e | [
"BSL-1.0",
"Apache-2.0",
"OpenSSL"
] | null | null | null | # helper class for synthesis regression testing
import os,math
# helper class to run a program from synthesis
class SynthesisProgramRunner:
    '''
       Helper that drives the ASKAP synthesis command-line tools
       (csimulator, cimager, ccalibrator, imgstat) against a temporary
       parset file built from a user-supplied template.

       NOTE: this module uses Python 2 syntax (old-style raise, file()).
    '''
    def __init__(self, template_parset = None):
        '''
        initialise the class

        template_parset - file name for the template parset file
        containing all parameters, which are not supposed to
        change
        '''
        if not os.path.exists(template_parset):
            raise RuntimeError, "Template parset file %s is not found" % template_parset
        self.template_parset = template_parset
        # The tool locations are derived from the ASKAP_ROOT build tree;
        # AIPSPATH gets a sensible default if the caller has not set it.
        if 'ASKAP_ROOT' not in os.environ:
            raise RuntimeError, "ASKAP_ROOT should be initialised first!"
        if 'AIPSPATH' not in os.environ:
            os.environ['AIPSPATH'] = os.path.join(os.environ['ASKAP_ROOT'],'Code/Base/accessors/current')
        self.simulator = os.path.join(os.environ['ASKAP_ROOT'],'Code/Components/Synthesis/synthesis/current/apps/csimulator.sh')
        self.imager = os.path.join(os.environ['ASKAP_ROOT'],'Code/Components/Synthesis/synthesis/current/apps/cimager.sh')
        self.calibrator = os.path.join(os.environ['ASKAP_ROOT'],'Code/Components/Synthesis/synthesis/current/apps/ccalibrator.sh')
        self.imgstat = os.path.join(os.environ['ASKAP_ROOT'],'Code/Components/Synthesis/synthesis/current/apps/imgstat.sh')
        if not os.path.exists(self.simulator):
            raise RuntimeError, "csimulator is missing at %s" % self.simulator
        if not os.path.exists(self.imager):
            raise RuntimeError, "cimager is missing at %s" % self.imager
        if not os.path.exists(self.calibrator):
            raise RuntimeError, "ccalibrator is missing at %s" % self.calibrator
        if not os.path.exists(self.imgstat):
            raise RuntimeError, "imgstat is missing at %s" % self.imgstat
        # Working parset shared by all commands run through this object.
        self.tmp_parset = "temp_parset.in"
        self.initParset()
    def initParset(self):
        '''
        Initialise temporary parset to the template
        '''
        if os.path.exists(self.tmp_parset):
            print "WARNING. File %s is overwritten" % self.tmp_parset
            os.system("rm -f %s" % self.tmp_parset)
        os.system("cp %s %s" % (self.template_parset, self.tmp_parset))
    def addToParset(self,str):
        '''
        Add the given string to the temporary parset file (created in
        the constructor and passed to all commands executed throughout
        the lifetime of this object

        str string to add
        '''
        # NOTE(review): appended via an os.system echo; strings containing
        # shell metacharacters other than the quoted \x27 may misbehave.
        os.system("echo \'%s\' >> %s" % (str, self.tmp_parset))
    def runCommand(self,cmd):
        '''
        Run given command on a current parset

        cmd - command
        '''
        res = os.system("%s -c %s" % (cmd, self.tmp_parset))
        if res != 0:
            raise RuntimeError, "Command %s failed with error %s" % (cmd,res)
    def runSimulator(self):
        '''
        Run csimulator on a current parset
        '''
        self.runCommand(self.simulator)
    def runCalibrator(self):
        '''
        Run ccalibrator on a current parset
        '''
        self.runCommand(self.calibrator)
    def runImager(self):
        '''
        Run cimager on a current parset
        '''
        self.runCommand(self.imager)
    def imageStats(self, name):
        '''
        Get image statistics

        name - image name

        Returns a dict with keys 'peak', 'ra', 'dec', 'rms' and 'median'
        parsed from the three-row output of the imgstat tool.
        '''
        if not os.path.exists(name):
            raise RuntimeError, "Image %s doesn't exist" % name
        imgstat_out = ".tmp.imgstat"
        if os.path.exists(imgstat_out):
            os.system("rm -f %s" % imgstat_out)
        res = os.system("%s %s > %s" % (self.imgstat,name,imgstat_out))
        if res != 0:
            raise RuntimeError, "Command %s failed with error %s" % (self.imgstat,res)
        result = {}
        f = file(imgstat_out)
        try:
            row = 0
            for line in f:
                parts = line.split()
                if len(parts)<2 and row>0:
                    raise RuntimeError, "Expected at least 2 elements in row %i, you have: %s" % (row+1,parts)
                if row == 0:
                    # Row 0: peak flux followed by the epoch tag "(J2000)".
                    if len(parts)<4:
                        raise RuntimeError, "Expected at least 4 columns on the first row, you have: %s " % (parts,)
                    result['peak'] = float(parts[0])
                    if parts[3] != "(J2000)":
                        raise RuntimeError, "Expected J2000 as the 4th element, you have: %s " % (parts,)
                elif row == 1:
                    # Row 1: J2000 position of the peak in degrees.
                    result['ra'] = float(parts[0])
                    result['dec'] = float(parts[1])
                elif row == 2:
                    # Row 2: rms and median of the image.
                    result['rms'] = float(parts[0])
                    result['median'] = float(parts[1])
                row = row + 1
        finally:
            f.close()
        return result
def getDistance(stats, ra, dec):
    '''
       Angular distance in degrees of the peak from the given point.

       stats - dictionary with 'ra' and 'dec' fields giving the position
               of the peak (J2000, degrees)
       ra, dec - J2000 coordinates of the reference point in degrees

       Return: great-circle separation in degrees
    '''
    deg2rad = math.pi / 180.
    ra1 = stats['ra'] * deg2rad
    dec1 = stats['dec'] * deg2rad
    cosd = (math.sin(dec1) * math.sin(dec * deg2rad) +
            math.cos(dec1) * math.cos(dec * deg2rad) * math.cos(ra1 - ra * deg2rad))
    # Clamp against floating-point rounding pushing |cosd| marginally above 1,
    # which would make math.acos raise a domain error for (nearly)
    # coincident or antipodal positions.
    cosd = max(-1., min(1., cosd))
    return math.acos(cosd) / deg2rad
# formulae for offset direction via true angles to test that position is at the right spot
def offsetDirection(ref,l,m):
    '''
    ref - a list or a tuple with 2 elements being ra and dec in degrees of the reference direction
    l,m - true angle offsets in longitude and latitude respectively (also in degrees)

    Return: two-element tuple (ra, dec) in degrees of the offset direction
    '''
    if len(ref)!=2:
        raise RuntimeError, "Expected two-element list or tuple, you have: %s" % (ref,)
    # sin and cos of the longitude offset
    sL = math.sin(l/180.*math.pi)
    cL = math.cos(l/180.*math.pi)
    # sin and cos of the reference longitude
    cLong = math.cos(ref[0]/180.*math.pi)
    sLong = math.sin(ref[0]/180.*math.pi)
    # sin and cos of the sum of the latitude offset and reference latitude
    sLatSum = math.sin((ref[1]+m)/180.*math.pi)
    cLatSum = math.cos((ref[1]+m)/180.*math.pi)
    # Cartesian coordinates of the resulting unit direction vector, rotated
    # first in latitude and then by the longitude offset.
    r1 = cLatSum * cL * cLong - sL * sLong
    r2 = cLatSum * cL * sLong + sL * cLong
    r3 = sLatSum * cL
    # convert the vector back to spherical angles (degrees)
    resLat = math.asin(r3)/math.pi*180.
    resLong = math.atan2(r2,r1)/math.pi*180.
    return (resLong, resLat)
# formulae for SIN-projection to test that position is at the right spot
def sinProjection(ref,l,m):
    '''
    ref - a list or a tuple with 2 elements being ra and dec in degrees of the tangent point
    l,m - offsets in the tangent plane in degrees
    Return: two element tuple with ra and dec of the offset position (degrees)
    '''
    if len(ref)!=2:
        raise RuntimeError, "Expected two-element list or tuple, you have: %s" % (ref,)
    # offsets in radians
    L = l/180.*math.pi
    M = m/180.*math.pi
    # sin and cos of ref. declination
    cDelta0 = math.cos(ref[1]/180.*math.pi)
    sDelta0 = math.sin(ref[1]/180.*math.pi)
    # actual result
    # NOTE(review): the sqrt argument goes negative when L*L + M*M > 1;
    # callers are presumed to pass small tangent-plane offsets — confirm.
    dec = math.asin(M*cDelta0+sDelta0*math.sqrt(1.-L*L-M*M))/math.pi*180.
    ra = ref[0] + math.atan2(L,cDelta0*math.sqrt(1.-L*L-M*M)-M*sDelta0)/math.pi*180.
    return (ra,dec)
| 37.389474 | 153 | 0.616554 |
05334b1d05a912bce0aa968400472cf5db523355 | 268 | py | Python | blogproject/comments/admin.py | MrWolffy/django-tutorial | 4b00e35092d47e9a04a7019c3f803c5b09630ec6 | [
"MIT"
] | null | null | null | blogproject/comments/admin.py | MrWolffy/django-tutorial | 4b00e35092d47e9a04a7019c3f803c5b09630ec6 | [
"MIT"
] | null | null | null | blogproject/comments/admin.py | MrWolffy/django-tutorial | 4b00e35092d47e9a04a7019c3f803c5b09630ec6 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Comment
class CommentAdmin(admin.ModelAdmin):
    # Columns shown in the admin change-list for comments.
    list_display = ['name', 'email', 'url', 'post', 'created_time']
    # Fields editable on the add/change form; created_time is omitted
    # (presumably auto-set by the model — verify against Comment).
    fields = ['name', 'email', 'url', 'text', 'post']


# Register the Comment model with its customised admin options.
admin.site.register(Comment, CommentAdmin)
| 24.363636 | 67 | 0.690299 |
296a2b8c76fee95fe1a95271db23b9e391185b69 | 1,445 | py | Python | named_entity_recognition/src/classifiers/label_count_driver.py | anishacharya/nlp-wrapper | c9722d541c8f90c178330483eaf060d6dd52f357 | [
"MIT"
] | 2 | 2020-05-17T19:48:25.000Z | 2020-06-11T03:25:37.000Z | named_entity_recognition/src/classifiers/label_count_driver.py | anishacharya/nlp-wrapper | c9722d541c8f90c178330483eaf060d6dd52f357 | [
"MIT"
] | null | null | null | named_entity_recognition/src/classifiers/label_count_driver.py | anishacharya/nlp-wrapper | c9722d541c8f90c178330483eaf060d6dd52f357 | [
"MIT"
] | 4 | 2020-06-11T03:25:41.000Z | 2021-03-26T13:34:07.000Z | from collections import Counter
from typing import List
from src.data_utils.definitions import LabeledSentence, PersonExample
from src.models.label_count import LabelCount, LabelCountBinary
def train_label_count_binary_ner(ner_exs: List[PersonExample]) -> LabelCountBinary:
    """Build a count-based binary person classifier from labeled examples.

    :param ner_exs: training examples to build the count-based classifier from
    :return: A CountBasedPersonClassifier using counts collected from the given examples
    """
    positive = Counter()
    negative = Counter()
    for example in ner_exs:
        for position in range(len(example)):
            word = example.tokens[position].word
            # Label 1 marks a person token; everything else counts as negative.
            bucket = positive if example.labels[position] == 1 else negative
            bucket[word] += 1.0
    return LabelCountBinary(positive, negative)
def train_label_count_ner(training_set: List[LabeledSentence]) -> LabelCount:
    """Fit a per-word BIO-tag count model from labeled NER sentences.

    :param training_set: labeled NER sentences to extract a BadNerModel from
    :return: the BadNerModel based on counts collected from the training data
    """
    tag_counts_by_word = {}
    for sentence in training_set:
        bio_tags = sentence.get_bio_tags()
        for position in range(len(sentence)):
            word = sentence.tokens[position].word
            # Lazily create a Counter for each new word, then tally its tag.
            counter = tag_counts_by_word.setdefault(word, Counter())
            counter[bio_tags[position]] += 1.0
    return LabelCount(tag_counts_by_word)
| 38.026316 | 88 | 0.688581 |
fa45858d51511af2f49810a21dad0b61aac174d8 | 3,716 | py | Python | test/functional/interface_rpc.py | MrCryptoBeast/WWW | 857e860df0aa1bc7fde2ee6f5918ff32933beeb3 | [
"MIT"
] | null | null | null | test/functional/interface_rpc.py | MrCryptoBeast/WWW | 857e860df0aa1bc7fde2ee6f5918ff32933beeb3 | [
"MIT"
] | null | null | null | test/functional/interface_rpc.py | MrCryptoBeast/WWW | 857e860df0aa1bc7fde2ee6f5918ff32933beeb3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2018-2020 The worldwideweb Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests some generic aspects of the RPC interface."""
import os
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import worldwidewebTestFramework
from test_framework.util import assert_equal, assert_greater_than_or_equal
from threading import Thread
import subprocess
def expect_http_status(expected_http_status, expected_rpc_code,
                       fcn, *args):
    """Call fcn(*args) and require it to fail with the given JSON-RPC error
    code and HTTP status; raise AssertionError if it unexpectedly succeeds."""
    try:
        fcn(*args)
    except JSONRPCException as exc:
        assert_equal(exc.error["code"], expected_rpc_code)
        assert_equal(exc.http_status, expected_http_status)
    else:
        raise AssertionError(f"Expected RPC error {expected_rpc_code}, got none")
def test_work_queue_getblock(node, got_exceeded_error):
    """Hammer the node's RPC via the CLI until the shared flag list records
    that the server reported an exhausted work queue."""
    expected_message = 'error: Server response: Work queue depth exceeded\n'
    while not got_exceeded_error:
        try:
            node.cli('getrpcinfo').send_cli()
        except subprocess.CalledProcessError as err:
            assert_equal(err.output, expected_message)
            got_exceeded_error.append(True)
class RPCInterfaceTest(worldwidewebTestFramework):
    """Functional test of generic JSON-RPC interface behaviour."""
    def set_test_params(self):
        # Single node, fresh chain; CLI mode is exercised manually below.
        self.num_nodes = 1
        self.setup_clean_chain = True
        self.supports_cli = False
    def test_getrpcinfo(self):
        self.log.info("Testing getrpcinfo...")
        info = self.nodes[0].getrpcinfo()
        # The only in-flight command should be this getrpcinfo call itself.
        assert_equal(len(info['active_commands']), 1)
        command = info['active_commands'][0]
        assert_equal(command['method'], 'getrpcinfo')
        assert_greater_than_or_equal(command['duration'], 0)
        assert_equal(info['logpath'], os.path.join(self.nodes[0].datadir, self.chain, 'debug.log'))
    def test_batch_request(self):
        self.log.info("Testing basic JSON-RPC batch request...")
        results = self.nodes[0].batch([
            # A basic request that will work fine.
            {"method": "getblockcount", "id": 1},
            # Request that will fail. The whole batch request should still
            # work fine.
            {"method": "invalidmethod", "id": 2},
            # Another call that should succeed.
            {"method": "getblockhash", "id": 3, "params": [0]},
        ])
        # Index responses by id: batch ordering is not relied upon.
        result_by_id = {}
        for res in results:
            result_by_id[res["id"]] = res
        assert_equal(result_by_id[1]['error'], None)
        assert_equal(result_by_id[1]['result'], 0)
        # -32601 is the JSON-RPC "method not found" error code.
        assert_equal(result_by_id[2]['error']['code'], -32601)
        assert_equal(result_by_id[2]['result'], None)
        assert_equal(result_by_id[3]['error'], None)
        assert result_by_id[3]['result'] is not None
    def test_http_status_codes(self):
        self.log.info("Testing HTTP status codes for JSON-RPC requests...")
        # Unknown method -> 404; bad parameter (-8) -> 500.
        expect_http_status(404, -32601, self.nodes[0].invalidmethod)
        expect_http_status(500, -8, self.nodes[0].getblockhash, 42)
    def test_work_queue_exceeded(self):
        self.log.info("Testing work queue exceeded...")
        # A depth-1 queue with one RPC thread is trivially overwhelmed by
        # three concurrent CLI clients.
        self.restart_node(0, ['-rpcworkqueue=1', '-rpcthreads=1'])
        got_exceeded_error = []
        threads = []
        for _ in range(3):
            t = Thread(target=test_work_queue_getblock, args=(self.nodes[0], got_exceeded_error))
            t.start()
            threads.append(t)
        for t in threads:
            t.join()
    def run_test(self):
        self.test_getrpcinfo()
        self.test_batch_request()
        self.test_http_status_codes()
        self.test_work_queue_exceeded()
# Standard functional-test entry point.
if __name__ == '__main__':
    RPCInterfaceTest().main()
| 35.730769 | 99 | 0.655005 |
d0f76fc6f27de361485fada0fb66c0716326f5fd | 6,086 | py | Python | tests/denon/test_http_api.py | JPHutchins/pyavreceiver | 2c86d0ab1f3bca886d2a876096ac760ffb1dcd5f | [
"Apache-2.0"
] | 2 | 2020-12-28T06:09:18.000Z | 2021-01-09T22:36:57.000Z | tests/denon/test_http_api.py | JPHutchins/pyavreceiver | 2c86d0ab1f3bca886d2a876096ac760ffb1dcd5f | [
"Apache-2.0"
] | 1 | 2021-02-03T22:59:49.000Z | 2021-02-03T22:59:49.000Z | tests/denon/test_http_api.py | JPHutchins/pyavreceiver | 2c86d0ab1f3bca886d2a876096ac760ffb1dcd5f | [
"Apache-2.0"
] | null | null | null | """Test getting device information via HTTP API."""
from pyavreceiver.denon import const as denon_const
from pyavreceiver.denon.http_api import DenonHTTPApi
def test_get_renamed_and_deleted_sources():
    """Test getting dict of renamed sources."""

    def load_fixture(name):
        with open("tests/denon/fixtures/" + name) as file:
            return file.read()

    # AppCommand-style payload (newer receivers).
    rename_map = DenonHTTPApi.make_renamed_dict(
        load_fixture("GetRename-Delete-X1500H.xml"))
    expected_sources = {
        "STEAM": denon_const.SOURCE_CABLE,
        "Test": denon_const.SOURCE_DVD,
        "XBOX 360": denon_const.SOURCE_BLURAY,
        "AUX": denon_const.SOURCE_AUX,
        "Media Player": denon_const.SOURCE_MEDIA_PLAYER,
        "Tuner": denon_const.SOURCE_TUNER,
        "HEOS Music": denon_const.SOURCE_NETWORK,
        "TV Audio": denon_const.SOURCE_TV_AUDIO,
        "Bluetooth": denon_const.SOURCE_BLUETOOTH,
        "Phono": denon_const.SOURCE_PHONO,
    }
    for renamed, source in expected_sources.items():
        assert rename_map[renamed] == source
    assert not rename_map.get("Game")  # deleted source

    rename_map = DenonHTTPApi.make_renamed_dict(
        load_fixture("GetRename-Delete-NR1604.xml"))
    assert rename_map == {
        "TUNER": "TUNER",
        "CD": "CD",
        "M-XPort": "M-XPORT",
        "NETWORK": "NET",
        "DVD": "DVD",
        "Blu-ray": "BD",
        "TV AUDIO": "TV",
        "CBL/SAT": "SAT/CBL",  # would like confirmation
        "GAME": "GAME",
        "AUX1": "AUX1",
        "AUX2": "AUX2",
        "MEDIA PLAYER": "MPLAY",
        "iPod/USB": "USB/IPOD",
    }

    # MainZoneXml / MainZoneXmlStatus payloads (legacy receivers).
    # Note: MainZoneXmlStatus doesn't specify deleted sources.
    legacy_cases = (
        ("MainZoneXml-1912.xml", {
            "TUNER": "TUNER",
            "NET/USB": "NET/USB",
            "Chrome": "DVD",
            "Kodi": "BD",
            "Chrome A": "DOCK",
        }),
        ("MainZoneXml-3311CI.xml", {
            "PHONO": "PHONO",
            "SqzBox": "CD",
            "SACD": "DVD",
            "BluRay": "BD",
            "SAT/CBL": "SAT/CBL",
            "HDMI5": "GAME",
            "GAME": "DVR",
            "V.AUX": "V.AUX",
        }),
        ("MainZoneXmlStatus-1912.xml", {
            "TUNER": "TUNER",
            "CD": "CD",
            "NET/USB": "NET/USB",
            "Chrome": "DVD",
            "Kodi": "BD",
            "TV": "TV",
            "SAT": "SAT/CBL",
            "Chrome A": "DOCK",
            "GAME2": "GAME2",
            "GAME1": "GAME",
            "V.AUX": "V.AUX",
        }),
    )
    for fixture, expected in legacy_cases:
        rename_map = DenonHTTPApi.make_renamed_dict_legacy(load_fixture(fixture))
        assert rename_map == expected
def test_read_device_info_x_series_xml():
    """Test getting model, mac, and zone count from X-series."""

    def parse_fixture(api, name):
        with open("tests/denon/fixtures/" + name) as file:
            api.make_device_info_dict(file.read())

    # Deviceinfo payloads: (fixture, model name, MAC, zone count).
    device_cases = (
        ("Deviceinfo-X1500H.xml", "AVR-X1500H", "0005CDD1F6E8", 2),
        ("Deviceinfo-X8500H.xml", "AVC-X8500H", "0005CDA60D0C", 3),
        ("Deviceinfo-X2000.xml", "*AVR-X2000", "0005CD3A0525", 2),
        ("Deviceinfo-X1100W.xml", "*AVR-X1100W", "0005CD49AC13", 2),
        ("Deviceinfo-SR5008.xml", "*SR5008", "0006781D2F2F", 2),
        ("Deviceinfo-NR1604.xml", "*NR1604", "0006781C2177", 2),
    )
    for fixture, model, mac, zones in device_cases:
        api = DenonHTTPApi("")
        parse_fixture(api, fixture)
        assert api.device_info["model_name"] == model
        assert api.device_info["mac_address"] == mac
        assert api.device_info["zones"] == zones

    # A upnp description carries different fields (serial, friendly name, ...).
    api = DenonHTTPApi("")
    parse_fixture(api, "upnp-X1500H.xml")
    assert api.device_info["model_name"] == "Denon AVR-X1500H"
    assert api.device_info["mac_address"] is None
    assert api.device_info["serial_number"] == "AYW27181117704"
    assert api.device_info["zones"] == 1
    assert api.device_info["manufacturer"] == "Denon"
    assert api.device_info["friendly_name"] == "TV Speakers"

    # Test parsing the info then upnp, as in the get_device_info function:
    # the two payloads must merge rather than overwrite each other.
    api = DenonHTTPApi("")
    parse_fixture(api, "Deviceinfo-X1500H.xml")
    parse_fixture(api, "upnp-X1500H.xml")
    assert api.device_info["model_name"] == "AVR-X1500H"
    assert api.device_info["mac_address"] == "0005CDD1F6E8"
    assert api.device_info["serial_number"] == "AYW27181117704"
    assert api.device_info["zones"] == 2
    assert api.device_info["manufacturer"] == "Denon"
    assert api.device_info["friendly_name"] == "TV Speakers"
| 37.567901 | 74 | 0.640815 |
9d8fb69936fe9ee89d710b9644df2bde53a12ed8 | 10,536 | py | Python | var/spack/repos/builtin/packages/fftw/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/fftw/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8 | 2021-11-09T20:28:40.000Z | 2022-03-15T03:26:33.000Z | var/spack/repos/builtin/packages/fftw/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-08T20:37:20.000Z | 2019-03-31T15:19:26.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import os.path
import llnl.util.lang
from spack.package import *
class FftwBase(AutotoolsPackage):
    """Base class for building Fftw, shared with the AMD optimized version
    of the library in the 'amdfftw' package.

    Subclasses provide versions/patches; this class owns the variant set,
    the library-name resolution, and the per-precision configure/build/install
    cycle (FFTW builds each floating-point precision in a separate tree).
    """

    # One library is built per selected precision; at least one must be chosen.
    variant(
        'precision', values=any_combination_of(
            'float', 'double', 'long_double', 'quad'
        ).prohibit_empty_set().with_default('float,double'),
        description='Build the selected floating-point precision libraries'
    )
    variant('openmp', default=False, description="Enable OpenMP support.")
    variant('mpi', default=True, description='Activate MPI support')

    depends_on('mpi', when='+mpi')
    # Apple clang ships no OpenMP runtime of its own.
    depends_on('llvm-openmp', when='%apple-clang +openmp')

    # https://github.com/FFTW/fftw3/commit/902d0982522cdf6f0acd60f01f59203824e8e6f3
    conflicts('%gcc@8.0:8', when="@3.3.7")
    conflicts('precision=long_double', when='@2.1.5',
              msg='Long double precision is not supported in FFTW 2')
    conflicts('precision=quad', when='@2.1.5',
              msg='Quad precision is not supported in FFTW 2')
    conflicts('precision=quad', when='+mpi',
              msg='Quad precision is not supported in MPI')

    @property
    def libs(self):
        """Resolve the libraries matching the dependent's query parameters.

        FFTW encodes precision as a library-name suffix (libfftw3f, libfftw3l,
        ...) and MPI/OpenMP support as additional '_mpi'/'_omp' libraries, so
        the query parameters are mapped onto those naming conventions.
        """
        # Reduce repetitions of entries
        query_parameters = list(llnl.util.lang.dedupe(
            self.spec.last_query.extra_parameters
        ))
        # List of all the suffixes associated with float precisions
        precisions = [
            ('float', 'f'),
            ('double', ''),
            ('long_double', 'l'),
            ('quad', 'q')
        ]
        # Retrieve the correct suffixes, or use double as a default
        suffixes = [v for k, v in precisions if k in query_parameters] or ['']
        # Construct the list of libraries that needs to be found
        libraries = []
        for sfx in suffixes:
            if 'mpi' in query_parameters and '+mpi' in self.spec:
                libraries.append('libfftw3' + sfx + '_mpi')
            if 'openmp' in query_parameters and '+openmp' in self.spec:
                libraries.append('libfftw3' + sfx + '_omp')
            libraries.append('libfftw3' + sfx)
        return find_libraries(libraries, root=self.prefix, recursive=True)

    def patch(self):
        # If fftw/config.h exists in the source tree, it will take precedence
        # over the copy in build dir. As only the latter has proper config
        # for our build, this is a problem. See e.g. issue #7372 on github
        if os.path.isfile('fftw/config.h'):
            os.rename('fftw/config.h', 'fftw/config.h.SPACK_RENAMED')

    def autoreconf(self, spec, prefix):
        # The pfft patches touch configure.ac, so the build system must be
        # regenerated when they are applied.
        if '+pfft_patches' in spec:
            autoreconf = which('autoreconf')
            autoreconf('-ifv')

    @property
    def selected_precisions(self):
        """Precisions that have been selected in this build"""
        return self.spec.variants['precision'].value

    def setup_build_environment(self, env):
        # Wire up the external OpenMP runtime for Apple clang by hand, since
        # the compiler driver does not add it automatically.
        if self.spec.satisfies('%apple-clang +openmp'):
            env.append_flags(
                'CPPFLAGS', self.compiler.openmp_flag)
            env.append_flags(
                'CFLAGS', self.spec['llvm-openmp'].headers.include_flags)
            env.append_flags(
                'CXXFLAGS', self.spec['llvm-openmp'].headers.include_flags)
            env.append_flags(
                'LDFLAGS', self.spec['llvm-openmp'].libs.ld_flags)
        # FFTW first configures libtool without MPI, and later uses it with
        # MPI. libtool then calls wrong linker to create shared libraries
        # (it calls `$CC` instead of `$MPICC`), and MPI symbols
        # remain undefined because `-lmpi` is not passed to the linker.
        # https://github.com/FFTW/fftw3/issues/274
        # https://github.com/spack/spack/issues/29224
        if self.spec.satisfies('+mpi') and self.spec.satisfies('platform=darwin'):
            env.append_flags('LIBS', self.spec['mpi'].libs.ld_flags)

    def configure(self, spec, prefix):
        """Run ./configure once per selected precision, in its own subdir.

        Assembles the common option list, SIMD enable/disable flags matched
        against the target microarchitecture (with several compiler-specific
        workarounds), and the precision-selection flag.
        """
        # Base options
        options = [
            '--prefix={0}'.format(prefix),
            '--enable-shared',
            '--enable-threads'
        ]
        if not self.compiler.f77 or not self.compiler.fc:
            options.append("--disable-fortran")
        if spec.satisfies('@:2'):
            options.append('--enable-type-prefix')
        # Variants that affect every precision
        if '+openmp' in spec:
            options.append('--enable-openmp')
            if spec.satisfies('@:2'):
                # TODO: libtool strips CFLAGS, so 2.x libxfftw_threads
                # isn't linked to the openmp library. Patch Makefile?
                options.insert(0, 'CFLAGS=' + self.compiler.openmp_flag)
        if '+mpi' in spec:
            options.append('--enable-mpi')
        # Specific SIMD support.
        # all precisions
        simd_features = ['sse2', 'avx', 'avx2', 'avx512', 'avx-128-fma',
                         'kcvi', 'vsx']
        # float only
        float_simd_features = ['altivec', 'sse', 'neon']
        # Workaround PGI compiler bug when avx2 is enabled
        if spec.satisfies('%pgi') and 'avx2' in simd_features:
            simd_features.remove('avx2')
        # Workaround NVIDIA/PGI compiler bug when avx512 is enabled
        if spec.satisfies('%nvhpc') or spec.satisfies('%pgi'):
            if 'avx512' in simd_features:
                simd_features.remove('avx512')
        # NVIDIA compiler does not support Altivec intrinsics
        if spec.satisfies('%nvhpc') and 'vsx' in simd_features:
            simd_features.remove('vsx')
        if spec.satisfies('%nvhpc') and 'altivec' in float_simd_features:
            float_simd_features.remove('altivec')
        # NVIDIA compiler does not support Neon intrinsics
        if spec.satisfies('%nvhpc') and 'neon' in simd_features:
            simd_features.remove('neon')
        simd_options = []
        for feature in simd_features:
            msg = '--enable-{0}' if feature in spec.target else '--disable-{0}'
            simd_options.append(msg.format(feature))
        # If no features are found, enable the generic ones
        if not any(f in spec.target for f in
                   simd_features + float_simd_features):
            # Workaround NVIDIA compiler bug
            if not spec.satisfies('%nvhpc'):
                simd_options += [
                    '--enable-generic-simd128',
                    '--enable-generic-simd256'
                ]
        simd_options += [
            '--enable-fma' if 'fma' in spec.target else '--disable-fma'
        ]
        # Double is the default precision, for all the others we need
        # to enable the corresponding option.
        enable_precision = {
            'float': ['--enable-float'],
            'double': None,
            'long_double': ['--enable-long-double'],
            'quad': ['--enable-quad-precision']
        }
        # Different precisions must be configured and compiled one at a time
        configure = Executable('../configure')
        for precision in self.selected_precisions:
            opts = (enable_precision[precision] or []) + options[:]
            # SIMD optimizations are available only for float and double
            # starting from FFTW 3
            if precision in ('float', 'double') and spec.satisfies('@3:'):
                opts += simd_options
            # float-only acceleration
            if precision == 'float':
                for feature in float_simd_features:
                    if feature in spec.target:
                        msg = '--enable-{0}'
                    else:
                        msg = '--disable-{0}'
                    opts.append(msg.format(feature))
            with working_dir(precision, create=True):
                configure(*opts)

    def for_each_precision_make(self, *targets):
        # Each precision was configured in its own subdirectory, so run the
        # given make targets in each of them in turn.
        for precision in self.selected_precisions:
            with working_dir(precision):
                make(*targets)

    def build(self, spec, prefix):
        self.for_each_precision_make()

    def check(self):
        self.for_each_precision_make('check')

    def install(self, spec, prefix):
        self.for_each_precision_make('install')
class Fftw(FftwBase):
    """FFTW is a C subroutine library for computing the discrete Fourier
    transform (DFT) in one or more dimensions, of arbitrary input
    size, and of both real and complex data (as well as of even/odd
    data, i.e. the discrete cosine/sine transforms or DCT/DST). We
    believe that FFTW, which is free software, should become the FFT
    library of choice for most applications."""

    homepage = "https://www.fftw.org"
    url = "https://www.fftw.org/fftw-3.3.4.tar.gz"
    list_url = "https://www.fftw.org/download.html"

    version('3.3.10', sha256='56c932549852cddcfafdab3820b0200c7742675be92179e59e6215b340e26467')
    version('3.3.9', sha256='bf2c7ce40b04ae811af714deb512510cc2c17b9ab9d6ddcf49fe4487eea7af3d')
    version('3.3.8', sha256='6113262f6e92c5bd474f2875fa1b01054c4ad5040f6b0da7c03c98821d9ae303')
    version('3.3.7', sha256='3b609b7feba5230e8f6dd8d245ddbefac324c5a6ae4186947670d9ac2cd25573')
    version('3.3.6-pl2', sha256='a5de35c5c824a78a058ca54278c706cdf3d4abba1c56b63531c2cb05f5d57da2')
    version('3.3.5', sha256='8ecfe1b04732ec3f5b7d279fdb8efcad536d555f9d1e8fabd027037d45ea8bcf')
    version('3.3.4', sha256='8f0cde90929bc05587c3368d2f15cd0530a60b8a9912a8e2979a72dbe5af0982')
    version('2.1.5', sha256='f8057fae1c7df8b99116783ef3e94a6a44518d49c72e2e630c24b689c6022630')

    variant(
        'pfft_patches', default=False,
        description='Add extra transpose functions for PFFT compatibility')

    # The pfft patches modify configure.ac, so the autotools chain is needed
    # to regenerate the build system (see FftwBase.autoreconf).
    depends_on('automake', type='build', when='+pfft_patches')
    depends_on('autoconf', type='build', when='+pfft_patches')
    depends_on('libtool', type='build', when='+pfft_patches')

    # FFTW 2 and FFTW 3 have incompatible APIs; advertise the matching one.
    provides('fftw-api@2', when='@2.1.5')
    provides('fftw-api@3', when='@3:')

    # Each pfft patch targets the version range whose source layout it matches.
    patch('pfft-3.3.9.patch', when="@3.3.9:+pfft_patches", level=0)
    patch('pfft-3.3.5.patch', when="@3.3.5:3.3.8+pfft_patches", level=0)
    patch('pfft-3.3.4.patch', when="@3.3.4+pfft_patches", level=0)
    patch('pgi-3.3.6-pl2.patch', when="@3.3.6-pl2%pgi", level=0)
    patch('intel-configure.patch', when="@3:3.3.8%intel", level=0)
| 40.837209 | 99 | 0.617027 |
81d94496a53b757f933d5f2d56e71618f0e22b68 | 8,108 | py | Python | core_admin/skybot/import_positions.py | linea-it/tno | f973381280504ceb1b606b5b3ccc79b6b8c2aa4f | [
"MIT"
] | null | null | null | core_admin/skybot/import_positions.py | linea-it/tno | f973381280504ceb1b606b5b3ccc79b6b8c2aa4f | [
"MIT"
] | 112 | 2018-04-24T19:10:55.000Z | 2022-02-26T16:55:02.000Z | core_admin/skybot/import_positions.py | linea-it/tno | f973381280504ceb1b606b5b3ccc79b6b8c2aa4f | [
"MIT"
] | null | null | null | import linecache
import logging
from datetime import datetime, timedelta, timezone
from io import StringIO
import humanize
import pandas as pd
from django.conf import settings
from tno.db import DBBase
class ImportSkybotPositions():
    """Import IMCCE Skybot cone-search output files into the skybot table.

    Parses the pipe-delimited Skybot output, converts RA/Dec from HMS to
    degrees, and bulk-loads the rows into PostgreSQL with COPY.
    """

    def __init__(self):
        self.logger = logging.getLogger("skybot_load_data")
        # Open a database connection using sqlAlchemy.
        self.dbbase = DBBase(pool=False)

    def import_output_file(self, filepath):
        """Importa os resultados na tabela skybot
        Arguments:
            filepath {str} -- Filepath do arquio de outputs do skybot.
        Returns:
            pandas.Dataframe -- Dataframe com o conteudo do arquivo.
        """
        self.logger.debug("Importing Skybot Output: [%s]" % filepath)
        try:
            t0 = datetime.now(timezone.utc)
            flag = self.read_flag_from_output(filepath)
            # Only import when the service actually returned positions
            # (flag=1); flag=0 means no body found, flag=-1 means error.
            if flag > 0:
                # Parse the output file into a pandas dataframe.
                df = self.read_output_file(filepath)
                rowcount = self.import_data(df)
            else:
                self.logger.debug(
                    "Skybot returned 0 Positions. means that no body has been found")
                df = self.create_empty_dataframe()
                rowcount = 0
            t1 = datetime.now(timezone.utc)
            tdelta = t1 - t0
            self.logger.debug("Imported Skybot [%s] Positions in %s" % (
                rowcount, humanize.naturaldelta(tdelta, minimum_unit="milliseconds")))
            return df
        except Exception as e:
            self.logger.error(e)
            # Bare re-raise preserves the original traceback
            # (``raise (e)`` would have reset it to this line).
            raise

    def read_output_file(self, filepath):
        """Parse a Skybot output file into a dataframe matching the table layout.

        Skips the 3-line header, converts coordinates to degrees, strips
        whitespace, and attaches the request ticket and base dynamical class.
        """
        # Column names as they appear in the file, in file order.
        headers = ["number", "name", "ra", "dec", "dynclass", "mv", "errpos", "d", "dracosdec",
                   "ddec", "dgeo", "dhelio", "phase", "solelong", "px", "py", "pz", "vx", "vy", "vz", "jdref"]
        df = pd.read_csv(filepath, skiprows=3, delimiter='|', names=headers)
        # Skybot uses '-' as a placeholder for unnumbered objects; blank it
        # out. (Was ``is not '-'``, an identity comparison on a string
        # literal that only worked through CPython interning.)
        df['number'] = df['number'].apply(
            lambda x: x if str(x).strip() != '-' else '')
        # Add columns for RA and Dec in degrees.
        df['raj2000'] = 0
        df['decj2000'] = 0
        # Convert the coordinates from HMS to degrees.
        df['raj2000'] = df['ra'].apply(lambda x: self.convert_ra_hms_deg(x))
        df['decj2000'] = df['dec'].apply(lambda x: self.convert_dec_hms_deg(x))
        # Strip surrounding whitespace from every string cell.
        df = df.applymap(lambda x: x.strip() if type(x) == str else x)
        # Attach the Skybot request ticket to every row.
        df['ticket'] = self.read_ticket_from_output(filepath)
        # First component of the dynamical class, e.g. 'KBO>Classical' -> 'KBO'.
        df['base_dynclass'] = df['dynclass'].apply(lambda x: x.split('>')[0])
        # Reorder the columns to match the table definition, which makes the
        # CSV COPY import straightforward.
        columns = self.get_columns()
        df = df.reindex(columns=columns)
        return df

    def get_columns(self):
        """Return the dataframe/table column order used by the COPY import."""
        columns = ['name', 'number', 'dynclass', 'ra', 'dec', 'raj2000', 'decj2000', 'mv', 'errpos', 'd', 'dracosdec',
                   'ddec', 'dgeo', 'dhelio', 'phase', 'solelong', 'px', 'py', 'pz', 'vx', 'vy', 'vz', 'jdref', 'ticket', 'base_dynclass']
        return columns

    def create_empty_dataframe(self):
        """Return an empty dataframe with the standard column layout."""
        df = pd.DataFrame(columns=self.get_columns())
        return df

    def import_data(self, dataframe):
        """
        Convert the dataframe to csv, and import it into the database.
        Parameters:
            dataframe (dataframe): Pandas Dataframe with the information to be imported.
        Returns:
            rowcount (int): the number of rows imported.
        Example SQL Copy:
            COPY tno_skybotoutput (num, name, dynclass, ra, dec, raj2000, decj2000, mv, errpos, d, dracosdec, ddec, dgeo, dhelio, phase, solelong, px, py, pz, vx, vy, vz, jdref) FROM '/data/teste.csv' with (FORMAT CSV, DELIMITER ';', HEADER);
        """
        # Convert the dataframe to csv held in an in-memory buffer.
        # The header is kept so the COPY statement can name the columns to
        # import; the index is disabled so pandas does not add an extra
        # id column that does not exist in the table.
        self.logger.debug("Converting the pandas dataframe to csv")
        data = StringIO()
        dataframe.to_csv(
            data,
            sep="|",
            header=True,
            index=False,
        )
        data.seek(0)
        try:
            self.logger.debug("Executing the import function on the database.")
            # Resolve the skybot output table name.
            table = str(self.dbbase.get_table_skybot())
            # COPY statement listing every imported column and the csv format.
            sql = "COPY %s (name, number, dynclass, ra, dec, raj2000, decj2000, mv, errpos, d, dracosdec, ddec, dgeo, dhelio, phase, solelong, px, py, pz, vx, vy, vz, jdref, ticket, base_dynclass) FROM STDIN with (FORMAT CSV, DELIMITER '|', HEADER);" % table
            # Stream the csv buffer into the table.
            rowcount = self.dbbase.import_with_copy_expert(sql, data)
            self.logger.debug("Successfully imported")
            # Number of rows inserted.
            return rowcount
        except Exception as e:
            raise Exception("Failed to import data. Error: [%s]" % e)

    def read_ticket_from_output(self, filepath):
        """
        Read the output file and retrieve the ticket number on the second line.
        this ticket identifies the request that was made for the Skybot service.
        Parameters:
            filepath (str): Output file returned by the skybot service.
        Returns:
            ticket (int): Ticket number, example: 166515392791779001
        """
        # The ticket groups every row produced by the same Skybot request.
        line = linecache.getline(str(filepath), 2)
        ticket = int(line.split(':')[1].strip())
        self.logger.debug("Skybot Ticket: [%s]" % ticket)
        return ticket

    def read_flag_from_output(self, filepath):
        """Read the status flag from the first line of the output file.

        Arguments:
            filepath {str} -- Skybot output file.
        Returns:
            int -- the status of the response:
                flag=1 means that a body has been found;
                flag=0 means that no body has been found;
                flag=-1 means that an error occured
        """
        line = linecache.getline(str(filepath), 1)
        flag = int(line.split(':')[1].strip())
        self.logger.debug("Skybot Flag: [%s]" % flag)
        return flag

    def convert_ra_hms_deg(self, ra=''):
        """
        Convert RA from HMS to degrees.
        Parameters:
            ra (str): RA in hours, e.g. '23 56 47.2833'.
        Returns:
            ra (float): RA in degrees, e.g. 359.197, rounded to 4 decimals.
        """
        H, M, S = [float(i) for i in ra.split()]
        RA = (H + M/60. + S/3600.)*15.
        return float("{0:.4f}".format(RA))

    def convert_dec_hms_deg(self, dec=''):
        """
        Convert Dec from HMS to degrees.
        Parameters:
            dec (str): Dec in sexagesimal form, e.g. '-00 53 27.975'.
        Returns:
            dec (float): Dec in degrees, e.g. -0.8911, rounded to 4 decimals.
        """
        # The sign is read from the string because float('-00') keeps the
        # sign only as -0.0; ds carries it explicitly.
        DEC, ds = 0, 1
        D, M, S = [float(i) for i in dec.split()]
        if str(D)[0] == '-':
            ds, D = -1, abs(D)
        DEC = ds*(D + M/60. + S/3600.)
        return float("{0:.4f}".format(DEC))
| 34.948276 | 258 | 0.575851 |
194db71089f0e98f21c8c9ae74158f3c23db1a53 | 3,912 | py | Python | openstack_monitoring/check_cinder_service.py | tobias-urdin/openstack-monitoring | f9636aeb115e28a385a7485fe03f82cab69b6452 | [
"Apache-2.0"
] | 2 | 2016-10-11T14:58:18.000Z | 2016-10-12T06:22:31.000Z | openstack_monitoring/check_cinder_service.py | tobias-urdin/openstack-monitoring | f9636aeb115e28a385a7485fe03f82cab69b6452 | [
"Apache-2.0"
] | null | null | null | openstack_monitoring/check_cinder_service.py | tobias-urdin/openstack-monitoring | f9636aeb115e28a385a7485fe03f82cab69b6452 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# OpenStack Monitoring
# Copyright (C) 2015 Tobias Urdin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
import argparse
from openstack.keystone import KeystoneClient
from openstack.cinder import CinderClient
# Nagios-style exit codes consumed by the monitoring system.
STATE_OK = 0
STATE_WARNING = 1
STATE_CRITICAL = 2
STATE_UNKNOWN = 3

parser = argparse.ArgumentParser(description='Check Cinder Service Status')
parser.add_argument('--auth_url', metavar='http://controller:35357/v2.0',
                    type=str, required=True, help='Keystone URL')
parser.add_argument('--username', metavar='username', type=str,
                    required=True, help='Keystone username')
parser.add_argument('--password', metavar='password', type=str,
                    required=True, help='Keystone password')
parser.add_argument('--domain', metavar='domain', type=str,
                    default='default', help='Keystone domain')
parser.add_argument('--project', metavar='project', type=str,
                    required=True, help='Keystone project')
parser.add_argument('--region', metavar='region', type=str,
                    required=True, help='Region')
parser.add_argument('--cinder_url', metavar='http://controller:8776/v2',
                    type=str, required=False, help='Cinder endpoint')
# store_false: passing --insecure sets args.verify to False (SSL disabled).
parser.add_argument('--insecure', action='store_false', dest='verify',
                    required=False, help='Disable SSL')
parser.add_argument('--endpoint', metavar='public', type=str,
                    required=False, help='Endpoint type, default to public')
parser.add_argument('--host', metavar='host', type=str, required=True,
                    help='Host to check binary on')
parser.add_argument('--binary', metavar='binary', type=str, required=True,
                    help='Binary service to check')
args = parser.parse_args()

# Authenticate against Keystone; every failure path exits CRITICAL with a
# plugin-formatted message on stdout.
keystone = KeystoneClient(args.auth_url, args.username, args.password,
                          args.domain, args.project, args.verify,
                          args.region, args.endpoint)
if keystone is None:
    print('CRITICAL: Could not create keystone context')
    sys.exit(STATE_CRITICAL)
if keystone.valid() is False:
    print('CRITICAL: Keystone context is invalid')
    sys.exit(STATE_CRITICAL)

# Optional explicit Cinder endpoint; None lets the client discover it
# (presumably from the Keystone catalog - confirm in CinderClient).
cinder_url = None
if args.cinder_url is not None:
    cinder_url = args.cinder_url
cinder = CinderClient(keystone, cinder_url)
if cinder is None:
    print('CRITICAL: Could not create cinder context')
    sys.exit(STATE_CRITICAL)

# Look up the requested binary (e.g. cinder-volume) on the given host.
service = cinder.get_service(args.host, args.binary)
if service is None:
    print(('CRITICAL: Could not retrieve '
           'status for %s on %s') % (args.binary, args.host))
    sys.exit(STATE_CRITICAL)

# enabled+up -> OK; enabled+down -> CRITICAL; disabled -> WARNING.
if service['status'] == 'enabled':
    status = 'CRITICAL'
    status_code = STATE_CRITICAL
    if service['state'] == 'up':
        status = 'OK'
        status_code = STATE_OK
    print(('%s: %s on %s is enabled '
           'with state %s') % (status, service['binary'],
                               service['host'], service['state']))
    sys.exit(status_code)
else:
    print(('WARNING: %s on %s is disabled '
           'with state %s') % (service['binary'], service['host'],
                               service['state']))
    sys.exit(STATE_WARNING)
# NOTE(review): the two lines below are unreachable - both branches of the
# if/else above call sys.exit(). Kept for fidelity; candidate for removal.
print('CRITICAL: Invalid service state for %s on %s' % (args.binary, args.host))
sys.exit(STATE_CRITICAL)
| 36.222222 | 80 | 0.662577 |
f3e4bea8c185a65f90ce15752511542aab9f40a4 | 15,549 | py | Python | mrfd/data_management.py | ivineetm007/Fall-detection | d18ac5f1e26a04ed8492b2e215a91c51b55f32dd | [
"MIT"
] | 13 | 2020-11-14T06:26:04.000Z | 2022-03-30T10:46:22.000Z | mrfd/data_management.py | ivineetm007/Fall-detection | d18ac5f1e26a04ed8492b2e215a91c51b55f32dd | [
"MIT"
] | 5 | 2020-11-14T06:27:10.000Z | 2021-03-11T23:20:14.000Z | mrfd/data_management.py | ivineetm007/Fall-detection | d18ac5f1e26a04ed8492b2e215a91c51b55f32dd | [
"MIT"
] | 4 | 2020-09-06T20:56:23.000Z | 2021-11-25T05:46:59.000Z | import os
import glob
# import h5py
import numpy as np
import tqdm
import cv2
# from util import *
import sys
import csv
import config
import shutil
from data_utils import load_ROI_box_csv,create_img_data_set,split_data_tracks,create_ROI_mask,load_fall_labels,sort_frames,computeOpticalFlow
root_drive = config.root_drive
def get_dir_lists(dset, d_type):
    """Return the glob matches for all per-video folders of a dataset.

    Arguments:
        dset {str} -- dataset root name; only 'Thermal' and 'Thermal_track'
            are recognised.
        d_type {str} -- data subfolder, e.g. 'frame' or 'csv'.
    Returns:
        tuple -- (list of NonFall/ADL* folder paths, list of Fall/Fall* folder paths).
    Raises:
        Exception -- when dset is not a recognised dataset name.
    """
    if dset not in ('Thermal', 'Thermal_track'):
        raise Exception('Dataset not matched')
    base = root_drive + '/' + dset + '/' + d_type
    adl_dirs = glob.glob(base + '/NonFall/ADL*')
    fall_dirs = glob.glob(base + '/Fall/Fall*')
    return adl_dirs, fall_dirs
def get_mapped_dir_lists(dset, d_type='frame',output_dir=None,output_name=None):
    """Map every ADL/Fall video folder of a dataset onto a destination folder.

    Arguments:
        dset {str} -- source dataset name (config.root_folder, 'Thermal_pose'
            or config.track_root_folder).
        d_type {str} -- data subfolder of the source dataset (default 'frame').
        output_dir {str} -- explicit destination root; ignored when
            output_name is given.
        output_name {str} -- destination dataset name; the destination root
            becomes <root_drive>/<output_name>/<d_type>.
    Returns:
        tuple -- (ADL_list, Fall_list), each a list of [source_dir, dest_dir]
            pairs preserving the Fall/NonFall subfolder layout.
    Raises:
        Exception -- when dset is not a recognised dataset name.
    """
    output_path = output_dir
    if output_name is not None:
        output_path = root_drive + '/' + output_name + '/' + d_type
    if dset == config.root_folder or dset == 'Thermal_pose' or dset == config.track_root_folder:
        path_Fall = root_drive + '/'+dset+'/'+d_type +'/Fall/Fall*'
        path_ADL = root_drive + '/'+dset+'/'+d_type +'/NonFall/ADL*'
    else:
        # Previously this branch only printed a warning and fell through to
        # glob.glob(None), which raised an unrelated TypeError. Fail fast
        # instead, consistent with get_dir_lists.
        raise Exception('Dataset not matched')
    vid_dir_list_0 = glob.glob(path_ADL)
    vid_dir_list_1 = glob.glob(path_Fall)
    ADL_list = []
    Fall_list = []
    # Rebuild each path under the destination root, keeping the last two
    # components (class folder + video folder).
    for path in vid_dir_list_0:
        splits = path.split('/')
        ADL_list.append([path, output_path+'/'+splits[-2]+'/'+splits[-1]])
    for path in vid_dir_list_1:
        splits = path.split('/')
        Fall_list.append([path, output_path+'/'+splits[-2]+'/'+splits[-1]])
    return ADL_list, Fall_list
def get_ROI_boxes(dset=config.track_root_folder,class_='ADL'):
    '''
    Returns a dictionary
    key:video name ex 'ADL1'
    value:dictionary
        key:'BOX'-
        value: numpy array of car_boxes
        key:'NUMBER'
        value: list of frame number of corresponding detected bbox
    '''
    class_ROI={}
    # Each video has one csv folder containing <video_name>.csv with the
    # tracked bounding boxes.
    ADL_csv_dir_list,Fall_csv_dir_list=get_dir_lists(dset=dset, d_type='csv')
    if class_=='ADL':
        csv_list=list(map(lambda csv_path: os.path.join(csv_path,os.path.basename(csv_path)+'.csv'),ADL_csv_dir_list))
        if len(csv_list)<config.adl_num:
            print(len(csv_list),"csv folders found instead of",config.adl_num," in track data folder")
            # ADL shortage is fatal; the pipeline needs all ADL videos.
            sys.exit(0)
    elif class_=='Fall':
        csv_list=list(map(lambda csv_path: os.path.join(csv_path,os.path.basename(csv_path)+'.csv'),Fall_csv_dir_list))
        if len(csv_list)<config.fall_num:
            # NOTE(review): unlike the ADL branch, a Fall shortage only warns
            # and continues - confirm whether this asymmetry is intentional.
            print(len(csv_list),"csv folders found instead of",config.fall_num," in track data folder")
    # Build {video_name: {'BOX': ndarray, 'NUMBER': frame numbers}}.
    for csv_path in csv_list:
        ROI={}
        ROI['BOX'],ROI['NUMBER']=load_ROI_box_csv(csv_path)
        class_ROI[os.path.basename(csv_path).split('.')[0]]=ROI
    return class_ROI
def copy_file(inp_path, out_path):
    """Best-effort copy of a single file from inp_path to out_path.

    Errors are reported on stdout and swallowed on purpose: one failed frame
    copy must not abort a whole dataset copy (see copy_track_frames_video).
    """
    try:
        shutil.copyfile(inp_path, out_path)
    # If source and destination are same
    except shutil.SameFileError:
        print("Source and destination represents the same file.")
    # If destination is a directory.
    except IsADirectoryError:
        print("Destination is a directory.")
    # If there is any permission issue
    except PermissionError:
        print("Permission denied.")
    # For other errors. Was a bare ``except:``, which also swallowed
    # SystemExit and KeyboardInterrupt; Exception keeps the best-effort
    # behaviour without masking interpreter exits.
    except Exception:
        print("Error occurred while copying file.", inp_path)
def copy_track_frames_video(input_path, output_path, numbers):
    """Copy the frames of one video whose frame numbers appear in ``numbers``.

    Arguments:
        input_path {str} -- folder holding the video's jpg/png frames.
        output_path {str} -- destination folder (created if missing).
        numbers -- collection of frame numbers to keep.
    """
    candidates = glob.glob(input_path + '/*.jpg') + glob.glob(input_path + '/*.png')
    sorted_paths, frame_nums = sort_frames(candidates, dset='Thermal')
    os.makedirs(output_path, exist_ok=True)
    selected = [p for p, n in zip(sorted_paths, frame_nums) if n in numbers]
    for src in selected:
        copy_file(src, os.path.join(output_path, os.path.basename(src)))
def copy_track_frames_dataset():
    '''
    copy tracked frames from dataset to track folder
    '''
    # Frame numbers with a detected bounding box, per video.
    ADL_ROI=get_ROI_boxes(class_='ADL')
    Fall_ROI=get_ROI_boxes(class_='Fall')
    # [source_dir, dest_dir] pairs mapping the raw dataset onto the track folder.
    ADL_list,Fall_list=get_mapped_dir_lists(dset=config.root_folder, d_type='frame',output_name=config.track_root_folder)
    print("Copy tracked frames for ADL videos")
    for input_path,output_path in tqdm.tqdm(ADL_list):
        video_name=os.path.basename(input_path)
        print('Input:',input_path)
        print('Copy Destination folder:',output_path)
        # Wipe any previous copy so the destination reflects exactly the
        # current set of tracked frames.
        if os.path.exists(output_path) and os.path.isdir(output_path):
            shutil.rmtree(output_path)
        copy_track_frames_video(input_path,output_path,numbers=ADL_ROI[video_name]['NUMBER'])
    print("Copy tracked frames for Fall videos")
    for input_path,output_path in tqdm.tqdm(Fall_list):
        video_name=os.path.basename(input_path)
        print('Input:',input_path)
        print('Copy Destination folder:',output_path)
        if os.path.exists(output_path) and os.path.isdir(output_path):
            shutil.rmtree(output_path)
        copy_track_frames_video(input_path,output_path,numbers=Fall_ROI[video_name]['NUMBER'])
def init_vid(vid_dir = None, vid_class = None,dset='Thermal_track',process_list=None,win_length=8,split_gap=10,data_shape=(64,64,1),ROI_array=None,fall_labels=None):# TOMOVE
    '''
    Loads and preprocess a video: loads and preprocesses the frames, then
    splits them into contiguous sub-videos and builds per-frame labels.

    Arguments:
        vid_dir {str} -- folder containing the video's frames.
        vid_class {int} -- 1 for a Fall video, 0 for ADL.
        dset {str} -- dataset name passed to the frame loader.
        process_list -- preprocessing steps; defaults to ['Processed'].
            (The default used to be a mutable list literal shared between
            calls; it is now created per call.)
        win_length {int} -- minimum sub-video length kept by the splitter.
        split_gap {int} -- frame-number gap that starts a new sub-video.
        data_shape {tuple} -- target (H, W, C) of the loaded frames.
        ROI_array -- optional bounding boxes used when masking frames.
        fall_labels {tuple} -- (start, end) fall frame numbers; required
            when vid_class == 1.
    Returns:
        tuple -- (list of sub-video arrays, list of frame-number lists,
                  list of label arrays, list of frame-path lists).
    '''
    if process_list is None:
        process_list = ['Processed']
    print('Loading vid at ', vid_dir)
    #Data as numpy array and list of sorted frame numbers
    data,frame_numbers,frames_path=create_img_data_set(fpath=vid_dir, data_shape=data_shape,dset=dset, process_list=process_list,ROI_array=ROI_array,sort = True)
    #Data as list of numpy array and list of sorted frame numbers
    data_list,frame_numbers_list=split_data_tracks(data,frame_numbers,gap=split_gap,win_length=win_length)
    #Split frames path
    frames_path_list,_=split_data_tracks(frames_path,frame_numbers,gap=split_gap,win_length=win_length)
    vid_dir_name = os.path.basename(vid_dir)
    print('vid_dir_name', vid_dir_name)
    # Per-frame labels: 1 inside the annotated fall window, 0 elsewhere.
    labels = np.array([0] * len(data))
    if vid_class==1:
        start,end=fall_labels
        for i in range(len(frame_numbers)):
            if frame_numbers[i]>=start and frame_numbers[i]<=end:
                labels[i]=1
        print("Start frame:",start)
        print("End frame:",end)
    labels_list,_=split_data_tracks(labels,frame_numbers,gap=split_gap,win_length=win_length)
    # Sanity check: labels and frame numbers must split identically.
    # (The loop variable no longer shadows the ``labels`` array above.)
    total_label=0
    for sub_labels in labels_list:
        total_label+=len(sub_labels)
    print("Total number of labels,",total_label)
    total_frame=0
    for frames in frame_numbers_list:
        total_frame+=len(frames)
    print("Total number of frames,",total_frame)
    assert total_label==total_frame
    return data_list,frame_numbers_list,labels_list,frames_path_list
def load_videos(dset='Thermal_track',vid_class='ADL',input_type='FRAME'):
    '''
    Load the dataset and precprocess.
    vid_class:- ['ADL','Fall']
    input_type=['FRAME','ROI_FRAME']
    Steps:
        Copy tracked frames into track folder
        Load ROI boxes from csv files
        Load tracked frames, preprocess, split into subvideos, load and create for subvideos
    Returns dictionary containing video frame paths, actual frame numbers, preprocessed subvidoes, their masks and labels
    '''
    ADL_frame_dir_list,Fall_frame_dir_list=get_dir_lists(dset=dset, d_type='frame')
    # Lazily populate the track folder from the raw Thermal dataset when any
    # per-video frame folder is missing.
    if (len(ADL_frame_dir_list)<config.adl_num or len(Fall_frame_dir_list)<config.fall_num) and dset=='Thermal_track':
        print("All videos folder not found in Thermal_track folder. Copying tracked frames from Thermal folder............")
        copy_track_frames_dataset()
        ADL_frame_dir_list,Fall_frame_dir_list=get_dir_lists(dset=dset, d_type='frame')
    if vid_class=='ADL':
        ADL_videos={}
        if input_type=='ROI_FRAME':
            # Get ROI boxes and frame numbers per video.
            ADL_ROI=get_ROI_boxes(dset=config.track_root_folder,class_='ADL')#Returns dictionary
        for ADL_frame_dir in ADL_frame_dir_list:
            vid_name=os.path.basename(ADL_frame_dir)
            print("\nLoading Video...........",vid_name)
            ADL={}
            if input_type=='FRAME':
                print("\nLoading frame data...........\n")
                data_list,frame_numbers_list,labels_list,frames_path=init_vid(vid_dir = ADL_frame_dir, vid_class = 0, dset=dset,process_list=['Processed'],win_length=config.WIN_LENGTH,split_gap=config.SPLIT_GAP,data_shape=config.LOAD_DATA_SHAPE,fall_labels=None)
                ADL['FRAME']=data_list
                ADL['NUMBER']=frame_numbers_list
                ADL['LABELS']=labels_list
                ADL['PATH']=frames_path
            elif input_type=='ROI_FRAME':
                # Same loading, but frames are additionally masked with the
                # tracked bounding boxes, and a binary ROI mask is produced.
                print("\nLoading data and masking...........\n")
                data_list,frame_numbers_list,labels_list,frames_path=init_vid(vid_dir = ADL_frame_dir, vid_class = 0, dset=dset,process_list=['Processed','ROI_frame'],win_length=config.WIN_LENGTH,split_gap=config.SPLIT_GAP,data_shape=config.LOAD_DATA_SHAPE,fall_labels=None,ROI_array=ADL_ROI[vid_name]['BOX'])
                ADL['ROI_FRAME']=data_list
                ADL['NUMBER']=frame_numbers_list
                ADL['LABELS']=labels_list
                ADL['PATH']=frames_path
                print("\nCreating MASK data...........\n")
                ADL['MASK']=create_ROI_mask(ROI_boxes=ADL_ROI[vid_name]['BOX'],ROI_numbers=ADL_ROI[vid_name]['NUMBER'],img_shape=(config.HEIGHT,config.WIDTH,1),load_shape=config.LOAD_DATA_SHAPE,win_length=config.WIN_LENGTH,split_gap=config.SPLIT_GAP)
            else:
                # NOTE(review): an unknown input_type still stores an empty
                # dict for the video - confirm whether it should be skipped.
                print('\Invalid input_type. Skipping this video.........\n')
            ADL_videos[vid_name]=ADL
        return ADL_videos
    elif vid_class=='Fall':
        Fall_videos={}
        if input_type=='ROI_FRAME':
            # Get ROI boxes and frame numbers per video.
            Fall_ROI=get_ROI_boxes(dset=config.track_root_folder,class_='Fall')
        # Fall start/end frame annotations from the labels csv.
        fall_labels=load_fall_labels(config.label_csv_path)
        # Processing each video
        for Fall_frame_dir in Fall_frame_dir_list:
            vid_name=os.path.basename(Fall_frame_dir)
            print("\nLoading Video...........",vid_name)
            Fall={}
            if input_type=='FRAME':
                print("\nLoading frame data...........\n")
                data_list,frame_numbers_list,labels_list,frames_path=init_vid(vid_dir = Fall_frame_dir, vid_class = 1, dset=dset,process_list=['Processed'],win_length=config.WIN_LENGTH,split_gap=config.SPLIT_GAP,data_shape=config.LOAD_DATA_SHAPE,fall_labels=fall_labels[vid_name])
                Fall['FRAME']=data_list
                Fall['NUMBER']=frame_numbers_list
                Fall['LABELS']=labels_list
                Fall['PATH']=frames_path
            elif input_type=='ROI_FRAME':
                print("\nLoading data and masking...........\n")
                data_list,frame_numbers_list,labels_list,frames_path=init_vid(vid_dir = Fall_frame_dir, vid_class = 1, dset=dset,process_list=['Processed','ROI_frame'],win_length=config.WIN_LENGTH,split_gap=config.SPLIT_GAP,data_shape=config.LOAD_DATA_SHAPE,fall_labels=fall_labels[vid_name],ROI_array=Fall_ROI[vid_name]['BOX'])
                Fall['ROI_FRAME']=data_list
                Fall['NUMBER']=frame_numbers_list
                Fall['LABELS']=labels_list
                Fall['PATH']=frames_path
                print("\nLoading MASK data...........\n")
                mask_list=create_ROI_mask(ROI_boxes=Fall_ROI[vid_name]['BOX'],ROI_numbers=Fall_ROI[vid_name]['NUMBER'],img_shape=(config.HEIGHT,config.WIDTH,1),load_shape=config.LOAD_DATA_SHAPE,win_length=config.WIN_LENGTH,split_gap=config.SPLIT_GAP)
                Fall['MASK']=mask_list
            # (start, end) fall window for downstream evaluation.
            Fall['START_END']=fall_labels[vid_name]
            Fall_videos[vid_name]=Fall
        return Fall_videos
    else:
        print("Invalid vid_clas type")
        sys.exit(0)
def flow_from_path_list(sub_vid_path_list):
    '''
    Compute dense optical flow for every sub-video.

    Arguments:
        sub_vid_path_list -- list of sub-videos, each a list of frame paths.
    Returns:
        list -- one flow array per sub-video, at config.LOAD_DATA_SHAPE size.
    '''
    flows = []
    height = config.LOAD_DATA_SHAPE[0]
    width = config.LOAD_DATA_SHAPE[1]
    for frame_paths in tqdm.tqdm(sub_vid_path_list):
        flow = computeOpticalFlow(frame_paths, height, width)
        print('Number of frames', len(frame_paths))
        print('Flow shape', flow.shape)
        flows.append(flow)
    return flows
def load_flow_from_folder(vid_flow_dir):
    '''
    Load all the stored optical flow arrays computed for the subvideos of a
    video, in numeric order of their file stems ("0.npy", "1.npy", ...).

    Arguments:
        vid_flow_dir {str} -- folder containing the <index>.npy flow files.
    Returns:
        list -- flow arrays ordered by sub-video index.
    '''
    files = glob.glob(vid_flow_dir + '/*.npy')
    # Sort by the integer file stem. os.path.basename/splitext is used
    # instead of splitting on '/' so the ordering also works with Windows
    # path separators.
    sorted_files = sorted(
        files, key=lambda p: int(os.path.splitext(os.path.basename(p))[0]))
    vid_flow_list = []
    for i, path in enumerate(sorted_files):
        flow = np.load(path)
        vid_flow_list.append(flow)
        print(str(i) + ' subvid flow shape')
        print(flow.shape)
    return vid_flow_list
def save_flow_folder(vid_flow_list, vid_flow_dir):
    '''
    Persist each sub-video flow array as <index>.npy inside vid_flow_dir
    (created if missing), index being the position in the list.
    '''
    os.makedirs(vid_flow_dir, exist_ok=True)
    for idx, flow in enumerate(vid_flow_list):
        print(str(idx) + ' subvid flow shape')
        print(flow.shape)
        np.save(os.path.join(vid_flow_dir, str(idx)), flow)
def load_optical_flow_dataset(videos, vid_class='ADL'):
    '''
    Attach an optical-flow list (key ``'FLOW'``) to every video in
    ``videos``.

    For each video, flow is loaded from its cache directory under
    ``config.flow_dir`` when present; otherwise it is computed from the
    video's ``'PATH'`` entries and cached for next time.

    The original implementation duplicated the whole loop body for the
    'ADL' and 'Fall' branches; they were identical except for the case of
    one log message, so they are unified here.

    :param videos: dict mapping video name -> per-video data dict
    :param vid_class: 'ADL' or 'Fall'; any other value is silently ignored
        (preserving the original behaviour).
    '''
    os.makedirs(config.flow_dir, exist_ok=True)
    if vid_class not in ('ADL', 'Fall'):
        return
    for vid_name in videos.keys():
        print('------------')
        print('Video Name', vid_name)
        print('------------')
        vid_flow_dir = os.path.join(config.flow_dir, vid_name)
        if os.path.isdir(vid_flow_dir):
            print("Loading flow from:", vid_flow_dir)
            videos[vid_name]['FLOW'] = load_flow_from_folder(vid_flow_dir)
        else:
            print("Saving flow at:", vid_flow_dir)
            vid_flow_list = flow_from_path_list(videos[vid_name]['PATH'])
            videos[vid_name]['FLOW'] = vid_flow_list
            save_flow_folder(vid_flow_list, vid_flow_dir)
| 43.554622 | 328 | 0.665509 |
709bb5f9008e413b6ac19073761e4686aab525d0 | 897 | py | Python | parse_args.py | cfhaiteeh/FightRoute | 0055b13848d957d3876049ab8cba45d268fc34d6 | [
"MIT"
] | 13 | 2019-11-11T10:26:19.000Z | 2021-07-03T05:08:06.000Z | parse_args.py | cfhaiteeh/FightRoute | 0055b13848d957d3876049ab8cba45d268fc34d6 | [
"MIT"
] | null | null | null | parse_args.py | cfhaiteeh/FightRoute | 0055b13848d957d3876049ab8cba45d268fc34d6 | [
"MIT"
] | 3 | 2020-06-16T06:21:05.000Z | 2020-08-19T04:03:11.000Z | import sys
args = sys.argv
import os
import argparse
def interpret_args():
    """ Interprets the command line arguments, and returns the parsed namespace.

    Fix: the original used ``type=bool`` for ``--prob`` and ``--top5``.
    argparse passes the raw string to the type callable, and
    ``bool("False")`` is ``True`` (any non-empty string is truthy), so
    ``--prob False`` silently enabled the flag. A string-to-bool converter
    is used instead; ``--prob True`` keeps working, ``--prob False`` now
    actually yields ``False``.
    """
    def str2bool(value):
        # Map common boolean spellings explicitly instead of bool().
        if isinstance(value, bool):
            return value
        if value.lower() in ("yes", "true", "t", "1"):
            return True
        if value.lower() in ("no", "false", "f", "0"):
            return False
        raise argparse.ArgumentTypeError("Boolean value expected, got %r" % value)

    parser = argparse.ArgumentParser()
    parser.add_argument("--alpha1", type=int, default=25)
    parser.add_argument("--alpha2", type=int, default=15)
    parser.add_argument("--beta1", type=int, default=20)
    parser.add_argument("--beta2", type=int, default=25)
    parser.add_argument("--thet", type=int, default=30)
    parser.add_argument("--delta", type=float, default=0.001)
    parser.add_argument("--max_point", type=int, default=270000)
    parser.add_argument("--prob", type=str2bool, default=False)
    parser.add_argument("--top5", type=str2bool, default=False)
    parser.add_argument(
        '--data_path',
        type=str,
        default='data/data1.csv')
    args = parser.parse_args()
    print(args)
    return args
| 26.382353 | 76 | 0.672241 |
cc00cdd47567a7e5079181a206045efc028f406e | 6,940 | py | Python | samples/openapi3/client/petstore/python/petstore_api/model/enum_arrays.py | gasugesu/openapi-generator | e1c43f135639b9f300350f788fec98bbc375c932 | [
"Apache-2.0"
] | 3 | 2021-05-19T03:12:48.000Z | 2022-01-28T19:15:42.000Z | samples/openapi3/client/petstore/python/petstore_api/model/enum_arrays.py | gasugesu/openapi-generator | e1c43f135639b9f300350f788fec98bbc375c932 | [
"Apache-2.0"
] | 3 | 2021-05-11T23:55:26.000Z | 2022-02-27T11:17:21.000Z | samples/openapi3/client/petstore/python/petstore_api/model/enum_arrays.py | gasugesu/openapi-generator | e1c43f135639b9f300350f788fec98bbc375c932 | [
"Apache-2.0"
] | 1 | 2020-10-05T11:13:04.000Z | 2020-10-05T11:13:04.000Z | """
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class EnumArrays(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    allowed_values = {
        ('just_symbol',): {
            '>=': ">=",
            '$': "$",
        },
        ('array_enum',): {
            'FISH': "fish",
            'CRAB': "crab",
        },
    }
    validations = {
    }
    # No additional (undeclared) properties are accepted for this schema.
    additional_properties_type = None
    # The schema itself is not nullable.
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'just_symbol': (str,),  # noqa: E501
            'array_enum': ([str],),  # noqa: E501
        }
    @cached_property
    def discriminator():
        # This schema declares no discriminator field.
        return None
    attribute_map = {
        'just_symbol': 'just_symbol',  # noqa: E501
        'array_enum': 'array_enum',  # noqa: E501
    }
    _composed_schemas = {}
    # Internal bookkeeping attribute names that are set directly on the
    # instance (they bypass the model's attribute validation machinery).
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """EnumArrays - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            just_symbol (str): [optional] # noqa: E501
            array_enum ([str]): [optional] # noqa: E501
        """
        # Pop the framework-internal keyword arguments before treating the
        # remaining kwargs as model properties.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
| 38.555556 | 174 | 0.574207 |
dc8a9e90f874644b9f48dd9d8dfb24a5280414cf | 3,447 | py | Python | exchangerates/__init__.py | andylolz/exchangerates | d436698a496f1b8b7234da2e313538ea9af62a9d | [
"MIT"
] | null | null | null | exchangerates/__init__.py | andylolz/exchangerates | d436698a496f1b8b7234da2e313538ea9af62a9d | [
"MIT"
] | null | null | null | exchangerates/__init__.py | andylolz/exchangerates | d436698a496f1b8b7234da2e313538ea9af62a9d | [
"MIT"
] | null | null | null | from bisect import bisect_left
import csv
import datetime
import get_rates
import pkg_resources
class UnknownCurrencyException(Exception):
    """Raised when a rate is requested for a currency with no known rates."""
    pass
def make_date_from_iso(iso_str):
    """Build a ``datetime.date`` from the leading ``YYYY-MM-DD`` of *iso_str*."""
    year = int(iso_str[0:4])
    month = int(iso_str[5:7])
    day = int(iso_str[8:10])
    return datetime.date(year, month, day)
def take_closest(myList, myNumber):
    """Return the element of the sorted *myList* closest to *myNumber*.

    Ties are resolved in favour of the smaller element.
    (Adapted from http://stackoverflow.com/a/12141511)
    """
    idx = bisect_left(myList, myNumber)
    if idx <= 0:
        return myList[0]
    if idx >= len(myList):
        return myList[-1]
    lower = myList[idx - 1]
    upper = myList[idx]
    # Strict '<' keeps the smaller value when both are equally close.
    return upper if upper - myNumber < myNumber - lower else lower
class CurrencyConverter(object):
    """Look up the closest-dated exchange rate for a currency.

    Rates are read from a consolidated CSV whose rows are
    ``(iso_date, rate, currency)``. By default the CSV bundled with the
    package is used; pass ``source`` to read a different file.

    Fixes over the original:
      * ``csv_reader.next()`` (Python-2-only) -> ``next(csv_reader)``,
        which works on both Python 2.6+ and 3.
      * file mode ``"rU"`` -> ``"r"`` (the ``U`` flag was removed in
        Python 3.11).
      * passing a custom ``source`` no longer raises ``UnboundLocalError``
        (the conditional assignment made ``source`` local to
        ``load_rates`` and left it unbound on the custom-source path).
    """

    def __init__(self, update=False, source=False):
        """
        :param update: when using the bundled rates file, refresh it first
            via ``get_rates.update_rates`` (ignored for a custom ``source``).
        :param source: path to a rates CSV; ``False`` selects the bundled file.
        """
        def load_rates():
            """Yield (date, rate, currency) rows from the CSV, skipping the header."""
            path = self.source
            if path is False:  # sentinel default is False, not None
                resource_package = __name__
                resource_path = 'consolidated_rates.csv'
                path = pkg_resources.resource_filename(
                    resource_package, resource_path)
                if update:
                    get_rates.update_rates(path)
            with open(path, "r") as data:
                csv_reader = csv.reader(data)
                next(csv_reader)  # skip the header row
                for row in csv_reader:
                    yield row

        def make_rates(rates_list):
            """Index rates as {currency: {date: rate}} plus sorted date lists.

            Returns (currencies_dates, rates_dates) where currencies_dates maps
            each currency to the sorted list of dates it has rates for.
            """
            rates_dates = {}
            for row in rates_list:
                rate_date = make_date_from_iso(row[0])
                rates_dates.setdefault(row[2], {})[rate_date] = float(row[1])
            currencies_dates = dict(
                (currency, sorted(dates))
                for currency, dates in rates_dates.items())
            return currencies_dates, rates_dates

        self.source = source
        self.currencies_dates, self.dates_rates = make_rates(load_rates())

    def known_currencies(self):
        """Return the known currency codes as a comma-separated string."""
        return ",".join(sorted(self.currencies_dates.keys()))

    def closest_rate(self, currency, date):
        """Return the rate for *currency* on the known date closest to *date*.

        :param currency: three-letter currency code (USD is always 1.0)
        :param date: a ``datetime.date``
        :returns: dict with ``closest_date`` and ``conversion_rate`` keys
        :raises UnknownCurrencyException: if the currency has no rates
        """
        if currency == u"USD":
            return {"closest_date": date, "conversion_rate": 1.0}
        try:
            the_date = take_closest(self.currencies_dates[currency], date)
            return {
                "closest_date": the_date,
                "conversion_rate": self.dates_rates[currency][the_date]
            }
        except KeyError:
            raise UnknownCurrencyException("Unknown currency: {}".format(
                currency))
if __name__ == "__main__":
    """
    Example output
    """
    # print() calls (valid on both Python 2 and 3) replace the original
    # Python-2-only print statements, which are a SyntaxError under Python 3.
    converter = CurrencyConverter(update=True)
    print("Available currencies: {}".format(converter.known_currencies()))
    print(converter.closest_rate("USD", datetime.date(2012, 7, 20)))
    print(converter.closest_rate("EUR", datetime.date(2014, 7, 20)))
    # NOTE(review): duplicate of the previous call in the original;
    # kept so the demo output is unchanged.
    print(converter.closest_rate("EUR", datetime.date(2014, 7, 20)))
028ee13e130aca9f9959ada916c12afb31157d56 | 2,967 | py | Python | tracker/views.py | paul-serafimescu/growth-tracker | 972dd1de534225a9be2e763045db560f420a844c | [
"MIT"
] | 1 | 2021-07-28T04:21:12.000Z | 2021-07-28T04:21:12.000Z | tracker/views.py | paul-serafimescu/growth-tracker | 972dd1de534225a9be2e763045db560f420a844c | [
"MIT"
] | null | null | null | tracker/views.py | paul-serafimescu/growth-tracker | 972dd1de534225a9be2e763045db560f420a844c | [
"MIT"
] | null | null | null | from django.http.response import JsonResponse
from django.shortcuts import get_object_or_404, render
from django.http import HttpRequest, HttpResponse, HttpResponseNotFound
from django.views import View
from django.core.exceptions import ObjectDoesNotExist
from django.utils import timezone
from .middleware import protected_route, bot_joined
from .models import Guild, Snapshot
class WeekDaysMeta(type):
    """Metaclass that equips every class it creates with a ``days`` list."""

    def __new__(cls, *args, **kwargs) -> type:
        new_class = super().__new__(cls, *args, **kwargs)
        new_class.days = [
            'Monday',
            'Tuesday',
            'Wednesday',
            'Thursday',
            'Friday',
            'Saturday',
            'Sunday',
        ]
        return new_class
class WeekDays(metaclass=WeekDaysMeta):
    """A weekday index (0 = Monday ... 6 = Sunday) that stringifies to its name.

    The ``days`` list is injected by :class:`WeekDaysMeta`.
    """

    def __init__(self, value: int) -> None:
        # Raise instead of assert: assertions are stripped under `python -O`,
        # which would silently accept out-of-range or non-int values.
        if not isinstance(value, int) or not -1 < value < 7:
            raise ValueError(
                "weekday value must be an int in range 0..6, got %r" % (value,))
        self._value_ = value

    def __str__(self) -> str:
        return self.__class__.days[self._value_]
class Index(View):
    """Render the home page with the guilds that belong to the current user."""

    def get(self, request: HttpRequest, *args, **kwargs) -> HttpResponse:
        user_guilds = list(request.user.guild_set.all())
        return render(request, 'home.html', {'guilds': user_guilds})
class ServerListView(View):
    """Render the guild-list page for the current user's guilds."""

    def get(self, request: HttpRequest, *args, **kwargs) -> HttpResponse:
        user_guilds = list(request.user.guild_set.all())
        return render(request, 'guilds.html', {'guilds': user_guilds})
class ServerView(View):
    """Detail page and update endpoints for a single guild."""

    def get(self, request: HttpRequest, guild_id: str, *args, **kwargs) -> HttpResponse:
        """Render the guild page with the last 7 days of snapshots, bucketed by weekday."""
        def to_locale(snapshot: Snapshot) -> Snapshot:
            # Convert the stored timestamp to the active local timezone in place.
            snapshot.date = timezone.localtime(snapshot.date)
            return snapshot
        try:
            # NOTE(review): the snapshot queryset below is re-evaluated once
            # per weekday bucket (7 queries); consider fetching once and
            # grouping in Python.
            context = {
                'guild': Guild.objects.get(guild_id=guild_id),
                'guilds': list(request.user.guild_set.all()),
                'snapshots': {day:
                [snapshot for snapshot in
                list(map(to_locale, Snapshot.objects.filter(guild__guild_id=guild_id, date__gte=timezone.now() - timezone.timedelta(days=7)))) if str(WeekDays(snapshot.date.weekday())) == day]
                for day in WeekDays.days},
            }
        except ObjectDoesNotExist:
            # Unknown guild_id.
            return HttpResponseNotFound('<h1>not found</h1>')
        return render(request, 'guild.html', context)

    @protected_route
    def patch(self, request: HttpRequest, guild_id: str, *args, **kwargs) -> HttpResponse:
        """Increment the guild's member count; 404 if the guild is unknown.

        Returns 200 without changes when ``guild.members`` is None.
        """
        try:
            guild: Guild = Guild.objects.get(guild_id=guild_id)
        except ObjectDoesNotExist:
            return HttpResponse(status=404)
        if guild.members is None:
            # Member count not tracked for this guild; nothing to do.
            return HttpResponse(status=200)
        guild.increment_member_count()
        return HttpResponse(status=200)

    @protected_route
    def post(self, request: HttpRequest, guild_id: str, *args, **kwargs) -> HttpResponse:
        # TODO: Snapshot creation protocol (only the bot can trigger a creation from this route)
        # Not implemented yet: the bare ellipsis makes the view return None,
        # which Django rejects at runtime if this route is ever hit.
        ...
class GraphView(View):
    """Render the graph page for a guild the bot has joined."""

    @bot_joined
    def get(self, request: HttpRequest, *args, **kwargs) -> HttpResponse:
        page_context = {
            'guild': request.guild,
            'guilds': list(request.user.guild_set.all()),
        }
        return render(request, 'graph.html', page_context)
684b83abe8d6e9a2b288be48eaadb2e0d558aaf7 | 946 | py | Python | test/test_v1_timestamps.py | metal-stack/metal-python | cdf40fa86d2b2944f9818cef1c6723b1eecc506e | [
"MIT"
] | 7 | 2020-12-21T05:24:24.000Z | 2022-02-12T20:55:32.000Z | test/test_v1_timestamps.py | metal-stack/metal-python | cdf40fa86d2b2944f9818cef1c6723b1eecc506e | [
"MIT"
] | 6 | 2020-09-16T07:23:34.000Z | 2022-01-18T12:05:30.000Z | test/test_v1_timestamps.py | metal-stack/metal-python | cdf40fa86d2b2944f9818cef1c6723b1eecc506e | [
"MIT"
] | null | null | null | # coding: utf-8
"""
metal-api
API to manage and control plane resources like machines, switches, operating system images, machine sizes, networks, IP addresses and more # noqa: E501
OpenAPI spec version: v0.15.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import metal_python
from metal_python.models.v1_timestamps import V1Timestamps # noqa: E501
from metal_python.rest import ApiException
class TestV1Timestamps(unittest.TestCase):
"""V1Timestamps unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1Timestamps(self):
"""Test V1Timestamps"""
# FIXME: construct object with mandatory attributes with example values
# model = metal_python.models.v1_timestamps.V1Timestamps() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 23.073171 | 156 | 0.705074 |
10385f513eb620ee76cee002f03cf9df164fefd9 | 372 | py | Python | plugin.video.youtube/resources/lib/default.py | akuala/REPO.KUALA | ea9a157025530d2ce8fa0d88431c46c5352e89d4 | [
"Apache-2.0"
] | 2 | 2018-11-02T19:55:30.000Z | 2020-08-14T02:22:20.000Z | plugin.video.youtube/resources/lib/default.py | akuala/REPO.KUALA | ea9a157025530d2ce8fa0d88431c46c5352e89d4 | [
"Apache-2.0"
] | null | null | null | plugin.video.youtube/resources/lib/default.py | akuala/REPO.KUALA | ea9a157025530d2ce8fa0d88431c46c5352e89d4 | [
"Apache-2.0"
] | 3 | 2019-12-17T20:47:00.000Z | 2021-02-11T19:03:59.000Z | # -*- coding: utf-8 -*-
"""
Copyright (C) 2014-2016 bromix (plugin.video.youtube)
Copyright (C) 2016-2018 plugin.video.youtube
SPDX-License-Identifier: GPL-2.0-only
See LICENSES/GPL-2.0-only for more information.
"""
from youtube_plugin.kodion import runner
from youtube_plugin import youtube
__provider__ = youtube.Provider()
runner.run(__provider__)
| 23.25 | 57 | 0.728495 |
80fe314fd26a8ad7d3d92d7fb3a9861019c6b4a7 | 1,148 | py | Python | project/test/backend/pages/categorias/test_category_slug.py | fael07/portfolio | 9f2bb7f85d195d4f181c59426f653652b3d7b78a | [
"MIT"
] | null | null | null | project/test/backend/pages/categorias/test_category_slug.py | fael07/portfolio | 9f2bb7f85d195d4f181c59426f653652b3d7b78a | [
"MIT"
] | null | null | null | project/test/backend/pages/categorias/test_category_slug.py | fael07/portfolio | 9f2bb7f85d195d4f181c59426f653652b3d7b78a | [
"MIT"
] | null | null | null | from django.test import TestCase, Client
from backend.categories import Category
from backend.core.app.models import Project
from worker.views.manager import create_cache
from worker.views.pages.categorias.category_slug import create_project_list, create_project_list_box
class TestCategoryPage(TestCase):
def setUp(self):
self.client = Client()
self.category = Category.objects.create(
name='test', slug='test',
img='categories/python.png',
)
self.project = Project(
name='test', slug='test',
img='project/2022/03/14/p1.JPG',
project_url='test.com',
github_url='test.com',
category=self.category,
)
self.project.save()
create_cache()
self.request = self.client.get(f'/categorias/{self.category.slug}')
def test_status(self):
self.assertEqual(self.request.status_code, 200)
def test_main_content(self):
projects_list_html = str(create_project_list(self.category.projects.all()))
self.assertIn(projects_list_html, str((self.request.content).decode('utf-8'))) | 34.787879 | 100 | 0.662892 |
83caf3e0148b0fffa508810cfe4874439b572b28 | 5,078 | py | Python | train.py | amore-upf/masked-coreference | fe2386da21ad54d90dd60d7ccbae7e24f323a072 | [
"Apache-2.0"
] | null | null | null | train.py | amore-upf/masked-coreference | fe2386da21ad54d90dd60d7ccbae7e24f323a072 | [
"Apache-2.0"
] | null | null | null | train.py | amore-upf/masked-coreference | fe2386da21ad54d90dd60d7ccbae7e24f323a072 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import torch
import tensorflow as tf
import util
import logging
import numpy as np
format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s'
logging.basicConfig(format=format)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
if __name__ == "__main__":
config = util.initialize_from_env()
report_frequency = config["report_frequency"]
eval_frequency = config["eval_frequency"]
eval_masked = 5
#eval_frequency = 100 # for debugging
model = util.get_model(config)
saver = tf.train.Saver()
log_dir = config["log_dir"]
max_steps = config['num_epochs'] * config['num_docs']
writer = tf.summary.FileWriter(log_dir, flush_secs=20)
max_f1 = 0
mode = 'w'
max_antecedent_f1 = 0 # added
masked = config['mask_percentage'] > 0
with tf.Session() as session:
# Initialize
session.run(tf.global_variables_initializer())
# load_data altogether
model.start_enqueue_thread(session)
accumulated_loss = 0.0
ckpt = tf.train.get_checkpoint_state(log_dir)
if ckpt and ckpt.model_checkpoint_path:
print("Restoring from: {}".format(ckpt.model_checkpoint_path))
saver.restore(session, ckpt.model_checkpoint_path)
mode = 'a'
fh = logging.FileHandler(os.path.join(log_dir, 'stdout.log'), mode=mode)
fh.setFormatter(logging.Formatter(format))
logger.addHandler(fh)
initial_time = time.time()
while True:
tf_loss, tf_global_step, _ = session.run([model.loss, model.global_step, model.train_op])
accumulated_loss += tf_loss
if tf_global_step % report_frequency == 0:
total_time = time.time() - initial_time
steps_per_second = tf_global_step / total_time
average_loss = accumulated_loss / report_frequency
logger.info("[{}] loss={:.2f}, steps/s={:.2f}".format(tf_global_step, average_loss, steps_per_second))
writer.add_summary(util.make_summary({"loss": average_loss}), tf_global_step)
accumulated_loss = 0.0
if tf_global_step > 0 and tf_global_step % eval_frequency == 0:
saver.save(session, os.path.join(log_dir, "model"), global_step=tf_global_step)
'''
if max_percentage > 0: evaluation on masks only too
Stopping criterion: model with max sum of both accuracies
'''
i = 0
eval_f1_mask_all, eval_antecedent_f1_mask_all, eval_antecedent_f1_masks_only_all = [], [], []
while i < eval_masked:
print('Evaluation with masks - ', i)
eval_summary_mask, eval_f1_mask, eval_antecedent_f1_mask, eval_antecedent_f1_masks_only = model.evaluate(session, tf_global_step, masked = True, eval_on_masks_only = True,
with_gold_mentions = config['with_gold_mentions'])
eval_f1_mask_all.append(eval_f1_mask)
eval_antecedent_f1_mask_all.append(eval_antecedent_f1_mask)
eval_antecedent_f1_masks_only_all.append(eval_antecedent_f1_masks_only)
writer.add_summary(eval_summary_mask, tf_global_step)
i +=1
eval_f1_mask = np.average(eval_f1_mask_all)
eval_antecedent_f1_mask = np.average(eval_antecedent_f1_mask_all)
eval_antecedent_f1_masks_only = np.average(eval_antecedent_f1_masks_only_all)
# keep model with max f1 mask
global_eval_f1 = eval_f1_mask
print('Evaluation without masks')
eval_summary, eval_f1, eval_antecedent_f1 = model.evaluate(session, tf_global_step, with_gold_mentions = config['with_gold_mentions'])
if not masked:
global_eval_f1 = eval_f1
writer.add_summary(eval_summary, tf_global_step)
if global_eval_f1 > max_f1:
max_f1 = global_eval_f1
max_scores = {'coref_f1': eval_f1, 'antecedent_f1': eval_antecedent_f1, 'coref_withmasks_f1': eval_f1_mask, 'antecedent_withmasks_f1': eval_antecedent_f1_mask, 'antecedent_onlymasks_f1': eval_antecedent_f1_masks_only}
util.copy_checkpoint(os.path.join(log_dir, "model-{}".format(tf_global_step)), os.path.join(log_dir, "model.max.ckpt"))
writer.add_summary(util.make_summary({"max_eval_f1": max_f1}), tf_global_step)
logger.info("[{}] evaL_f1={:.4f}, max_f1={:.4f}".format(tf_global_step, eval_f1, max_f1))
logger.info("[{}] evaL_antecedent_f1={:.4f}, max_f1={:.4f}".format(tf_global_step, eval_antecedent_f1, max_f1))
logger.info("[{}] evaL_mask_f1={:.4f}, max_f1={:.4f}".format(tf_global_step, eval_f1_mask, max_f1))
logger.info("[{}] evaL_mask_antecedent_f1={:.4f}, max_f1={:.4f}".format(tf_global_step, eval_antecedent_f1_mask, max_f1))
logger.info("[{}] evaL_mask_antecedent_f1_selected={:.4f}, max_f1={:.4f}".format(tf_global_step, eval_antecedent_f1_masks_only, max_f1))
if tf_global_step > max_steps:
logger.info(str(max_scores))
break
| 42.316667 | 227 | 0.690429 |
527694e9bd95df91e944cf242a9952d06da7a9b8 | 4,252 | py | Python | cloudshell/shell/standards/core/autoload/utils.py | QualiSystems/cloudshell-shell-standards | a8bf7df7814c70df6d89bddcda51220f3b5329c9 | [
"Apache-2.0"
] | null | null | null | cloudshell/shell/standards/core/autoload/utils.py | QualiSystems/cloudshell-shell-standards | a8bf7df7814c70df6d89bddcda51220f3b5329c9 | [
"Apache-2.0"
] | 15 | 2019-04-23T08:43:35.000Z | 2021-03-10T12:46:32.000Z | cloudshell/shell/standards/core/autoload/utils.py | QualiSystems/cloudshell-shell-standards | a8bf7df7814c70df6d89bddcda51220f3b5329c9 | [
"Apache-2.0"
] | 1 | 2020-11-20T20:58:27.000Z | 2020-11-20T20:58:27.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
import warnings
from cloudshell.shell.core.driver_context import (
AutoLoadAttribute,
AutoLoadDetails,
AutoLoadResource,
)
class AutoloadDetailsBuilder(object):
def __init__(
self, resource_model, filter_empty_modules=False, use_new_unique_id=False
):
"""Autoload Details Builder.
:param cloudshell.shell.standards.autoload_generic_models.GenericResourceModel resource_model: # noqa: E501
:param bool filter_empty_modules:
:param bool use_new_unique_id: use CS resource Id for creating unique id
"""
if not filter_empty_modules:
# todo v2.0 - set filter_empty_modules=True by default
warnings.warn(
"Empty modules would be filtered by default in next major version",
PendingDeprecationWarning,
)
if not use_new_unique_id:
# todo v2.0 - always use CS Id for generating unique id
warnings.warn(
"CS resource Id would be used by default in next major version",
PendingDeprecationWarning,
)
self.resource_model = resource_model
self._filter_empty_modules = filter_empty_modules
self._cs_resource_id = (
resource_model.cs_resource_id if use_new_unique_id else None
)
def _build_branch(self, resource):
"""Build a branch.
:param cloudshell.shell.standards.core.autoload.resource_model.AbstractResource resource: # noqa: E501
:rtype: cloudshell.shell.core.driver_context.AutoLoadDetails
"""
resource.shell_name = resource.shell_name or self.resource_model.shell_name
relative_address = str(resource.relative_address)
unique_identifier = get_unique_id(self._cs_resource_id, resource)
autoload_details = AutoLoadDetails([], [])
if relative_address:
autoload_details.resources = [
AutoLoadResource(
model=resource.cloudshell_model_name,
name=resource.name,
relative_address=relative_address,
unique_identifier=unique_identifier,
)
]
autoload_details.attributes = [
AutoLoadAttribute(
relative_address=relative_address,
attribute_name=str(name),
attribute_value=str(value),
)
for name, value in resource.attributes.items()
if value is not None
]
for child_resource in resource.extract_sub_resources():
# skip modules and sub modules without children
if self._filter_empty_modules and is_module_without_children(
child_resource
):
continue
child_details = self._build_branch(child_resource)
autoload_details.resources.extend(child_details.resources)
autoload_details.attributes.extend(child_details.attributes)
return autoload_details
def build_details(self):
"""Build resource details.
:rtype: cloudshell.shell.core.driver_context.AutoLoadDetails
"""
return self._build_branch(self.resource_model)
def get_unique_id(cs_resource_id, resource):
"""Get unique ID for the resource.
If we have cs_resource_id use it for creating unique id.
:type cs_resource_id: str
:param cloudshell.shell.standards.core.autoload.resource_model.AbstractResource resource: # noqa: E501
:rtype: str
"""
if cs_resource_id:
unique_id = "{}+{}".format(cs_resource_id, resource.unique_identifier)
unique_id = str(hash(unique_id))
else:
unique_id = str(resource.unique_identifier)
return unique_id
def is_module_without_children(resource):
from cloudshell.shell.standards.autoload_generic_models import (
GenericModule,
GenericSubModule,
)
children = resource.extract_sub_resources()
if isinstance(resource, GenericSubModule):
return not children
elif isinstance(resource, GenericModule):
return all(map(is_module_without_children, children))
else:
return False
| 35.731092 | 116 | 0.654516 |
093dc5c4a58f5c5bb39c91a845e3c4212802c62d | 491 | py | Python | output/models/ms_data/regex/re_l4_xsd/re_l4.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/ms_data/regex/re_l4_xsd/re_l4.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/ms_data/regex/re_l4_xsd/re_l4.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from typing import List, Optional
@dataclass
class Regex:
att: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"pattern": r"\p{IsLatinExtended-B}+",
}
)
@dataclass
class Doc:
class Meta:
name = "doc"
elem: List[Regex] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
}
)
| 17.535714 | 49 | 0.521385 |
8fe74f683c0bf32e3c51ca70712488aec2ac0851 | 9,429 | py | Python | gala-ragdoll/ragdoll/models/realconf_base_info.py | seandong37tt4qu/jeszhengq | 32b3737ab45e89e8c5b71cdce871cefd2c938fa8 | [
"MulanPSL-1.0"
] | null | null | null | gala-ragdoll/ragdoll/models/realconf_base_info.py | seandong37tt4qu/jeszhengq | 32b3737ab45e89e8c5b71cdce871cefd2c938fa8 | [
"MulanPSL-1.0"
] | null | null | null | gala-ragdoll/ragdoll/models/realconf_base_info.py | seandong37tt4qu/jeszhengq | 32b3737ab45e89e8c5b71cdce871cefd2c938fa8 | [
"MulanPSL-1.0"
] | null | null | null | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from ragdoll.models.base_model_ import Model
from ragdoll import util
class RealconfBaseInfo(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, path: str=None, file_path: str=None, rpm_name: str=None, rpm_version: str=None, rpm_release: str=None, file_attr: str=None, file_owner: str=None, conf_type: str=None, spacer: str=None, conf_contens: str=None): # noqa: E501
"""RealconfBaseInfo - a model defined in Swagger
:param path: The path of this RealconfBaseInfo. # noqa: E501
:type path: str
:param file_path: The file_path of this RealconfBaseInfo. # noqa: E501
:type file_path: str
:param rpm_name: The rpm_name of this RealconfBaseInfo. # noqa: E501
:type rpm_name: str
:param rpm_version: The rpm_version of this RealconfBaseInfo. # noqa: E501
:type rpm_version: str
:param rpm_release: The rpm_release of this RealconfBaseInfo. # noqa: E501
:type rpm_release: str
:param file_attr: The file_attr of this RealconfBaseInfo. # noqa: E501
:type file_attr: str
:param file_owner: The file_owner of this RealconfBaseInfo. # noqa: E501
:type file_owner: str
:param conf_type: The conf_type of this RealconfBaseInfo. # noqa: E501
:type conf_type: str
:param spacer: The spacer of this RealconfBaseInfo. # noqa: E501
:type spacer: str
:param conf_contens: The conf_contens of this RealconfBaseInfo. # noqa: E501
:type conf_contens: str
"""
self.swagger_types = {
'path': str,
'file_path': str,
'rpm_name': str,
'rpm_version': str,
"rpm_release": str,
'file_attr': str,
'file_owner': str,
'conf_type': str,
'spacer': str,
'conf_contens': str
}
self.attribute_map = {
'path': 'path',
'file_path': 'filePath',
'rpm_name': 'rpmName',
'rpm_version': 'rpmVersion',
'rpm_release': 'rpmRelease',
'file_attr': 'fileAttr',
'file_owner': 'fileOwner',
'conf_type': 'confType',
'spacer': 'spacer',
'conf_contens': 'confContents'
}
self._path = path
self._file_path = file_path
self._rpm_name = rpm_name
self._rpm_version = rpm_version
self._rpm_release = rpm_release
self._file_attr = file_attr
self._file_owner = file_owner
self._conf_type = conf_type
self._spacer = spacer
self._conf_contens = conf_contens
@classmethod
def from_dict(cls, dikt) -> 'RealconfBaseInfo':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The RealconfBaseInfo of this RealconfBaseInfo. # noqa: E501
:rtype: RealconfBaseInfo
"""
return util.deserialize_model(dikt, cls)
@property
def path(self) -> str:
"""Gets the path of this RealconfBaseInfo.
The currently configured xpath path # noqa: E501
:return: The path of this RealconfBaseInfo.
:rtype: str
"""
return self._path
@path.setter
def path(self, path: str):
"""Sets the path of this RealconfBaseInfo.
The currently configured xpath path # noqa: E501
:param path: The path of this RealconfBaseInfo.
:type path: str
"""
self._path = path
@property
def file_path(self) -> str:
"""Gets the file_path of this RealconfBaseInfo.
The real file path of the configuration item in OS # noqa: E501
:return: The file_path of this RealconfBaseInfo.
:rtype: str
"""
return self._file_path
@file_path.setter
def file_path(self, file_path: str):
"""Sets the file_path of this RealconfBaseInfo.
The real file path of the configuration item in OS # noqa: E501
:param file_path: The file_path of this RealconfBaseInfo.
:type file_path: str
"""
self._file_path = file_path
@property
def rpm_name(self) -> str:
"""Gets the rpm_name of this RealconfBaseInfo.
:return: The rpm_name of this RealconfBaseInfo.
:rtype: str
"""
return self._rpm_name
@rpm_name.setter
def rpm_name(self, rpm_name: str):
"""Sets the rpm_name of this RealconfBaseInfo.
:param rpm_name: The rpm_name of this RealconfBaseInfo.
:type rpm_name: str
"""
self._rpm_name = rpm_name
@property
def rpm_version(self) -> str:
"""Gets the rpm_version of this RealconfBaseInfo.
:return: The rpm_version of this RealconfBaseInfo.
:rtype: str
"""
return self._rpm_version
@rpm_version.setter
def rpm_version(self, rpm_version: str):
"""Sets the rpm_version of this RealconfBaseInfo.
:param rpm_version: The rpm_version of this RealconfBaseInfo.
:type rpm_version: str
"""
self._rpm_version = rpm_version
@property
def rpm_release(self) -> str:
"""Gets the rpm_release of this RealconfBaseInfo.
:return: The rpm_release of this RealconfBaseInfo.
:rtype: str
"""
return self._rpm_release
@rpm_release.setter
def rpm_release(self, rpm_release: str):
"""Sets the rpm_release of this RealconfBaseInfo.
:param rpm_release: The rpm_release of this RealconfBaseInfo.
:type rpm_release: str
"""
self._rpm_release = rpm_release
@property
def file_attr(self) -> str:
"""Gets the file_attr of this RealconfBaseInfo.
the attributes for the real file of the configuration item in OS # noqa: E501
:return: The file_attr of this RealconfBaseInfo.
:rtype: str
"""
return self._file_attr
@file_attr.setter
def file_attr(self, file_attr: str):
"""Sets the file_attr of this RealconfBaseInfo.
the attributes for the real file of the configuration item in OS # noqa: E501
:param file_attr: The file_attr of this RealconfBaseInfo.
:type file_attr: str
"""
self._file_attr = file_attr
@property
def file_owner(self) -> str:
"""Gets the file_owner of this RealconfBaseInfo.
the owner information for the real file of the configuration item in OS # noqa: E501
:return: The file_owner of this RealconfBaseInfo.
:rtype: str
"""
return self._file_owner
@file_owner.setter
def file_owner(self, file_owner: str):
"""Sets the file_owner of this RealconfBaseInfo.
the owner information for the real file of the configuration item in OS # noqa: E501
:param file_owner: The file_owner of this RealconfBaseInfo.
:type file_owner: str
"""
self._file_owner = file_owner
@property
def conf_type(self) -> str:
"""Gets the conf_type of this RealconfBaseInfo.
the configuration type # noqa: E501
:return: The conf_type of this RealconfBaseInfo.
:rtype: str
"""
return self._conf_type
@conf_type.setter
def conf_type(self, conf_type: str):
"""Sets the conf_type of this RealconfBaseInfo.
the configuration type # noqa: E501
:param conf_type: The conf_type of this RealconfBaseInfo.
:type conf_type: str
"""
allowed_values = ["key-value", "toml", "json", "xml", "text"] # noqa: E501
if conf_type not in allowed_values:
raise ValueError(
"Invalid value for `conf_type` ({0}), must be one of {1}"
.format(conf_type, allowed_values)
)
self._conf_type = conf_type
@property
def spacer(self) -> str:
"""Gets the spacer of this RealconfBaseInfo.
spacers of configuration items in the configuration file # noqa: E501
:return: The spacer of this RealconfBaseInfo.
:rtype: str
"""
return self._spacer
@spacer.setter
def spacer(self, spacer: str):
"""Sets the spacer of this RealconfBaseInfo.
spacers of configuration items in the configuration file # noqa: E501
:param spacer: The spacer of this RealconfBaseInfo.
:type spacer: str
"""
self._spacer = spacer
@property
def conf_contens(self) -> str:
"""Gets the conf_contens of this RealconfBaseInfo.
the specific content of the configuration item # noqa: E501
:return: The conf_contens of this RealconfBaseInfo.
:rtype: str
"""
return self._conf_contens
@conf_contens.setter
def conf_contens(self, conf_contens: str):
"""Sets the conf_contens of this RealconfBaseInfo.
the specific content of the configuration item # noqa: E501
:param conf_contens: The conf_contens of this RealconfBaseInfo.
:type conf_contens: str
"""
self._conf_contens = conf_contens
| 29.744479 | 246 | 0.617881 |
b1d19b9053fac2c94c10b6b0b1e882442a9a64b3 | 14,487 | py | Python | sdk/python/pulumi_azure_native/compute/v20180401/get_virtual_machine_scale_set_vm.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/compute/v20180401/get_virtual_machine_scale_set_vm.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/compute/v20180401/get_virtual_machine_scale_set_vm.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetVirtualMachineScaleSetVMResult',
'AwaitableGetVirtualMachineScaleSetVMResult',
'get_virtual_machine_scale_set_vm',
]
@pulumi.output_type
class GetVirtualMachineScaleSetVMResult:
    """
    Describes a virtual machine scale set virtual machine.
    """
    # NOTE: generated by the Pulumi SDK Generator (see file header); the
    # validation/storage order in __init__ mirrors the declared fields.
    def __init__(__self__, availability_set=None, diagnostics_profile=None, hardware_profile=None, id=None, instance_id=None, instance_view=None, latest_model_applied=None, license_type=None, location=None, name=None, network_profile=None, os_profile=None, plan=None, provisioning_state=None, resources=None, sku=None, storage_profile=None, tags=None, type=None, vm_id=None, zones=None):
        # Each argument is loosely validated against its wire type (dict for
        # nested response objects, list for sequences) and then stored on the
        # output object via pulumi.set().  Falsy values skip validation.
        if availability_set and not isinstance(availability_set, dict):
            raise TypeError("Expected argument 'availability_set' to be a dict")
        pulumi.set(__self__, "availability_set", availability_set)
        if diagnostics_profile and not isinstance(diagnostics_profile, dict):
            raise TypeError("Expected argument 'diagnostics_profile' to be a dict")
        pulumi.set(__self__, "diagnostics_profile", diagnostics_profile)
        if hardware_profile and not isinstance(hardware_profile, dict):
            raise TypeError("Expected argument 'hardware_profile' to be a dict")
        pulumi.set(__self__, "hardware_profile", hardware_profile)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if instance_id and not isinstance(instance_id, str):
            raise TypeError("Expected argument 'instance_id' to be a str")
        pulumi.set(__self__, "instance_id", instance_id)
        if instance_view and not isinstance(instance_view, dict):
            raise TypeError("Expected argument 'instance_view' to be a dict")
        pulumi.set(__self__, "instance_view", instance_view)
        if latest_model_applied and not isinstance(latest_model_applied, bool):
            raise TypeError("Expected argument 'latest_model_applied' to be a bool")
        pulumi.set(__self__, "latest_model_applied", latest_model_applied)
        if license_type and not isinstance(license_type, str):
            raise TypeError("Expected argument 'license_type' to be a str")
        pulumi.set(__self__, "license_type", license_type)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if network_profile and not isinstance(network_profile, dict):
            raise TypeError("Expected argument 'network_profile' to be a dict")
        pulumi.set(__self__, "network_profile", network_profile)
        if os_profile and not isinstance(os_profile, dict):
            raise TypeError("Expected argument 'os_profile' to be a dict")
        pulumi.set(__self__, "os_profile", os_profile)
        if plan and not isinstance(plan, dict):
            raise TypeError("Expected argument 'plan' to be a dict")
        pulumi.set(__self__, "plan", plan)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if resources and not isinstance(resources, list):
            raise TypeError("Expected argument 'resources' to be a list")
        pulumi.set(__self__, "resources", resources)
        if sku and not isinstance(sku, dict):
            raise TypeError("Expected argument 'sku' to be a dict")
        pulumi.set(__self__, "sku", sku)
        if storage_profile and not isinstance(storage_profile, dict):
            raise TypeError("Expected argument 'storage_profile' to be a dict")
        pulumi.set(__self__, "storage_profile", storage_profile)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if vm_id and not isinstance(vm_id, str):
            raise TypeError("Expected argument 'vm_id' to be a str")
        pulumi.set(__self__, "vm_id", vm_id)
        if zones and not isinstance(zones, list):
            raise TypeError("Expected argument 'zones' to be a list")
        pulumi.set(__self__, "zones", zones)
    # Read-only accessors; @pulumi.getter maps the Python snake_case name to
    # the camelCase wire name where they differ.
    @property
    @pulumi.getter(name="availabilitySet")
    def availability_set(self) -> Optional['outputs.SubResourceResponse']:
        """
        The availability set that this virtual machine is assigned to, if any.
        A VM can only be added to an availability set at creation time.
        """
        return pulumi.get(self, "availability_set")
    @property
    @pulumi.getter(name="diagnosticsProfile")
    def diagnostics_profile(self) -> Optional['outputs.DiagnosticsProfileResponse']:
        """
        Specifies the boot diagnostic settings state. <br><br>Minimum api-version: 2015-06-15.
        """
        return pulumi.get(self, "diagnostics_profile")
    @property
    @pulumi.getter(name="hardwareProfile")
    def hardware_profile(self) -> Optional['outputs.HardwareProfileResponse']:
        """
        Specifies the hardware settings for the virtual machine.
        """
        return pulumi.get(self, "hardware_profile")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource Id
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="instanceId")
    def instance_id(self) -> str:
        """
        The virtual machine instance ID.
        """
        return pulumi.get(self, "instance_id")
    @property
    @pulumi.getter(name="instanceView")
    def instance_view(self) -> 'outputs.VirtualMachineScaleSetVMInstanceViewResponse':
        """
        The virtual machine instance view.
        """
        return pulumi.get(self, "instance_view")
    @property
    @pulumi.getter(name="latestModelApplied")
    def latest_model_applied(self) -> bool:
        """
        Specifies whether the latest model has been applied to the virtual machine.
        """
        return pulumi.get(self, "latest_model_applied")
    @property
    @pulumi.getter(name="licenseType")
    def license_type(self) -> Optional[str]:
        """
        On-premises license for the Windows Server operating system (Azure
        Hybrid Use Benefit). Possible values: Windows_Client, Windows_Server.
        The value cannot be updated once set. Minimum api-version: 2015-06-15.
        """
        return pulumi.get(self, "license_type")
    @property
    @pulumi.getter
    def location(self) -> str:
        """
        Resource location
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="networkProfile")
    def network_profile(self) -> Optional['outputs.NetworkProfileResponse']:
        """
        Specifies the network interfaces of the virtual machine.
        """
        return pulumi.get(self, "network_profile")
    @property
    @pulumi.getter(name="osProfile")
    def os_profile(self) -> Optional['outputs.OSProfileResponse']:
        """
        Specifies the operating system settings for the virtual machine.
        """
        return pulumi.get(self, "os_profile")
    @property
    @pulumi.getter
    def plan(self) -> Optional['outputs.PlanResponse']:
        """
        Marketplace image purchase plan information; only used for marketplace
        images that have been enabled for programmatic deployment.
        """
        return pulumi.get(self, "plan")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state, which only appears in the response.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def resources(self) -> Sequence['outputs.VirtualMachineExtensionResponse']:
        """
        The virtual machine child extension resources.
        """
        return pulumi.get(self, "resources")
    @property
    @pulumi.getter
    def sku(self) -> 'outputs.SkuResponse':
        """
        The virtual machine SKU.
        """
        return pulumi.get(self, "sku")
    @property
    @pulumi.getter(name="storageProfile")
    def storage_profile(self) -> Optional['outputs.StorageProfileResponse']:
        """
        Specifies the storage settings for the virtual machine disks.
        """
        return pulumi.get(self, "storage_profile")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="vmId")
    def vm_id(self) -> str:
        """
        Azure VM unique ID.
        """
        return pulumi.get(self, "vm_id")
    @property
    @pulumi.getter
    def zones(self) -> Sequence[str]:
        """
        The virtual machine zones.
        """
        return pulumi.get(self, "zones")
class AwaitableGetVirtualMachineScaleSetVMResult(GetVirtualMachineScaleSetVMResult):
    """Awaitable wrapper so the lookup result can be used with `await`."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The dead `yield` turns this method into a generator, which is what
        # makes the object awaitable; it never actually suspends.
        if False:
            yield self
        field_names = (
            'availability_set', 'diagnostics_profile', 'hardware_profile',
            'id', 'instance_id', 'instance_view', 'latest_model_applied',
            'license_type', 'location', 'name', 'network_profile',
            'os_profile', 'plan', 'provisioning_state', 'resources', 'sku',
            'storage_profile', 'tags', 'type', 'vm_id', 'zones')
        # Re-pack every field into a plain (non-awaitable) result object.
        return GetVirtualMachineScaleSetVMResult(
            **{attr: getattr(self, attr) for attr in field_names})
def get_virtual_machine_scale_set_vm(instance_id: Optional[str] = None,
                                     resource_group_name: Optional[str] = None,
                                     vm_scale_set_name: Optional[str] = None,
                                     opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualMachineScaleSetVMResult:
    """
    Describes a virtual machine scale set virtual machine.

    :param str instance_id: The instance ID of the virtual machine.
    :param str resource_group_name: The name of the resource group.
    :param str vm_scale_set_name: The name of the VM scale set.
    """
    invoke_args = {
        'instanceId': instance_id,
        'resourceGroupName': resource_group_name,
        'vmScaleSetName': vm_scale_set_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Perform the provider invoke and unwrap the typed result.
    result = pulumi.runtime.invoke('azure-native:compute/v20180401:getVirtualMachineScaleSetVM', invoke_args, opts=opts, typ=GetVirtualMachineScaleSetVMResult).value
    field_names = (
        'availability_set', 'diagnostics_profile', 'hardware_profile', 'id',
        'instance_id', 'instance_view', 'latest_model_applied', 'license_type',
        'location', 'name', 'network_profile', 'os_profile', 'plan',
        'provisioning_state', 'resources', 'sku', 'storage_profile', 'tags',
        'type', 'vm_id', 'zones')
    return AwaitableGetVirtualMachineScaleSetVMResult(
        **{attr: getattr(result, attr) for attr in field_names})
| 43.9 | 866 | 0.671292 |
48afed6ff9e8639a626a82ae0976ac26acb35dab | 4,243 | py | Python | src/encoded/types/user.py | procha2/encoded | e9f122362b71f3b8641023b8d2d5ad531d3484b7 | [
"MIT"
] | 102 | 2015-05-20T01:17:43.000Z | 2022-03-07T06:03:55.000Z | src/encoded/types/user.py | procha2/encoded | e9f122362b71f3b8641023b8d2d5ad531d3484b7 | [
"MIT"
] | 901 | 2015-01-07T23:11:57.000Z | 2022-03-18T13:56:12.000Z | src/encoded/types/user.py | procha2/encoded | e9f122362b71f3b8641023b8d2d5ad531d3484b7 | [
"MIT"
] | 65 | 2015-02-06T23:00:26.000Z | 2022-01-22T07:58:44.000Z | # -*- coding: utf-8 -*-
from pyramid.view import (
view_config,
)
from pyramid.security import (
Allow,
Deny,
Everyone,
)
from .base import (
Item,
)
from snovault import (
CONNECTION,
calculated_property,
collection,
load_schema,
)
from snovault.calculated import calculate_properties
from snovault.resource_views import item_view_object
from snovault.util import expand_path
# Pyramid ACLs for user items.  Entry order matters: the trailing Deny
# overrides any later inherited Allow entries.
ONLY_ADMIN_VIEW_DETAILS = [
    (Allow, 'group.admin', ['view', 'view_details', 'edit']),
    (Allow, 'group.read-only-admin', ['view', 'view_details']),
    (Allow, 'remoteuser.INDEXER', ['view']),
    (Allow, 'remoteuser.EMBED', ['view']),
    (Deny, Everyone, ['view', 'view_details', 'edit']),
]
# Everyone may 'view' a current user; details/edit stay admin-only.
USER_ALLOW_CURRENT = [
    (Allow, Everyone, 'view'),
] + ONLY_ADMIN_VIEW_DETAILS
# Deleted users are hidden from edit listings and restricted to admins.
USER_DELETED = [
    (Deny, Everyone, 'visible_for_edit')
] + ONLY_ADMIN_VIEW_DETAILS
@collection(
    name='users',
    unique_key='user:email',
    properties={
        'title': 'DCC Users',
        'description': 'Listing of current ENCODE DCC users',
    },
    acl=[])
class User(Item):
    """A DCC user account item; access is governed per-status by STATUS_ACL."""
    item_type = 'user'
    schema = load_schema('encoded:schemas/user.json')
    # Avoid access_keys reverse link so editing access keys does not reindex content.
    embedded = [
        'lab',
    ]
    # Per-status ACLs; a 'current' user may additionally edit/view their own
    # details via the 'role.owner' local role (see __ac_local_roles__).
    STATUS_ACL = {
        'current': [(Allow, 'role.owner', ['edit', 'view_details'])] + USER_ALLOW_CURRENT,
        'deleted': USER_DELETED,
        'disabled': ONLY_ADMIN_VIEW_DETAILS,
    }
    @calculated_property(schema={
        "title": "Title",
        "type": "string",
    })
    def title(self, first_name, last_name):
        """Display title: first and last name joined by a space."""
        return u'{} {}'.format(first_name, last_name)
    def __ac_local_roles__(self):
        """Grant the 'role.owner' local role to this user's own userid principal."""
        owner = 'userid.%s' % self.uuid
        return {owner: 'role.owner'}
    @calculated_property(schema={
        "title": "Access Keys",
        "type": "array",
        "items": {
            "type": ['string', 'object'],
            "linkFrom": "AccessKey.user",
        },
    }, category='page')
    def access_keys(self, request):
        """List this user's access keys (page category only).

        Returns None unless the request has 'view_details' permission;
        deleted/replaced keys are filtered out.
        """
        if not request.has_permission('view_details'):
            return
        uuids = self.registry[CONNECTION].get_rev_links(self.model, 'user', 'AccessKey')
        objects = (request.embed('/', str(uuid), '@@object') for uuid in uuids)
        return [obj for obj in objects if obj['status'] not in ('deleted', 'replaced')]
@view_config(context=User, permission='view', request_method='GET', name='page')
def user_page_view(context, request):
    """Render the 'page' view of a user.

    Viewers without 'view_details' get the plain object with embedded paths
    expanded instead of the full detailed view.
    """
    if not request.has_permission('view_details'):
        properties = request.embed(request.resource_path(context), '@@object')
        for embedded_path in context.embedded:
            expand_path(request, properties, embedded_path)
    else:
        properties = item_view_object(context, request)
    # Merge in page-category calculated properties.
    properties.update(
        calculate_properties(context, request, properties, category='page'))
    return properties
@view_config(context=User, permission='view', request_method='GET',
             name='object')
def user_basic_view(context, request):
    """Render the 'object' view of a user, exposing only a whitelisted
    subset of fields."""
    properties = item_view_object(context, request)
    public_keys = ('@id', '@type', 'uuid', 'lab', 'title', 'submits_for')
    return {key: properties[key] for key in public_keys if key in properties}
@calculated_property(context=User, category='user_action')
def impersonate(request):
    """Offer the impersonation action when the requester holds the
    'impersonate' permission; otherwise the action is omitted (None)."""
    # This is assuming the user_action calculated properties
    # will only be fetched from the current_user view,
    # which ensures that the user represented by 'context' is also an effective principal
    if not request.has_permission('impersonate'):
        return None
    return dict(id='impersonate',
                title='Impersonate user…',
                href='/#!impersonate-user')
@calculated_property(context=User, category='user_action')
def profile(context, request):
    """Link to the user's own profile page."""
    return dict(id='profile',
                title='Profile',
                href=request.resource_path(context))
@calculated_property(context=User, category='user_action')
def signout(context, request):
    """Expose the sign-out action, which fires the 'logout' trigger."""
    return dict(id='signout',
                title='Sign out',
                trigger='logout')
| 29.061644 | 90 | 0.631629 |
a47b32211c9425e14f09649431f5d179073bc009 | 82,603 | py | Python | reinforcement_learning.py | NasikNafi/TensorFlow-Tutorials | df9fa594ac3de0756603c81cc76a816324e357ba | [
"MIT"
] | null | null | null | reinforcement_learning.py | NasikNafi/TensorFlow-Tutorials | df9fa594ac3de0756603c81cc76a816324e357ba | [
"MIT"
] | null | null | null | reinforcement_learning.py | NasikNafi/TensorFlow-Tutorials | df9fa594ac3de0756603c81cc76a816324e357ba | [
"MIT"
] | null | null | null | ########################################################################
#
# Reinforcement Learning (Q-Learning) for Atari Games
#
# How to run:
#
# To train a Neural Network for playing the Atari game Breakout,
# run the following command in a terminal window.
#
# python reinforcement-learning.py --env 'Breakout-v0' --training
#
# The agent should start to improve after a few hours, but a full
# training run required 150 hours on a 2.6 GHz CPU and GTX 1070 GPU.
#
# The hyper-parameters were tuned for Breakout and did not work
# quite as well for SpaceInvaders. Can you find better parameters?
#
# Once the Neural Network has been trained, you can test it and
# watch it play the game by running this command in the terminal:
#
# python reinforcement-learning.py --env 'Breakout-v0' --render --episodes 2
#
# Requirements:
#
# - Python 3.6 (Python 2.7 may not work)
# - TensorFlow 1.1.0
# - OpenAI Gym 0.8.1
# - PrettyTensor 0.7.4 (not required if you use tf.layers instead)
#
# Summary:
#
# This program implements a variant of Reinforcement Learning known as
# Q-learning. Imagine that we have an agent that must take actions in
# some environment so as to maximize the cumulative reward over its life.
# The agent sees the state of the game-environment through images
# which are sent through a Neural Network in TensorFlow, so as to
# estimate which action is most likely to maximize the cumulative
# reward of all future actions. These action-values are also called
# Q-values. If the Q-values are known in advance, then the agent merely
# has to select the action corresponding to the highest Q-value in
# each state of the game. But the Q-values are not known in advance
# and must be learnt while the agent is playing the game.
# This is done by initializing all Q-values to zero and then having
# the agent take random actions. Whenever the agent obtains a reward,
# the estimated Q-values can be updated with the new information.
# The agent gradually learns to play the game better and better
# because the Neural Network becomes better at estimating the Q-values.
# But this process is very slow and the basic algorithm implemented
# here typically requires 100 million steps in the game-environment,
# although it will typically start to show improvement much sooner.
#
# Main classes:
#
# - MotionTracer:
#
# This takes raw images from the game-environment and processes them.
# The output is called a state and consists of two images of equal size:
# (1) The last image from the game-environment, resized and gray-scaled.
# (2) A motion-trace that shows the recent trajectories of objects.
#
# - ReplayMemory:
#
# Successive image-frames of the game-environment are almost identical.
# If we train the Neural Network to estimate Q-values from a small
# number of successive image-frames, then it cannot learn to distinguish
# important features and the training becomes unstable. For the basic
# Q-learning algorithm we need many thousand states from the game-environment
# in order to learn important features so the Q-values can be estimated.
#
# - NeuralNetwork:
#
# This implements a Neural Network for estimating Q-values. It takes as
# input a state of the game-environment that was output by the Motion Tracer,
# and then the Neural Network outputs the estimated Q-values that indicate
# the cumulative reward of taking each action for a given state of the game.
#
# - Agent:
#
# This implements the agent that plays games. It loads an Atari-game from
# OpenAI Gym and inputs the game-images to the Motion Tracer, which in turn
# outputs a state that is input to the Neural Network, which estimates the
# Q-values that are used for selecting the next action. The agent then
# takes a step in the game-environment. During training, the data is added
# to the Replay Memory and when it is sufficiently full, an optimization run
# is performed so as to improve the Neural Network's ability to estimate
# Q-values. This procedure is repeated many, many times until the Neural
# Network is sufficiently accurate at estimating Q-values.
#
# The Q-Value Formula:
#
# The Q-values for a given state is a vector with a value for each possible
# action, indicating the total future reward that can be had by taking each
# action. The Q-values are initialized to roughly zero and must then be
# improved iteratively when new information becomes available.
#
# We know which action was taken in the current step and what the observed
# reward was, so the estimated Q-value can be improved with this information.
# The Q-value estimates the total cumulative reward for all future steps, which
# is why we use the max Q-value for the next step.
#
# The formula for updating Q-values is implemented in the ReplayMemory-class
# in the function update_all_q_values(), which does a complete backwards-sweep
# through the Replay Memory. The formula for updating the Q-values is:
#
# Q-value for this state and action = observed reward for the current step
# + discount factor * max Q-value for next step
#
# The discount factor is a number slightly below 1.0 (e.g. 0.97) which causes
# distant future rewards to have a smaller effect on the Q-values. This means
# that if the reward is the same, then it is considered more valuable to get
# the reward sooner rather than later.
#
# Pseudo-Code:
#
# There are many lines of source-code required to implement all this, but the
# main ideas of the algorithm can be described more simply in pseudo-code:
#
# 1) Initialize all Q-values to roughly zero.
# We use a Neural Network to estimate the Q-values, so this means
# we have to initialize the Neural Network with small random weights.
#
# 2) Reset the game-environment and Motion Tracer.
#
# 3) Get the state from the Motion Tracer which consists of two gray-scale
# images. The first is the image of the game-environment and the second
# is a motion-trace showing recent movements in the game-environment.
#
# 4) Input the state to the Neural Network to estimate the Q-values.
#
# 5) Either take a random action with probability epsilon, or take the
# action with the highest Q-value. This is called the epsilon-greedy policy.
#
# 6) Add the state, action and observed reward to the Replay Memory.
#
# 7) When the Replay Memory is sufficiently full, first perform a full
# backwards-sweep to update all the Q-values with the observed rewards.
#
# Then perform an optimization run of the Neural Network.
# This takes random batches of data from the Replay Memory and uses them
# for training the Neural Network to become better at estimating Q-values.
#
# Save a checkpoint for the Neural Network so we can reload it later.
#
# 8) Input the recent image of the game-environment to the Motion Tracer
# and repeat from step (3).
#
########################################################################
#
# This file is part of the TensorFlow Tutorials available at:
#
# https://github.com/Hvass-Labs/TensorFlow-Tutorials
#
# Published under the MIT License. See the file LICENSE for details.
#
# Copyright 2017 by Magnus Erik Hvass Pedersen
#
########################################################################
import numpy as np
import tensorflow as tf
import gym
import scipy.ndimage
import sys
import os
import time
import csv
import argparse
import download
from random import *
########################################################################
# File-paths are global variables for convenience so they don't
# have to be passed around between all the objects.
# You should first set checkpoint_base_dir to whichever you like,
# then call the function update_paths(env_name) to update all the paths.
# This should be done before you create the Agent and NeuralNetwork etc.
# Default base-directory for the checkpoints and log-files.
# The environment-name will be appended to this.
checkpoint_base_dir = 'checkpoints_RL/'
# Combination of base-dir and environment-name.
# Remains None until update_paths() is called.
checkpoint_dir = None
# Full path for the log-file for rewards.  Set by update_paths().
log_reward_path = None
# Full path for the log-file for Q-values.  Set by update_paths().
log_q_values_path = None
def update_paths(env_name):
    """
    Update the path-names for the checkpoint-dir and log-files.

    Call this after you have changed checkpoint_base_dir and
    before you create the Neural Network.

    :param env_name:
        Name of the game-environment you will use in OpenAI Gym.
    """
    global checkpoint_dir
    global log_reward_path
    global log_q_values_path

    # Add the environment-name to the checkpoint-dir.
    checkpoint_dir = os.path.join(checkpoint_base_dir, env_name)

    # Create the checkpoint-dir if it does not already exist.
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists() + os.makedirs() sequence.
    os.makedirs(checkpoint_dir, exist_ok=True)

    # File-path for the log-file for episode rewards.
    log_reward_path = os.path.join(checkpoint_dir, "log_reward.txt")

    # File-path for the log-file for Q-values.
    log_q_values_path = os.path.join(checkpoint_dir, "log_q_values.txt")
########################################################################
# Download TensorFlow checkpoints.
# URLs for the checkpoint-files, keyed by game-environment name.
_checkpoint_url = {
    "Breakout-v0": "http://hvass-labs.org/projects/tensorflow/tutorial16/Breakout-v0.tar.gz",
    "SpaceInvaders-v0": "http://hvass-labs.org/projects/tensorflow/tutorial16/SpaceInvaders-v0.tar.gz"
}
def maybe_download_checkpoint(env_name):
    """
    Ensure the TensorFlow checkpoint for the given environment-name is
    present in checkpoint_base_dir, downloading and extracting it if it
    does not already exist.  First set this dir and call update_paths().
    """
    # Look up the download-url for this game-environment and let the
    # download-module skip the work if the files are already in place.
    download.maybe_download_and_extract(url=_checkpoint_url[env_name],
                                        download_dir=checkpoint_base_dir)
########################################################################
# Classes used for logging data during training.
class Log:
    """
    Base-class for writing training data to a tab-separated text-file.

    TensorFlow / TensorBoard could be used for this, but it is awkward
    for values such as rewards and Q-values that live outside the
    TensorFlow graph, so a plain log-file is used instead.
    """

    def __init__(self, file_path):
        """Set the path for the log-file. Nothing is saved or loaded yet."""
        # Path for the log-file.
        self.file_path = file_path

        # Filled in by _read(): episode-counters, state-counters and data.
        self.count_episodes = None
        self.count_states = None
        self.data = None

    def _write(self, count_episodes, count_states, msg):
        """
        Append one tab-separated line to the log-file.
        This is only called by sub-classes.

        :param count_episodes:
            Counter for the number of episodes processed during training.
        :param count_states:
            Counter for the number of states processed during training.
        :param msg:
            Message to write in the log.
        """
        line = "{0}\t{1}\t{2}\n".format(count_episodes, count_states, msg)

        # Line-buffered append so each log-line hits the file immediately.
        with open(file=self.file_path, mode='a', buffering=1) as log_file:
            log_file.write(line)

    def _read(self):
        """
        Read the log-file into memory so it can be plotted.

        Sets self.count_episodes, self.count_states and self.data.
        """
        # Parse the tab-separated file and transpose rows into columns.
        with open(self.file_path) as log_file:
            columns = zip(*csv.reader(log_file, delimiter="\t"))
            self.count_episodes, self.count_states, *rest = columns

        # Convert the remaining log-data to a NumPy float-array.
        self.data = np.array(rest, dtype='float')
class LogReward(Log):
    """Log the rewards obtained for episodes during training."""

    def __init__(self):
        # Filled in by read() below.
        self.episode = None
        self.mean = None

        # Super-class init.
        Log.__init__(self, file_path=log_reward_path)

    def write(self, count_episodes, count_states, episode_epsilon, reward_episode, reward_mean):
        """
        Write the epsilon, episode-reward and mean reward to file.

        :param count_episodes:
            Counter for the number of episodes processed during training.
        :param count_states:
            Counter for the number of states processed during training.
        :param episode_epsilon:
            Epsilon-value that was used during this episode.
        :param reward_episode:
            Reward for one episode.
        :param reward_mean:
            Mean reward for the last e.g. 30 episodes.
        """
        msg = "{0:.2f}\t{1:.1f}\t{2:.1f}".format(episode_epsilon, reward_episode, reward_mean)
        self._write(count_episodes=count_episodes, count_states=count_states, msg=msg)

    def read(self):
        """
        Read the log-file into memory so it can be plotted.

        Sets self.count_episodes, self.count_states, self.episode and self.mean.
        """
        # Let the super-class parse the file ...
        self._read()

        # ... then pick out the episode-reward and mean-reward columns.
        self.episode, self.mean = self.data[0], self.data[1]
class LogQValues(Log):
    """Log basic statistics of the Q-values during training."""

    def __init__(self):
        # Filled in by read() below.
        self.min = None
        self.mean = None
        self.max = None
        self.std = None

        # Super-class init.
        Log.__init__(self, file_path=log_q_values_path)

    def write(self, count_episodes, count_states, q_values):
        """
        Write basic statistics for the Q-values to file.

        :param count_episodes:
            Counter for the number of episodes processed during training.
        :param count_states:
            Counter for the number of states processed during training.
        :param q_values:
            Numpy array with Q-values from the replay-memory.
        """
        # Summarize the Q-values as min / mean / max / std.
        stats = (np.min(q_values), np.mean(q_values),
                 np.max(q_values), np.std(q_values))
        msg = "{0:.3f}\t{1:.3f}\t{2:.3f}\t{3:.3f}".format(*stats)
        self._write(count_episodes=count_episodes,
                    count_states=count_states,
                    msg=msg)

    def read(self):
        """
        Read the log-file into memory so it can be plotted.

        Sets self.count_episodes, self.count_states and
        self.min / mean / max / std.
        """
        # Let the super-class parse the file ...
        self._read()

        # ... then pick out the statistics columns for the Q-values.
        self.min, self.mean, self.max, self.std = (self.data[0], self.data[1],
                                                   self.data[2], self.data[3])
########################################################################
def print_progress(msg):
    """
    Print progress on a single line, overwriting the previous line.
    Used during optimization.
    """
    # Carriage-return moves the cursor back to the start of the line;
    # flushing makes the text appear immediately.
    print("\r" + msg, end="", flush=True)
########################################################################
# A state is basically just a multi-dimensional array that is being
# input to the Neural Network. The state consists of pre-processed images
# from the game-environment. We will just convert the game-images to
# gray-scale and resize them to roughly half their size. This is mainly
# so we can save memory-space in the Replay Memory further below.
# The original DeepMind paper used game-states consisting of 4 frames of
# game-images that were gray-scaled, resized to 110 x 84 pixels, and then
# cropped to 84 x 84 pixels because their implementation only supported this.

# Height of each image-frame in the state.
state_height = 105

# Width of each image-frame in the state.
state_width = 80

# Size of each image in the state as (height, width), used when resizing.
state_img_size = np.array([state_height, state_width])

# Number of images in the state: the current frame plus its motion-trace
# (see the MotionTracer-class below).
state_channels = 2

# Shape of the state-array: [height, width, channels].
state_shape = [state_height, state_width, state_channels]
########################################################################
# Functions and classes for processing images from the game-environment
# and converting them into a state.
def _rgb_to_grayscale(image):
"""
Convert an RGB-image into gray-scale using a formula from Wikipedia:
https://en.wikipedia.org/wiki/Grayscale
"""
# Get the separate colour-channels.
r, g, b = image[:, :, 0], image[:, :, 1], image[:, :, 2]
# Convert to gray-scale using the Wikipedia formula.
img_gray = 0.2990 * r + 0.5870 * g + 0.1140 * b
return img_gray
def _pre_process_image(image):
    """Pre-process a raw image from the game-environment."""
    # Convert image to gray-scale.
    img = _rgb_to_grayscale(image)

    # Resize to the desired size using SciPy for convenience.
    # NOTE(review): scipy.misc.imresize was deprecated in SciPy 1.0 and
    # removed in SciPy 1.3 — this code only runs on older SciPy versions
    # (which also require Pillow). Confirm the pinned SciPy version, or
    # port to e.g. PIL.Image.resize / scipy.ndimage.zoom.
    img = scipy.misc.imresize(img, size=state_img_size, interp='bicubic')

    return img
class MotionTracer:
    """
    Used for processing raw image-frames from the game-environment.

    The image-frames are converted to gray-scale, resized, and then
    the background is removed using filtering of the image-frames
    so as to detect motions.

    This is needed because a single image-frame of the game-environment
    is insufficient to determine the direction of moving objects.

    The original DeepMind implementation used the last 4 image-frames
    of the game-environment to allow the Neural Network to learn how
    to detect motion. This implementation could make it a little easier
    for the Neural Network to learn how to detect motion, but it has
    only been tested on Breakout and Space Invaders, and may not work
    for games with more complicated graphics such as Doom. This remains
    to be tested.
    """

    def __init__(self, image, decay=0.75):
        """
        :param image:
            First image from the game-environment,
            used for resetting the motion detector.

        :param decay:
            Parameter for how long the tail should be on the motion-trace.
            This is a float between 0.0 and 1.0 where higher values means
            the trace / tail is longer.
        """
        # Pre-process the image and save it for later use.
        # The input image may be 8-bit integers but internally
        # we need to use floating-point to avoid image-noise
        # caused by recurrent rounding-errors.
        img = _pre_process_image(image=image)

        # Use np.float64 instead of the np.float alias, which was
        # deprecated in NumPy 1.20 and removed in NumPy 1.24.
        # np.float was an alias for Python float == float64, so this
        # is behaviorally identical.
        self.last_input = img.astype(np.float64)

        # Set the last output to zero.
        self.last_output = np.zeros_like(img)

        # Decay-parameter for the trace / tail.
        self.decay = decay

    def process(self, image):
        """Process a raw image-frame from the game-environment."""
        # Pre-process the image so it is gray-scale and resized.
        img = _pre_process_image(image=image)

        # Subtract the previous input. This only leaves the
        # pixels that have changed in the two image-frames.
        img_dif = img - self.last_input

        # Copy the contents of the input-image to the last input.
        self.last_input[:] = img[:]

        # If the pixel-difference is greater than a threshold then
        # set the output pixel-value to the highest value (white),
        # otherwise set the output pixel-value to the lowest value (black).
        # So that we merely detect motion, and don't care about details.
        img_motion = np.where(np.abs(img_dif) > 20, 255.0, 0.0)

        # Add some of the previous output. This recurrent formula
        # is what gives the trace / tail.
        output = img_motion + self.decay * self.last_output

        # Ensure the pixel-values are within the allowed bounds.
        output = np.clip(output, 0.0, 255.0)

        # Set the last output.
        self.last_output = output

        return output

    def get_state(self):
        """
        Get a state that can be used as input to the Neural Network.

        It is basically just the last input and the last output of the
        motion-tracer. This means it is the last image-frame of the
        game-environment, as well as the motion-trace. This shows
        the current location of all the objects in the game-environment
        as well as trajectories / traces of where they have been.
        """
        # Stack the last input and output images.
        state = np.dstack([self.last_input, self.last_output])

        # Convert to 8-bit integer.
        # This is done to save space in the replay-memory.
        state = state.astype(np.uint8)

        return state
########################################################################
class ReplayMemory:
    """
    The replay-memory holds many previous states of the game-environment.
    This helps stabilize training of the Neural Network because the data
    is more diverse when sampled over thousands of different states.
    """

    def __init__(self, size, num_actions, discount_factor=0.97):
        """
        :param size:
            Capacity of the replay-memory. This is the number of states.

        :param num_actions:
            Number of possible actions in the game-environment.

        :param discount_factor:
            Discount-factor used for updating Q-values.
        """
        # NOTE: The builtin types float / int / bool are used as dtypes
        # instead of the np.float / np.int / np.bool aliases, which were
        # deprecated in NumPy 1.20 and removed in NumPy 1.24. The aliases
        # referred to exactly these builtin types, so behavior is unchanged.

        # Array for the previous states of the game-environment.
        self.states = np.zeros(shape=[size] + state_shape, dtype=np.uint8)

        # Array for the Q-values corresponding to the states.
        self.q_values = np.zeros(shape=[size, num_actions], dtype=float)

        # Array for the Q-values before being updated.
        # This is used to compare the Q-values before and after the update.
        self.q_values_old = np.zeros(shape=[size, num_actions], dtype=float)

        # Actions taken for each of the states in the memory.
        self.actions = np.zeros(shape=size, dtype=int)

        # Rewards observed for each of the states in the memory.
        self.rewards = np.zeros(shape=size, dtype=float)

        # Whether the life had ended in each state of the game-environment.
        self.end_life = np.zeros(shape=size, dtype=bool)

        # Whether the episode had ended (aka. game over) in each state.
        self.end_episode = np.zeros(shape=size, dtype=bool)

        # Estimation errors for the Q-values. This is used to balance
        # the sampling of batches for training the Neural Network,
        # so we get a balanced combination of states with high and low
        # estimation errors for their Q-values.
        self.estimation_errors = np.zeros(shape=size, dtype=float)

        # Capacity of the replay-memory as the number of states.
        self.size = size

        # Discount-factor for calculating Q-values.
        self.discount_factor = discount_factor

        # Reset the number of used states in the replay-memory.
        self.num_used = 0

        # Threshold for splitting between low and high estimation errors.
        self.error_threshold = 0.1

    def is_full(self):
        """Return boolean whether the replay-memory is full."""
        return self.num_used == self.size

    def used_fraction(self):
        """Return the fraction of the replay-memory that is used."""
        return self.num_used / self.size

    def reset(self):
        """Reset the replay-memory so it is empty."""
        self.num_used = 0

    def add(self, state, q_values, action, reward, end_life, end_episode):
        """
        Add an observed state from the game-environment, along with the
        estimated Q-values, action taken, observed reward, etc.

        The state is silently dropped if the replay-memory is full.

        :param state:
            Current state of the game-environment.
            This is the output of the MotionTracer-class.

        :param q_values:
            The estimated Q-values for the state.

        :param action:
            The action taken by the agent in this state of the game.

        :param reward:
            The reward that was observed from taking this action
            and moving to the next state.

        :param end_life:
            Boolean whether the agent has lost a life in this state.

        :param end_episode:
            Boolean whether the agent has lost all lives aka. game over
            aka. end of episode.
        """
        if not self.is_full():
            # Index into the arrays for convenience.
            k = self.num_used

            # Increase the number of used elements in the replay-memory.
            self.num_used += 1

            # Store all the values in the replay-memory.
            self.states[k] = state
            self.q_values[k] = q_values
            self.actions[k] = action
            self.end_life[k] = end_life
            self.end_episode[k] = end_episode

            # Note that the reward is limited. This is done to stabilize
            # the training of the Neural Network.
            self.rewards[k] = np.clip(reward, -1.0, 1.0)

    def update_all_q_values(self):
        """
        Update all Q-values in the replay-memory.

        When states and Q-values are added to the replay-memory, the
        Q-values have been estimated by the Neural Network. But we now
        have more data available that we can use to improve the estimated
        Q-values, because we now know which actions were taken and the
        observed rewards. We sweep backwards through the entire replay-memory
        to use the observed data to improve the estimated Q-values.
        """
        # Copy old Q-values so we can print their statistics later.
        # Note that the contents of the arrays are copied.
        self.q_values_old[:] = self.q_values[:]

        # Process the replay-memory backwards and update the Q-values.
        # This loop could be implemented entirely in NumPy for higher speed,
        # but it is probably only a small fraction of the overall time usage,
        # and it is much easier to understand when implemented like this.
        # The last state is skipped because it has no following state
        # to bootstrap from.
        for k in reversed(range(self.num_used-1)):
            # Get the data for the k'th state in the replay-memory.
            action = self.actions[k]
            reward = self.rewards[k]
            end_life = self.end_life[k]
            end_episode = self.end_episode[k]

            # Calculate the Q-value for the action that was taken in this state.
            if end_life or end_episode:
                # If the agent lost a life or it was game over / end of episode,
                # then the value of taking the given action is just the reward
                # that was observed in this single step. This is because the
                # Q-value is defined as the discounted value of all future game
                # steps in a single life of the agent. When the life has ended,
                # there will be no future steps.
                action_value = reward
            else:
                # Otherwise the value of taking the action is the reward that
                # we have observed plus the discounted value of future rewards
                # from continuing the game. We use the estimated Q-values for
                # the following state and take the maximum, because we will
                # generally take the action that has the highest Q-value.
                action_value = reward + self.discount_factor * np.max(self.q_values[k + 1])

            # Error of the Q-value that was estimated using the Neural Network.
            self.estimation_errors[k] = abs(action_value - self.q_values[k, action])

            # Update the Q-value with the better estimate.
            self.q_values[k, action] = action_value

        self.print_statistics()

    def prepare_sampling_prob(self, batch_size=128):
        """
        Prepare the probability distribution for random sampling of states
        and Q-values for use in training of the Neural Network.

        The probability distribution is just a simple binary split of the
        replay-memory based on the estimation errors of the Q-values.
        The idea is to create a batch of samples that are balanced somewhat
        evenly between Q-values that the Neural Network already knows how to
        estimate quite well because they have low estimation errors, and
        Q-values that are poorly estimated by the Neural Network because
        they have high estimation errors.

        The reason for this balancing of Q-values with high and low estimation
        errors, is that if we train the Neural Network mostly on data with
        high estimation errors, then it will tend to forget what it already
        knows and hence become over-fit so the training becomes unstable.
        """
        # Get the errors between the Q-values that were estimated using
        # the Neural Network, and the Q-values that were updated with the
        # reward that was actually observed when an action was taken.
        err = self.estimation_errors[0:self.num_used]

        # Create an index of the estimation errors that are low.
        idx = err<self.error_threshold
        self.idx_err_lo = np.squeeze(np.where(idx))

        # Create an index of the estimation errors that are high.
        self.idx_err_hi = np.squeeze(np.where(np.logical_not(idx)))

        # Probability of sampling Q-values with high estimation errors.
        # This is either set to the fraction of the replay-memory that
        # has high estimation errors - or it is set to 0.5. So at least
        # half of the batch has high estimation errors.
        prob_err_hi = len(self.idx_err_hi) / self.num_used
        prob_err_hi = max(prob_err_hi, 0.5)

        # Number of samples in a batch that have high estimation errors.
        self.num_samples_err_hi = int(prob_err_hi * batch_size)

        # Number of samples in a batch that have low estimation errors.
        self.num_samples_err_lo = batch_size - self.num_samples_err_hi

    def random_batch(self):
        """
        Get a random batch of states and Q-values from the replay-memory.
        You must call prepare_sampling_prob() before calling this function,
        which also sets the batch-size.

        The batch has been balanced so it contains states and Q-values
        that have both high and low estimation errors for the Q-values.
        This is done to both speed up and stabilize training of the
        Neural Network.
        """
        # Random index of states and Q-values in the replay-memory.
        # These have LOW estimation errors for the Q-values.
        idx_lo = np.random.choice(self.idx_err_lo,
                                  size=self.num_samples_err_lo,
                                  replace=False)

        # Random index of states and Q-values in the replay-memory.
        # These have HIGH estimation errors for the Q-values.
        idx_hi = np.random.choice(self.idx_err_hi,
                                  size=self.num_samples_err_hi,
                                  replace=False)

        # Combine the indices.
        idx = np.concatenate((idx_lo, idx_hi))

        # Get the batches of states and Q-values.
        states_batch = self.states[idx]
        q_values_batch = self.q_values[idx]

        return states_batch, q_values_batch

    def all_batches(self, batch_size=128):
        """
        Iterator for all the states and Q-values in the replay-memory.
        It returns the indices for the beginning and end, as well as
        a progress-counter between 0.0 and 1.0.

        This function is not currently being used except by the function
        estimate_all_q_values() below. These two functions are merely
        included to make it easier for you to experiment with the code
        by showing you an easy and efficient way to loop over all the
        data in the replay-memory.
        """
        # Start index for the current batch.
        begin = 0

        # Repeat until all batches have been processed.
        while begin < self.num_used:
            # End index for the current batch.
            end = begin + batch_size

            # Ensure the batch does not exceed the used replay-memory.
            if end > self.num_used:
                end = self.num_used

            # Progress counter.
            progress = end / self.num_used

            # Yield the batch indices and completion-counter.
            yield begin, end, progress

            # Set the start-index for the next batch to the end of this batch.
            begin = end

    def estimate_all_q_values(self, model):
        """
        Estimate all Q-values for the states in the replay-memory
        using the model / Neural Network.

        Note that this function is not currently being used. It is provided
        to make it easier for you to experiment with this code, by showing
        you an efficient way to iterate over all the states and Q-values.

        :param model:
            Instance of the NeuralNetwork-class.
        """
        print("Re-calculating all Q-values in replay memory ...")

        # Process the entire replay-memory in batches.
        for begin, end, progress in self.all_batches():
            # Print progress.
            msg = "\tProgress: {0:.0%}"
            msg = msg.format(progress)
            print_progress(msg)

            # Get the states for the current batch.
            states = self.states[begin:end]

            # Calculate the Q-values using the Neural Network
            # and update the replay-memory.
            self.q_values[begin:end] = model.get_q_values(states=states)

        # Newline.
        print()

    def print_statistics(self):
        """Print statistics for the contents of the replay-memory."""
        print("Replay-memory statistics:")

        # Print statistics for the Q-values before they were updated
        # in update_all_q_values().
        msg = "\tQ-values Before, Min: {0:5.2f}, Mean: {1:5.2f}, Max: {2:5.2f}"
        print(msg.format(np.min(self.q_values_old),
                         np.mean(self.q_values_old),
                         np.max(self.q_values_old)))

        # Print statistics for the Q-values after they were updated
        # in update_all_q_values().
        msg = "\tQ-values After, Min: {0:5.2f}, Mean: {1:5.2f}, Max: {2:5.2f}"
        print(msg.format(np.min(self.q_values),
                         np.mean(self.q_values),
                         np.max(self.q_values)))

        # Print statistics for the difference in Q-values before and
        # after the update in update_all_q_values().
        q_dif = self.q_values - self.q_values_old
        msg = "\tQ-values Diff., Min: {0:5.2f}, Mean: {1:5.2f}, Max: {2:5.2f}"
        print(msg.format(np.min(q_dif),
                         np.mean(q_dif),
                         np.max(q_dif)))

        # Print statistics for the number of large estimation errors.
        # Don't use the estimation error for the last state in the memory,
        # because its Q-values have not been updated.
        err = self.estimation_errors[:-1]
        err_count = np.count_nonzero(err > self.error_threshold)
        msg = "\tNumber of large errors > {0}: {1} / {2} ({3:.1%})"
        print(msg.format(self.error_threshold, err_count,
                         self.num_used, err_count / self.num_used))

        # How much of the replay-memory is used by states with end_life.
        end_life_pct = np.count_nonzero(self.end_life) / self.num_used

        # How much of the replay-memory is used by states with end_episode.
        end_episode_pct = np.count_nonzero(self.end_episode) / self.num_used

        # How much of the replay-memory is used by states with non-zero reward.
        reward_nonzero_pct = np.count_nonzero(self.rewards) / self.num_used

        # Print those statistics.
        msg = "\tend_life: {0:.1%}, end_episode: {1:.1%}, reward non-zero: {2:.1%}"
        print(msg.format(end_life_pct, end_episode_pct, reward_nonzero_pct))
########################################################################
class LinearControlSignal:
    """
    A control signal that changes linearly over time.

    This is used to change e.g. the learning-rate for the optimizer
    of the Neural Network, as well as other parameters.

    TensorFlow has functionality for doing this, but it uses the
    global_step counter inside the TensorFlow graph, while we
    want the control signals to use a state-counter for the
    game-environment. So it is easier to make this in Python.
    """

    def __init__(self, start_value, end_value, num_iterations, repeat=False):
        """
        Create a new object.

        :param start_value:
            Start-value for the control signal.

        :param end_value:
            End-value for the control signal.

        :param num_iterations:
            Number of iterations it takes to reach the end_value
            from the start_value.

        :param repeat:
            Boolean whether to reset the control signal back to the
            start_value after the end_value has been reached.
        """
        # Store arguments in this object.
        self.start_value = start_value
        self.end_value = end_value
        self.num_iterations = num_iterations
        self.repeat = repeat

        # Slope of the linear ramp from start_value to end_value.
        self._coefficient = (end_value - start_value) / num_iterations

    def get_value(self, iteration):
        """Get the value of the control signal for the given iteration."""
        # Wrap the iteration-counter around when repeating.
        if self.repeat:
            iteration %= self.num_iterations

        # Past the end of the ramp the signal is clamped to the end-value.
        if iteration >= self.num_iterations:
            return self.end_value

        # On the ramp: linear interpolation from the start-value.
        return self.start_value + iteration * self._coefficient
########################################################################
class EpsilonGreedy:
    """
    The epsilon-greedy policy either takes a random action with
    probability epsilon, or it takes the action for the highest
    Q-value.

    If epsilon is 1.0 then the actions are always random.
    If epsilon is 0.0 then the actions are always argmax for the Q-values.

    Epsilon is typically decreased linearly from 1.0 to 0.1 during
    training, and a lower fixed value e.g. 0.05 or 0.01 is used
    during testing.
    """

    def __init__(self, num_actions,
                 epsilon_testing=0.05,
                 num_iterations=1e6,
                 start_value=1.0, end_value=0.1,
                 repeat=False):
        """
        :param num_actions:
            Number of possible actions in the game-environment.

        :param epsilon_testing:
            Epsilon-value when testing.

        :param num_iterations:
            Number of training iterations required to linearly
            decrease epsilon from start_value to end_value.

        :param start_value:
            Starting value for linearly decreasing epsilon.

        :param end_value:
            Ending value for linearly decreasing epsilon.

        :param repeat:
            Boolean whether to repeat and restart the linear decrease
            when the end_value is reached, or only do it once and then
            output the end_value forever after.
        """
        # Store parameters.
        self.num_actions = num_actions
        self.epsilon_testing = epsilon_testing
        self.epsilon_AEG = 0.5

        # Control signal for linearly decreasing epsilon during training.
        self.epsilon_linear = LinearControlSignal(num_iterations=num_iterations,
                                                  start_value=start_value,
                                                  end_value=end_value,
                                                  repeat=repeat)

    def set_epsilon(self, epsilon):
        """Set the epsilon used by the epsilon_AEG override."""
        self.epsilon_AEG = epsilon

    def get_epsilon(self, iteration, training):
        """
        Return the epsilon for the given iteration: linearly decreased
        while training==True, otherwise the fixed testing-value.
        """
        if not training:
            return self.epsilon_testing
        return self.epsilon_linear.get_value(iteration=iteration)

    def get_action(self, q_values, iteration, training):
        """
        Use the epsilon-greedy policy to select an action.

        :param q_values:
            These are the Q-values that are estimated by the Neural Network
            for the current state of the game-environment.

        :param iteration:
            This is an iteration counter. Here we use the number of states
            that has been processed in the game-environment.

        :param training:
            Boolean whether we are training or testing the
            Reinforcement Learning agent.

        :return:
            action (integer), epsilon (float)
        """
        epsilon = self.get_epsilon(iteration=iteration, training=training)

        if np.random.random() < epsilon:
            # Explore: pick a uniformly random action.
            action = np.random.randint(low=0, high=self.num_actions)
        else:
            # Exploit: pick the action with the highest Q-value.
            action = np.argmax(q_values)

        return action, epsilon
########################################################################
class NeuralNetwork:
"""
Creates a Neural Network for Reinforcement Learning (Q-Learning).
Functions are provided for estimating Q-values from states of the
game-environment, and for optimizing the Neural Network so it becomes
better at estimating the Q-values.
"""
def __init__(self, num_actions, replay_memory, use_pretty_tensor=True):
"""
:param num_actions:
Number of discrete actions for the game-environment.
:param replay_memory:
Object-instance of the ReplayMemory-class.
:param use_pretty_tensor:
Boolean whether to use PrettyTensor (True) which must then be
installed, or use the tf.layers API (False) which is already
built into TensorFlow.
"""
# Whether to use the PrettyTensor API (True) or tf.layers (False).
self.use_pretty_tensor = use_pretty_tensor
# Replay-memory used for sampling random batches.
self.replay_memory = replay_memory
# Path for saving/restoring checkpoints.
self.checkpoint_path = os.path.join(checkpoint_dir, "checkpoint")
# self.checkpoint_path = os.path.join(os.getcwd(), checkpoint_dir)
# Placeholder variable for inputting states into the Neural Network.
# A state is a multi-dimensional array holding image-frames from
# the game-environment.
self.x = tf.placeholder(dtype=tf.float32, shape=[None] + state_shape, name='x')
# Placeholder variable for inputting the learning-rate to the optimizer.
self.learning_rate = tf.placeholder(dtype=tf.float32, shape=[])
# Placeholder variable for inputting the target Q-values
# that we want the Neural Network to be able to estimate.
self.q_values_new = tf.placeholder(tf.float32,
shape=[None, num_actions],
name='q_values_new')
# This is a hack that allows us to save/load the counter for
# the number of states processed in the game-environment.
# We will keep it as a variable in the TensorFlow-graph
# even though it will not actually be used by TensorFlow.
self.count_states = tf.Variable(initial_value=0,
trainable=False, dtype=tf.int64,
name='count_states')
# Similarly, this is the counter for the number of episodes.
self.count_episodes = tf.Variable(initial_value=0,
trainable=False, dtype=tf.int64,
name='count_episodes')
# TensorFlow operation for increasing count_states.
self.count_states_increase = tf.assign(self.count_states,
self.count_states + 1)
# TensorFlow operation for increasing count_episodes.
self.count_episodes_increase = tf.assign(self.count_episodes,
self.count_episodes + 1)
# The Neural Network will be constructed in the following.
# Note that the architecture of this Neural Network is very
# different from that used in the original DeepMind papers,
# which was something like this:
# Input image: 84 x 84 x 4 (4 gray-scale images of 84 x 84 pixels).
# Conv layer 1: 16 filters 8 x 8, stride 4, relu.
# Conv layer 2: 32 filters 4 x 4, stride 2, relu.
# Fully-conn. 1: 256 units, relu. (Sometimes 512 units).
# Fully-conn. 2: num-action units, linear.
# The DeepMind architecture does a very aggressive downsampling of
# the input images so they are about 10 x 10 pixels after the final
# convolutional layer. I found that this resulted in significantly
# distorted Q-values when using the training method further below.
# The reason DeepMind could get it working was perhaps that they
# used a very large replay memory (5x as big as here), and a single
# optimization iteration was performed after each step of the game,
# and some more tricks.
# Initializer for the layers in the Neural Network.
# If you change the architecture of the network, particularly
# if you add or remove layers, then you may have to change
# the stddev-parameter here. The initial weights must result
# in the Neural Network outputting Q-values that are very close
# to zero - but the network weights must not be too low either
# because it will make it hard to train the network.
# You can experiment with values between 1e-2 and 1e-3.
init = tf.truncated_normal_initializer(mean=0.0, stddev=2e-2)
if self.use_pretty_tensor:
# This builds the Neural Network using the PrettyTensor API,
# which is a very elegant builder API, but some people are
# having problems installing and using it.
import prettytensor as pt
# Wrap the input to the Neural Network in a PrettyTensor object.
x_pretty = pt.wrap(self.x)
# Create the convolutional Neural Network using Pretty Tensor.
with pt.defaults_scope(activation_fn=tf.nn.relu):
self.q_values = x_pretty. \
conv2d(kernel=3, depth=16, stride=2, name='layer_conv1', weights=init). \
conv2d(kernel=3, depth=32, stride=2, name='layer_conv2', weights=init). \
conv2d(kernel=3, depth=64, stride=1, name='layer_conv3', weights=init). \
flatten(). \
fully_connected(size=1024, name='layer_fc1', weights=init). \
fully_connected(size=1024, name='layer_fc2', weights=init). \
fully_connected(size=1024, name='layer_fc3', weights=init). \
fully_connected(size=1024, name='layer_fc4', weights=init). \
fully_connected(size=num_actions, name='layer_fc_out', weights=init,
activation_fn=None)
# Loss-function which must be optimized. This is the mean-squared
# error between the Q-values that are output by the Neural Network
# and the target Q-values.
self.loss = self.q_values.l2_regression(target=self.q_values_new)
else:
# This builds the Neural Network using the tf.layers API,
# which is very verbose and inelegant, but should work for everyone.
# Note that the checkpoints for Tutorial #16 which can be
# downloaded from the internet only support PrettyTensor.
# Although the Neural Networks appear to be identical when
# built using the PrettyTensor and tf.layers APIs,
# they actually create somewhat different TensorFlow graphs
# where the variables have different names, which means the
# checkpoints are incompatible for the two builder APIs.
# Padding used for the convolutional layers.
padding = 'SAME'
# Activation function for all convolutional and fully-connected
# layers, except the last.
activation = tf.nn.relu
# Reference to the lastly added layer of the Neural Network.
# This makes it easy to add or remove layers.
net = self.x
# First convolutional layer.
net = tf.layers.conv2d(inputs=net, name='layer_conv1',
filters=16, kernel_size=3, strides=2,
padding=padding,
kernel_initializer=init, activation=activation)
# Second convolutional layer.
net = tf.layers.conv2d(inputs=net, name='layer_conv2',
filters=32, kernel_size=3, strides=2,
padding=padding,
kernel_initializer=init, activation=activation)
# Third convolutional layer.
net = tf.layers.conv2d(inputs=net, name='layer_conv3',
filters=64, kernel_size=3, strides=1,
padding=padding,
kernel_initializer=init, activation=activation)
# Flatten output of the last convolutional layer so it can
# be input to a fully-connected (aka. dense) layer.
# TODO: For some bizarre reason, this function is not yet in tf.layers
# TODO: net = tf.layers.flatten(net)
net = tf.contrib.layers.flatten(net)
# First fully-connected (aka. dense) layer.
net = tf.layers.dense(inputs=net, name='layer_fc1', units=1024,
kernel_initializer=init, activation=activation)
# Second fully-connected layer.
net = tf.layers.dense(inputs=net, name='layer_fc2', units=1024,
kernel_initializer=init, activation=activation)
# Third fully-connected layer.
net = tf.layers.dense(inputs=net, name='layer_fc3', units=1024,
kernel_initializer=init, activation=activation)
# Fourth fully-connected layer.
net = tf.layers.dense(inputs=net, name='layer_fc4', units=1024,
kernel_initializer=init, activation=activation)
# Final fully-connected layer.
net = tf.layers.dense(inputs=net, name='layer_fc_out', units=num_actions,
kernel_initializer=init, activation=None)
# The output of the Neural Network is the estimated Q-values
# for each possible action in the game-environment.
self.q_values = net
# TensorFlow has a built-in loss-function for doing regression:
# self.loss = tf.nn.l2_loss(self.q_values - self.q_values_new)
# But it uses tf.reduce_sum() rather than tf.reduce_mean()
# which is used by PrettyTensor. This means the scale of the
# gradient is different and hence the hyper-parameters
# would have to be re-tuned. So instead we calculate the
# L2-loss similarly to how it is done in PrettyTensor.
squared_error = tf.square(self.q_values - self.q_values_new)
sum_squared_error = tf.reduce_sum(squared_error, axis=1)
self.loss = tf.reduce_mean(sum_squared_error)
# Optimizer used for minimizing the loss-function.
# Note the learning-rate is a placeholder variable so we can
# lower the learning-rate as optimization progresses.
self.optimizer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
# Used for saving and loading checkpoints.
self.saver = tf.train.Saver()
# Create a new TensorFlow session so we can run the Neural Network.
self.session = tf.Session()
# Load the most recent checkpoint if it exists,
# otherwise initialize all the variables in the TensorFlow graph.
self.load_checkpoint()
def close(self):
    """Shut down the TensorFlow session and release its resources."""
    self.session.close()
def load_checkpoint(self):
    """
    Load all variables of the TensorFlow graph from a checkpoint.
    If no checkpoint exists, or restoring fails, initialize all
    variables from scratch instead.
    """
    try:
        print("Trying to restore last checkpoint ...")

        # Use TensorFlow to find the latest checkpoint - if any.
        # Nafi modified this line: uses the module-level checkpoint_dir
        # instead of self.checkpoint_path.
        # last_chk_path = tf.train.latest_checkpoint(checkpoint_dir=self.checkpoint_path)
        last_chk_path = tf.train.latest_checkpoint(checkpoint_dir=checkpoint_dir)

        # latest_checkpoint() returns None when the directory holds no
        # checkpoint; treat that as a failed restore instead of letting
        # saver.restore() fail with a confusing error.
        if last_chk_path is None:
            raise FileNotFoundError(
                "no checkpoint found in: {0}".format(checkpoint_dir))

        # Try and load the data in the checkpoint.
        self.saver.restore(self.session, save_path=last_chk_path)

        # If we get to this point, the checkpoint was successfully loaded.
        print("Restored checkpoint from:", last_chk_path)
    except Exception:
        # Was a bare `except:`, which also swallows KeyboardInterrupt and
        # SystemExit; Exception is broad enough for I/O and TF restore
        # errors while letting Ctrl-C through.
        # Initialize all the variables for the TensorFlow graph instead.
        print("Failed to restore checkpoint from:", checkpoint_dir)
        print("Initializing variables instead.")
        self.session.run(tf.global_variables_initializer())
def save_checkpoint(self, current_iteration):
    """Write all TensorFlow-graph variables to a checkpoint-file tagged
    with the given iteration number.
    """
    # Nafi changed the line save_path=self.checkpoint_path
    self.saver.save(self.session,
                    save_path=self.checkpoint_path,
                    global_step=current_iteration)
    print("Saved checkpoint.")
def get_q_values(self, states):
    """
    Estimate Q-values for a batch of states.

    Each state holds two image channels: the most recent game frame and
    a motion-trace (see the MotionTracer-class), so `states` is a 4-dim
    array [batch, height, width, state_channels]. The result is a 2-dim
    array [batch, num_actions] with one Q-value per possible action.
    """
    # Feed the states into the input placeholder and evaluate the
    # network's output tensor.
    return self.session.run(self.q_values, feed_dict={self.x: states})
def optimize(self, min_epochs=1.0, max_epochs=10,
             batch_size=128, loss_limit=0.015,
             learning_rate=1e-3):
    """
    Optimize the Neural Network by sampling states and Q-values
    from the replay-memory.

    The original DeepMind paper performed one optimization iteration
    after processing each new state of the game-environment. This is
    an un-natural way of doing optimization of Neural Networks.

    So instead we perform a full optimization run every time the
    Replay Memory is full (or it is filled to the desired fraction).
    This also gives more efficient use of a GPU for the optimization.

    The problem is that this may over-fit the Neural Network to whatever
    is in the replay-memory. So we use several tricks to try and adapt
    the number of optimization iterations.

    :param min_epochs:
        Minimum number of optimization epochs. One epoch corresponds
        to the replay-memory being used once. However, as the batches
        are sampled randomly and biased somewhat, we may not use the
        whole replay-memory. This number is just a convenient measure.

    :param max_epochs:
        Maximum number of optimization epochs.

    :param batch_size:
        Size of each random batch sampled from the replay-memory.

    :param loss_limit:
        Optimization continues until the average loss-value of the
        last 100 batches is below this value (or max_epochs is reached).

    :param learning_rate:
        Learning-rate to use for the optimizer.
    """
    print("Optimizing Neural Network to better estimate Q-values ...")
    print("\tLearning-rate: {0:.1e}".format(learning_rate))
    print("\tLoss-limit: {0:.3f}".format(loss_limit))
    print("\tMax epochs: {0:.1f}".format(max_epochs))

    # Prepare the probability distribution for sampling the replay-memory.
    self.replay_memory.prepare_sampling_prob(batch_size=batch_size)

    # Number of optimization iterations corresponding to one epoch.
    # NOTE(review): this is a float; it is only used through int() below
    # and as the denominator for the epoch-progress printout.
    iterations_per_epoch = self.replay_memory.num_used / batch_size

    # Minimum number of iterations to perform.
    min_iterations = int(iterations_per_epoch * min_epochs)

    # Maximum number of iterations to perform.
    max_iterations = int(iterations_per_epoch * max_epochs)

    # Buffer for storing the loss-values of the most recent batches.
    loss_history = np.zeros(100, dtype=float)

    for i in range(max_iterations):
        # Randomly sample a batch of states and target Q-values
        # from the replay-memory. These are the Q-values that we
        # want the Neural Network to be able to estimate.
        state_batch, q_values_batch = self.replay_memory.random_batch()

        # Create a feed-dict for inputting the data to the TensorFlow graph.
        # Note that the learning-rate is also in this feed-dict.
        feed_dict = {self.x: state_batch,
                     self.q_values_new: q_values_batch,
                     self.learning_rate: learning_rate}

        # Perform one optimization step and get the loss-value.
        loss_val, _ = self.session.run([self.loss, self.optimizer],
                                       feed_dict=feed_dict)

        # Shift the loss-history and assign the new value.
        # This causes the loss-history to only hold the most recent values.
        loss_history = np.roll(loss_history, 1)
        loss_history[0] = loss_val

        # Calculate the average loss for the previous batches.
        # NOTE(review): until 100 batches have run, this mean still
        # includes the initial zeros, which biases it low and can make
        # the early-stop condition below fire sooner than intended.
        loss_mean = np.mean(loss_history)

        # Print status.
        pct_epoch = i / iterations_per_epoch
        msg = "\tIteration: {0} ({1:.2f} epoch), Batch loss: {2:.4f}, Mean loss: {3:.4f}"
        msg = msg.format(i, pct_epoch, loss_val, loss_mean)
        print_progress(msg)

        # Stop the optimization if we have performed the required number
        # of iterations and the loss-value is sufficiently low.
        if i > min_iterations and loss_mean < loss_limit:
            break

    # Print newline.
    print()
# added by Nafi
def get_loss(self):
    """Return the TensorFlow loss tensor (a graph reference, not a value)."""
    return self.loss
def get_weights_variable(self, layer_name):
    """
    Return the TensorFlow variable holding the weights of the named layer.

    Only a reference into the graph is returned; use get_variable_value()
    to fetch the actual numbers.
    """
    # PrettyTensor and the tf.layers API store conv-layer weights under
    # different variable-names.
    variable_name = 'weights' if self.use_pretty_tensor else 'kernel'
    with tf.variable_scope(layer_name, reuse=True):
        variable = tf.get_variable(variable_name)
    return variable
def get_variable_value(self, variable):
    """Evaluate a graph variable and return its current value."""
    return self.session.run(variable)
def get_layer_tensor(self, layer_name):
    """
    Return the output tensor of the named layer.

    This is only a reference into the TensorFlow graph; use
    get_tensor_value() to compute its actual contents.
    """
    # Layers ending in a ReLU activation expose their output under the
    # "<name>/Relu:0" tensor-name.
    tensor_name = "{0}/Relu:0".format(layer_name)
    return tf.get_default_graph().get_tensor_by_name(tensor_name)
def get_tensor_value(self, tensor, state):
    """Evaluate `tensor` for a single game-state and return its value."""
    # The network expects a batch, so wrap the single state in a list.
    return self.session.run(tensor, feed_dict={self.x: [state]})
def get_count_states(self):
    """
    Number of game-states processed so far.

    The counter lives inside the TensorFlow graph purely so it survives
    checkpoint save/reload; it is not used by the graph itself.
    """
    return self.session.run(self.count_states)
def get_count_episodes(self):
    """Number of episodes processed so far (stored in the TF graph)."""
    return self.session.run(self.count_episodes)
def increase_count_states(self):
    """Run the graph-op that bumps the state-counter; returns the new count."""
    return self.session.run(self.count_states_increase)
def increase_count_episodes(self):
    """Run the graph-op that bumps the episode-counter; returns the new count."""
    return self.session.run(self.count_episodes_increase)
########################################################################
class Agent:
    """
    This implements the function for running the game-environment with
    an agent that uses Reinforcement Learning. This class also creates
    instances of the Replay Memory and Neural Network.

    NOTE(review): this version carries "Nafi" modifications for an
    Adaptive-Epsilon-Greedy (AEG) scheme: a separate state-counter
    (count_states_for_AEG) drives the epsilon schedule and is rewound
    when the 100-episode mean reward drops, raising epsilon again.
    """

    def __init__(self, env_name, training, render=False, use_logging=True):
        """
        Create an object-instance. This also creates a new object for the
        Replay Memory and the Neural Network.

        Replay Memory will only be allocated if training==True.

        :param env_name:
            Name of the game-environment in OpenAI Gym.
            Examples: 'Breakout-v0' and 'SpaceInvaders-v0'

        :param training:
            Boolean whether to train the agent and Neural Network (True),
            or test the agent by playing a number of episodes of the game (False).

        :param render:
            Boolean whether to render the game-images to screen during testing.

        :param use_logging:
            Boolean whether to use logging to text-files during training.
        """
        # Create the game-environment using OpenAI Gym.
        self.env = gym.make(env_name)

        # The number of possible actions that the agent may take in every step.
        self.num_actions = self.env.action_space.n

        # Whether we are training (True) or testing (False).
        self.training = training

        # Whether to render each image-frame of the game-environment to screen.
        self.render = render

        # Whether to use logging during training.
        self.use_logging = use_logging

        if self.use_logging and self.training:
            # Used for logging Q-values and rewards during training.
            self.log_q_values = LogQValues()
            self.log_reward = LogReward()
        else:
            self.log_q_values = None
            self.log_reward = None

        # List of string-names for the actions in the game-environment.
        self.action_names = self.env.unwrapped.get_action_meanings()

        # Epsilon-greedy policy for selecting an action from the Q-values.
        # During training the epsilon is decreased linearly over the given
        # number of iterations. During testing the fixed epsilon is used.
        self.epsilon_greedy = EpsilonGreedy(start_value=1.0,
                                            end_value=0.1,
                                            num_iterations=1e6,
                                            num_actions=self.num_actions,
                                            epsilon_testing=0.01)

        if self.training:
            # The following control-signals are only used during training.

            # The learning-rate for the optimizer decreases linearly.
            self.learning_rate_control = LinearControlSignal(start_value=1e-3,
                                                             end_value=1e-5,
                                                             num_iterations=5e6)

            # The loss-limit is used to abort the optimization whenever the
            # mean batch-loss falls below this limit.
            self.loss_limit_control = LinearControlSignal(start_value=0.1,
                                                         end_value=0.015,
                                                         num_iterations=5e6)

            # The maximum number of epochs to perform during optimization.
            # This is increased from 5 to 10 epochs, because it was found for
            # the Breakout-game that too many epochs could be harmful early
            # in the training, as it might cause over-fitting.
            # Later in the training we would occasionally get rare events
            # and would therefore have to optimize for more iterations
            # because the learning-rate had been decreased.
            self.max_epochs_control = LinearControlSignal(start_value=5.0,
                                                         end_value=10.0,
                                                         num_iterations=5e6)

            # The fraction of the replay-memory to be used.
            # Early in the training, we want to optimize more frequently
            # so the Neural Network is trained faster and the Q-values
            # are learned and updated more often. Later in the training,
            # we need more samples in the replay-memory to have sufficient
            # diversity, otherwise the Neural Network will over-fit.
            self.replay_fraction = LinearControlSignal(start_value=0.1,
                                                       end_value=1.0,
                                                       num_iterations=5e6)
        else:
            # We set these objects to None when they will not be used.
            self.learning_rate_control = None
            self.loss_limit_control = None
            self.max_epochs_control = None
            self.replay_fraction = None

        if self.training:
            # We only create the replay-memory when we are training the agent,
            # because it requires a lot of RAM. The image-frames from the
            # game-environment are resized to 105 x 80 pixels gray-scale,
            # and each state has 2 channels (one for the recent image-frame
            # of the game-environment, and one for the motion-trace).
            # Each pixel is 1 byte, so this replay-memory needs more than
            # 3 GB RAM (105 x 80 x 2 x 200000 bytes).
            self.replay_memory = ReplayMemory(size=200000,
                                              num_actions=self.num_actions)
        else:
            self.replay_memory = None

        # Create the Neural Network used for estimating Q-values.
        self.model = NeuralNetwork(num_actions=self.num_actions,
                                   replay_memory=self.replay_memory)

        # Log of the rewards obtained in each episode during calls to run()
        self.episode_rewards = []

    def reset_episode_rewards(self):
        """Reset the log of episode-rewards."""
        self.episode_rewards = []

    def get_action_name(self, action):
        """Return the name of an action."""
        return self.action_names[action]

    def get_lives(self):
        """Get the number of lives the agent has in the game-environment."""
        return self.env.unwrapped.ale.lives()

    def run(self, num_episodes=None):
        """
        Run the game-environment and use the Neural Network to decide
        which actions to take in each step through Q-value estimates.

        :param num_episodes:
            Number of episodes to process in the game-environment.
            If None then continue forever. This is useful during training
            where you might want to stop the training using Ctrl-C instead.
        """
        # This will cause a reset in the first iteration of the following loop.
        end_episode = True

        # Counter for the number of states we have processed.
        # This is stored in the TensorFlow graph so it can be
        # saved and reloaded along with the checkpoint.
        count_states = self.model.get_count_states()
        # AEG keeps its own copy of the state-counter so the epsilon
        # schedule can be rewound without disturbing the real counter.
        count_states_for_AEG = count_states
        prev_count_states_for_AEG = count_states

        # Counter for the number of episodes we have processed.
        count_episodes = self.model.get_count_episodes()

        if num_episodes is None:
            # Loop forever by comparing the episode-counter to infinity.
            num_episodes = float('inf')
        else:
            # The episode-counter may not start at zero if training is
            # continued from a checkpoint. Take this into account
            # when determining the number of iterations to perform.
            num_episodes += count_episodes

        # added by Nafi
        episode_count_for_AEG = 1
        reward_mean_last_100_prev_AEG = 0.1
        # NOTE(review): save_checkpoint_count is never used below.
        save_checkpoint_count = 0

        while count_episodes <= num_episodes:
            if end_episode:
                # Reset the game-environment and get the first image-frame.
                img = self.env.reset()

                # Create a new motion-tracer for processing images from the
                # game-environment. Initialize with the first image-frame.
                # This resets the motion-tracer so the trace starts again.
                # This could also be done if end_life==True.
                motion_tracer = MotionTracer(img)

                # Reset the reward for the entire episode to zero.
                # This is only used for printing statistics.
                reward_episode = 0.0

                # Increase the counter for the number of episodes.
                # This counter is stored inside the TensorFlow graph
                # so it can be saved and restored with the checkpoint.
                count_episodes = self.model.increase_count_episodes()

                # Get the number of lives that the agent has left in this episode.
                num_lives = self.get_lives()

            # Get the state of the game-environment from the motion-tracer.
            # The state has two images: (1) The last image-frame from the game
            # and (2) a motion-trace that shows movement trajectories.
            state = motion_tracer.get_state()

            # Use the Neural Network to estimate the Q-values for the state.
            # Note that the function assumes an array of states and returns
            # a 2-dim array of Q-values, but we just have a single state here.
            q_values = self.model.get_q_values(states=[state])[0]

            # Determine the action that the agent must take in the game-environment.
            # The epsilon is just used for printing further below.
            # changed by Nafi: the AEG counter drives the epsilon schedule.
            action, epsilon = self.epsilon_greedy.get_action(q_values=q_values,
                                                             iteration=count_states_for_AEG,
                                                             training=self.training)

            # Take a step in the game-environment using the given action.
            # Note that in OpenAI Gym, the step-function actually repeats the
            # action between 2 and 4 time-steps for Atari games, with the number
            # chosen at random.
            img, reward, end_episode, info = self.env.step(action=action)

            # Process the image from the game-environment in the motion-tracer.
            # This will first be used in the next iteration of the loop.
            motion_tracer.process(image=img)

            # Add the reward for the step to the reward for the entire episode.
            reward_episode += reward

            # Determine if a life was lost in this step.
            num_lives_new = self.get_lives()
            end_life = (num_lives_new < num_lives)
            num_lives = num_lives_new

            # Increase the counter for the number of states that have been processed.
            # changed by Nafi: advance the AEG counter by the same delta.
            temp_state_count = count_states
            count_states = self.model.increase_count_states()
            count_states_for_AEG = count_states_for_AEG + (count_states - temp_state_count)

            if not self.training and self.render:
                # Render the game-environment to screen.
                self.env.render()

                # Insert a small pause to slow down the game,
                # making it easier to follow for human eyes.
                time.sleep(0.01)

            # If we want to train the Neural Network to better estimate Q-values.
            if self.training:
                # Add the state of the game-environment to the replay-memory.
                self.replay_memory.add(state=state,
                                       q_values=q_values,
                                       action=action,
                                       reward=reward,
                                       end_life=end_life,
                                       end_episode=end_episode)

                # How much of the replay-memory should be used.
                use_fraction = self.replay_fraction.get_value(iteration=count_states)

                # When the replay-memory is sufficiently full.
                if self.replay_memory.is_full() \
                        or self.replay_memory.used_fraction() > use_fraction:
                    # Update all Q-values in the replay-memory through a backwards-sweep.
                    self.replay_memory.update_all_q_values()

                    # Log statistics for the Q-values to file.
                    if self.use_logging:
                        self.log_q_values.write(count_episodes=count_episodes,
                                                count_states=count_states,
                                                q_values=self.replay_memory.q_values)

                    # Get the control parameters for optimization of the Neural Network.
                    # These are changed linearly depending on the state-counter.
                    learning_rate = self.learning_rate_control.get_value(iteration=count_states)
                    loss_limit = self.loss_limit_control.get_value(iteration=count_states)
                    max_epochs = self.max_epochs_control.get_value(iteration=count_states)

                    # Perform an optimization run on the Neural Network so as to
                    # improve the estimates for the Q-values.
                    # This will sample random batches from the replay-memory.
                    self.model.optimize(learning_rate=learning_rate,
                                        loss_limit=loss_limit,
                                        max_epochs=max_epochs)

                    # Save a checkpoint of the Neural Network so we can reload it.
                    self.model.save_checkpoint(count_states)

                    # Reset the replay-memory. This throws away all the data we have
                    # just gathered, so we will have to fill the replay-memory again.
                    self.replay_memory.reset()

            if end_episode:
                # Add the episode's reward to a list for calculating statistics.
                self.episode_rewards.append(reward_episode)
                # Nafi
                # print("Q-values:", self.replay_memory.q_values)
                # print("Loss-values:", self.model.get_loss())

            # Mean reward of the last 100 episodes.
            if len(self.episode_rewards) == 0:
                # The list of rewards is empty.
                reward_mean = 0.0
            else:
                reward_mean = np.mean(self.episode_rewards[-100:])

            if self.training and end_episode:
                # Log reward to file.
                if self.use_logging:
                    self.log_reward.write(count_episodes=count_episodes,
                                          count_states=count_states,
                                          episode_epsilon=epsilon,
                                          reward_episode=reward_episode,
                                          reward_mean=reward_mean)

                # Print reward to screen.
                msg = "{0:4}:{1}\t Epsilon: {2:4.2f}\t Reward: {3:.1f}\t Episode Mean: {4:.1f}"
                print(msg.format(count_episodes, count_states, epsilon,
                                 reward_episode, reward_mean))

                # added by Nafi for AEG: every 100 episodes, compare the
                # current 100-episode mean reward to the previous window.
                if episode_count_for_AEG == 100:
                    if len(self.episode_rewards) == 0:
                        # The list of rewards is empty.
                        reward_mean_last_100_current = 0.0
                    else:
                        reward_mean_last_100_current = np.mean(self.episode_rewards[-100:])
                    print("Prev Reward mean: ", reward_mean_last_100_prev_AEG, " Current Reward mean: ", reward_mean_last_100_current)
                    episode_count_for_AEG = 0
                    reward_reduction = (reward_mean_last_100_prev_AEG - reward_mean_last_100_current) / reward_mean_last_100_prev_AEG
                    if (reward_reduction > 0.1):
                        # change epsilon greedy adaptively
                        print("Inside AEG")
                        print("State Count : ", count_states_for_AEG, "Changed State Count : ", prev_count_states_for_AEG)
                        print("")
                        state_count_diff = count_states_for_AEG - prev_count_states_for_AEG
                        # Rewind the AEG state-counter so the linear epsilon
                        # schedule produces a larger (more exploratory) epsilon.
                        if count_states_for_AEG > 1e6:
                            epsilon_new = epsilon + ((0.9/1e6) * state_count_diff)
                            count_states_for_AEG = ((1 - epsilon_new) / 0.9) * 1e6
                        else:
                            count_states_for_AEG = prev_count_states_for_AEG
                    reward_mean_last_100_prev_AEG = reward_mean_last_100_current
                    prev_count_states_for_AEG = count_states_for_AEG
                episode_count_for_AEG += 1

            elif not self.training and (reward != 0.0 or end_life or end_episode):
                # Print Q-values and reward to screen.
                msg = "{0:4}:{1}\tQ-min: {2:5.3f}\tQ-max: {3:5.3f}\tLives: {4}\tReward: {5:.1f}\tEpisode Mean: {6:.1f}"
                print(msg.format(count_episodes, count_states, np.min(q_values),
                                 np.max(q_values), num_lives, reward_episode, reward_mean))
########################################################################
if __name__ == '__main__':
    # Description of this program.
    # NOTE(review): "Reinformenct" is a typo in this user-visible string;
    # it is runtime text, so it is left unchanged here.
    desc = "Reinformenct Learning (Q-learning) for Atari Games using TensorFlow."

    # Create the argument parser.
    parser = argparse.ArgumentParser(description=desc)

    # Add arguments to the parser.
    parser.add_argument("--env", required=False, default='Breakout-v0',
                        help="name of the game-environment in OpenAI Gym")

    parser.add_argument("--training", required=False,
                        dest='training', action='store_true',
                        help="train the agent (otherwise test the agent)")

    parser.add_argument("--render", required=False,
                        dest='render', action='store_true',
                        help="render game-output to screen")

    parser.add_argument("--episodes", required=False, type=int, default=None,
                        help="number of episodes to run")

    # The default comes from the module-level checkpoint_base_dir defined
    # earlier in the file (outside this chunk).
    parser.add_argument("--dir", required=False, default=checkpoint_base_dir,
                        help="directory for the checkpoint and log-files")

    # Parse the command-line arguments.
    args = parser.parse_args()

    # Get the arguments.
    env_name = args.env
    training = args.training
    render = args.render
    num_episodes = args.episodes
    checkpoint_base_dir = args.dir

    # Update all the file-paths after the base-dir has been set.
    update_paths(env_name=env_name)

    # Create an agent for either training or testing on the game-environment.
    agent = Agent(env_name=env_name,
                  training=training,
                  render=render)

    # Run the agent
    agent.run(num_episodes=num_episodes)

    # Print statistics.
    rewards = agent.episode_rewards
    print()  # Newline.
    print("Rewards for {0} episodes:".format(len(rewards)))
    print("- Min: ", np.min(rewards))
    print("- Mean: ", np.mean(rewards))
    print("- Max: ", np.max(rewards))
    print("- Stdev: ", np.std(rewards))
########################################################################
| 41.488197 | 134 | 0.620486 |
e713f13d7973964f177328611d71096d5a70923f | 1,123 | py | Python | day_33/challenge_01/main.py | steveanlorn/python_bootcamp_100_days | e542df58406ecae54c76fc8d5cbe08b81243b92f | [
"MIT"
] | null | null | null | day_33/challenge_01/main.py | steveanlorn/python_bootcamp_100_days | e542df58406ecae54c76fc8d5cbe08b81243b92f | [
"MIT"
] | null | null | null | day_33/challenge_01/main.py | steveanlorn/python_bootcamp_100_days | e542df58406ecae54c76fc8d5cbe08b81243b92f | [
"MIT"
] | 1 | 2021-06-04T03:37:22.000Z | 2021-06-04T03:37:22.000Z | from tkinter import *
import requests
import requests.exceptions
def get_quote():
    """Fetch a random Kanye quote from the API and show it on the canvas.

    On any failure the error is displayed in the red error-text item and
    the quote is cleared; on success the reverse happens.
    """
    try:
        response = requests.get("https://api.kanye.rest")
        response.raise_for_status()
    except Exception as error:
        canvas.itemconfig(quote_text, text="")
        canvas.itemconfig(error_text, text=error)
    else:
        canvas.itemconfig(error_text, text="")
        canvas.itemconfig(quote_text, text=response.json()["quote"])
# --- GUI setup (runs at import time) ---
window = Tk()
window.title("Kanye Says...")
window.config(padx=50, pady=50)

# Canvas with a background image and two stacked text items: one for the
# quote and one for error messages (get_quote blanks whichever is unused).
canvas = Canvas(width=300, height=414)
background_img = PhotoImage(file="background.png")
canvas.create_image(150, 207, image=background_img)
quote_text = canvas.create_text(150, 207, text="Kanye Quote Goes HERE", width=250, font=("Arial", 30, "bold"), fill="white")
error_text = canvas.create_text(150, 207, width=250, font=("Arial", 16, "bold"), fill="red")
canvas.grid(row=0, column=0)

# Clicking the Kanye image fetches a new quote.
kanye_img = PhotoImage(file="kanye.png")
kanye_button = Button(image=kanye_img, highlightthickness=0, command=get_quote)
kanye_button.grid(row=1, column=0)

window.mainloop()
77e5c1a6f75dbdf10506db981ba1cf49554d3b2c | 4,402 | py | Python | fastai/core.py | jaidmin/fastai-caffe2 | 416b55c4c1ec6f672c7512a12234e33716712a60 | [
"Apache-2.0"
] | 17 | 2018-07-07T01:39:57.000Z | 2021-01-20T19:24:03.000Z | fastai/core.py | jaidmin/fastai-caffe2 | 416b55c4c1ec6f672c7512a12234e33716712a60 | [
"Apache-2.0"
] | 1 | 2018-04-10T21:26:14.000Z | 2018-04-10T21:26:14.000Z | fastai/core.py | PiotrCzapla/fastai | 848405510b9f3c36eff9af1666514babd8cf165e | [
"Apache-2.0"
] | 4 | 2018-04-18T05:14:38.000Z | 2021-06-14T17:19:43.000Z | from .imports import *
from .torch_imports import *
def sum_geom(a,r,n): return a*n if r==1 else math.ceil(a*(1-r**n)/(1-r))
def is_listy(x): return isinstance(x, (list,tuple))
def is_iter(x): return isinstance(x, collections.Iterable)
def map_over(x, f): return [f(o) for o in x] if is_listy(x) else f(x)
def map_none(x, f): return None if x is None else f(x)
conv_dict = {np.dtype('int8'): torch.LongTensor, np.dtype('int16'): torch.LongTensor,
np.dtype('int32'): torch.LongTensor, np.dtype('int64'): torch.LongTensor,
np.dtype('float32'): torch.FloatTensor, np.dtype('float64'): torch.FloatTensor}
def A(*a):
    """Convert the argument(s) to numpy arrays; one argument yields one array,
    several yield a list of arrays."""
    if len(a) == 1:
        return np.array(a[0])
    return [np.array(o) for o in a]
def T(a, half=False, cuda=True):
    """Convert a numpy array (or pass through a tensor) to a torch tensor.

    Integer dtypes become LongTensor; float dtypes become FloatTensor
    (cuda.HalfTensor when half=True). Moved to the GPU via to_gpu()
    when cuda=True.
    """
    if not torch.is_tensor(a):
        a = np.array(np.ascontiguousarray(a))
        if a.dtype in (np.int8, np.int16, np.int32, np.int64):
            a = torch.LongTensor(a.astype(np.int64))
        elif a.dtype in (np.float32, np.float64):
            a = torch.cuda.HalfTensor(a) if half else torch.FloatTensor(a)
        else: raise NotImplementedError(a.dtype)
    # `async` became a reserved keyword in Python 3.7 (SyntaxError here);
    # the torch kwarg was renamed `non_blocking` in torch 0.4.
    if cuda: a = to_gpu(a, non_blocking=True)
    return a
def create_variable(x, volatile, requires_grad=False):
    # Wrap `x` in a torch autograd Variable (no-op if it already is one).
    # torch >= 0.4 merged Variable into Tensor and dropped the `volatile`
    # flag, hence the version switch on IS_TORCH_04.
    if type (x) != Variable:
        if IS_TORCH_04: x = Variable(T(x), requires_grad=requires_grad)
        else: x = Variable(T(x), requires_grad=requires_grad, volatile=volatile)
    return x
def V_(x, requires_grad=False, volatile=False):
    """Wrap one value as an autograd Variable."""
    return create_variable(x, volatile, requires_grad)
def V(x, requires_grad=False, volatile=False):
    """Wrap a value, or each element of a list/tuple, as a Variable."""
    return map_over(x, lambda o: V_(o, requires_grad, volatile))
def VV_(x):
    """Wrap one value as a volatile (inference-only) Variable."""
    return create_variable(x, True)
def VV(x):
    """Wrap a value, or each element of a list/tuple, as a volatile Variable."""
    return map_over(x, VV_)
def to_np(v):
    """Recursively convert torch tensors / Variables to numpy arrays.

    numpy inputs are returned unchanged; lists and tuples are converted
    element-wise.
    """
    if isinstance(v, (np.ndarray, np.generic)):
        return v
    if isinstance(v, (list, tuple)):
        return [to_np(o) for o in v]
    if isinstance(v, Variable):
        v = v.data
    if isinstance(v, torch.cuda.HalfTensor):
        # numpy cannot hold GPU half-precision data; widen to float32 first.
        v = v.float()
    return v.cpu().numpy()
# True when the installed torch is >= 0.4 (the Variable/Tensor merge).
IS_TORCH_04 = LooseVersion(torch.__version__) >= LooseVersion('0.4')
# Whether a CUDA-capable GPU is available; gates every .cuda() call below.
USE_GPU = torch.cuda.is_available()
def to_gpu(x, *args, **kwargs):
    """Move x to the GPU when one is available, else return x unchanged."""
    if not USE_GPU:
        return x
    return x.cuda(*args, **kwargs)
def noop(*args, **kwargs):
    """Do nothing; accepts and ignores any arguments."""
    return
def split_by_idxs(seq, idxs):
    """Yield consecutive slices of `seq`, cut at each index in `idxs`."""
    prev = 0
    for i in idxs:
        yield seq[prev:i]
        prev = i
    # The remainder after the final cut-point (possibly the whole seq).
    yield seq[prev:]
def trainable_params_(m):
    """Return the parameters of module `m` that require gradients."""
    return [p for p in m.parameters() if p.requires_grad]
def chain_params(p):
    """Trainable parameters of a module, or of a list/tuple of modules,
    flattened into a single list."""
    if isinstance(p, (list, tuple)):
        return list(chain(*[trainable_params_(o) for o in p]))
    return trainable_params_(p)
def set_trainable_attr(m, b):
    """Mark module m as (non-)trainable and toggle grads on all its params."""
    m.trainable = b
    for param in m.parameters():
        param.requires_grad = b
def apply_leaf(m, f):
    """Apply f to module m and, recursively, to every child module."""
    child_modules = children(m)
    if isinstance(m, nn.Module):
        f(m)
    for child in child_modules:
        apply_leaf(child, f)
def set_trainable(l, b):
apply_leaf(l, lambda m: set_trainable_attr(m,b))
def SGD_Momentum(momentum):
    """Return an SGD-optimizer factory with *momentum* baked in."""
    def make_optimizer(*args, **kwargs):
        return optim.SGD(*args, momentum=momentum, **kwargs)
    return make_optimizer
def one_hot(a, c):
    """One-hot encode index (or array of indices) *a* over *c* classes."""
    return np.identity(c)[a]
def partition(a, sz):
    """Split *a* into consecutive chunks of size *sz* (last may be shorter)."""
    chunks = []
    start = 0
    while start < len(a):
        chunks.append(a[start:start + sz])
        start += sz
    return chunks
def partition_by_cores(a):
    """Split *a* into one chunk per CPU core (chunk size rounded up to cover all items)."""
    return partition(a, len(a)//num_cpus() + 1)
def num_cpus():
    """Number of CPUs usable by this process (affinity-aware where supported)."""
    affinity = getattr(os, 'sched_getaffinity', None)
    if affinity is None:
        # Platforms without sched_getaffinity (e.g. Windows, macOS).
        return os.cpu_count()
    return len(affinity(0))
class BasicModel():
    """Minimal wrapper pairing a model with a display name."""
    def __init__(self, model, name='unnamed'):
        self.model = model
        self.name = name
    def get_layer_groups(self, do_fc=False):
        # Layer groups default to the model's immediate children.
        return children(self.model)
class SingleModel(BasicModel):
    """A BasicModel whose whole network forms a single layer group."""
    # NOTE(review): this override drops the base method's do_fc parameter;
    # confirm no caller passes it.
    def get_layer_groups(self):
        return [self.model]
class SimpleNet(nn.Module):
    """Fully-connected network: ReLU between layers, log-softmax on the last
    layer's pre-activation output.

    *layers* lists the width of every layer, input first.
    """
    def __init__(self, layers):
        super().__init__()
        size_pairs = zip(layers[:-1], layers[1:])
        self.layers = nn.ModuleList(
            [nn.Linear(n_in, n_out) for n_in, n_out in size_pairs])
    def forward(self, x):
        # Flatten everything but the batch dimension.
        x = x.view(x.size(0), -1)
        for layer in self.layers:
            logits = layer(x)
            x = F.relu(logits)
        # Log-softmax is taken on the final pre-activation, not its ReLU.
        return F.log_softmax(logits, dim=-1)
def save(fn, a):
    """Pickle object *a* to file *fn*.

    Fix: the original left the file handle open (resource leak, and writes
    might not be flushed promptly); use a context manager.
    """
    with open(fn, 'wb') as f:
        pickle.dump(a, f)
def load(fn):
    """Unpickle and return the object stored in file *fn*."""
    with open(fn, 'rb') as f:
        return pickle.load(f)
def load2(fn):
    """Unpickle *fn* with iso-8859-1 decoding (for Python-2-era pickles)."""
    with open(fn, 'rb') as f:
        return pickle.load(f, encoding='iso-8859-1')
def load_array(fname):
    """Open a bcolz carray stored at *fname* and materialise it fully in memory."""
    return bcolz.open(fname)[:]
def chunk_iter(iterable, chunk_size):
    """Yield successive lists of up to *chunk_size* items from iterator *iterable*.

    The final chunk may be shorter; nothing is yielded for an exhausted iterator.
    """
    while True:
        chunk = []
        try:
            while len(chunk) < chunk_size:
                chunk.append(next(iterable))
        except StopIteration:
            # Iterator ran dry mid-chunk: emit the partial chunk, if any.
            if chunk:
                yield chunk
            return
        yield chunk
| 32.367647 | 108 | 0.650841 |
b5536c0c2b5ca753532bdbff6b1f4e59224e2fbb | 14,306 | py | Python | jyotisha/panchaanga/temporal/zodiac/__init__.py | Prabhakaran-cbe/jyotisha | 689327c5944c6cc84b7e58af4deae2a4ebe94d7b | [
"MIT"
] | 40 | 2017-10-01T04:22:35.000Z | 2020-11-30T03:47:57.000Z | jyotisha/panchaanga/temporal/zodiac/__init__.py | Prabhakaran-cbe/jyotisha | 689327c5944c6cc84b7e58af4deae2a4ebe94d7b | [
"MIT"
] | 71 | 2017-08-27T13:54:06.000Z | 2020-12-11T01:16:47.000Z | jyotisha/panchaanga/temporal/zodiac/__init__.py | Prabhakaran-cbe/jyotisha | 689327c5944c6cc84b7e58af4deae2a4ebe94d7b | [
"MIT"
] | 23 | 2017-08-27T11:54:41.000Z | 2020-11-14T19:41:58.000Z | import logging
import sys
from math import floor
from numbers import Number
from typing import Optional
import methodtools
import numpy
import swisseph as swe
from jyotisha.panchaanga.temporal.body import Graha
from jyotisha.panchaanga.temporal.interval import Interval, AngaSpan
from jyotisha.panchaanga.temporal.zodiac.angas import AngaType, Anga
from jyotisha.util import default_if_none
from sanskrit_data.schema import common
from sanskrit_data.schema.common import JsonObject
from scipy.optimize import brentq
from timebudget import timebudget
# noinspection SpellCheckingInspection
logging.basicConfig(
level=logging.DEBUG,
format="%(levelname)s: %(asctime)s {%(filename)s:%(lineno)d}: %(message)s "
)
class Ayanamsha(common.JsonObject):
  """
  rAShTriya panchAnga nakshatra ayanAmsha vs chitra at 180 :
  - Shaves off 3 seconds from typical panchaanga computation compared to precise chitrA tracking.
  - rAShTriya panchAnga nakshatra ayanAmsha tracks chitra fairly well. Still, it results in ~5 minutes differences in nakshatra spans.
  - chitrA does not move a lot in typical year, and it is mostly wasteful to compute its position fresh for every instant.
  """
  # Supported ayanaamsha identifiers (zero points for sidereal longitudes).
  VERNAL_EQUINOX_AT_0 = "VERNAL_EQUINOX_AT_0"
  CHITRA_AT_180 = "CHITRA_AT_180"
  ASHVINI_STARTING_0 = "ASHVINI_STARTING_0"
  RASHTRIYA_PANCHANGA_NAKSHATRA_TRACKING = "RASHTRIYA_PANCHANGA_NAKSHATRA_TRACKING"

  @methodtools.lru_cache(maxsize=None)
  @classmethod
  def singleton(cls, ayanaamsha_id):
    # One cached instance per ayanaamsha id.
    return cls(ayanaamsha_id=ayanaamsha_id)

  def __init__(self, ayanaamsha_id):
    super().__init__()
    self.ayanaamsha_id = ayanaamsha_id

  def get_offset(self, jd):
    """Return the ayanaamsha offset in degrees at Julian day *jd*.

    Raises a generic Exception for unknown ayanaamsha ids.
    """
    if self.ayanaamsha_id == Ayanamsha.VERNAL_EQUINOX_AT_0:
      return 0
    elif self.ayanaamsha_id == Ayanamsha.CHITRA_AT_180:
      # TODO: The below fails due to https://github.com/astrorigin/pyswisseph/issues/35
      from jyotisha.panchaanga.temporal import body
      return body.get_star_longitude(star="Spica", jd=jd) - 180
    elif self.ayanaamsha_id == Ayanamsha.ASHVINI_STARTING_0:
      return 0
    elif self.ayanaamsha_id == Ayanamsha.RASHTRIYA_PANCHANGA_NAKSHATRA_TRACKING:
      swe.set_sid_mode(swe.SIDM_LAHIRI)
      return swe.get_ayanamsa_ut(jd)
    raise Exception("Bad ayanamsha_id")
class NakshatraDivision(common.JsonObject):
  """Nakshatra division at a certain time, according to a certain ayanaamsha."""

  def __init__(self, jd, ayanaamsha_id):
    super().__init__()
    self.ayanaamsha_id = ayanaamsha_id
    self.jd = jd

  def get_fractional_division_for_body(self, body: Graha, anga_type: AngaType) -> float:
    """
    :param body: graha ID.
    :return: 0.x for AshvinI and so on.
    """
    longitude = body.get_longitude(self.jd, ayanaamsha_id=self.ayanaamsha_id)
    return self.longitude_to_fractional_division(longitude=longitude, anga_type=anga_type)

  def get_equatorial_boundary_coordinates(self):
    """Get equatorial coordinates for the points where the ecliptic nakShatra boundary longitude intersects the ecliptic."""
    # 27 equal sectors of 360/27 degrees each, shifted by the ayanaamsha offset.
    nakShatra_ends = ((numpy.arange(27) + 1) * (360.0 / 27.0) + Ayanamsha.singleton(
      self.ayanaamsha_id).get_offset(
      self.jd)) % 360
    equatorial_boundary_coordinates = [ecliptic_to_equatorial(longitude=longitude, latitude=0) for longitude in nakShatra_ends]
    return equatorial_boundary_coordinates

  def get_stellarium_nakshatra_boundaries(self):
    """Print nakshatra boundary great-circle segments in Stellarium's text format."""
    equatorial_boundary_coordinates_with_ra = self.get_equatorial_boundary_coordinates()
    ecliptic_north_pole_with_ra = ecliptic_to_equatorial(longitude=20, latitude=90)
    # logging.debug(ecliptic_north_pole_with_ra)
    ecliptic_south_pole_with_ra = ecliptic_to_equatorial(longitude=20, latitude=-90)
    # logging.debug(ecliptic_south_pole_with_ra)
    for index, (boundary_ra, boundary_declination) in enumerate(equatorial_boundary_coordinates_with_ra):
      print(
        '3 %(north_pole_ra)f %(north_pole_dec)f %(boundary_ra)f %(boundary_declination)f %(south_pole_ra)f %(south_pole_dec)f 2 N%(sector_id_1)02d N%(sector_id_2)02d' % dict(
          north_pole_ra=ecliptic_north_pole_with_ra[0],
          north_pole_dec=ecliptic_north_pole_with_ra[1],
          boundary_ra=boundary_ra,
          boundary_declination=boundary_declination,
          south_pole_ra=ecliptic_south_pole_with_ra[0],
          south_pole_dec=ecliptic_south_pole_with_ra[1],
          sector_id_1=(index % 27 + 1),
          sector_id_2=((index + 1) % 27 + 1)
        ))

  def longitude_to_fractional_division(self, longitude, anga_type):
    """Map a longitude (degrees) to a fractional anga index, e.g. 0.5 = halfway through the first anga."""
    return (longitude % 360) / anga_type.arc_length

  def get_anga_float(self, anga_type):
    """Returns the anga/ temporal property. Computed based on lunar and solar longitudes, division of a circle into a certain number of degrees (arc_len).

    Args:
      :param anga_type: One of the pre-defined tuple-valued constants in the panchaanga
      class, such as TITHI, nakshatra, YOGA, KARANA or SIDEREAL_MONTH

    Returns:
      float anga
    """
    if anga_type == AngaType.TITHI:
      # For efficiency - avoid lookups. (Presumably tithi depends only on
      # the Moon-Sun difference, where the ayanaamsha offset cancels out
      # — confirm anga_type weights.)
      ayanaamsha_id = Ayanamsha.VERNAL_EQUINOX_AT_0
    else:
      ayanaamsha_id = self.ayanaamsha_id
    w_moon = anga_type.weight_moon
    w_sun = anga_type.weight_sun
    lcalc = 0 # computing offset longitudes
    # Get the lunar longitude, starting at the ayanaamsha point in the ecliptic.
    if w_moon != 0:
      lmoon = Graha.singleton(Graha.MOON).get_longitude(self.jd, ayanaamsha_id=ayanaamsha_id)
      lcalc += w_moon * lmoon
    # Get the solar longitude, starting at the ayanaamsha point in the ecliptic.
    if w_sun != 0:
      lsun = Graha.singleton(Graha.SUN).get_longitude(self.jd, ayanaamsha_id=ayanaamsha_id)
      lcalc += w_sun * lsun
    return self.longitude_to_fractional_division(longitude=lcalc, anga_type=anga_type)

  def get_anga(self, anga_type):
    """Returns the anga prevailing at a particular time. Computed based on lunar and solar longitudes, division of a circle into a certain number of degrees (arc_len).

    Args:
      float arc_len: The arc_len for the corresponding anga

    Returns:
      int anga
    """
    # Angas are 1-based: floor the fractional index and add 1.
    return Anga.get_cached(index=int(1 + floor(self.get_anga_float(anga_type))), anga_type_id=anga_type.name)

  def get_all_angas(self):
    """Compute various properties of the time based on lunar and solar longitudes, division of a circle into a certain number of degrees (arc_len).
    """
    anga_objects = [AngaType.TITHI, AngaType.TITHI_PADA, AngaType.NAKSHATRA, AngaType.NAKSHATRA_PADA, AngaType.RASHI,
                    AngaType.SIDEREAL_MONTH, AngaType.SOLAR_NAKSH, AngaType.YOGA, AngaType.KARANA]
    angas = list(map(lambda anga_object: self.get_anga(anga_type=anga_object), anga_objects))
    anga_ids = list(map(lambda anga_obj: anga_obj.index, anga_objects))
    return dict(list(zip(anga_ids, angas)))

  def get_nakshatra(self):
    """Returns the nakshatra prevailing at a given moment

    Nakshatra is computed based on the longitude of the Moon; in
    addition, to obtain the absolute value of the longitude, the
    ayanamsa is required to be subtracted.

    Returns:
      int nakShatram, where 1 stands for Ashwini, ..., 14 stands
      for Chitra, ..., 27 stands for Revati
    """
    return self.get_anga(AngaType.NAKSHATRA)

  def get_yoga(self):
    """Returns the yoha prevailing at a given moment

    Yoga is computed based on the longitude of the Moon and longitude of
    the Sun; in addition, to obtain the absolute value of the longitude, the
    ayanamsa is required to be subtracted (for each).

    Returns:
      int yoga, where 1 stands for Vishkambha and 27 stands for Vaidhrti
    """
    return self.get_anga(AngaType.YOGA)

  def get_solar_raashi(self):
    """Returns the solar rashi prevailing at a given moment

    Solar month is computed based on the longitude of the sun; in
    addition, to obtain the absolute value of the longitude, the
    ayanamsa is required to be subtracted.

    Returns:
      int rashi, where 1 stands for mESa, ..., 12 stands for mIna
    """
    return self.get_anga(AngaType.SIDEREAL_MONTH)
def longitude_to_right_ascension(longitude):
    """Convert an ecliptic longitude in degrees to right ascension in hours.

    RA here runs opposite to the longitude convention (hence 360 - longitude),
    scaled from a 360-degree circle to 24 hours.
    """
    fraction_of_circle = (360 - longitude) / 360
    return fraction_of_circle * 24
def ecliptic_to_equatorial(longitude, latitude):
  """Convert ecliptic (longitude, latitude) degrees to (RA in hours, declination in degrees).

  NOTE(review): the obliquity is a fixed 23.437404 degrees here; confirm a
  jd-dependent obliquity is not needed for the intended precision.
  """
  coordinates = swe.cotrans(lon=longitude, lat=latitude, dist=9999999, obliquity=23.437404)
  # swe.cotrans returns the right ascension longitude in degrees, rather than hours.
  return (
    longitude_to_right_ascension(coordinates[0]), coordinates[1])
class AngaSpanFinder(JsonObject):
  """Finds the time spans (jd intervals) during which a given anga prevails,
  for one anga type under one ayanaamsha."""

  def __init__(self, ayanaamsha_id, anga_type):
    super(AngaSpanFinder, self).__init__()
    self.ayanaamsha_id = ayanaamsha_id
    self.anga_type = anga_type

  @methodtools.lru_cache(maxsize=None)
  @classmethod
  def get_cached(cls, ayanaamsha_id, anga_type):
    # One cached finder per (ayanaamsha, anga_type) pair.
    return AngaSpanFinder(ayanaamsha_id=ayanaamsha_id, anga_type=anga_type)

  def _get_anga(self, jd):
    """The anga of this finder's type prevailing at *jd*."""
    return NakshatraDivision(jd, ayanaamsha_id=self.ayanaamsha_id).get_anga( anga_type=self.anga_type)

  def _get_anga_float_offset(self, jd, target_anga):
    """Continuous offset of the anga at *jd* relative to the start of
    *target_anga* — negative before it starts, so a sign change brackets
    the start (used as the root function for brentq)."""
    anga_float = NakshatraDivision(jd, ayanaamsha_id=self.ayanaamsha_id).get_anga_float(anga_type=self.anga_type)
    num_angas = self.anga_type.num_angas
    if anga_float > target_anga.index:
      return anga_float - num_angas # A negative number
    else:
      return anga_float - (target_anga.index - 1)

  def _interpolate_for_start(self, jd1, jd2, target_anga):
    """Root-find the exact start jd of *target_anga* within [jd1, jd2];
    None if the bracket does not contain a sign change."""
    try:
      # noinspection PyTypeChecker
      return brentq(lambda x: self._get_anga_float_offset(jd=x, target_anga=target_anga), jd1, jd2)
    except ValueError:
      return None

  def find_anga_start_between(self, jd1, jd2, target_anga):
    """Coarse-scan [jd1, jd2] in half-anga steps, then interpolate the
    precise start of *target_anga*; None if it does not start in range."""
    jd_start = None
    num_angas = self.anga_type.num_angas
    min_step = 0.5 * self.anga_type.mean_period_days/num_angas # Min Step for moving - half an anga span.
    jd_bracket_L = jd1
    jd_now = jd1
    while jd_now <= jd2 and jd_start is None:
      anga_now = self._get_anga(jd=jd_now)
      if anga_now < target_anga:
        # So, jd_now will be lower than jd_start
        jd_bracket_L = jd_now
      if anga_now == target_anga:
        # In this branch, anga_now will have overshot the jd_start of the required interval.
        jd_start = self._interpolate_for_start(jd1=jd_bracket_L, jd2=jd_now, target_anga=target_anga)
      if jd_now == jd2:
        # Prevent infinite loop
        break
      jd_now = min(jd_now + min_step, jd2)
    return jd_start

  @timebudget
  def find(self, jd1: float, jd2: float, target_anga_id: int) -> Optional[AngaSpan]:
    """Computes anga spans for sunrise_day_angas such as tithi, nakshatra, yoga
    and karana.

    Args:
      :param jd1: return the first span that starts after this date
      :param jd2: return the first span that ends before this date

    Returns:
      None if target_anga_id was not found
      Interval, with boundary jds None if they don't occur within [jd1, jd2]
    """
    if isinstance(target_anga_id, Number):
      # TODO: Remove this backward compatibility fix
      target_anga = Anga.get_cached(index=target_anga_id, anga_type_id=self.anga_type.name)
    else:
      target_anga = target_anga_id
    anga_interval = AngaSpan(jd_start=None, jd_end=None, anga=target_anga)
    anga_interval.jd_start = self.find_anga_start_between(jd1=jd1, jd2=jd2, target_anga=target_anga)
    # The span ends where the next anga begins.
    next_anga = target_anga + 1
    anga_interval.jd_end = self.find_anga_start_between(jd1=default_if_none(anga_interval.jd_start, jd1), jd2=jd2, target_anga=next_anga)
    if anga_interval.jd_start is None and anga_interval.jd_end is None:
      # Neither boundary in range: valid only if the anga spans the whole window.
      if self._get_anga(jd=jd1) != target_anga:
        return None
    return anga_interval

  @timebudget
  def get_spans_in_period(self, jd_start, jd_end, target_anga_id):
    """Collect every span of *target_anga_id* within [jd_start, jd_end]."""
    if jd_start > jd_end:
      raise ValueError((jd_start, jd_end))
    jd_bracket_L = jd_start
    spans = []
    while jd_bracket_L <= jd_end:
      # A whole period plus 4 angas beyond jd_bracket_L, which might be 2 angas behind the target anga.
      jd_bracket_R = min(jd_bracket_L + (1 + 4.0/self.anga_type.num_angas) * self.anga_type.mean_period_days, jd_end)
      span = self.find(
        jd1=jd_bracket_L, jd2=jd_bracket_R,
        target_anga_id=target_anga_id)
      if span is None:
        break
      else:
        spans.append(span)
      # A whole period minus 2 angas as the next seek boundary
      jd_bracket_L = default_if_none(span.jd_start, jd_bracket_L) + self.anga_type.mean_period_days * (1 - 2.0 / self.anga_type.num_angas)
    return spans

  @timebudget
  def get_all_angas_in_period(self, jd1, jd2):
    """Walk through [jd1, jd2] and return consecutive spans of every anga,
    with None boundaries where a span starts before jd1 or ends after jd2."""
    spans = []
    jd_start = None
    anga_now = self._get_anga(jd=jd1)
    while default_if_none(jd_start, jd1) <= jd2:
      next_anga = anga_now + 1
      jd_end = self.find_anga_start_between(target_anga=next_anga, jd1=default_if_none(jd_start, jd1), jd2=jd2)
      spans.append(AngaSpan(jd_start=jd_start, jd_end=jd_end, anga=anga_now))
      if jd_end is None:
        break
      else:
        anga_now = next_anga
        jd_start = jd_end
    return spans
# Essential for depickling to work: registers this module's JsonObject
# subclasses in the shared class index.
common.update_json_class_index(sys.modules[__name__])
def get_tropical_month(jd):
  """Return the tropical-month Anga prevailing at Julian day *jd*
  (measured from the 0-degree Ashvini starting point, i.e. no ayanaamsha offset)."""
  nd = NakshatraDivision(jd=jd, ayanaamsha_id=Ayanamsha.ASHVINI_STARTING_0)
  return nd.get_anga(anga_type=AngaType.TROPICAL_MONTH)
def get_previous_solstice_month_span(jd):
  """Get the previous solstice (especially the tropical month id and the jd.)

  Returns an AngaSpan object.
  """
  tropical_month = get_tropical_month(jd=jd)
  # Months 4..9 follow the month-4 solstice; otherwise the month-10 one.
  if tropical_month.index >= 4 and tropical_month.index < 10:
    target_month_id = 4
  else:
    target_month_id = 10
  months_past_solstice = (tropical_month - target_month_id) % 12
  # Estimate the solstice at ~31 days per elapsed month, then search a
  # window of +/- 35 days around that estimate (jd1 subtracts 35, jd2 adds 35).
  jd1 = jd - (months_past_solstice * 30 + months_past_solstice + 35)
  jd2 = jd - (months_past_solstice * 30 + months_past_solstice) + 35
  anga_span_finder = AngaSpanFinder.get_cached(ayanaamsha_id=Ayanamsha.ASHVINI_STARTING_0, anga_type=AngaType.TROPICAL_MONTH)
  anga_span = anga_span_finder.find(jd1=jd1, jd2=jd2, target_anga_id=target_month_id)
  return anga_span
if __name__ == '__main__':
  # No standalone behavior; kept as a scratch entry point.
  # lahiri_nakshatra_division = NakshatraDivision(jd=temporal.utc_to_jd(year=2017, month=8, day=19, hour=11, minutes=10, seconds=0, flag=1)[0])
  pass
| 39.194521 | 174 | 0.733678 |
733eb8ffdbffb48422f409f7417df400aad983d0 | 24,435 | py | Python | X3_Customizer/Make_Documentation.py | bvbohnen/X3_Customizer | 7e8808ee33c463348a3d12959b9311fff17e224d | [
"MIT"
] | 4 | 2018-01-17T16:49:05.000Z | 2018-05-16T20:41:02.000Z | X3_Customizer/Make_Documentation.py | bvbohnen/X3_Customizer | 7e8808ee33c463348a3d12959b9311fff17e224d | [
"MIT"
] | 1 | 2021-07-23T00:50:08.000Z | 2021-07-23T00:50:08.000Z | X3_Customizer/Make_Documentation.py | bvbohnen/X3_Customizer | 7e8808ee33c463348a3d12959b9311fff17e224d | [
"MIT"
] | 1 | 2018-10-13T18:19:02.000Z | 2018-10-13T18:19:02.000Z | '''
Generate documentation for the customizer.
This will parse the docstrings at the top of X3_Customizer and
for each transform, and write them to a plain text file.
This can generally be called directly as an entry function.
Quick markdown notes, for the readme which will be displayed on
github, and the main documentation just because:
-Newlines don't particularly matter. Can break up lines for a text
file, and they will get joined back together.
-This is bad for a list of transforms, since they get lumped together,
so aim to put markdown characters on them to listify them.
-Adding asterisks at the start of lines will turn them into a list,
as long as there is a space between the asterisk and the text.
-Indentation by 4 spaces or 1 tab creates code blocks; try to avoid
this level of indent unless code blocking is intentional.
-Indentation is built into some docstrings, though the only one that
should matter for the markdown version is x3_customizer. That one needs
to be carefully adjusted to avoid 4-space chains, including across
newlines.
-Triple -,*,_ will place a horizontal thick line. Can be used between
sections. Avoid dashes, though, since they make whatever is above them
into a header, unless that is desired.
'''
import os
import sys
from pathlib import Path
from collections import OrderedDict, defaultdict
# To support packages cross-referencing each other, set up this
# top level as a package, findable on the sys path.
parent_dir = Path(__file__).resolve().parent.parent
if str(parent_dir) not in sys.path:
sys.path.append(str(parent_dir))
import X3_Customizer
# TODO: swap to Path.
this_dir = os.path.normpath(os.path.dirname(__file__))
def Make(*args):
# TODO:
# Make a variation on the simple doc which has some formatting for
# the egosoft forum, including code blocks and removing newlines
# when the next line starts with a lowercase character.
# Make a list of lines or text blocks to print out.
doc_lines = []
# Also include a simple version, which will truncate
# the transform descriptions, aimed at providing a summary which
# is suitable for posting.
# Git appears to expect a README.md file; this can be used to
# generate that, although one directory up.
doc_short_lines = []
# Set the indent type. A single spaces for now.
# Avoid indenting by 4 unless wanting a code block, for the simple
# file that gets markdowned.
indent = ' '
def Make_Horizontal_Line(include_in_simple = True):
'Adds a horizontal line, with extra newline before and after.'
# TODO: maybe swap to a bunch of dashes, for a better look
# in the raw text format. This requires a newline before
# the dashes to avoid upscaling prior text.
this_line = '\n***\n'
doc_lines.append(this_line)
if include_in_simple:
doc_short_lines.append(this_line)
def Add_Line(line, indent_level = 0, include_in_simple = True):
'Add a single line to the files, including any newlines.'
# Prefix with a number of indents.
this_line = indent * indent_level + line
doc_lines.append(this_line)
if include_in_simple:
doc_short_lines.append(this_line)
def Add_Lines(text_block, indent_level = 0,
include_in_simple = True,
only_header_in_simple = False,
merge_lines = False):
'''
Record a set of lines from text_block, with indent, splitting on
newlines. May not include all starting or ending newlines, depending
on behavior of splitlines(). Starting newlines are explicitly
ignored.
If include_in_simple == True, the simple file will not have any
lines added.
If only_header_in_simple == True, the simple file will not have
any lines added past the first empty line following a text line,
eg. only the first group of text lines are included.
If merge_lines == True, this will attempt to merge lines together
that appear to be part of the same paragraph.
'''
# Merge before further processing.
if merge_lines:
text_block = Merge_Lines(text_block)
# Flag for when an empty line is found.
# This will not count any pruned starting empty lines, eg. when
# triple quotes are used they tend to put an initial empty line.
empty_line_found = False
non_empty_line_found = False
# Loop over the lines.
for line in text_block.splitlines():
# Note if this line has contents.
if not non_empty_line_found and line.strip():
non_empty_line_found = True
# Skip until a non empty line found.
if not non_empty_line_found:
continue
# Note if this is an empty line.
if not empty_line_found and not line.strip():
empty_line_found = True
# Prefix with a number of indents.
this_line = indent * indent_level + line
# Add to the main document.
doc_lines.append(this_line)
if include_in_simple:
# Add to the short document only if including everything or
# an empty line not hit yet.
if not (only_header_in_simple and empty_line_found):
doc_short_lines.append(this_line)
def Record_Func(function,
indent_level = 0,
end_with_empty_line = True,
include_in_simple = False):
'''
Adds lines for a function name with docstring and requirements.
If include_in_simple == True, the simple file is skipped entirely.
Otherwise, the simple file will get a truncated name with the initial
part of the docstring, and no requirement list.
'''
# Get the name as-is.
# Put an asterix in front for markdown.
name_line = '* ' + function.__name__
# Can add any incompatabilities as well, and stick these
# in the simple version.
if hasattr(function, '_compatabilities'):
# To avoid fluffing up the simple readme too much, these will
# be placed on the same line as the name, following it in
# parenthesis.
# Eg. 'Restore_Vanilla_Tuning_Pricing (incompatible with: Vanilla, LU)'
# TODO: consider if it would be better to give just compatibility
# flags always (removes the 'incompatible with' phrase, but more
# fluff for transforms that are always compatible).
if any(x == False for x in function._compatabilities.values()):
name_line += ' (incompatible with: {})'.format(
# Include the name tags for any False flags.
# Original dict should hopefully have been in a nice
# order.
', '.join([x
for x,y in function._compatabilities.items()
if y == False])
)
Add_Line(name_line, indent_level,
include_in_simple = include_in_simple)
# If there are required files, print them.
if hasattr(function, '_file_names'):
# For markdown, don't want this attached to the file name,
# but also don't want it causing an extra list indent on
# the docstring. An extra newline and a healthy indent
# seems to work.
Add_Line('', include_in_simple = False)
Add_Line('{}Requires: {}'.format(
indent * (indent_level + 1),
# Join the required file names with commas if
# there are any, else print None.
', '.join(function._file_names)
if function._file_names else 'None'),
indent_level +1,
include_in_simple = False
)
# Stick another newline, then the function docstring, maybe
# truncated for the simple file.
Add_Line('', include_in_simple = include_in_simple)
Add_Lines(function.__doc__, indent_level +1,
include_in_simple = include_in_simple,
only_header_in_simple = True,
# Get rid of excess newlines.
merge_lines = True
)
if end_with_empty_line:
Add_Line('')
# Grab the main docstring.
# Add in the version number.
main_doc = X3_Customizer.__doc__.replace(
'X3 Customizer',
'X3 Customizer {}'.format(X3_Customizer.Change_Log.Get_Version()),
# Only change the first spot, the title line.
1)
# TODO: figure out how to split off the example tree.
Add_Lines(main_doc, merge_lines = True)
# Add a note for the simple documentation to point to the full one.
doc_short_lines.append('\nFull documentation found in Documentation.md.')
# Print out the example module early, to be less scary.
# The example will accompany the simple version, since it is a good way
# to express what the customizer is doing.
Make_Horizontal_Line()
Add_Line('Example input file:')
# Need a newline before the code, otherwise the code block
# isn't made right away (the file header gets lumped with the above).
Add_Line('')
with open(os.path.join(this_dir,'..','input_scripts',
'Example_Transforms.py'), 'r') as file:
# Put in 4 indents to make a code block.
Add_Lines(file.read(), indent_level = 4)
# Grab any setup methods.
# Skip this for the simple summary.
Make_Horizontal_Line(include_in_simple = False)
Add_Line('Setup methods:', include_in_simple = False)
Add_Line('', include_in_simple = False)
# For now, just the Set_Path method.
Record_Func(X3_Customizer.Set_Path, indent_level = 2,
include_in_simple = False)
# TODO: full settings.
# Grab the various transform functions.
# This can grab every item in Transforms that has been decorated with
# Transform_Wrapper.
category_transforms_dict = defaultdict(list)
for item_name in dir(X3_Customizer.Transforms):
item = getattr(X3_Customizer.Transforms, item_name)
# Skip non-transforms.
# Can check for the _category attribute, attached by the decorator.
if not hasattr(item, '_category'):
continue
# Skip if the file name starts with an underscore, indicating
# an experimental transform.
if item.__name__[0] == '_':
continue
# Record this transform for the category.
category_transforms_dict[item._category].append(item)
# Can now print out by category.
for category, transform_list in sorted(category_transforms_dict.items()):
# Put a header for the category transform list.
Make_Horizontal_Line()
Add_Line('{} Transforms:'.format(category))
Add_Line('')
# Loop over the transforms in the category, sorted
# by their name.
for transform in sorted(transform_list, key = lambda k: k.__name__):
# Add the text.
Record_Func(transform, indent_level = 1, include_in_simple = True)
# Print out the change log.
Make_Horizontal_Line()
Add_Lines(X3_Customizer.Change_Log.__doc__, merge_lines = True)
# Print out the license.
# The simple version will skip this.
# This probably isn't needed if there is a license file floating around
# in the repository; remove for now.
# Make_Horizontal_Line(include_in_simple = False)
# with open(os.path.join('..','License.txt'), 'r') as file:
# Add_Lines(file.read(), include_in_simple = False)
# Get a set of lines suitable for the egosoft forum thread,
# using BB code.
doc_bb_lines = Get_BB_Text(doc_short_lines)
# Prefix with some extra lines for the forum.
doc_bb_lines = [
'Download source from github:',
'[url]https://github.com/bvbohnen/X3_Customizer[/url]',
'Compiled release (64-bit Windows):',
'[url]https://github.com/bvbohnen/X3_Customizer/releases[/url]',
'Full documentation:',
'[url]https://github.com/bvbohnen/X3_Customizer/blob/master/Documentation.md[/url]',
'',
'',
] + doc_bb_lines
# Write out the full doc.
# Put these 1 directory up to separate from the code.
with open(os.path.join(this_dir,'..','Documentation.md'), 'w') as file:
file.write('\n'.join(doc_lines))
# Write out the simpler readme.
with open(os.path.join(this_dir,'..','README.md'), 'w') as file:
file.write('\n'.join(doc_short_lines))
# Write out the BB version, suitable for copy/paste.
with open(os.path.join(this_dir,'..','for_egosoft_forum.txt'), 'w') as file:
file.write('\n'.join(doc_bb_lines))
return
def Merge_Lines(text_block):
    '''
    Join wrapped docstring lines back into full paragraphs, so that the
    generated markdown gets proper word wrap.

    Two adjacent lines are joined (with a single space, outer whitespace
    trimmed at the seam) when both carry normal text, the second does not
    start a list item ('-' or '*'), and neither sits inside a code block.

    Code blocks are delimited by literal <code>/</code> tag lines; the tag
    lines themselves are removed (replaced by empty lines) and everything
    between them is left untouched.

    This should not be called on code blocks.
    '''
    merged = []
    in_code_block = False
    prev_raw_had_text = False
    for position, raw_line in enumerate(text_block.splitlines()):
        content = raw_line.strip()
        joinable = True
        # Tag lines toggle code mode and are blanked out of the output.
        if content == '<code>':
            in_code_block = True
            joinable = False
            raw_line = ''
            content = ''
        elif content == '</code>':
            in_code_block = False
            joinable = False
            raw_line = ''
            content = ''
        # Inside a code block, keep lines verbatim.
        elif in_code_block:
            joinable = False
        # The first line has nothing before it to join with.
        elif position == 0:
            joinable = False
        # Blank lines stay blank.
        elif not content:
            joinable = False
        # List items start fresh lines.
        elif content[0] in ('*', '-'):
            joinable = False
        # Only join onto a line that itself carried text.
        elif not prev_raw_had_text:
            joinable = False
        if joinable:
            # Fold this line into the previous output line.
            merged[-1] = merged[-1].rstrip() + ' ' + raw_line.lstrip()
        else:
            merged.append(raw_line)
        # Track text presence of the ORIGINAL line (tags count as blank).
        prev_raw_had_text = bool(content)
    return '\n'.join(merged)
def Get_BB_Text(line_list):
'''
Converts a list of markdown suitable lines to forum BB code
suitable lines.
'''
# Version of short for forum BB code.
# To reduce complexity explosion for all these docs, this one modify
# existing text rather than being generated on the first pass.
bb_lines = []
# Tag for if a list is in use.
list_active = False
# Indent of the list.
list_indent = None
# Tag for if a code section is in use.
code_active = False
# Tag for if the change log section is active.
changelog_active = False
# Tag for if a transform section is active.
transform_active = False
# The running line index, used to join list/code tags with
# the next line if blank.
index = 0
def Add_Tag(tag):
'''
Pushes a tag, and advances past the next input line if it is blank,
or tries to replace the last line if blank.
'''
nonlocal index
# Can psuedo-join with the next line by just advancing the
# index to skip it when blank.
# -Removed; doesn't make too much sense.
#if index + 1 < len(line_list) and not line_list[index + 1]:
# index += 1
# bb_lines.append(tag)
# Check if the prior line was blank and overwrite it.
if bb_lines and not bb_lines[-1]:
bb_lines[-1] = tag
# Otherwise just add another line as normal.
else:
bb_lines.append(tag)
return
def Open_List():
'Open a new list.'
nonlocal list_active
if not list_active:
Add_Tag('[list]')
list_active = True
def Close_List():
'Close a current list.'
nonlocal list_active
if list_active:
Add_Tag('[/list]')
list_active = False
def Open_Code():
'Open a new code section.'
nonlocal code_active
if not code_active:
Add_Tag('[code]')
code_active = True
def Close_Code():
'Close a current code section.'
nonlocal code_active
if code_active:
Add_Tag('[/code]')
code_active = False
def Bold(string):
'Apply bold tags to a string.'
return '[b]{}[/b]'.format(string)
def Underline(string):
'Apply underline tags to a string.'
return '[u]{}[/u]'.format(string)
def Color(string, color):
'Apply color tags to a string. Should go inside other tags.'
return '[color={}]{}[/color]'.format(color, string)
def Small(string):
'Apply small font tags to a string. Should go inside other tags.'
# Use BB default small/large sizes.
# Update: the latest BB software treats this as a %.
return '[size=75]{}[/size]'.format(string)
def Large(string):
'Apply large font tags to a string. Should go inside other tags.'
return '[size=200]{}[/size]'.format(string)
def Record(line):
'Record a line for the BB lines.'
bb_lines.append(line)
    # Work through the original lines, making edits and recording
    # the new lines.
    # This will be index based, to support look-ahead and line skips.
    while index < len(line_list):
        line = line_list[index]
        # Get a whitespace stripped version for convenience.
        strip_line = line.strip()
        # Empty lines get handled first.
        # Skip newlines while lists are active; the egosoft
        # forums always hide the later newlines in a list entry,
        # which just makes earlier ones awkward.
        # This assumes there are no newlines in the middle of
        # transform description text, which there shouldn't be
        # for simple documentation.
        if not strip_line:
            if not list_active:
                Record('')
        # At every '***', any active list or code is closed.
        elif strip_line == '***':
            Close_Code()
            Close_List()
            changelog_active = False
            transform_active = False
            # Drop the *s for now.
            Record('')
        # Otherwise if code is active, leave the line unchanged.
        elif code_active:
            Record(line)
        # Special cases:
        # Hype up the main heading.
        elif line.startswith('X3 Customizer'):
            # Note: tag order is somewhat strict (though in examples it
            # shouldn't be).
            # Innermost is text size, then color, then bold.
            line = Large(line)
            # The color feels a little tacky; large/bold is enough.
            #line = Color(line, 'yellow')
            line = Bold(line)
            Record(line)
        # The underline under the heading can be swapped to a blank line.
        # In markdown this boldens the line above it; in BB the bolding
        # is manual.
        elif strip_line.startswith('---'):
            Record('')
        # The 'Full documentation...' line breaks out of a list.
        elif strip_line.startswith('Full documentation'):
            Close_List()
            Record(line)
        # Look for any line that is a term ending in ':', but not
        # starting with '*' or similar. This will be a header
        # for a list (or maybe code), including closing a prior list.
        elif strip_line[0] not in ['-','*'] and strip_line[-1] == ':':
            Close_List()
            # Bold the list header.
            Record( Bold( line) )
            # The example input opens a code section, else open a list.
            if strip_line.startswith('Example input file'):
                Open_Code()
            else:
                Open_List()
            # Note when in the change log, to suppress extra formatting.
            if strip_line == 'Change Log:':
                changelog_active = True
            # Note when in a transform or other plugin section,
            # to add formatting.
            if any(x in strip_line for x in ['Transforms','Analyses','Utilities']):
                transform_active = True
        # If the line starts with '*', it is a major list entry.
        elif strip_line[0] == '*':
            # Record the indent of the list, so it can be closed
            # whenever indent is reduced by a line.
            list_indent = len(line) - len(line.lstrip())
            # Note: any format tags need to be applied after the *,
            # so prune the * first, handle formatting, then add
            # the [*] back on.
            new_line = strip_line.replace('*','',1)
            # Apply formatting.
            if transform_active:
                # Note: color appears not to work if there are other
                # tags inside it, so put color wrapper first.
                # Note: color and underline is ugly; just do one or
                # the other.
                new_line = Color(new_line, 'yellow')
                #new_line = Underline(new_line)
            # Make the change log items small.
            if changelog_active:
                new_line = Small(new_line)
            # Stick the [*] back on.
            Record('[*]' + new_line)
        # Other lines can record as-is.
        else:
            # Close a list if this line has a smaller indent.
            # NOTE(review): list_indent is only assigned once a '*' entry
            # has been seen; the list_active guard relies on every list
            # starting with a '*' line -- confirm that holds for all input.
            if list_active and (len(line) - len(line.lstrip()) < list_indent):
                Close_List()
            # Make the change log items small.
            if changelog_active:
                line = Small(line)
            Record(line)
        # Advance the index for next iteration.
        index += 1
    # If a list is active at the end, close it.
    Close_List()
    return bb_lines
# Script entry point: hand the raw command line to Make().
if __name__ == '__main__':
    Make(sys.argv)
| 37.135258 | 92 | 0.610395 |
84ce543197f01f8d3d94d37d0fd0990de295236a | 5,174 | py | Python | tools/namecoin_dump.py | lavajumper/sexcoin-abe | 531e825a44d0d36efa192425aeb7bcfa7922c968 | [
"MIT"
] | null | null | null | tools/namecoin_dump.py | lavajumper/sexcoin-abe | 531e825a44d0d36efa192425aeb7bcfa7922c968 | [
"MIT"
] | null | null | null | tools/namecoin_dump.py | lavajumper/sexcoin-abe | 531e825a44d0d36efa192425aeb7bcfa7922c968 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Dump the Namecoin name data to standard output.
# Copyright(C) 2011 by Abe developers.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/agpl.html>.
import sys
import logging
import Abe.DataStore
import Abe.readconf
from Abe.deserialize import script_GetOp, opcodes
# Namecoin name-operation opcodes, encoded as the small-integer ops OP_1..OP_3.
NAME_NEW = opcodes.OP_1
NAME_FIRSTUPDATE = opcodes.OP_2
NAME_UPDATE = opcodes.OP_3
# Byte range (0x51 inclusive .. 0x54 exclusive) used below to select txout
# scripts whose first opcode is one of the three name ops.
NAME_SCRIPT_MIN = '\x51'
NAME_SCRIPT_MAX = '\x54'
# Base name-expiration window in blocks.
# NOTE(review): appears unused in this file; get_expiration_depth hard-codes
# its own values -- confirm before relying on it.
BLOCKS_TO_EXPIRE = 12000
def iterate_name_updates(store, logger, chain_id):
    """Yield name operations for a chain, in block order.

    Scans every txout whose script begins with OP_1..OP_3 (the Namecoin
    name ops) and yields tuples of
    (block_height, tx_pos, txout_pos, is_first, name, value).
    NAME_NEW outputs are skipped: they carry no name data and do not
    change the name map.  Malformed scripts are logged and skipped.
    """
    for height, tx_pos, txout_pos, script in store.selectall("""
        SELECT cc.block_height, bt.tx_pos, txout.txout_pos,
               txout.txout_scriptPubKey
          FROM chain_candidate cc
          JOIN block_tx bt ON (cc.block_id = bt.block_id)
          JOIN txout ON (bt.tx_id = txout.tx_id)
         WHERE cc.chain_id = ?
           AND txout_scriptPubKey >= ? AND txout_scriptPubKey < ?
         ORDER BY cc.block_height, bt.tx_pos, txout.txout_pos""",
                             (chain_id, store.binin(NAME_SCRIPT_MIN),
                              store.binin(NAME_SCRIPT_MAX))):
        height = int(height)
        tx_pos = int(tx_pos)
        txout_pos = int(txout_pos)
        # Tokenize the output script; each i.next() consumes one push/opcode.
        i = script_GetOp(store.binout(script))
        try:
            name_op = i.next()[0]
            if name_op == NAME_NEW:
                continue # no effect on name map
            elif name_op == NAME_FIRSTUPDATE:
                is_first = True
                name = i.next()[1]
                newtx_hash = i.next()[1]
                #rand = i.next()[1] # XXX documented as optional; is it?
                value = i.next()[1]
            elif name_op == NAME_UPDATE:
                is_first = False
                name = i.next()[1]
                value = i.next()[1]
            else:
                logger.warning("Unexpected first op: %s", repr(name_op))
                continue
        except StopIteration:
            # Script ended before all expected operands were read.
            logger.warning("Strange script at %d:%d:%d",
                           height, tx_pos, txout_pos)
            continue
        yield (height, tx_pos, txout_pos, is_first, name, value)
def get_expiration_depth(height):
    """Return how many blocks a name registered at *height* stays valid.

    The expiration window was phased in: a flat 12000 blocks early on,
    then a linear ramp (height - 12000), capped at 36000 blocks from
    height 48000 onward.
    """
    if height >= 48000:
        return 36000
    elif height >= 24000:
        return height - 12000
    else:
        return 12000
def dump(store, logger, chain_id):
    """Print the full name-update history for *chain_id* to stdout.

    Each event prints as a repr'd tuple.  Event kinds: 'First' (initial
    registration), 'Renewed', 'Resurrected' (re-registered after an
    expiry), 'Expired', and 'Until' (name still live at the chain tip;
    its future expiration height is shown).
    """
    from collections import deque
    top = store.get_block_number(chain_id)
    # expires maps name -> latest expiration height recorded so far.
    expires = {}
    expiry_queue = deque() # XXX unneeded synchronization
    for x in iterate_name_updates(store, logger, chain_id):
        height, tx_pos, txout_pos, is_first, name, value = x
        # Emit any queued expirations that happened before this update.
        # Entries superseded by a later renewal (expires[] moved forward)
        # are dropped silently.
        while expiry_queue and expiry_queue[0]['block_id'] < height:
            e = expiry_queue.popleft()
            dead = e['name']
            if expires[dead] == e['block_id']:
                print repr((e['block_id'], 'Expired', dead, None))
        # Classify this update relative to the name's recorded lifetime.
        # NOTE: 'type' shadows the builtin of the same name.
        if expires.get(name, height) < height:
            type = 'Resurrected'
        elif is_first:
            type = 'First'
        else:
            type = 'Renewed'
        print repr((height, type, name, value))
        expiry = height + get_expiration_depth(height)
        expires[name] = expiry
        expiry_queue.append({'block_id': expiry, 'name': name, 'value': value})
    # Drain the queue: report names that expired before the chain tip and
    # names still live (with their future expiration height).
    for e in expiry_queue:
        if expires[e['name']] > e['block_id']:
            pass
        elif e['block_id'] <= top:
            print repr((e['block_id'], 'Expired', e['name'], None))
        else:
            print repr((e['block_id'], 'Until', e['name'], e['value']))
def main(argv):
    """Parse Abe configuration from *argv*, locate the Namecoin chain,
    and dump its name history.  Returns a process exit status."""
    logging.basicConfig(level=logging.DEBUG)
    conf = {
        'chain_id': None,
    }
    conf.update(Abe.DataStore.CONFIG_DEFAULTS)
    # parse_argv consumes recognized options; leftovers stay in argv.
    args, argv = Abe.readconf.parse_argv(argv, conf, strict=False)
    if argv and argv[0] in ('-h', '--help'):
        print "Usage: namecoin_dump.py --dbtype=MODULE --connect-args=ARGS"
        return 0
    elif argv:
        sys.stderr.write(
            "Error: unknown option `%s'\n"
            "See `namecoin_dump.py --help' for more information.\n"
            % (argv[0],))
        return 1
    store = Abe.DataStore.new(args)
    logger = logging.getLogger(__name__)
    # Default the chain id by looking up the chain named 'Namecoin'.
    if args.chain_id is None:
        row = store.selectrow(
            "SELECT chain_id FROM chain WHERE chain_name = 'Namecoin'")
        if row is None:
            raise Exception("Can not find Namecoin chain in database.")
        args.chain_id = row[0]
    dump(store, logger, args.chain_id)
    return 0
# Script entry point; the process exit status comes from main().
if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
| 35.438356 | 79 | 0.597603 |
58d65dc9204cf48a576a592eb9f906a41d16c2a6 | 183 | py | Python | src/v8unpack/MetaDataObject/Catalog.py | fishca/v8unpack-1 | 51e1bb6b57be170f0c19be20649e18abdda04668 | [
"MIT"
] | null | null | null | src/v8unpack/MetaDataObject/Catalog.py | fishca/v8unpack-1 | 51e1bb6b57be170f0c19be20649e18abdda04668 | [
"MIT"
] | null | null | null | src/v8unpack/MetaDataObject/Catalog.py | fishca/v8unpack-1 | 51e1bb6b57be170f0c19be20649e18abdda04668 | [
"MIT"
] | null | null | null | from ..MetaDataObject.core.Container import Container
class Catalog(Container):
    """Container subclass for a 1C "Catalog" metadata object."""

    pass

    @classmethod
    def get_decode_header(cls, header):
        """Pick this object's header entry out of a decoded header tree."""
        return header[0][1][9][1]
| 18.3 | 53 | 0.693989 |
ed7f63bac3221447a854c8031353988336511af0 | 1,831 | py | Python | models/experiments/prob_unet.py | gigantenbein/UNet-Zoo | d157c22ef8041ed743aa7bbcf377f0f8ad85e755 | [
"Apache-2.0"
] | 20 | 2020-02-16T07:20:23.000Z | 2022-03-14T04:11:02.000Z | models/experiments/prob_unet.py | suyanzhou626/UNet-Zoo | 76d23952d90a45a01da1cc2926b4d3a24a1adb75 | [
"Apache-2.0"
] | 6 | 2021-06-08T21:03:07.000Z | 2022-03-17T13:28:33.000Z | models/experiments/prob_unet.py | suyanzhou626/UNet-Zoo | 76d23952d90a45a01da1cc2926b4d3a24a1adb75 | [
"Apache-2.0"
] | 5 | 2020-03-20T02:04:49.000Z | 2021-10-20T17:37:52.000Z | import torch
import torch.nn as nn
from models.probabilistic_unet import ProbabilisticUnet
from utils import normalise_image
from data.lidc_data import lidc_data
# Experiment identification and data source.
experiment_name = 'ProbabilisticUnet'
log_dir_name = 'lidc'
data_loader = lidc_data
# number of filter for the latent levels, they will be applied in the order as loaded into the list
filter_channels = [32, 64, 128, 192, 192, 192, 192]
latent_levels = 1
latent_dim = 6
# Total training iterations.
iterations = 5000000
n_classes = 2
# LIDC provides four annotator masks per scan.
num_labels_per_subject = 4
no_convs_fcomb = 3 # not used
beta = 1.0 # not used
use_reversible = False
exponential_weighting = True
# use 1 for grayscale, 3 for RGB images
input_channels = 1
epochs_to_train = 20
batch_size = 12
# (channels, height, width) -- presumably; confirm against the data loader.
image_size = (1, 128, 128)
augmentation_options = {'do_flip_lr': True,
                        'do_flip_ud': True,
                        'do_rotations': True,
                        'do_scaleaug': True,
                        'nlabels': n_classes}
input_normalisation = normalise_image
validation_samples = 16
num_validation_images = 100
# How often (in iterations) to log / validate.
logging_frequency = 1000
validation_frequency = 1000
# Note: 10e-5 == 1e-4.
weight_decay = 10e-5
pretrained_model = None
# model
model = ProbabilisticUnet
# # number of filter for the latent levels, they will be applied in the order as loaded into the list
# filter_channels = [32, 64, 128, 192]
# latent_levels = 1 # TODO: this is passed to latent dim and latent levels, should not be like that
# resolution_level = 7
#
# n_classes = 2
# no_convs_fcomb = 4
# beta = 10.0 # for loss function
# #
# use_reversible = False
#
# # use 1 for grayscale, 3 for RGB images
# input_channels = 1
#
# epochs_to_train = 20
# batch_size = [12, 1, 1]
#
# validation_samples = 16
#
# logging_frequency = 10
# validation_frequency = 100
#
# input_normalisation = normalise_image
#
# # model
# model = ProbabilisticUnet
| 22.604938 | 101 | 0.712725 |
be7f4045f7e83a7b50d0655d5c3d14d51a3a790b | 1,170 | py | Python | tests/storage/cases/test_KT1LGxWyzTTfg5J1tyPhuCLVnQgpUoUp7xoZ_babylon.py | juztin/pytezos-1 | 7e608ff599d934bdcf129e47db43dbdb8fef9027 | [
"MIT"
] | 1 | 2020-08-11T02:31:24.000Z | 2020-08-11T02:31:24.000Z | tests/storage/cases/test_KT1LGxWyzTTfg5J1tyPhuCLVnQgpUoUp7xoZ_babylon.py | juztin/pytezos-1 | 7e608ff599d934bdcf129e47db43dbdb8fef9027 | [
"MIT"
] | 1 | 2020-12-30T16:44:56.000Z | 2020-12-30T16:44:56.000Z | tests/storage/cases/test_KT1LGxWyzTTfg5J1tyPhuCLVnQgpUoUp7xoZ_babylon.py | juztin/pytezos-1 | 7e608ff599d934bdcf129e47db43dbdb8fef9027 | [
"MIT"
] | 1 | 2022-03-20T19:01:00.000Z | 2022-03-20T19:01:00.000Z | from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1LGxWyzTTfg5J1tyPhuCLVnQgpUoUp7xoZ_babylon(TestCase):
    """Round-trip, schema, and formatting checks for one mainnet contract."""
    @classmethod
    def setUpClass(cls):
        cls.maxDiff = None
        cls.contract = get_data(
            'storage/mainnet/KT1LGxWyzTTfg5J1tyPhuCLVnQgpUoUp7xoZ_babylon.json')
    def test_storage_encoding_KT1LGxWyzTTfg5J1tyPhuCLVnQgpUoUp7xoZ_babylon(self):
        # Decoding the stored value and re-encoding it must reproduce the
        # original Micheline expression exactly.
        script = self.contract['script']
        storage_type = script['code'][1]
        storage_value = script['storage']
        schema = build_schema(storage_type)
        round_tripped = encode_micheline(
            decode_micheline(storage_value, storage_type, schema), schema)
        self.assertEqual(storage_value, round_tripped)
    def test_storage_schema_KT1LGxWyzTTfg5J1tyPhuCLVnQgpUoUp7xoZ_babylon(self):
        # Building a schema from the script's first section must not raise.
        build_schema(self.contract['script']['code'][0])
    def test_storage_format_KT1LGxWyzTTfg5J1tyPhuCLVnQgpUoUp7xoZ_babylon(self):
        # Pretty-printing both code and storage must not raise.
        micheline_to_michelson(self.contract['script']['code'])
        micheline_to_michelson(self.contract['script']['storage'])
| 41.785714 | 112 | 0.757265 |
202fce92e1b22ea122d8f880455e939cb002de01 | 1,862 | py | Python | YOLO/build.py | zz00zws/magic_learning | c048d244e9abd38079b5f195e7b2bbfebc616310 | [
"MIT"
] | 1 | 2019-12-02T08:09:54.000Z | 2019-12-02T08:09:54.000Z | YOLO/build.py | zz00zws/magic_learning | c048d244e9abd38079b5f195e7b2bbfebc616310 | [
"MIT"
] | null | null | null | YOLO/build.py | zz00zws/magic_learning | c048d244e9abd38079b5f195e7b2bbfebc616310 | [
"MIT"
] | null | null | null | import torch
import os
import PIL.Image as pimg
import PIL.ImageDraw as draw
# Source folders for the XML-style annotations and the original images.
path_label='./data/Anno_XML'
path_img='./data/Original'
# y: True while the coordinate lines being collected belong to a wanted class.
y=True
#yy={'pedestrian\n','tree\n','car\n','bicycle\n','store\n','building\n','road\n','sky\n','sidewalk\n'}
yy={'pedestrian\n','car\n','bicycle\n'}
# Map each wanted class-name line (newline included) to an integer id.
# NOTE(review): set iteration order is not stable across runs, so the
# name->id assignment can change between executions -- confirm intended.
dic=dict(zip(yy,range(3)))
f = open(r'./label.txt','w+')
for i in os.listdir(path_label):
    xx=[]
    ss=[]
    f1=open(os.path.join(path_label,i))
    a=f1.readlines()
    # x accumulates the current object's alternating x/y coordinate lines.
    x=[]
    # t: True once this file contained at least one wanted object.
    t=False
    for j,k in enumerate(a):
        if k=='<name>\n':
            if a[j+1] in yy:
                # Flush the previous object's points as a bounding box:
                # max/min over x and y, halved (images resized to half).
                if x!=[]:
                    x=torch.tensor(list(map(int,x))).view(-1,2)
                    f.write(str(torch.max(x[:,0]).item()/2)+' ')
                    f.write(str(torch.max(x[:,1]).item()/2)+' ')
                    f.write(str(torch.min(x[:,0]).item()/2)+' ')
                    f.write(str(torch.min(x[:,1]).item()/2)+' ')
                    y=True
                t=True
                x=[]
                f.write(str(dic[a[j+1]])+' ')
            else:
                y=False
        if k=='<x>\n' and y:
            x.append(a[j+1])
        if k=='<y>\n' and y:
            x.append(a[j+1])
    # Flush the final object's points, if any remain.
    if x!=[]:
        x=torch.tensor(list(map(int,x))).view(-1,2)
        f.write(str(torch.max(x[:,0]).item()/2)+' ')
        f.write(str(torch.max(x[:,1]).item()/2)+' ')
        f.write(str(torch.min(x[:,0]).item()/2)+' ')
        f.write(str(torch.min(x[:,1]).item()/2)+' ')
    if t:
        # img=pimg.open(os.path.join(path_img,i.split('_')[0]+'.JPG'))
        # img=img.resize((640,480))
        # img.save(os.path.join(path_img,i.split('_')[0]+'.JPG'))
        f.write(i.split('_')[0]+'.JPG ')
        f.write('\n')
        print(i.split('_')[0]+'.JPG')
f.close()
| 22.433735 | 103 | 0.436627 |
76c6afe093c0225a3e74afb74cb3e160efc33c3a | 7,463 | py | Python | tests/test_cwe.py | chris-angeli-rft/cloud-custodian | 5ff331b114a591dbaf6d672e30ceefb7ae64a5dd | [
"Apache-2.0"
] | 8 | 2021-05-18T02:22:03.000Z | 2021-09-11T02:49:04.000Z | tests/test_cwe.py | chris-angeli-rft/cloud-custodian | 5ff331b114a591dbaf6d672e30ceefb7ae64a5dd | [
"Apache-2.0"
] | 1 | 2021-04-26T04:38:35.000Z | 2021-04-26T04:38:35.000Z | tests/test_cwe.py | chris-angeli-rft/cloud-custodian | 5ff331b114a591dbaf6d672e30ceefb7ae64a5dd | [
"Apache-2.0"
] | 1 | 2021-11-10T02:28:47.000Z | 2021-11-10T02:28:47.000Z | # Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jmespath
from unittest import TestCase
from .common import event_data, BaseTest
from c7n.cwe import CloudWatchEvents
class CloudWatchEventTest(BaseTest):
    """Integration tests for event-rule resources, replayed from flight data."""
    def test_event_rule_tags(self):
        """Tag an untagged event rule and verify the tag landed via the API."""
        factory = self.replay_flight_data('test_cwe_rule_tags')
        client = factory().client('events')
        policy = self.load_policy(
            {
                'name': 'cwe-rule',
                'resource': 'aws.event-rule',
                'filters': [
                    {'tag:App': 'absent'},
                    {'Name': 'cloud-custodian-mailer'}],
                'actions': [
                    {'type': 'tag', 'tags': {'App': 'Custodian'}}]
            }, session_factory=factory, config={'region': 'us-west-2'})
        resources = policy.run()
        self.assertEqual(len(resources), 1)
        tags = {t['Key']: t['Value'] for t in
                client.list_tags_for_resource(
                    ResourceARN=policy.resource_manager.get_arns(resources)[0]).get(
                        'Tags')}
        self.assertEqual(tags, {'App': 'Custodian'})
    def test_target_cross_account_remove(self):
        """Delete cross-account rule targets and verify none remain."""
        session_factory = self.replay_flight_data("test_cwe_rule_target_cross")
        client = session_factory().client("events")
        policy = self.load_policy(
            {
                "name": "cwe-cross-account",
                "resource": "event-rule-target",
                "filters": [{"type": "cross-account"}],
                "actions": ["delete"],
            },
            session_factory=session_factory,
        )
        resources = policy.run()
        self.assertEqual(len(resources), 1)
        targets = client.list_targets_by_rule(Rule=resources[0]["c7n:parent-id"]).get(
            "Targets"
        )
        self.assertEqual(targets, [])
class CloudWatchEventsFacadeTest(TestCase):
    """Unit tests for CloudWatchEvents.get_ids / match on canned event JSON."""
    def test_get_ids(self):
        """Shorthand event-name form extracts the instance ids."""
        self.assertEqual(
            CloudWatchEvents.get_ids(
                {"detail": event_data("event-cloud-trail-run-instances.json")},
                {"type": "cloudtrail", "events": ["RunInstances"]},
            ),
            ["i-784cdacd", "i-7b4cdace"],
        )
    def test_get_ids_sans_with_details_expr(self):
        """An ids expression prefixed with 'detail.' also resolves."""
        self.assertEqual(
            CloudWatchEvents.get_ids(
                {'detail': event_data('event-cloud-trail-run-instances.json')},
                {'type': 'cloudtrail', 'events': [
                    {'ids': 'detail.responseElements.instancesSet.items[].instanceId',
                     'source': 'ec2.amazonaws.com',
                     'event': 'RunInstances'}]}),
            ["i-784cdacd", "i-7b4cdace"],
        )
    def test_get_ids_sans_without_details_expr(self):
        """An ids expression without the 'detail.' prefix also resolves."""
        self.assertEqual(
            sorted(CloudWatchEvents.get_ids(
                {'detail': event_data('event-cloud-trail-run-instances.json')},
                {'type': 'cloudtrail', 'events': [
                    {'ids': 'responseElements.instancesSet.items[].instanceId',
                     'source': 'ec2.amazonaws.com',
                     'event': 'RunInstances'}
                ]})),
            ["i-784cdacd", "i-7b4cdace"],
        )
    def test_get_ids_multiple_events(self):
        """Only the matching source/event/ids entry yields ids."""
        d = event_data("event-cloud-trail-run-instances.json")
        d["eventName"] = "StartInstances"
        self.assertEqual(
            CloudWatchEvents.get_ids(
                {"detail": d},
                {
                    "type": "cloudtrail",
                    "events": [
                        # wrong event name
                        {
                            "source": "ec2.amazonaws.com",
                            "event": "CreateTags",
                            "ids": "requestParameters.resourcesSet.items[].resourceId",
                        },
                        # wrong event source
                        {
                            "source": "ecs.amazonaws.com",
                            "event": "StartInstances",
                            "ids": "responseElements.instancesSet.items",
                        },
                        # matches no resource ids
                        {
                            "source": "ec2.amazonaws.com",
                            "event": "StartInstances",
                            "ids": "responseElements.instancesSet2.items[].instanceId",
                        },
                        # correct
                        {
                            "source": "ec2.amazonaws.com",
                            "event": "StartInstances",
                            "ids": "responseElements.instancesSet.items[].instanceId",
                        },
                        # we don't fall off the end
                        {
                            "source": "ec2.amazonaws.com",
                            "event": "StartInstances",
                            "ids": "responseElements.instancesSet.items[]",
                        },
                    ],
                },
            ),
            ["i-784cdacd", u"i-7b4cdace"],
        )
    def test_ec2_state(self):
        """ec2-instance-state events yield the instance id."""
        self.assertEqual(
            CloudWatchEvents.get_ids(
                event_data("event-instance-state.json"), {"type": "ec2-instance-state"}
            ),
            ["i-a2d74f12"],
        )
    def test_asg_state(self):
        """asg-instance-state events yield the ASG name."""
        self.assertEqual(
            CloudWatchEvents.get_ids(
                event_data("event-asg-instance-failed.json"),
                {
                    "type": "asg-instance-state",
                    "events": ["EC2 Instance Launch Unsuccessful"],
                },
            ),
            ["CustodianTest"],
        )
    def test_custom_event(self):
        """A fully-specified custom cloudtrail event mapping resolves ids."""
        d = {"detail": event_data("event-cloud-trail-run-instances.json")}
        d["detail"]["eventName"] = "StartInstances"
        self.assertEqual(
            CloudWatchEvents.get_ids(
                d,
                {
                    "type": "cloudtrail",
                    "events": [
                        {
                            "event": "StartInstances",
                            "ids": "responseElements.instancesSet.items[].instanceId",
                            "source": "ec2.amazonaws.com",
                        }
                    ],
                },
            ),
            ["i-784cdacd", u"i-7b4cdace"],
        )
    def test_non_cloud_trail_event(self):
        """Non-cloudtrail events are not matched."""
        for event in ["event-instance-state.json", "event-scheduled.json"]:
            self.assertFalse(CloudWatchEvents.match(event_data(event)))
    def test_cloud_trail_resource(self):
        """Matching a cloudtrail event returns its source and ids expression."""
        self.assertEqual(
            CloudWatchEvents.match(event_data("event-cloud-trail-s3.json")),
            {
                "source": "s3.amazonaws.com",
                "ids": jmespath.compile("detail.requestParameters.bucketName"),
            },
        )
| 37.883249 | 87 | 0.491491 |
48a0030c6576b5bda33117ff243be7420631e9d9 | 1,747 | py | Python | src/user/urls.py | jhernandez18p/mobyapp | 4add7bd3b3f1e933bbb6941674bf84f4c4462685 | [
"MIT"
] | null | null | null | src/user/urls.py | jhernandez18p/mobyapp | 4add7bd3b3f1e933bbb6941674bf84f4c4462685 | [
"MIT"
] | 7 | 2020-06-05T17:31:06.000Z | 2022-03-11T23:16:34.000Z | src/user/urls.py | jhernandez18p/mobyapp | 4add7bd3b3f1e933bbb6941674bf84f4c4462685 | [
"MIT"
] | null | null | null | from django.contrib.auth import views as auth_views
from django.urls import path, re_path, include
from .views import custom_login,custom_logout,custom_register, \
ActivateAccountView, modal_cookie, error, thanks, newsletter,\
activate, current_user, UserList
# Namespace used when reversing these routes (e.g. 'auth:login').
app_name = 'auth'
urlpatterns = [
    # Miscellaneous site endpoints.
    path('modal', modal_cookie, name='modal-cookie'),
    path('error', error, name='error'),
    path('newsletter', newsletter, name='newsletter'),
    path('gracias', thanks, name='thanks'),
    # Session management (custom views).
    path('login/', custom_login, name='login'),
    path('salir/', custom_logout, name='logout'),
    path('registro/', custom_register, name='register'),
    # Password-reset flow, wired to django.contrib.auth function views
    # (pre-Django 2.1 style).
    path('password_reset/done/', auth_views.password_reset_done,
        name='password_reset_done'
    ),
    path('password_reset_complete/done/', auth_views.password_reset_complete,
        name='password_reset_complete'
    ),
    path('password_reset/', auth_views.password_reset, {
        'post_reset_redirect':'auth:password_reset_done',}, name='password_reset'
    ),
    re_path(r'^password_reset_confirm/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
        auth_views.password_reset_confirm,
        {
            'post_reset_redirect':'auth:password_reset_complete'
        },
        name='password_reset_confirm'
    ),
    # Account activation links carrying a uid + one-time token.
    re_path(r'^activar-cuenta/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
        ActivateAccountView.as_view(), name='activate_account'
    ),
    re_path(r'^activar/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
        activate, name='activate'
    ),
    # JWT
    path('current_user/', current_user),
    path('users/', UserList.as_view()),
path('users/', UserList.as_view()),
] | 39.704545 | 116 | 0.65083 |
33e16283d0f4beef468c99a7827a995d3b37364c | 647 | py | Python | HoldMyPotion/app/migrations/0001_initial.py | holdmypotion/Blog_App | a1ef39293e662534153b83fdd9f486cb87323f7d | [
"MIT"
] | null | null | null | HoldMyPotion/app/migrations/0001_initial.py | holdmypotion/Blog_App | a1ef39293e662534153b83fdd9f486cb87323f7d | [
"MIT"
] | null | null | null | HoldMyPotion/app/migrations/0001_initial.py | holdmypotion/Blog_App | a1ef39293e662534153b83fdd9f486cb87323f7d | [
"MIT"
] | null | null | null | # Generated by Django 3.0.4 on 2020-03-27 19:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial auto-generated migration: creates the Article table."""
    # First migration of this app; no prior migrations to depend on.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('slug', models.SlugField()),
                ('body', models.TextField()),
                # Set once at creation time; not updated on edits.
                ('date', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
| 25.88 | 114 | 0.553323 |
ed73b3c5f96972b1292cc2a8984f28be5e209422 | 9,788 | py | Python | tensorflow_data_validation/statistics/generators/cross_feature_stats_generator.py | Coulbe/data-validation | a2c5e5dd2cc50cce8a7eab5c7e72a0cb11b74b2c | [
"Apache-2.0"
] | null | null | null | tensorflow_data_validation/statistics/generators/cross_feature_stats_generator.py | Coulbe/data-validation | a2c5e5dd2cc50cce8a7eab5c7e72a0cb11b74b2c | [
"Apache-2.0"
] | null | null | null | tensorflow_data_validation/statistics/generators/cross_feature_stats_generator.py | Coulbe/data-validation | a2c5e5dd2cc50cce8a7eab5c7e72a0cb11b74b2c | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module that computes cross feature statistics.
We compute the following statistics for numeric feature crosses (only univalent
feature values are considered):
- Standard covariance. E[(X-E[X])*(Y-E[Y])]
- Pearson product-moment correlation coefficient.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import random
from typing import Dict, Iterable, List, Optional, Text
import numpy as np
import pyarrow as pa
from tensorflow_data_validation import types
from tensorflow_data_validation.arrow import arrow_util
from pandas import DataFrame, Series # pylint: disable=g-multiple-import
import pandas as pd
from tensorflow_data_validation.statistics.generators import stats_generator
from tensorflow_data_validation.utils import stats_util
from tfx_bsl.arrow import array_util
from tensorflow_metadata.proto.v0 import path_pb2
from tensorflow_metadata.proto.v0 import statistics_pb2
class _PartialCrossFeatureStats(object):
  """Accumulates the moment sums needed for one feature cross.

  Tracks sum(x), sum(y), sum(x^2), sum(y^2), sum(x*y) and the pair count,
  from which covariance and correlation are derived later.
  """

  __slots__ = ['sum_x', 'sum_y', 'sum_square_x', 'sum_square_y', 'sum_xy',
               'count']

  def __init__(self):
    # Every accumulator field starts at zero.
    for slot in self.__slots__:
      setattr(self, slot, 0)

  def __iadd__(self, other: '_PartialCrossFeatureStats'
              ) -> '_PartialCrossFeatureStats':
    """Folds another partial accumulator into this one, field by field."""
    for slot in self.__slots__:
      setattr(self, slot, getattr(self, slot) + getattr(other, slot))
    return self

  def update(self, feature_x: Series, feature_y: Series) -> None:
    """Accumulates sums from a pair of aligned univalent value columns."""
    self.count += len(feature_x)
    self.sum_x += feature_x.sum()
    self.sum_square_x += (feature_x ** 2).sum()
    self.sum_y += feature_y.sum()
    self.sum_square_y += (feature_y ** 2).sum()
    self.sum_xy += (feature_x * feature_y).sum()
# Accumulator type: maps a (feature_x, feature_y) cross to its partial stats.
CrossFeatureStatsGeneratorAccumulator = Dict[types.FeatureCross,
                                             _PartialCrossFeatureStats]
class CrossFeatureStatsGenerator(stats_generator.CombinerStatsGenerator):
  """A combiner statistics generator that computes cross feature statistics.
  """
  def __init__(
      self,  # pylint: disable=useless-super-delegation
      name: Text = 'CrossFeatureStatsGenerator',
      feature_crosses: Optional[List[types.FeatureCross]] = None,
      sample_rate: float = 0.1) -> None:
    """Initializes cross feature statistics generator.
    Args:
      name: An optional unique name associated with the statistics generator.
      feature_crosses: List of numeric feature crosses for which to compute
        statistics. If None, we compute statistics for all numeric crosses.
      sample_rate: Sample rate.
    """
    super(CrossFeatureStatsGenerator, self).__init__(name, None)
    self._feature_crosses = feature_crosses
    # Set of individual feature names referenced by configured crosses;
    # None means "consider every feature".
    self._features_needed = None
    if self._feature_crosses:
      self._features_needed = set()
      for (feat_x, feat_y) in self._feature_crosses:
        self._features_needed.add(feat_x)
        self._features_needed.add(feat_y)
    self._sample_rate = sample_rate
  # Create an accumulator, which maps feature name to the partial stats
  # associated with the feature.
  def create_accumulator(self) -> CrossFeatureStatsGeneratorAccumulator:
    """Returns a fresh, empty accumulator."""
    return {}
  def _get_univalent_values_with_parent_indices(
      self, examples: pa.RecordBatch) -> Dict[types.FeatureName, DataFrame]:
    """Extracts univalent values for each feature along with parent indices.

    Returns a DataFrame per numeric feature with one column of values and a
    'parent_index' column identifying the example each value came from; only
    examples with exactly one value for the feature are kept.
    """
    result = {}
    for feature_name, feat_arr in zip(examples.schema.names, examples.columns):
      if (self._features_needed is not None and
          feature_name not in self._features_needed):
        continue
      feature_type = stats_util.get_feature_type_from_arrow_type(
          feature_name, feat_arr.type)
      # Only consider crosses of numeric features.
      # TODO(zhuo): Support numeric features nested under structs.
      if feature_type in (None, statistics_pb2.FeatureNameStatistics.STRING,
                          statistics_pb2.FeatureNameStatistics.STRUCT):
        continue
      value_lengths = np.asarray(array_util.ListLengthsFromListArray(feat_arr))
      univalent_parent_indices = set((value_lengths == 1).nonzero()[0])
      # If there are no univalent values, continue to the next feature.
      if not univalent_parent_indices:
        continue
      flattened, value_parent_indices = arrow_util.flatten_nested(
          feat_arr, True)
      non_missing_values = np.asarray(flattened)
      if feature_type == statistics_pb2.FeatureNameStatistics.FLOAT:
        # Remove any NaN values if present.
        non_nan_mask = ~np.isnan(non_missing_values)
        non_missing_values = non_missing_values[non_nan_mask]
        value_parent_indices = value_parent_indices[non_nan_mask]
      df = pd.DataFrame({feature_name: non_missing_values,
                         'parent_index': value_parent_indices})
      # Only keep the univalent feature values.
      df = df[df['parent_index'].isin(univalent_parent_indices)]
      result[feature_name] = df
    return result
  # Incorporates the input (an arrow RecordBatch) into the accumulator.
  def add_input(
      self, accumulator: CrossFeatureStatsGeneratorAccumulator,
      examples: pa.RecordBatch
  ) -> Dict[types.FeatureCross, _PartialCrossFeatureStats]:
    """Folds one RecordBatch into the accumulator (subject to sampling)."""
    # Bernoulli sampling at the batch level: skip whole batches.
    if random.random() > self._sample_rate:
      return accumulator
    # Cache the values and parent indices for each feature. We cache this to
    # avoid doing the same computation for a feature multiple times in
    # each cross.
    features_for_cross = self._get_univalent_values_with_parent_indices(
        examples)
    # Generate crosses of numeric univalent features and update the partial
    # cross stats.
    # NOTE(review): when crosses are configured explicitly, a feature with no
    # univalent values in this batch would raise KeyError below -- confirm
    # configured crosses always name numeric features present in each batch.
    if self._feature_crosses is not None:
      feature_crosses = self._feature_crosses
    else:
      feature_crosses = itertools.combinations(
          sorted(list(features_for_cross.keys())), 2)
    for feat_name_x, feat_name_y in feature_crosses:
      feat_cross = (feat_name_x, feat_name_y)
      if feat_cross not in accumulator:
        accumulator[feat_cross] = _PartialCrossFeatureStats()
      df_x, df_y = (features_for_cross[feat_name_x],
                    features_for_cross[feat_name_y])
      # Join based on parent index so that we have the value pairs
      # corresponding to each example.
      merged_df = pd.merge(df_x, df_y, on='parent_index')
      # Update the partial cross stats.
      accumulator[feat_cross].update(merged_df[feat_name_x],
                                     merged_df[feat_name_y])
    return accumulator
  # Merge together a list of cross feature statistics.
  def merge_accumulators(
      self, accumulators: Iterable[CrossFeatureStatsGeneratorAccumulator]
  ) -> CrossFeatureStatsGeneratorAccumulator:
    """Merges accumulators by summing partial stats per feature cross."""
    result = {}
    for accumulator in accumulators:
      for feat_cross, cross_feat_stats in accumulator.items():
        if feat_cross not in result:
          result[feat_cross] = cross_feat_stats
        else:
          result[feat_cross] += cross_feat_stats
    return result
  # Return final stats as a DatasetFeatureStatistics proto.
  def extract_output(self,
                     accumulator: CrossFeatureStatsGeneratorAccumulator
                    ) -> statistics_pb2.DatasetFeatureStatistics:
    """Converts partial sums into covariance/correlation proto stats."""
    # Create a new DatasetFeatureStatistics proto.
    result = statistics_pb2.DatasetFeatureStatistics()
    for feat_cross, cross_feat_stats in accumulator.items():
      # Construct the CrossFeatureStatistics proto from the partial
      # cross feature stats.
      cross_feat_stats_proto = result.cross_features.add()
      path_x = path_pb2.Path()
      path_x.step.append(feat_cross[0])
      path_y = path_pb2.Path()
      path_y.step.append(feat_cross[1])
      cross_feat_stats_proto.path_x.CopyFrom(path_x)
      cross_feat_stats_proto.path_y.CopyFrom(path_y)
      cross_feat_stats_proto.count = cross_feat_stats.count
      if cross_feat_stats.count > 0:
        num_cross_stats_proto = statistics_pb2.NumericCrossStatistics()
        # Covariance: E[XY] - E[X]E[Y].
        covariance = (cross_feat_stats.sum_xy / cross_feat_stats.count) -\
                     (cross_feat_stats.sum_x / cross_feat_stats.count) *\
                     (cross_feat_stats.sum_y / cross_feat_stats.count)
        num_cross_stats_proto.covariance = covariance
        # Std devs via E[X^2] - E[X]^2, clamped at 0 against rounding error.
        std_dev_x = math.sqrt(max(
            0, (cross_feat_stats.sum_square_x / cross_feat_stats.count) -
            math.pow(cross_feat_stats.sum_x / cross_feat_stats.count, 2)))
        std_dev_y = math.sqrt(max(
            0, (cross_feat_stats.sum_square_y / cross_feat_stats.count) -
            math.pow(cross_feat_stats.sum_y / cross_feat_stats.count, 2)))
        # Correlation is only defined when both std devs are nonzero.
        if std_dev_x != 0 and std_dev_y != 0:
          correlation = covariance / (std_dev_x * std_dev_y)
          num_cross_stats_proto.correlation = correlation
        cross_feat_stats_proto.num_cross_stats.CopyFrom(num_cross_stats_proto)
    return result
| 41.12605 | 79 | 0.717818 |
81a05cb13625964e54eb84c2cc0b856c8ff086ef | 1,037 | py | Python | count_tags.py | SURJBayArea/everyaction-tools | e50ad188dffcf8ff4b4a9de209f102c7fbd89488 | [
"Apache-2.0"
] | null | null | null | count_tags.py | SURJBayArea/everyaction-tools | e50ad188dffcf8ff4b4a9de209f102c7fbd89488 | [
"Apache-2.0"
] | null | null | null | count_tags.py | SURJBayArea/everyaction-tools | e50ad188dffcf8ff4b4a9de209f102c7fbd89488 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Returns count of csv records wih Action Network tags
Looks for column `can2_user_tags`
email,can2_user_tags
some.one@example.com,"#Trump, ?Direct Action, ?Organizing, Phone_Bank"
Assume tags separated by ", " which misses some tags with embedded "," like
"[Alpine] Donors from 7.18.19 - 10.1.19 (Created: July 22, 2019 1:04 PM)"
"""
import csv
import sys
tag_count = {}
total = 0
if len(sys.argv) == 1:
print("usage: count_tags [file ...] ")
sys.exit(-1)
for filename in sys.argv[1:]:
with open(filename, newline='', encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
total += 1
for tag in row['can2_user_tags'].split(", "):
if tag:
tag_count[tag] = tag_count.get(tag, 0) + 1
tags_sorted = sorted(tag_count.items(),key=lambda x: x[1],reverse=True)
print("count,old,new")
for (tag, count) in tags_sorted:
print(f"{count},{tag}")
print(f"{len(tags_sorted)},Tags in {total} items")
| 26.589744 | 75 | 0.637416 |
3d1d9d4ef9360848ace76ab4f4e1b94565a92f00 | 5,010 | py | Python | hsdbi/base.py | timniven/hsdbi | a3974972a728f631f9cbced1cf8d88e9ccc37c47 | [
"MIT"
] | null | null | null | hsdbi/base.py | timniven/hsdbi | a3974972a728f631f9cbced1cf8d88e9ccc37c47 | [
"MIT"
] | null | null | null | hsdbi/base.py | timniven/hsdbi | a3974972a728f631f9cbced1cf8d88e9ccc37c47 | [
"MIT"
] | null | null | null | """Base classes."""
class RepositoryFacade:
    """Abstract base class for a Repository Facade.

    A facade (https://en.wikipedia.org/wiki/Facade_pattern) is "an object
    that provides a simplified interface to a larger body of code". Here it
    is a single access point for a group of Repository classes: it
    encapsulates the shared database connection, lets subclasses offer a
    commit() for saving changes, and implements __enter__/__exit__ so the
    facade works in a "with" statement.

    Grouping repositories is mainly useful when they must share database
    context; individual Repository classes can still be used on their own.
    Connection details differ per backend, so a subclass is defined for
    each one (see MySQLRepositoryFacade and MongoRepositoryFacade).
    """

    def __init__(self):
        """Create a new RepositoryFacade."""

    def __enter__(self):
        # Re-initialize on entry so one facade can back several
        # consecutive "with" blocks.
        self.__init__()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Always release the connection, even if the block raised.
        # (See https://stackoverflow.com/questions/22417323 for how
        # __enter__/__exit__ interact.)
        self.dispose()

    def dispose(self):
        """Close the connection; concrete subclasses must implement this."""
        raise NotImplementedError()
class Repository:
    """Abstract repository over a single table/collection.

    The docstrings describe the intended contract of each operation;
    concrete subclasses supply the database-specific behaviour and may
    extend the argument lists. Return types are deliberately unspecified
    because they differ per backend. The one concrete piece of logic
    provided here is exists(), which is built on top of search().
    """

    def __init__(self, **kwargs):
        # Remember construction arguments so __enter__ can re-initialize.
        self._kwargs = kwargs

    def __enter__(self):
        # Re-run initialization with the saved arguments; see
        # https://stackoverflow.com/questions/22417323 for the
        # __enter__/__exit__ pattern used here.
        self.__init__(**self._kwargs)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.dispose()

    def add(self, item):
        """Add one or more items to the database.

        Args:
            item: an object or list of objects to add.
        """
        raise NotImplementedError()

    def all(self, projection=None):
        """Return every record in the table/collection.

        Args:
            projection: List, optional, of attributes to project.
        """
        raise NotImplementedError()

    def commit(self):
        """Persist pending changes to the database."""
        raise NotImplementedError()

    def count(self):
        """Return the number of records in this table/collection."""
        raise NotImplementedError()

    def delete(self, items=None, **kwargs):
        """Delete item(s), given either the objects or their primary keys.

        Args:
            items: an object or list of objects to delete.
            kwargs: primary key attribute name(s) and value(s).
        """
        raise NotImplementedError()

    def dispose(self):
        """Release any resources held by this Repository."""
        raise NotImplementedError()

    def exists(self, **kwargs):
        """Report whether a record with the given primary key exists.

        Pass the primary key values in as keyword arguments.

        Returns:
            Boolean indicating if the record exists.
        """
        # NOTE: to project here or not?
        return bool(list(self.search(**kwargs)))

    def get(self, expect=True, projection=None, **kwargs):
        """Fetch a single item identified by its primary key values.

        Arguments:
            expect: Bool, whether to throw an error if not found.
            projection: List, optional, of attributes to project.
            kwargs: primary key attribute name(s) and value(s).
        """
        raise NotImplementedError()

    def search(self, projection=None, **kwargs):
        """Find records matching the given attribute values.

        NOTE: implementations must return a generator or list of records —
        the default exists() implementation relies on this; if you change
        it, re-implement exists() as well.

        Args:
            projection: List, optional, of attributes to project.
            kwargs: attribute name(s) and value(s) to search on.

        Returns:
            Generator or list of records.
        """
        raise NotImplementedError()
| 32.532468 | 80 | 0.654491 |
383ec3dcd0a4834a3f259dc1042d4aad60a145e4 | 525 | py | Python | olamundo.py/exercicios/ex011.py | gabrielviticov/exercicios-python | 4068cb0029513f8ab8bd12fa3a9055f37b4040d4 | [
"MIT"
] | null | null | null | olamundo.py/exercicios/ex011.py | gabrielviticov/exercicios-python | 4068cb0029513f8ab8bd12fa3a9055f37b4040d4 | [
"MIT"
] | null | null | null | olamundo.py/exercicios/ex011.py | gabrielviticov/exercicios-python | 4068cb0029513f8ab8bd12fa3a9055f37b4040d4 | [
"MIT"
] | null | null | null | #Faça um programa que leia a largura e a altura de uma parede em metros, calcule a sua área e a quantidade de tinta
# necessária para pintá-la, sabendo que cada litro de tinta pinta uma área de 2 metros quadrados.
# Read the wall's width and height in metres (prompts are in Portuguese).
largura = float(input('Informe a largura da parede: '))
altura = float(input('Informe a altura da parede: '))
# Wall area in square metres; one litre of paint covers 2 m².
area = largura * altura
tinta = area / 2
print('Sua parede tem a dimensão {}x{} e sua área é de {}m².\nPara pintar essa parede, você precisará de {}l de tinta.'.format(largura, altura, area, tinta))
| 52.5 | 157 | 0.731429 |
11a19d9ede3e9306325e98e9f8fcdeb20a0e8362 | 10,657 | py | Python | starks/fft.py | computablelabs/starks | f8e45d56c3934a1cf24220b58e073434379e3d78 | [
"MIT"
] | 9 | 2018-11-30T18:53:54.000Z | 2021-11-15T20:59:08.000Z | starks/fft.py | computablelabs/starks | f8e45d56c3934a1cf24220b58e073434379e3d78 | [
"MIT"
] | 13 | 2018-11-30T00:13:49.000Z | 2019-04-04T20:14:29.000Z | starks/fft.py | computablelabs/starks | f8e45d56c3934a1cf24220b58e073434379e3d78 | [
"MIT"
] | 1 | 2020-05-14T20:35:48.000Z | 2020-05-14T20:35:48.000Z | from typing import List
from starks.numbertype import Field
from starks.numbertype import FieldElement
from starks.numbertype import Vector
from starks.numbertype import Poly
from starks.polynomial import polynomials_over
from starks.modp import IntegersModP
def int_to_bin_string(i):
if i == 0:
return "0"
s = ''
while i:
if i & 1 == 1:
s = "1" + s
else:
s = "0" + s
i //= 2
return s
class FFT(object):
"""Abstract class that specifies a FFT solver. The additive FFT parts are implemented based on
Shuhong Gao and Todd D. Mateer. Additive fast fourier transforms over finite fields"""
def __init__(self):
raise NotImplementedError
def fft(self, poly: Poly) -> List[FieldElement]:
"""The FFT efficiently evaluates a polynomial on many field elements."""
raise NotImplementedError
def inv_fft(self, values: List[FieldElement]) -> Poly:
"""Converts a polynomial represented as evaluations on m points to coefficients."""
raise NotImplementedError
class Additive_FFT(FFT):
  """Additive FFT over GF(2^m), after Gao & Mateer.

  Shuhong Gao and Todd D. Mateer, "Additive fast Fourier transforms over
  finite fields": polynomials are evaluated over the additive span of a
  basis beta_1, ..., beta_m rather than over multiplicative roots of unity.
  """

  def __init__(self, field):
    """Args:
        field: constructor for GF(2^m) field elements, used throughout.
    """
    self.field = field

  def Taylor_Expansion(self, Polys, n):
    """Taylor expansion of f at (x^2 - x), per Gao-Mateer with t = 2.

    Splits f into halves g0, g1 and recurses on both.

    NOTE(review): for n > 2 this returns a (possibly nested) tuple of
    sub-expansions rather than a flat sequence — confirm callers expect
    that shape.
    """
    # For t = 2, an input of size <= t is already its own expansion.
    if n <= 2:
      return Polys

    # Find k such that t * 2^k < n <= 2 * t * 2^k (t = 2); exactly one x
    # in range satisfies the condition.
    for x in range(n):
      if 2**(x+1) < n and 2**(x+2) >= n:
        k = x

    # Split f(x) into three blocks f0, f1, f2 where
    # f(x) = f0(x) + x^{t*2^k} * (f1(x) + x^{(t-1)*2^k} * f2(x)).
    polysOver = polynomials_over(IntegersModP(2))
    list_f0 = []
    for i in range(2**(k+1)):
      if i > Polys.poly.degree():
        list_f0.append(0)
      else:
        list_f0.append(int(str(Polys.poly.coefficients[i])[0]))

    list_f1 = []
    for i in range(2**(k)):
      if 2**(k+1)+i > Polys.poly.degree():
        list_f1.append(0)
      else:
        list_f1.append(int(str(Polys.poly.coefficients[2**(k+1)+i])[0]))

    list_f2 = []
    for i in range(2**k):
      if 2**(k+1)+2**k+i > Polys.poly.degree():
        list_f2.append(0)
      else:
        list_f2.append(int(str(Polys.poly.coefficients[2**(k+1)+2**k+i])[0]))

    f0 = self.field(polysOver(list_f0))
    f1 = self.field(polysOver(list_f1))
    f2 = self.field(polysOver(list_f2))

    # h = f1 + f2, g0 = f0 + x^{2^k} * h, g1 = h + x^{(t-1)*2^k} * f2.
    h = f1 + f2
    twoK = []
    for i in range(2**(k)):
      twoK.append(0)
    twoK.append(1)
    f_twoK = self.field(polysOver(twoK))
    g0 = f0 + f_twoK * h
    g1 = h + f_twoK * f2

    # Recurse on both halves.
    # BUGFIX: use integer floor division. The original passed n / 2 (a
    # float), which made range(n) raise TypeError one recursion level
    # deeper whenever n >= 8.
    V1 = self.Taylor_Expansion(g0, n // 2)
    V2 = self.Taylor_Expansion(g1, n // 2)
    return V1, V2

  def adfft(self, Polys, m, affine_beta):
    """Evaluates Polys over the 2^m points spanned by affine_beta.

    Returns the evaluations w_0, ..., w_{2^m - 1} (a pair when m == 1).
    """
    polysOver = polynomials_over(IntegersModP(2))

    # Evaluate f at beta_1 ...
    f1 = self.field(polysOver([0]))
    for i in range(Polys.poly.degree()+1):
      if str(Polys.poly.coefficients[i])[0] == '1':
        f1 = f1 + (affine_beta[0])**i

    # ... and at 0.
    # NOTE(review): this loop body is a no-op (f2 = f2), so f2 is always
    # the zero element; f(0) should equal the constant coefficient —
    # confirm whether this is intentional.
    f2 = self.field(polysOver([0]))
    for i in range(Polys.poly.degree()+1):
      if str(Polys.poly.coefficients[i])[0] == '1':
        f2 = f2

    if m == 1:
      return f1, f2

    # g(x) = f(beta_m * x): change of variable to normalize the basis.
    g = self.field(polysOver([0]))
    x = self.field(polysOver([0, 1]))
    x = x * affine_beta[m-1]
    for i in range(Polys.poly.degree()+1):
      if str(Polys.poly.coefficients[i])[0] == '1':
        g = g + x**i

    # Taylor-expand g at (x^2 - x) into halves g0, g1.
    g0, g1 = self.Taylor_Expansion(g, 2**m)

    # gamma_i = beta_i * beta_m^-1 and delta_i = gamma_i^2 - gamma_i for
    # i = 1, ..., m-1; G = <gamma_1, ..., gamma_{m-1}> and
    # D = <delta_1, ..., delta_{m-1}>.
    gamma = []
    beta_m_I = affine_beta[m-1].inverse()
    for i in range(m-1):
      gamma.append(affine_beta[i] * beta_m_I)
    delta = []
    for i in range(m-1):
      delta.append(gamma[i]**2 - gamma[i])

    G = []
    for i in range(2**(m-1)):
      # The bits of i select which gamma_j enter this subset sum.
      # NOTE(review): int_to_bin_string is most-significant-bit first, so
      # string position j is not consistently gamma_j across all i —
      # verify the ordering matches the recursion's output order.
      binary = int_to_bin_string(i)
      temp = self.field(polysOver([0]))
      for j in range(len(binary)):
        if binary[j] == '1':
          temp = temp + gamma[j]
      G.append(temp)
    D = delta

    # Recursively evaluate both Taylor halves over the smaller basis.
    u = self.adfft(g0, m-1, D)
    v = self.adfft(g1, m-1, D)

    # Butterfly: w_i = u_i + G[i] * v_i and w_{i+2^{m-1}} = w_i + v_i
    # for i = 0, ..., 2^{m-1} - 1.
    w1 = []
    w2 = []
    for i in range(2**(m-1)):
      w1.append(u[i] + G[i]*v[i])
      w2.append(w1[i] + v[i])
    w = []
    for i in range(len(w1)):
      w.append(w1[i])
    for i in range(len(w2)):
      w.append(w2[i])
    return w

  def adfft_inverse(self, x, y, m):
    """Interpolates the polynomial through points (x_i, y_i); inverse of adfft.

    x must hold the 2^m evaluation points in adfft order; the additive
    basis is recovered as beta_i = x[2^i].
    """
    # BUGFIX: polysOver must exist before the m == 1 base case below; the
    # original defined it only after that branch, raising NameError for
    # any two-point interpolation with distinct abscissae.
    polysOver = polynomials_over(IntegersModP(2))

    # Base case: interpolate the line through two points.
    if m == 1:
      if x[0] == x[1]:
        # NOTE(review): degenerate duplicate-abscissa case — returning the
        # point instead of a polynomial looks questionable; confirm intent.
        return x[0]
      else:
        return ((y[1]-y[0])/(x[1]-x[0]))*(self.field(polysOver([0, 1]))-x[0])+y[0]

    # Recover the additive basis from the evaluation points: beta_i = x[2^i].
    beta = []
    for i in range(m):
      beta.append(x[2**i])

    # gamma_i = beta_i * beta_m^-1 and delta_i = gamma_i^2 - gamma_i for
    # i = 1, ..., m-1; G and D are their subset sums / span.
    gamma = []
    Ibeta = beta[-1].inverse()
    for i in range(m-1):
      gamma.append(beta[i] * Ibeta)
    delta = []
    for i in range(m-1):
      delta.append(gamma[i]*gamma[i] - gamma[i])

    G = []
    for i in range(2**(m-1)):
      binary = int_to_bin_string(i)
      temp = self.field(polysOver([0]))
      for j in range(len(binary)):
        if binary[j] == '1':
          temp = temp + gamma[j]
      G.append(temp)
    D = delta

    # Undo the butterfly: v_i = y_{i+2^{m-1}} - y_i, u_i = y_i - G_i * v_i.
    v = []
    u = []
    for i in range(2**(m-1)):
      v.append(y[i+2**(m-1)] - y[i])
      u.append(y[i] - G[i]*v[i])

    # Evaluation points for the half-size recursive problems.
    x1 = []
    for i in range(2**(m-1)):
      binary = int_to_bin_string(i+1)
      temp = self.field(polysOver([0]))
      for j in range(len(binary)):
        if binary[j] == '1':
          temp = temp + D[j]
      x1.append(temp)

    # Recursively interpolate the two Taylor halves.
    g_0 = self.adfft_inverse(x1, u, m-1)
    g_1 = self.adfft_inverse(x1, v, m-1)

    # Recombine via the inverse Taylor expansion with the variable change
    # x~ = x * beta_m^-1 folded in: g = sum_i term_i * (x~^2 - x~)^i.
    g = self.field(polysOver([0]))
    g_right_tempp = self.field(polysOver([0, 1])) * Ibeta
    g_right_temp = g_right_tempp * g_right_tempp - g_right_tempp
    g_right = []
    g_right.append(self.field(polysOver([1])))
    # multiplier[k] encodes the factor contributed by a (g0_i, g1_i) bit pair.
    multiplier = []
    multiplier.append(self.field(polysOver([0])))
    multiplier.append(g_right_tempp)
    multiplier.append(self.field(polysOver([1])))
    multiplier.append(self.field(polysOver([1])) + g_right_tempp)
    for i in range(2**(m-1)):
      g_right.append(g_right[-1]*g_right_temp)
    for i in range(2**(m-1)):
      if i <= g_0.poly.degree():
        g0I = int(g_0.poly.coefficients[i])
      else:
        g0I = 0
      if i <= g_1.poly.degree():
        g1I = int(g_1.poly.coefficients[i])
      else:
        g1I = 0
      # NOTE(review): the index g0I + 2**g1I maps the (0, 0) bit pair to
      # slot 1 (a nonzero factor) rather than slot 0; 2*g0I + g1I looks
      # intended given the table layout — verify before relying on this.
      g = g + multiplier[g0I+2**g1I] * g_right[i]
    return g
class NonBinaryFFT(FFT):
  """FFT solver for finite fields whose characteristic is not 2."""

  def __init__(self, field, root_of_unity):
    self.field = field
    self.root_of_unity = root_of_unity
    self.polysOver = polynomials_over(self.field).factory

  def fft(self, poly: Poly) -> List[FieldElement]:
    """Evaluates `poly` at the powers of the configured root of unity."""
    return fft_1d(self.field, poly.coefficients, self.field.p,
                  self.root_of_unity, inv=False)

  def inv_fft(self, values: List[FieldElement]) -> Poly:
    """Interpolates evaluations back into a coefficient-form polynomial."""
    return self.polysOver(
        fft_1d(self.field, values, self.field.p, self.root_of_unity,
               inv=True))
class BinaryFFT(FFT):
  """FFT solver for finite fields of characteristic 2.

  Intended to implement the basis and algorithm from
  https://arxiv.org/pdf/1404.3458.pdf; both directions are still stubs.
  """

  def fft(self, poly: Poly) -> List[FieldElement]:
    """Not implemented yet."""
    raise NotImplementedError

  def inv_fft(self, values: List[FieldElement]) -> Poly:
    """Not implemented yet."""
    raise NotImplementedError
def _simple_ft(vals: List[FieldElement], roots_of_unity: FieldElement) -> List[FieldElement]:
  """Quadratic-time discrete transform used as the FFT recursion base case.

  The recursive FFT halves its input repeatedly and hands small lists to
  this direct O(L^2) evaluation.
  """
  size = len(roots_of_unity)
  # out[i] = sum_j vals[j] * w^(i*j); sum() starts from 0 exactly like the
  # original accumulator.
  return [
      sum(vals[j] * roots_of_unity[(i * j) % size] for j in range(size))
      for i in range(size)
  ]
def _fft(vals: List[FieldElement], roots_of_unity: FieldElement) -> List[FieldElement]:
  """Recursive radix-2 Cooley-Tukey step over the given roots of unity."""
  # Small inputs go straight to the quadratic base case.
  if len(vals) <= 4:
    return _simple_ft(vals, roots_of_unity)
  # Split into even/odd-indexed halves; both use the squared-root sequence.
  evens = _fft(vals[::2], roots_of_unity[::2])
  odds = _fft(vals[1::2], roots_of_unity[::2])
  half = len(evens)
  out = [0] * len(vals)
  for i, (even, odd) in enumerate(zip(evens, odds)):
    twiddled = odd * roots_of_unity[i]
    out[i] = even + twiddled
    out[i + half] = even - twiddled
  return out
def fft_1d(field: Field, vals: List[FieldElement], modulus: int, root_of_unity: FieldElement, inv: bool = False) -> List[FieldElement]:
  """Computes FFT for one dimensional inputs.

  Args:
    field: field constructor; field(1) must be the multiplicative identity.
    vals: coefficients (forward) or evaluations (inverse); zero-padded up
      to the multiplicative order of root_of_unity.
    modulus: the field's prime modulus, used for the Fermat inverse below.
    root_of_unity: a primitive n-th root of unity in the field.
    inv: if True, perform the inverse transform.
  """
  # Build up roots of unity: [1, w, w^2, ...]; the loop stops when the
  # cycle returns to 1, so the identity ends up at both ends of the list.
  rootz = [field(1), root_of_unity]
  while rootz[-1] != field(1):
    rootz.append((rootz[-1] * root_of_unity))
  # Fill in vals with zeroes if needed so its length matches the number of
  # distinct roots (len(rootz) - 1).
  if len(rootz) > len(vals) + 1:
    vals = vals + [0] * (len(rootz) - len(vals) - 1)
  if inv:
    # Inverse FFT: run the transform over the reversed root order
    # (rootz[:0:-1] drops the duplicated leading 1), then scale by 1/n
    # computed via Fermat's little theorem: n^(p-2) mod p.
    invlen = pow(len(vals), modulus - 2, modulus)
    return [(x * invlen) for x in _fft(vals, rootz[:0:-1])]
  else:
    # Regular FFT
    return _fft(vals, rootz[:-1])
def mul_polys(a: List[FieldElement], b: List[FieldElement], root_of_unity: FieldElement) -> List[FieldElement]:
  """Multiplies two coefficient lists via pointwise product in Fourier space."""
  # Enumerate the cycle of the root of unity (1 appears at both ends).
  roots = [1, root_of_unity]
  while roots[-1] != 1:
    roots.append((roots[-1] * root_of_unity))
  # Zero-pad both inputs to the number of distinct roots.
  order = len(roots) - 1
  if order > len(a):
    a = a + [0] * (order - len(a))
  if order > len(b):
    b = b + [0] * (order - len(b))
  # Transform, multiply pointwise, and transform back over the reversed
  # root order.
  fa = _fft(a, roots[:-1])
  fb = _fft(b, roots[:-1])
  pointwise = [(va * vb) for va, vb in zip(fa, fb)]
  return _fft(pointwise, roots[:0:-1])
8144bcbaa5d9ec05a1eca37e5375d74eecb29936 | 1,327 | py | Python | tests/v2/test_incident_team_included_items.py | MichaelTROEHLER/datadog-api-client-python | 12c46626622fb1277bb1e172753b342c671348bd | [
"Apache-2.0"
] | null | null | null | tests/v2/test_incident_team_included_items.py | MichaelTROEHLER/datadog-api-client-python | 12c46626622fb1277bb1e172753b342c671348bd | [
"Apache-2.0"
] | null | null | null | tests/v2/test_incident_team_included_items.py | MichaelTROEHLER/datadog-api-client-python | 12c46626622fb1277bb1e172753b342c671348bd | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
import sys
import unittest
import datadog_api_client.v2
from datadog_api_client.v2.model.user import User
from datadog_api_client.v2.model.user_attributes import UserAttributes
from datadog_api_client.v2.model.user_response_relationships import UserResponseRelationships
from datadog_api_client.v2.model.users_type import UsersType
# Inject the referenced model classes into this module's globals so the
# generated model machinery can resolve these type names lazily at runtime.
globals()['User'] = User
globals()['UserAttributes'] = UserAttributes
globals()['UserResponseRelationships'] = UserResponseRelationships
globals()['UsersType'] = UsersType
from datadog_api_client.v2.model.incident_team_included_items import IncidentTeamIncludedItems
class TestIncidentTeamIncludedItems(unittest.TestCase):
    """Unit test stubs for the IncidentTeamIncludedItems model."""

    def setUp(self):
        """No fixtures are required for these stubs."""
        pass

    def tearDown(self):
        """No cleanup is required for these stubs."""
        pass

    def testIncidentTeamIncludedItems(self):
        """Test IncidentTeamIncludedItems"""
        # FIXME: construct object with mandatory attributes with example values
        # model = IncidentTeamIncludedItems()  # noqa: E501
        pass
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| 32.365854 | 108 | 0.77694 |
a5df9e610a08c8f9293fd65b60d8f55b6b542052 | 10,671 | py | Python | tests/core/test_cost_calculation.py | STATION-I/staicoin-blockchain | b8686c75dd5fe7883115d9613858c9c8cadfc4a7 | [
"Apache-2.0"
] | 10 | 2021-10-02T18:33:56.000Z | 2021-11-14T17:10:48.000Z | tests/core/test_cost_calculation.py | STATION-I/staicoin-blockchain | b8686c75dd5fe7883115d9613858c9c8cadfc4a7 | [
"Apache-2.0"
] | 14 | 2021-10-07T22:10:15.000Z | 2021-12-21T09:13:49.000Z | tests/core/test_cost_calculation.py | STATION-I/staicoin-blockchain | b8686c75dd5fe7883115d9613858c9c8cadfc4a7 | [
"Apache-2.0"
] | 6 | 2021-10-29T19:36:59.000Z | 2021-12-19T19:52:57.000Z | import logging
import pathlib
import time
import pytest
from clvm_tools import binutils
from stai.consensus.condition_costs import ConditionCost
from stai.consensus.cost_calculator import NPCResult
from stai.full_node.bundle_tools import simple_solution_generator
from stai.full_node.mempool_check_conditions import get_name_puzzle_conditions, get_puzzle_and_solution_for_coin
from stai.types.blockchain_format.program import Program, SerializedProgram
from stai.types.generator_types import BlockGenerator
from stai.wallet.puzzles import p2_delegated_puzzle_or_hidden_puzzle
from tests.setup_nodes import test_constants
from .make_block_generator import make_block_generator
# Throw-away destination puzzle hash for test spends.
# NOTE(review): this is 32 ASCII '0' bytes (0x30), not 32 zero bytes —
# presumably fine for a burn address, but confirm it is intentional.
BURN_PUZZLE_HASH = b"0" * 32
# Small reference block built once at import time; its hash also names the
# large-block cache file (see large_block_generator).
SMALL_BLOCK_GENERATOR = make_block_generator(1)

log = logging.getLogger(__name__)
def large_block_generator(size):
    """Return the serialized bytes of a block generator with `size` coins.

    Building a big block is slow, so the result is read from a cache file
    next to this module when one exists. The cache file name embeds a hash
    of a small reference block: if the block-building algorithm ever
    changes, the name changes too and the stale cache is ignored.
    """
    cache_tag = SMALL_BLOCK_GENERATOR.program.get_tree_hash().hex()[:16]
    cache_dir = pathlib.Path(__file__).absolute().parent
    cache_path = cache_dir / f"large-block-{cache_tag}-{size}.hex"
    try:
        with open(cache_path) as cache_file:
            return bytes.fromhex(cache_file.read())
    except FileNotFoundError:
        blob = bytes(make_block_generator(size).program)
        # TODO: Re-enable large-block*.hex but cache in ~/.stai/subdir
        # with open(cache_path, "w") as f:
        #     f.write(blob.hex())
        return blob
class TestCostCalculation:
    # End-to-end checks of CLVM generator cost accounting: cost formulas,
    # mempool (strict) vs. consensus mode, max-cost enforcement, and rough
    # wall-clock budgets for generator parsing and puzzle execution.

    @pytest.mark.asyncio
    async def test_basics(self, softfork_height, bt):
        """Verifies the exact cost of a simple coinbase spend: CLVM cost plus
        per-condition costs plus per-byte cost of the serialized program."""
        wallet_tool = bt.get_pool_wallet_tool()
        ph = wallet_tool.get_new_puzzlehash()
        num_blocks = 3
        blocks = bt.get_consecutive_blocks(
            num_blocks, [], guarantee_transaction_block=True, pool_reward_puzzle_hash=ph, farmer_reward_puzzle_hash=ph
        )
        # Pick the 250000000000-mojo pool reward coin paid to our puzzle hash.
        coinbase = None
        for coin in blocks[2].get_included_reward_coins():
            if coin.puzzle_hash == ph and coin.amount == 250000000000:
                coinbase = coin
                break
        assert coinbase is not None
        spend_bundle = wallet_tool.generate_signed_transaction(
            coinbase.amount,
            BURN_PUZZLE_HASH,
            coinbase,
        )
        assert spend_bundle is not None
        program: BlockGenerator = simple_solution_generator(spend_bundle)

        npc_result: NPCResult = get_name_puzzle_conditions(
            program,
            test_constants.MAX_BLOCK_COST_CLVM,
            cost_per_byte=test_constants.COST_PER_BYTE,
            mempool_mode=False,
            height=softfork_height,
        )

        assert npc_result.error is None
        # The serialized generator size feeds into the byte-cost term below.
        assert len(bytes(program.program)) == 433

        coin_name = npc_result.npc_list[0].coin_name
        error, puzzle, solution = get_puzzle_and_solution_for_coin(
            program, coin_name, test_constants.MAX_BLOCK_COST_CLVM
        )
        assert error is None

        # 404560 is the raw CLVM execution cost of this particular program.
        assert (
            npc_result.cost
            == 404560
            + ConditionCost.CREATE_COIN.value
            + ConditionCost.AGG_SIG.value
            + len(bytes(program.program)) * test_constants.COST_PER_BYTE
        )

        # Create condition + agg_sig_condition + length + cpu_cost
        assert (
            npc_result.cost
            == ConditionCost.CREATE_COIN.value
            + ConditionCost.AGG_SIG.value
            + len(bytes(program.program)) * test_constants.COST_PER_BYTE
            + 404560  # clvm cost
        )

    @pytest.mark.asyncio
    async def test_mempool_mode(self, softfork_height, bt):
        """A generator with an unusual (but valid) structure must be rejected
        in strict mempool mode yet accepted in consensus mode."""
        wallet_tool = bt.get_pool_wallet_tool()
        ph = wallet_tool.get_new_puzzlehash()

        num_blocks = 3
        blocks = bt.get_consecutive_blocks(
            num_blocks, [], guarantee_transaction_block=True, pool_reward_puzzle_hash=ph, farmer_reward_puzzle_hash=ph
        )

        coinbase = None
        for coin in blocks[2].get_included_reward_coins():
            if coin.puzzle_hash == ph:
                coinbase = coin
                break
        assert coinbase is not None
        spend_bundle = wallet_tool.generate_signed_transaction(
            coinbase.amount,
            BURN_PUZZLE_HASH,
            coinbase,
        )
        assert spend_bundle is not None

        # Hand-assemble a generator that spends a synthetic coin; the odd
        # shape is what strict (mempool) parsing is expected to reject.
        pk = bytes.fromhex(
            "88bc9360319e7c54ab42e19e974288a2d7a817976f7633f4b43f36ce72074e59c4ab8ddac362202f3e366f0aebbb6280"
        )
        puzzle = p2_delegated_puzzle_or_hidden_puzzle.puzzle_for_pk(pk)
        disassembly = binutils.disassemble(puzzle)
        program = SerializedProgram.from_bytes(
            binutils.assemble(
                f"(q ((0x3d2331635a58c0d49912bc1427d7db51afe3f20a7b4bcaffa17ee250dcbcbfaa {disassembly} 300"
                f" (() (q . ((65 '00000000000000000000000000000000' 0x0cbba106e000))) ()))))"
            ).as_bin()
        )
        generator = BlockGenerator(program, [], [])
        npc_result: NPCResult = get_name_puzzle_conditions(
            generator,
            test_constants.MAX_BLOCK_COST_CLVM,
            cost_per_byte=test_constants.COST_PER_BYTE,
            mempool_mode=True,
            height=softfork_height,
        )
        assert npc_result.error is not None
        npc_result = get_name_puzzle_conditions(
            generator,
            test_constants.MAX_BLOCK_COST_CLVM,
            cost_per_byte=test_constants.COST_PER_BYTE,
            mempool_mode=False,
            height=softfork_height,
        )
        assert npc_result.error is None

        coin_name = npc_result.npc_list[0].coin_name
        error, puzzle, solution = get_puzzle_and_solution_for_coin(
            generator, coin_name, test_constants.MAX_BLOCK_COST_CLVM
        )
        assert error is None

    @pytest.mark.asyncio
    async def test_clvm_mempool_mode(self, softfork_height):
        """Unknown CLVM operators fail in mempool mode but are treated as
        returning () in consensus mode."""
        block = Program.from_bytes(bytes(SMALL_BLOCK_GENERATOR.program))
        disassembly = binutils.disassemble(block)
        # this is a valid generator program except the first clvm
        # if-condition, that depends on executing an unknown operator
        # ("0xfe"). In mempool mode, this should fail, but in non-mempool
        # mode, the unknown operator should be treated as if it returns ().
        program = SerializedProgram.from_bytes(binutils.assemble(f"(i (0xfe (q . 0)) (q . ()) {disassembly})").as_bin())
        generator = BlockGenerator(program, [], [])
        npc_result: NPCResult = get_name_puzzle_conditions(
            generator,
            test_constants.MAX_BLOCK_COST_CLVM,
            cost_per_byte=test_constants.COST_PER_BYTE,
            mempool_mode=True,
        )
        assert npc_result.error is not None
        npc_result = get_name_puzzle_conditions(
            generator,
            test_constants.MAX_BLOCK_COST_CLVM,
            cost_per_byte=test_constants.COST_PER_BYTE,
            mempool_mode=False,
            height=softfork_height,
        )
        assert npc_result.error is None

    @pytest.mark.asyncio
    async def test_tx_generator_speed(self, softfork_height):
        """Parsing a large cached generator (687 spends) should finish in
        under one second."""
        LARGE_BLOCK_COIN_CONSUMED_COUNT = 687
        generator_bytes = large_block_generator(LARGE_BLOCK_COIN_CONSUMED_COUNT)
        program = SerializedProgram.from_bytes(generator_bytes)

        start_time = time.time()
        generator = BlockGenerator(program, [], [])
        npc_result = get_name_puzzle_conditions(
            generator,
            test_constants.MAX_BLOCK_COST_CLVM,
            cost_per_byte=test_constants.COST_PER_BYTE,
            mempool_mode=False,
            height=softfork_height,
        )
        end_time = time.time()
        duration = end_time - start_time
        assert npc_result.error is None
        assert len(npc_result.npc_list) == LARGE_BLOCK_COIN_CONSUMED_COUNT
        log.info(f"Time spent: {duration}")

        assert duration < 1

    @pytest.mark.asyncio
    async def test_clvm_max_cost(self, softfork_height):
        """Running a generator with a max cost below its actual cost must
        error out (cost reported as 0); a higher limit must succeed."""
        block = Program.from_bytes(bytes(SMALL_BLOCK_GENERATOR.program))
        disassembly = binutils.disassemble(block)
        # this is a valid generator program except the first clvm
        # if-condition, that depends on executing an unknown operator
        # ("0xfe"). In mempool mode, this should fail, but in non-mempool
        # mode, the unknown operator should be treated as if it returns ().
        # the CLVM program has a cost of 391969

        program = SerializedProgram.from_bytes(
            binutils.assemble(f"(i (softfork (q . 10000000)) (q . ()) {disassembly})").as_bin()
        )

        # ensure we fail if the program exceeds the cost
        generator = BlockGenerator(program, [], [])
        npc_result: NPCResult = get_name_puzzle_conditions(
            generator,
            10000000,
            cost_per_byte=0,
            mempool_mode=False,
            height=softfork_height,
        )

        assert npc_result.error is not None
        assert npc_result.cost == 0

        # raise the max cost to make sure this passes
        # ensure we pass if the program does not exceeds the cost
        npc_result = get_name_puzzle_conditions(
            generator, 23000000, cost_per_byte=0, mempool_mode=False, height=softfork_height
        )

        assert npc_result.error is None
        assert npc_result.cost > 10000000

    @pytest.mark.asyncio
    async def test_standard_tx(self):
        """Benchmarks 1000 runs of a standard-transaction puzzle; they should
        finish in under three seconds."""
        # this isn't a real public key, but we don't care
        public_key = bytes.fromhex(
            "af949b78fa6a957602c3593a3d6cb7711e08720415dad83" "1ab18adacaa9b27ec3dda508ee32e24bc811c0abc5781ae21"
        )
        puzzle_program = SerializedProgram.from_bytes(p2_delegated_puzzle_or_hidden_puzzle.puzzle_for_pk(public_key))
        conditions = binutils.assemble(
            "((51 0x699eca24f2b6f4b25b16f7a418d0dc4fc5fce3b9145aecdda184158927738e3e 10)"
            " (51 0x847bb2385534070c39a39cc5dfdc7b35e2db472dc0ab10ab4dec157a2178adbf 0x00cbba106df6))"
        )
        solution_program = SerializedProgram.from_bytes(
            p2_delegated_puzzle_or_hidden_puzzle.solution_for_conditions(conditions)
        )

        time_start = time.time()
        total_cost = 0
        for i in range(0, 1000):
            cost, result = puzzle_program.run_with_cost(test_constants.MAX_BLOCK_COST_CLVM, solution_program)
            total_cost += cost

        time_end = time.time()
        duration = time_end - time_start

        log.info(f"Time spent: {duration}")
        assert duration < 3
| 38.945255 | 120 | 0.664324 |
d5a95cefe97edd354bdc78b420e18d3da3088cc8 | 399 | py | Python | goodpoop/api/tests/factories.py | camilamaia/good-poop-api | 4c1e4ed460387322b14ddd766c1de48d678c9186 | [
"MIT"
] | 1 | 2019-03-06T14:31:55.000Z | 2019-03-06T14:31:55.000Z | goodpoop/api/tests/factories.py | camilamaia/good-poop-api | 4c1e4ed460387322b14ddd766c1de48d678c9186 | [
"MIT"
] | 3 | 2019-02-04T12:43:54.000Z | 2019-02-11T16:35:11.000Z | goodpoop/api/tests/factories.py | camilamaia/good-poop-api | 4c1e4ed460387322b14ddd766c1de48d678c9186 | [
"MIT"
] | null | null | null | import factory
from goodpoop.api import models
class AccountFactory(factory.Factory):
class Meta:
model = models.Account
class RestroomFactory(factory.Factory):
class Meta:
model = models.Restroom
class CheckInFactory(factory.Factory):
class Meta:
model = models.CheckIn
class ReviewFactory(factory.Factory):
class Meta:
model = models.Review
| 17.347826 | 39 | 0.701754 |
6c42fbe20c0a2c51fbfa10087875675009c01310 | 10,377 | py | Python | pymatgen/analysis/defects/generators.py | tamuhey/pymatgen | cf1793f0af59844ea9222d973212e3ab83e58209 | [
"MIT"
] | 1 | 2018-10-10T02:37:10.000Z | 2018-10-10T02:37:10.000Z | pymatgen/analysis/defects/generators.py | tamuhey/pymatgen | cf1793f0af59844ea9222d973212e3ab83e58209 | [
"MIT"
] | null | null | null | pymatgen/analysis/defects/generators.py | tamuhey/pymatgen | cf1793f0af59844ea9222d973212e3ab83e58209 | [
"MIT"
] | 2 | 2020-04-30T14:19:12.000Z | 2021-07-30T08:24:48.000Z | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import six
import logging
from abc import ABCMeta, abstractmethod
from monty.json import MSONable
from pymatgen.core import PeriodicSite
from pymatgen.analysis.bond_valence import BVAnalyzer
from pymatgen.analysis.defects.core import Vacancy, Interstitial, Substitution
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.analysis.defects.utils import StructureMotifInterstitial, TopographyAnalyzer
__author__ = "Danny Broberg, Shyam Dwaraknath"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyam Dwaraknath"
__email__ = "shyamd@lbl.gov"
__status__ = "Development"
__date__ = "Mar 15, 2018"
"""
This module defines classes to generate point defect structures
"""
logger = logging.getLogger(__name__)
@six.add_metaclass(ABCMeta)
class DefectGenerator(MSONable):
    """
    Abstract base class for point-defect generators.

    Subclasses implement the iterator protocol: looping over an instance
    yields defect objects one at a time until the sequence is exhausted.
    """

    def __iter__(self):
        """A generator is its own iterator."""
        return self

    @abstractmethod
    def __next__(self):
        """
        Produce the next defect, or raise StopIteration when done.

        Must be implemented by concrete subclasses.
        """
        return
class VacancyGenerator(DefectGenerator):
    """
    Iterator over vacancy defects, producing one vacancy per set of
    periodically equivalent sites in the input structure.
    """

    def __init__(self, structure, include_bv_charge=False):
        """
        Initializes a Vacancy Generator

        Args:
            structure(Structure): pymatgen structure object
            include_bv_charge (bool): if True, each vacancy is assigned the
                negative of the bond-valence oxidation state of the removed
                site; otherwise the charge is 0.
        """
        self.structure = structure
        self.include_bv_charge = include_bv_charge

        # Group sites into symmetry-equivalent orbits so only one vacancy
        # per orbit is generated.
        symmetry_analyzer = SpacegroupAnalyzer(self.structure)
        self.symm_structure = symmetry_analyzer.get_symmetrized_structure()
        self.equiv_site_seq = list(self.symm_structure.equivalent_sites)

        self.struct_valences = None
        if self.include_bv_charge:
            self.struct_valences = BVAnalyzer().get_valences(self.structure)

    def __next__(self):
        """
        Returns the next vacancy in the sequence or
        raises StopIteration
        """
        if not self.equiv_site_seq:
            raise StopIteration

        equivalent_sites = self.equiv_site_seq.pop(0)
        representative = equivalent_sites[0]

        charge = 0.0
        if self.struct_valences:
            # Locate the representative site's index in the original
            # structure so its bond-valence charge can be looked up.
            site_index = self.structure.get_sites_in_sphere(
                representative.coords, 0.1, include_index=True)[0][2]
            charge = -1 * self.struct_valences[site_index]

        return Vacancy(self.structure, representative, charge=charge)
class SubstitutionGenerator(DefectGenerator):
    """
    Simple generator for substitution based on periodically
    equivalent sites

    note: an Antisite is considered a type of substitution
    """

    def __init__(self, structure, element):
        """
        Initializes a Substitution Generator
        note: an Antisite is considered a type of substitution

        Args:
            structure(Structure): pymatgen structure object
            element (str or Element or Specie): element for the substitution
        """
        self.structure = structure
        self.element = element

        # Find equivalent site list
        sga = SpacegroupAnalyzer(self.structure)
        self.symm_structure = sga.get_symmetrized_structure()

        self.equiv_sub = []
        for equiv_site_set in list(self.symm_structure.equivalent_sites):
            vac_site = equiv_site_set[0]
            if isinstance(element, six.string_types):
                # Compare against the species symbol when a string was
                # given. six.string_types also matches unicode on Python 2
                # (this module uses unicode_literals), where a bare ``str``
                # check would fail and every site — including same-species
                # ones — would incorrectly yield a "substitution".
                vac_specie = vac_site.specie.symbol
            else:
                vac_specie = vac_site.specie
            if element != vac_specie:
                # Only substitute when the new species differs from the
                # current occupant; place it at the same cartesian position.
                defect_site = PeriodicSite(element, vac_site.coords, structure.lattice, coords_are_cartesian=True)
                sub = Substitution(structure, defect_site)
                self.equiv_sub.append(sub)

    def __next__(self):
        """
        Returns the next Substitution in the sequence or
        raises StopIteration
        """
        if len(self.equiv_sub) > 0:
            return self.equiv_sub.pop(0)
        else:
            raise StopIteration
class InterstitialGenerator(DefectGenerator):
    """
    Generator for interstitials placed where the interstitialcy is
    coordinated by its nearest neighbors in a way that resembles basic
    structure motifs (e.g., tetrahedra, octahedra). The algorithm,
    InFiT (Interstitialcy Finding Tool), was introduced by
    Nils E. R. Zimmermann, Matthew K. Horton, Anubhav Jain,
    and Maciej Haranczyk (Front. Mater., 4, 34, 2017),
    and is used by the Python Charged Defect Toolkit
    (PyCDT: D. Broberg et al., Comput. Phys. Commun., in press, 2018).
    """

    def __init__(self, structure, element):
        """
        Initializes an Interstitial generator using structure motifs

        Args:
            structure (Structure): pymatgen structure object
            element (str or Element or Specie): element for the interstitial
        """
        self.structure = structure
        self.element = element

        finder = StructureMotifInterstitial(self.structure, self.element)
        self.defect_sites = list(finder.enumerate_defectsites())

        # Multiplicities are reported relative to the primitive cell, so the
        # conventional-to-primitive site ratio is needed to rescale them.
        spa = SpacegroupAnalyzer(self.structure, symprec=1e-2)
        prim_struct = spa.get_primitive_standard_structure()
        conv_prim_rat = int(self.structure.num_sites / prim_struct.num_sites)
        self.multiplicities = [
            int(finder.get_defectsite_multiplicity(idx) / conv_prim_rat)
            for idx in range(len(self.defect_sites))
        ]

        self.count_def = 0  # running index used to label generated defects

    def __next__(self):
        """
        Returns the next interstitial or
        raises StopIteration
        """
        if not self.defect_sites:
            raise StopIteration

        candidate = self.defect_sites.pop(0)
        multiplicity = self.multiplicities.pop(0)
        self.count_def += 1
        label = 'InFiT' + str(self.count_def)
        return Interstitial(self.structure, candidate,
                            site_name=label, multiplicity=multiplicity)
class VoronoiInterstitialGenerator(DefectGenerator):
    """
    Generator for interstitials based on a simple Voronoi analysis
    """

    def __init__(self, structure, element):
        """
        Initializes an Interstitial generator using Voronoi sites

        Args:
            structure (Structure): pymatgen structure object
            element (str or Element or Specie): element for the interstitial
        """
        self.structure = structure
        self.element = element

        framework = list(self.structure.symbol_set)
        topography = TopographyAnalyzer(self.structure, framework, [], check_volume=False)
        topography.cluster_nodes()
        topography.remove_collisions()

        # Append every Voronoi node to a copy of the structure, then use a
        # symmetry analysis to collapse equivalent candidates.
        trial_structure = self.structure.copy()
        for node in topography.vnodes:
            trial_structure.append(self.element, node.frac_coords, coords_are_cartesian=False)

        symmetry_finder = SpacegroupAnalyzer(trial_structure, symprec=1e-1)
        equiv_sites_list = symmetry_finder.get_symmetrized_structure().equivalent_sites

        # Keep only the appended candidate sites; discard orbits that belong
        # to the host structure itself.
        self.equiv_site_seq = [
            site_list for site_list in equiv_sites_list
            if site_list[0] not in self.structure
        ]

        self.count_def = 0  # running index used to label generated defects

    def __next__(self):
        """
        Returns the next interstitial or
        raises StopIteration
        """
        if not self.equiv_site_seq:
            raise StopIteration

        site_list = self.equiv_site_seq.pop(0)
        self.count_def += 1
        label = 'Voronoi' + str(self.count_def)
        return Interstitial(
            self.structure, site_list[0], site_name=label, multiplicity=len(site_list))
class SimpleChargeGenerator(DefectGenerator):
    """
    Extremely simple, limited charge generation scheme: exactly one charge
    state is produced per defect.

    Vacancies: bond-valence oxidation states are assigned and the negative
    of the vacant site's oxidation state is used as the single trial charge.
    Antisites/substitutions: the negative of the vacant site's oxidation
    state plus the likely charge of the substituting species (the one
    closest to zero).
    Interstitials: charge zero.
    """

    def __init__(self, defect):
        """
        Args:
            defect(Defect): pymatgen Defect object
        """
        self.defect = defect

        try:
            analyzer = BVAnalyzer()
            valences = analyzer.get_valences(self.defect.bulk_structure)
            site_index = self.defect.bulk_structure.get_sites_in_sphere(
                self.defect.site.coords, 0.1, include_index=True)[0][2]
            def_site_valence = valences[site_index]
        except Exception:
            # Best effort: bond-valence assignment can fail for some
            # structures, in which case fall back to a neutral site.
            def_site_valence = 0

        if isinstance(defect, Vacancy):
            charge = -1 * def_site_valence
        elif isinstance(defect, Substitution):
            # Minimize the difference with the host site species' likely
            # oxidation states.
            candidates = [ox - def_site_valence
                          for ox in self.defect.site.specie.oxidation_states]
            charge = min(candidates, key=abs)
        elif isinstance(defect, Interstitial):
            charge = 0
        else:
            raise ValueError("Defect Type not recognized.")
        self.charges = [charge]

    def __next__(self):
        """
        Returns the next defect type with the correct charge appended
        raises StopIteration
        """
        if not self.charges:
            raise StopIteration

        charge = self.charges.pop(0)
        defect = self.defect.copy()
        defect.set_charge(charge)
        return defect
| 35.782759 | 114 | 0.655103 |
dcd008107f4e6509d56300050afd9fa647319664 | 3,067 | py | Python | dtc/message_types/position_update.py | jseparovic/python-ws-dtc-client | fd3952cdaf7ab8c9d5a26ccf53b5e9acb3a9ea0f | [
"Apache-2.0"
] | 15 | 2020-04-26T05:25:53.000Z | 2022-02-11T19:38:42.000Z | dtc/message_types/position_update.py | jseparovic/python-ws-dtc-client | fd3952cdaf7ab8c9d5a26ccf53b5e9acb3a9ea0f | [
"Apache-2.0"
] | 2 | 2021-01-08T19:58:08.000Z | 2021-11-29T06:08:48.000Z | dtc/message_types/position_update.py | jseparovic/python-ws-dtc-client | fd3952cdaf7ab8c9d5a26ccf53b5e9acb3a9ea0f | [
"Apache-2.0"
] | 4 | 2020-11-23T13:38:01.000Z | 2021-12-27T13:21:06.000Z |
from dtc.enums.message_types import MessageTypes
from lib.base_message_type import BaseMessageType
class PositionUpdate(BaseMessageType):
    """
    DTC PositionUpdate message.

    Holds the fields of a position update and can be decoded from either
    the verbose named-field encoding or the compact positional 'F' array
    encoding used on the wire.
    """

    def __init__(self,
                 request_id=None,
                 total_number_messages=None,
                 message_number=None,
                 symbol=None,
                 exchange=None,
                 quantity=None,
                 average_price=None,
                 position_identifier=None,
                 trade_account=None,
                 no_positions=None,
                 unsolicited=None,
                 margin_requirement=None,
                 entry_date_time=None):
        self.Type = MessageTypes.POSITION_UPDATE
        self.RequestID = request_id
        self.TotalNumberMessages = total_number_messages
        self.MessageNumber = message_number
        self.Symbol = symbol
        self.Exchange = exchange
        self.Quantity = quantity
        self.AveragePrice = average_price
        self.PositionIdentifier = position_identifier
        self.TradeAccount = trade_account
        self.NoPositions = no_positions
        self.Unsolicited = unsolicited
        self.MarginRequirement = margin_requirement
        self.EntryDateTime = entry_date_time

    @staticmethod
    def from_message_short(message_obj):
        """Decode the compact encoding: positional fields under key 'F'."""
        packet = message_obj.get('F')
        # Positional layout of the compact wire format.
        values = {
            'request_id': packet[0],
            'total_number_messages': packet[1],
            'message_number': packet[2],
            'symbol': packet[3],
            'exchange': packet[4],
            'quantity': packet[5],
            'average_price': packet[6],
            'position_identifier': packet[7],
            'trade_account': packet[8],
            'no_positions': packet[9],
            'unsolicited': packet[10],
            'margin_requirement': packet[11],
            'entry_date_time': packet[12],
        }
        return PositionUpdate(**values)

    @staticmethod
    def from_message_long(message_obj):
        """Decode the verbose encoding: one named key per field."""
        # (constructor keyword, wire field name) pairs.
        field_map = (
            ('request_id', 'RequestID'),
            ('total_number_messages', 'TotalNumberMessages'),
            ('message_number', 'MessageNumber'),
            ('symbol', 'Symbol'),
            ('exchange', 'Exchange'),
            ('quantity', 'Quantity'),
            ('average_price', 'AveragePrice'),
            ('position_identifier', 'PositionIdentifier'),
            ('trade_account', 'TradeAccount'),
            ('no_positions', 'NoPositions'),
            ('unsolicited', 'Unsolicited'),
            ('margin_requirement', 'MarginRequirement'),
            ('entry_date_time', 'EntryDateTime'),
        )
        return PositionUpdate(
            **{kwarg: message_obj.get(key) for kwarg, key in field_map})

    @staticmethod
    def from_message(message_obj):
        """Pick the decoder based on the presence of the compact 'F' key."""
        decode = (PositionUpdate.from_message_short if 'F' in message_obj
                  else PositionUpdate.from_message_long)
        return decode(message_obj)

    @staticmethod
    def get_message_type_name():
        """Return the human-readable DTC message type name."""
        return "PositionUpdate"
| 36.951807 | 74 | 0.61852 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.